diff --git a/.mailmap b/.mailmap index 2875a92678..a34fc4823c 100644 --- a/.mailmap +++ b/.mailmap @@ -1,9 +1,11 @@ -# Generate AUTHORS: git log --format='%aN <%aE>' | sort -uf | grep -v vagrant-ubuntu-12 - +# Generate AUTHORS: git log --format='%aN <%aE>' | sort -uf + -Guillaume J. Charmes - +Guillaume J. Charmes + + + Thatcher Peskens dhrp @@ -15,8 +17,11 @@ Joffrey F Tim Terhorst Andy Smith - - + + + + + Thatcher Peskens @@ -38,3 +43,18 @@ Jean-Baptiste Barth Matthew Mueller Shih-Yuan Lee +Daniel Mizyrycki root +Jean-Baptiste Dalido + + + + + + + + + + + +Sven Dowideit ¨Sven <¨SvenDowideit@home.org.au¨> +unclejack diff --git a/.travis.yml b/.travis.yml index f6c83997aa..8a43d9a462 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,11 +13,18 @@ before_script: - sudo apt-get update -qq - sudo apt-get install -qq python-yaml - git remote add upstream git://github.com/dotcloud/docker.git - - git fetch --append --no-tags upstream refs/heads/master:refs/remotes/upstream/master + - upstream=master; + if [ "$TRAVIS_PULL_REQUEST" != false ]; then + upstream=$TRAVIS_BRANCH; + fi; + git fetch --append --no-tags upstream refs/heads/$upstream:refs/remotes/upstream/$upstream # sometimes we have upstream master already as origin/master (PRs), but other times we don't, so let's just make sure we have a completely unambiguous way to specify "upstream master" from here out +# but if it's a PR against non-master, we need that upstream branch instead :) + - sudo pip install -r docs/requirements.txt script: - hack/travis/dco.py - hack/travis/gofmt.py + - make -sC docs SPHINXOPTS=-q docs man # vim:set sw=2 ts=2: diff --git a/AUTHORS b/AUTHORS index 35cfc687bd..a54c6337cb 100644 --- a/AUTHORS +++ b/AUTHORS @@ -3,29 +3,42 @@ # # For a list of active project maintainers, see the MAINTAINERS file. 
# -Al Tobey -Alex Gaynor +Aanand Prasad +Aaron Feng +Abel Muiño Alexander Larsson Alexey Shamrin +Alex Gaynor +Alexis THOMAS +Al Tobey Andrea Luzzardi Andreas Savvides Andreas Tiefenthaler +Andrew Duckworth Andrew Macgregor Andrew Munsell Andrews Medina +Andy Chambers +andy diller Andy Rothfusz Andy Smith Anthony Bishopric +Anton Nikitin Antony Messerli +apocas Asbjørn Enge Barry Allard +Bartłomiej Piotrowski +Benoit Chesneau +Ben Sargent Ben Toews Ben Wiklund -Benoit Chesneau Bhiraj Butala Bouke Haarsma Brandon Liu -Brandon Philips +Brandon Philips +Brian Dorsey +Brian Goff Brian McCallister Brian Olsen Brian Shumate @@ -33,169 +46,298 @@ Briehan Lombaard Bruno Bigras Caleb Spare Calen Pennington +Carl X. Su Charles Hooper +Charles Lindsay +Chia-liang Kao +Chris St. Pierre Christopher Currie +Christopher Rigor +Christophe Troestler +Clayton Coleman Colin Dunklau Colin Rice +Cory Forsyth +cressie176 Dan Buch +Dan Hirsch +Daniel Exner Daniel Garcia Daniel Gasienica Daniel Mizyrycki +Daniel Norberg Daniel Nordberg Daniel Robinson Daniel Von Fange Daniel YC Lin +Danny Yates Darren Coxall +David Anderson David Calavera +David Mcanulty David Sissitka Deni Bertovic +Dinesh Subhraveti +dkumor +Dmitry Demeshchuk Dominik Honnef Don Spaulding -Dr Nic Williams Dražen Lučanin +Dr Nic Williams +Dustin Sallings +Edmund Wagner Elias Probst +Emil Hernvall Emily Rose Eric Hanchrow +Eric Lee Eric Myhre Erno Hopearuoho +eugenkrizo +Evan Krall Evan Phoenix Evan Wies +Eystein Måløy Stenberg ezbercih +Fabio Falci +Fabio Rehm Fabrizio Regini Faiz Khan Fareed Dudhia +Fernando Flavio Castelli Francisco Souza +Frank Macreery Frederick F. Kautz IV +Frederik Loeffert +Freek Kalter +Gabe Rosenhouse Gabriel Monroy +Galen Sampson Gareth Rushgrove +Gereon Frey +Gert van Valkenhoef Graydon Hoare Greg Thornton -Guillaume J. Charmes +grunny +Guillaume J. 
Charmes Gurjeet Singh Guruprasad Harley Laue Hector Castro Hunter Blanks +inglesp +Isaac Dupree Isao Jonas +Jake Moshenko +James Allen James Carr +James Mills James Turnbull +jaseg Jason McVetta Jean-Baptiste Barth +Jean-Baptiste Dalido Jeff Lindsay Jeremy Grosser +Jérôme Petazzoni +Jesse Dubay Jim Alateras Jimmy Cuadra +Joe Beda Joe Van Dyk Joffrey F Johan Euphrosine +Johannes 'fish' Ziemke +Johan Rydberg John Costa -Jon Wedaman +John Feminella +John Gardiner Myers +John Warwick Jonas Pfenniger Jonathan Mueller Jonathan Rudenberg +Jon Wedaman Joost Cassee Jordan Arentsen +Jordan Sissel Joseph Anthony Pasquale Holsten +Joseph Hager +Josh Hawn Josh Poimboeuf +JP Julien Barbier -Jérôme Petazzoni +Julien Dubois +Justin Force +Justin Plock Karan Lyons -Karl Grzeszczak +Karl Grzeszczak Kawsar Saiyeed Keli Hu Ken Cochrane Kevin Clark Kevin J. Lynagh +Keyvan Fatehi kim0 +Kim BKC Carlbacker Kimbro Staken Kiran Gangadharan Konstantin Pelykh Kyle Conroy Laurie Voss +Liang-Chi Hsieh +Lokesh Mandvekar Louis Opter +lukaspustina +Mahesh Tiyyagura Manuel Meurer -Manuel Woelker +Manuel Woelker +Marc Kuo Marco Hennings Marcus Farkas Marcus Ramberg +Marek Goldmann +Mark Allen Mark McGranaghan Marko Mikulicic Markus Fix +Martijn van Oosterhout Martin Redmond -Matt Apperson Mathieu Le Marec - Pasquet +Matt Apperson Matt Bachmann +Matt Haggard Matthew Mueller +mattymo +Maxime Petazzoni Maxim Treskin meejah -Michael Crosby +Michael Crosby Michael Gorsuch +Michael Stapelberg Miguel Angel Fernández Mike Gaffney +Mike Naberezny Mikhail Sobolev Mohit Soni Morten Siebuhr Nan Monnand Deng Nate Jones +Nathan Kleyn Nelson Chen Niall O'Higgins Nick Payne Nick Stenning Nick Stinemates +Nicolas Dudebout +Nicolas Kaiser Nolan Darilek odk- +Oguz Bilgic +Ole Reifschneider +O.S.Tezer +pandrew Pascal Borreli +pattichen Paul Bowsher Paul Hammond -Paul Liétar +Paul Lietar +Paul Morie Paul Nasrat +Paul +Peter Braden +Peter Waller Phil Spitler +Piergiuliano Bossi +Pierre-Alain RIVIERE Piotr Bogdan pysqz 
+Quentin Brossard +Rafal Jeczalik +Ramkumar Ramachandra Ramon van Alteren Renato Riccieri Santos Zannon +rgstephens Rhys Hiltner +Richo Healey +Rick Bradley Robert Obryk +Roberto G. Hashioka Roberto Hashioka +Rodrigo Vaz +Roel Van Nyen +Roger Peppe Ryan Fowler +Ryan O'Donnell +Ryan Seto Sam Alba Sam J Sharpe +Samuel Andaya Scott Bessler +Sean Cronin Sean P. Kane +Shawn Landden Shawn Siefkas Shih-Yuan Lee +shin- Silas Sewell +Simon Taranto +Sjoerd Langkemper Solomon Hykes Song Gao Sridatta Thatipamala Sridhar Ratnakumar Steeve Morin Stefan Praszalowicz -Sven Dowideit +sudosurootdev +Sven Dowideit +Sylvain Bellemare +tang0th +Tatsuki Sugiura +Tehmasp Chaudhri Thatcher Peskens Thermionix Thijs Terlouw Thomas Bikeev Thomas Frössman Thomas Hansen +Thomas LEVEIL Tianon Gravi +Tim Bosse Tim Terhorst Tobias Bieniek Tobias Schmidt Tobias Schwab +Todd Lunter Tom Hulihan Tommaso Visconti +Travis Cline Tyler Brock +Tzu-Jung Lee +Ulysse Carion unclejack +vgeta Victor Coisne Victor Lyuboslavsky -Victor Vieux +Victor Vieux +Vincent Batts Vincent Bernat +Vincent Woo +Vinod Kulkarni +Vitor Monteiro Vivek Agarwal Vladimir Kirillov +Vladimir Rutsky Walter Stanish +WarheadsSE Wes Morgan Will Dietz +William Delanoue +Will Rouesnel +Xiuming Chen Yang Bai +Yurii Rashkovskii +Zain Memon Zaiste! +Zilin Du +zimbatm diff --git a/CHANGELOG.md b/CHANGELOG.md index b416d450a2..e016472406 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,25 @@ # Changelog +## 0.8.0 (2014-02-04) + +#### Notable features since 0.7.0 + +* Images and containers can be removed much faster +* Building an image from source with docker build is now much faster +* The Docker daemon starts and stops much faster +* The memory footprint of many common operations has been reduced, by streaming files instead of buffering them in memory, fixing memory leaks, and fixing various suboptimal memory allocations +* Several race conditions were fixed, making Docker more stable under very high concurrency load. 
This makes Docker more stable and less likely to crash and reduces the memory footprint of many common operations +* All packaging operations are now built on the Go language’s standard tar implementation, which is bundled with Docker itself. This makes packaging more portable across host distributions, and solves several issues caused by quirks and incompatibilities between different distributions of tar +* Docker can now create, remove and modify larger numbers of containers and images graciously thanks to more aggressive releasing of system resources. For example the storage driver API now allows Docker to do reference counting on mounts created by the drivers +With the ongoing changes to the networking and execution subsystems of docker testing these areas have been a focus of the refactoring. By moving these subsystems into separate packages we can test, analyze, and monitor coverage and quality of these packages +* Many components have been separated into smaller sub-packages, each with a dedicated test suite. As a result the code is better-tested, more readable and easier to change + +* The ADD instruction now supports caching, which avoids unnecessarily re-uploading the same source content again and again when it hasn’t changed +* The new ONBUILD instruction adds to your image a “trigger” instruction to be executed at a later time, when the image is used as the base for another build +* Docker now ships with an experimental storage driver which uses the BTRFS filesystem for copy-on-write +* Docker is officially supported on Mac OSX +* The Docker daemon supports systemd socket activation + ## 0.7.6 (2014-01-14) #### Builder diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f318e41922..93c1d11b1f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -7,7 +7,7 @@ feels wrong or incomplete. ## Reporting Issues When reporting [issues](https://github.com/dotcloud/docker/issues) -on Github please include your host OS ( Ubuntu 12.04, Fedora 19, etc... 
) +on GitHub please include your host OS ( Ubuntu 12.04, Fedora 19, etc... ) and the output of `docker version` along with the output of `docker info` if possible. This information will help us review and fix your issue faster. @@ -45,7 +45,7 @@ else is working on the same thing. ### Create issues... -Any significant improvement should be documented as [a github +Any significant improvement should be documented as [a GitHub issue](https://github.com/dotcloud/docker/issues) before anybody starts working on it. @@ -115,16 +115,28 @@ can certify the below: ``` Docker Developer Grant and Certificate of Origin 1.1 -By making a contribution to the Docker Project ("Project"), I represent and warrant that: +By making a contribution to the Docker Project ("Project"), I represent and +warrant that: -a. The contribution was created in whole or in part by me and I have the right to submit the contribution on my own behalf or on behalf of a third party who has authorized me to submit this contribution to the Project; or +a. The contribution was created in whole or in part by me and I have the right +to submit the contribution on my own behalf or on behalf of a third party who +has authorized me to submit this contribution to the Project; or -b. The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right and authorization to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license) that I have identified in the contribution; or +b. 
The contribution is based upon previous work that, to the best of my +knowledge, is covered under an appropriate open source license and I have the +right and authorization to submit that work with modifications, whether +created in whole or in part by me, under the same open source license (unless +I am permitted to submit under a different license) that I have identified in +the contribution; or -c. The contribution was provided directly to me by some other person who represented and warranted (a) or (b) and I have not modified it. - -d. I understand and agree that this Project and the contribution are publicly known and that a record of the contribution (including all personal information I submit with it, including my sign-off record) is maintained indefinitely and may be redistributed consistent with this Project or the open source license(s) involved. +c. The contribution was provided directly to me by some other person who +represented and warranted (a) or (b) and I have not modified it. +d. I understand and agree that this Project and the contribution are publicly +known and that a record of the contribution (including all personal +information I submit with it, including my sign-off record) is maintained +indefinitely and may be redistributed consistent with this Project or the open +source license(s) involved. ``` then you just add a line to every git commit message: @@ -134,20 +146,14 @@ then you just add a line to every git commit message: using your real name (sorry, no pseudonyms or anonymous contributions.) One way to automate this, is customise your get ``commit.template`` by adding -the following to your ``.git/hooks/prepare-commit-msg`` script (needs -``chmod 755 .git/hooks/prepare-commit-msg`` ) in the docker checkout: +a ``prepare-commit-msg`` hook to your docker checkout: ``` - #!/bin/sh - # Auto sign all commits to allow them to be used by the Docker project. 
- # see https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md#sign-your-work - # - GH_USER=$(git config --get github.user) - SOB=$(git var GIT_AUTHOR_IDENT | sed -n "s/^\(.*>\).*$/Docker-DCO-1.1-Signed-off-by: \1 \(github: $GH_USER\)/p") - grep -qs "^$SOB" "$1" || echo "\n$SOB" >> "$1" - +curl -o .git/hooks/prepare-commit-msg https://raw.github.com/dotcloud/docker/master/contrib/prepare-commit-msg.hook && chmod +x .git/hooks/prepare-commit-msg ``` +* Note: the above script expects to find your GitHub user name in ``git config --get github.user`` + If you have any questions, please refer to the FAQ in the [docs](http://docs.docker.io) diff --git a/Dockerfile b/Dockerfile index 9da4e8f039..8eb2459215 100644 --- a/Dockerfile +++ b/Dockerfile @@ -24,23 +24,23 @@ # docker-version 0.6.1 -FROM stackbrew/ubuntu:12.04 +FROM ubuntu:13.10 MAINTAINER Tianon Gravi (@tianon) -# Add precise-backports to get s3cmd >= 1.1.0 (so we get ENV variable support in our .s3cfg) -RUN echo 'deb http://archive.ubuntu.com/ubuntu precise-backports main universe' > /etc/apt/sources.list.d/backports.list - # Packaged dependencies RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ apt-utils \ aufs-tools \ + automake \ + btrfs-tools \ build-essential \ curl \ dpkg-sig \ git \ iptables \ + libapparmor-dev \ + libcap-dev \ libsqlite3-dev \ - lxc \ mercurial \ reprepro \ ruby1.9.1 \ @@ -48,10 +48,14 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ s3cmd=1.1.0* \ --no-install-recommends +# Get and compile LXC 0.8 (since it is the most stable) +RUN git clone --no-checkout https://github.com/lxc/lxc.git /usr/local/lxc && cd /usr/local/lxc && git checkout -q lxc-0.8.0 +RUN cd /usr/local/lxc && ./autogen.sh && ./configure --disable-docs && make && make install + # Get lvm2 source for compiling statically -RUN git clone https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103 +RUN git clone 
--no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103 # see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags -# note: we can't use "git clone -b" above because it requires at least git 1.7.10 to be able to use that on a tag instead of a branch and we only have 1.7.9.5 +# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly # Compile and install lvm2 RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper @@ -64,19 +68,23 @@ ENV GOPATH /go:/go/src/github.com/dotcloud/docker/vendor RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 # Compile Go for cross compilation -ENV DOCKER_CROSSPLATFORMS darwin/amd64 darwin/386 -# TODO add linux/386 and linux/arm +ENV DOCKER_CROSSPLATFORMS linux/386 linux/arm darwin/amd64 darwin/386 +# (set an explicit GOARM of 5 for maximum compatibility) +ENV GOARM 5 RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done' # Grab Go's cover tool for dead-simple code coverage testing RUN go get code.google.com/p/go.tools/cmd/cover # TODO replace FPM with some very minimal debhelper stuff -RUN gem install --no-rdoc --no-ri fpm --version 1.0.1 +RUN gem install --no-rdoc --no-ri fpm --version 1.0.2 # Setup s3cmd config RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + VOLUME /var/lib/docker WORKDIR /go/src/github.com/dotcloud/docker diff --git a/MAINTAINERS b/MAINTAINERS index 4a6c0ec22c..895fba563a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6,4 +6,4 @@ Michael Crosby 
(@crosbymichael) api.go: Victor Vieux (@vieux) Dockerfile: Tianon Gravi (@tianon) Makefile: Tianon Gravi (@tianon) -Vagrantfile: Daniel Mizyrycki (@mzdaniel) +Vagrantfile: Cristian Staretu (@unclejack) diff --git a/Makefile b/Makefile index d5253d86ce..168707a80f 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -.PHONY: all binary build cross default docs shell test +.PHONY: all binary build cross default docs docs-build docs-shell shell test test-integration GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD) DOCKER_IMAGE := docker:$(GIT_BRANCH) @@ -16,18 +16,26 @@ binary: build cross: build $(DOCKER_RUN_DOCKER) hack/make.sh binary cross -docs: - docker build -rm -t "$(DOCKER_DOCS_IMAGE)" docs +docs: docs-build docker run -rm -i -t -p 8000:8000 "$(DOCKER_DOCS_IMAGE)" +docs-shell: docs-build + docker run -rm -i -t -p 8000:8000 "$(DOCKER_DOCS_IMAGE)" bash + test: build $(DOCKER_RUN_DOCKER) hack/make.sh test test-integration +test-integration: build + $(DOCKER_RUN_DOCKER) hack/make.sh test-integration + shell: build $(DOCKER_RUN_DOCKER) bash build: bundles docker build -rm -t "$(DOCKER_IMAGE)" . +docs-build: + docker build -rm -t "$(DOCKER_DOCS_IMAGE)" docs + bundles: mkdir bundles diff --git a/NOTICE b/NOTICE index fb6810bc28..d0e0639a5a 100644 --- a/NOTICE +++ b/NOTICE @@ -1,5 +1,5 @@ Docker -Copyright 2012-2013 Docker, Inc. +Copyright 2012-2014 Docker, Inc. This product includes software developed at Docker, Inc. (http://www.docker.com). diff --git a/README.md b/README.md index 12ffc2e8ec..b6b77d6e61 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,7 @@ hundreds of thousands of applications and databases. ## Better than VMs -A common method for distributing applications and sandbox their +A common method for distributing applications and sandboxing their execution is to use virtual machines, or VMs. Typical VM formats are VMWare's vmdk, Oracle Virtualbox's vdi, and Amazon EC2's ami. 
In theory these formats should allow every developer to automatically diff --git a/REMOTE_TODO.md b/REMOTE_TODO.md deleted file mode 100644 index 5bc8d0bf7f..0000000000 --- a/REMOTE_TODO.md +++ /dev/null @@ -1,46 +0,0 @@ -``` -**GET** - send objects deprecate multi-stream -TODO "/events": getEvents, N -ok "/info": getInfo, 1 -ok "/version": getVersion, 1 -... "/images/json": getImagesJSON, N -TODO "/images/viz": getImagesViz, 0 yes -TODO "/images/search": getImagesSearch, N -#3490 "/images/{name:.*}/get": getImagesGet, 0 -TODO "/images/{name:.*}/history": getImagesHistory, N -TODO "/images/{name:.*}/json": getImagesByName, 1 -TODO "/containers/ps": getContainersJSON, N -TODO "/containers/json": getContainersJSON, 1 -ok "/containers/{name:.*}/export": getContainersExport, 0 -TODO "/containers/{name:.*}/changes": getContainersChanges, N -TODO "/containers/{name:.*}/json": getContainersByName, 1 -TODO "/containers/{name:.*}/top": getContainersTop, N -#3512 "/containers/{name:.*}/attach/ws": wsContainersAttach, 0 yes - -**POST** -TODO "/auth": postAuth, 0 yes -ok "/commit": postCommit, 0 -TODO "/build": postBuild, 0 yes -TODO "/images/create": postImagesCreate, N yes yes (pull) -TODO "/images/{name:.*}/insert": postImagesInsert, N yes yes -TODO "/images/load": postImagesLoad, 1 yes (stdin) -TODO "/images/{name:.*}/push": postImagesPush, N yes -ok "/images/{name:.*}/tag": postImagesTag, 0 -ok "/containers/create": postContainersCreate, 0 -ok "/containers/{name:.*}/kill": postContainersKill, 0 -#3476 "/containers/{name:.*}/restart": postContainersRestart, 0 -ok "/containers/{name:.*}/start": postContainersStart, 0 -ok "/containers/{name:.*}/stop": postContainersStop, 0 -ok "/containers/{name:.*}/wait": postContainersWait, 0 -ok "/containers/{name:.*}/resize": postContainersResize, 0 -#3512 "/containers/{name:.*}/attach": postContainersAttach, 0 yes -TODO "/containers/{name:.*}/copy": postContainersCopy, 0 yes - -**DELETE** -#3180 "/containers/{name:.*}": 
deleteContainers, 0 -TODO "/images/{name:.*}": deleteImages, N - -**OPTIONS** -ok "": optionsHandler, 0 -``` diff --git a/VERSION b/VERSION index c006218557..a3df0a6959 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.7.6 +0.8.0 diff --git a/Vagrantfile b/Vagrantfile index c130587829..23f262020a 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -8,10 +8,20 @@ AWS_BOX_URI = ENV['BOX_URI'] || "https://github.com/mitchellh/vagrant-aws/raw/ma AWS_REGION = ENV['AWS_REGION'] || "us-east-1" AWS_AMI = ENV['AWS_AMI'] || "ami-69f5a900" AWS_INSTANCE_TYPE = ENV['AWS_INSTANCE_TYPE'] || 't1.micro' +SSH_PRIVKEY_PATH = ENV['SSH_PRIVKEY_PATH'] +PRIVATE_NETWORK = ENV['PRIVATE_NETWORK'] +# Boolean that forwards the Docker dynamic ports 49000-49900 +# See http://docs.docker.io/en/latest/use/port_redirection/ for more +# $ FORWARD_DOCKER_PORTS=1 vagrant [up|reload] FORWARD_DOCKER_PORTS = ENV['FORWARD_DOCKER_PORTS'] +VAGRANT_RAM = ENV['VAGRANT_RAM'] || 512 +VAGRANT_CORES = ENV['VAGRANT_CORES'] || 1 -SSH_PRIVKEY_PATH = ENV["SSH_PRIVKEY_PATH"] +# You may also provide a comma-separated list of ports +# for Vagrant to forward. For example: +# $ FORWARD_PORTS=8080,27017 vagrant [up|reload] +FORWARD_PORTS = ENV['FORWARD_PORTS'] # A script to upgrade from the 12.04 kernel to the raring backport kernel (3.8) # and install docker. @@ -23,6 +33,10 @@ if [ -z "$user" ]; then user=vagrant fi +# Enable memory cgroup and swap accounting +sed -i 's/GRUB_CMDLINE_LINUX=""/GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"/g' /etc/default/grub +update-grub + # Adding an apt gpg key is idempotent. 
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 @@ -152,6 +166,8 @@ Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config| override.vm.provision :shell, :inline => $vbox_script vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"] vb.customize ["modifyvm", :id, "--natdnsproxy1", "on"] + vb.customize ["modifyvm", :id, "--memory", VAGRANT_RAM] + vb.customize ["modifyvm", :id, "--cpus", VAGRANT_CORES] end end @@ -161,16 +177,30 @@ Vagrant::VERSION < "1.1.0" and Vagrant::Config.run do |config| config.vm.provision :shell, :inline => $vbox_script end -if !FORWARD_DOCKER_PORTS.nil? +# Setup port forwarding per loaded environment variables +forward_ports = FORWARD_DOCKER_PORTS.nil? ? [] : [*49153..49900] +forward_ports += FORWARD_PORTS.split(',').map{|i| i.to_i } if FORWARD_PORTS +if forward_ports.any? Vagrant::VERSION < "1.1.0" and Vagrant::Config.run do |config| - (49000..49900).each do |port| + forward_ports.each do |port| config.vm.forward_port port, port end end Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config| - (49000..49900).each do |port| - config.vm.network :forwarded_port, :host => port, :guest => port + forward_ports.each do |port| + config.vm.network :forwarded_port, :host => port, :guest => port, auto_correct: true end end end + +if !PRIVATE_NETWORK.nil? 
+ Vagrant::VERSION < "1.1.0" and Vagrant::Config.run do |config| + config.vm.network :hostonly, PRIVATE_NETWORK + end + + Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config| + config.vm.network "private_network", ip: PRIVATE_NETWORK + end +end + diff --git a/api.go b/api/api.go similarity index 50% rename from api.go rename to api/api.go index 9fe9bc716a..741dc69085 100644 --- a/api.go +++ b/api/api.go @@ -1,4 +1,4 @@ -package docker +package api import ( "bufio" @@ -8,8 +8,8 @@ import ( "encoding/json" "expvar" "fmt" - "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/auth" + "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/pkg/systemd" "github.com/dotcloud/docker/utils" "github.com/gorilla/mux" @@ -21,20 +21,24 @@ import ( "net/http" "net/http/pprof" "os" - "os/exec" "regexp" "strconv" "strings" + "syscall" ) const ( - APIVERSION = 1.8 + APIVERSION = 1.9 DEFAULTHTTPHOST = "127.0.0.1" DEFAULTHTTPPORT = 4243 DEFAULTUNIXSOCKET = "/var/run/docker.sock" ) -type HttpApiFunc func(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error +type HttpApiFunc func(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error + +func init() { + engine.Register("serveapi", ServeApi) +} func hijackServer(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) { conn, _, err := w.(http.Hijacker).Hijack() @@ -71,13 +75,13 @@ func httpError(w http.ResponseWriter, err error) { // create appropriate error types with clearly defined meaning. 
if strings.Contains(err.Error(), "No such") { statusCode = http.StatusNotFound - } else if strings.HasPrefix(err.Error(), "Bad parameter") { + } else if strings.Contains(err.Error(), "Bad parameter") { statusCode = http.StatusBadRequest - } else if strings.HasPrefix(err.Error(), "Conflict") { + } else if strings.Contains(err.Error(), "Conflict") { statusCode = http.StatusConflict - } else if strings.HasPrefix(err.Error(), "Impossible") { + } else if strings.Contains(err.Error(), "Impossible") { statusCode = http.StatusNotAcceptable - } else if strings.HasPrefix(err.Error(), "Wrong login/password") { + } else if strings.Contains(err.Error(), "Wrong login/password") { statusCode = http.StatusUnauthorized } else if strings.Contains(err.Error(), "hasn't been activated") { statusCode = http.StatusForbidden @@ -89,18 +93,10 @@ func httpError(w http.ResponseWriter, err error) { } } -func writeJSON(w http.ResponseWriter, code int, v interface{}) error { - b, err := json.Marshal(v) - - if err != nil { - return err - } - +func writeJSON(w http.ResponseWriter, code int, v engine.Env) error { w.Header().Set("Content-Type", "application/json") w.WriteHeader(code) - w.Write(b) - - return nil + return v.Encode(w) } func getBoolParam(value string) (bool, error) { @@ -114,7 +110,20 @@ func getBoolParam(value string) (bool, error) { return ret, nil } -func matchesContentType(contentType, expectedType string) bool { +//TODO remove, used on < 1.5 in getContainersJSON +func displayablePorts(ports *engine.Table) string { + result := []string{} + for _, port := range ports.Data { + if port.Get("IP") == "" { + result = append(result, fmt.Sprintf("%d/%s", port.GetInt("PublicPort"), port.Get("Type"))) + } else { + result = append(result, fmt.Sprintf("%s:%d->%d/%s", port.Get("IP"), port.GetInt("PublicPort"), port.GetInt("PrivatePort"), port.Get("Type"))) + } + } + return strings.Join(result, ", ") +} + +func MatchesContentType(contentType, expectedType string) bool { mimetype, _, err := 
mime.ParseMediaType(contentType) if err != nil { utils.Errorf("Error parsing media type: %s error: %s", contentType, err.Error()) @@ -122,37 +131,43 @@ func matchesContentType(contentType, expectedType string) bool { return err == nil && mimetype == expectedType } -func postAuth(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - authConfig := &auth.AuthConfig{} - err := json.NewDecoder(r.Body).Decode(authConfig) +func postAuth(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var ( + authConfig, err = ioutil.ReadAll(r.Body) + job = eng.Job("auth") + status string + ) if err != nil { return err } - status, err := auth.Login(authConfig, srv.HTTPRequestFactory(nil)) - if err != nil { + job.Setenv("authConfig", string(authConfig)) + job.Stdout.AddString(&status) + if err = job.Run(); err != nil { return err } if status != "" { - return writeJSON(w, http.StatusOK, &APIAuth{Status: status}) + var env engine.Env + env.Set("Status", status) + return writeJSON(w, http.StatusOK, env) } w.WriteHeader(http.StatusNoContent) return nil } -func getVersion(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func getVersion(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.Header().Set("Content-Type", "application/json") - srv.Eng.ServeHTTP(w, r) + eng.ServeHTTP(w, r) return nil } -func postContainersKill(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func postContainersKill(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } if err := parseForm(r); err != nil { return err } - job := srv.Eng.Job("kill", vars["name"]) + job := eng.Job("kill", vars["name"]) if sig := r.Form.Get("signal"); sig 
!= "" { job.Args = append(job.Args, sig) } @@ -163,150 +178,116 @@ func postContainersKill(srv *Server, version float64, w http.ResponseWriter, r * return nil } -func getContainersExport(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func getContainersExport(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } - job := srv.Eng.Job("export", vars["name"]) - if err := job.Stdout.Add(w); err != nil { - return err - } + job := eng.Job("export", vars["name"]) + job.Stdout.Add(w) if err := job.Run(); err != nil { return err } return nil } -func getImagesJSON(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func getImagesJSON(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } - all, err := getBoolParam(r.Form.Get("all")) - if err != nil { - return err - } - filter := r.Form.Get("filter") + var ( + err error + outs *engine.Table + job = eng.Job("images") + ) - outs, err := srv.Images(all, filter) - if err != nil { + job.Setenv("filter", r.Form.Get("filter")) + job.Setenv("all", r.Form.Get("all")) + + if version >= 1.7 { + job.Stdout.Add(w) + } else if outs, err = job.Stdout.AddListTable(); err != nil { return err } - if version < 1.7 { - outs2 := []APIImagesOld{} - for _, ctnr := range outs { - outs2 = append(outs2, ctnr.ToLegacy()...) 
+ if err := job.Run(); err != nil { + return err + } + + if version < 1.7 && outs != nil { // Convert to legacy format + outsLegacy := engine.NewTable("Created", 0) + for _, out := range outs.Data { + for _, repoTag := range out.GetList("RepoTags") { + parts := strings.Split(repoTag, ":") + outLegacy := &engine.Env{} + outLegacy.Set("Repository", parts[0]) + outLegacy.Set("Tag", parts[1]) + outLegacy.Set("ID", out.Get("ID")) + outLegacy.SetInt64("Created", out.GetInt64("Created")) + outLegacy.SetInt64("Size", out.GetInt64("Size")) + outLegacy.SetInt64("VirtualSize", out.GetInt64("VirtualSize")) + outsLegacy.Add(outLegacy) + } + } + if _, err := outsLegacy.WriteListTo(w); err != nil { + return err } - - return writeJSON(w, http.StatusOK, outs2) } - return writeJSON(w, http.StatusOK, outs) + return nil } -func getImagesViz(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func getImagesViz(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if version > 1.6 { w.WriteHeader(http.StatusNotFound) return fmt.Errorf("This is now implemented in the client.") } - - if err := srv.ImagesViz(w); err != nil { - return err - } + eng.ServeHTTP(w, r) return nil } -func getInfo(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func getInfo(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.Header().Set("Content-Type", "application/json") - srv.Eng.ServeHTTP(w, r) + eng.ServeHTTP(w, r) return nil } -func getEvents(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - sendEvent := func(wf *utils.WriteFlusher, event *utils.JSONMessage) error { - b, err := json.Marshal(event) - if err != nil { - return fmt.Errorf("JSON error") - } - _, err = wf.Write(b) - if err != nil { - // On error, evict the listener - 
utils.Errorf("%s", err) - srv.Lock() - delete(srv.listeners, r.RemoteAddr) - srv.Unlock() - return err - } - return nil - } - +func getEvents(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } - listener := make(chan utils.JSONMessage) - srv.Lock() - srv.listeners[r.RemoteAddr] = listener - srv.Unlock() - since, err := strconv.ParseInt(r.Form.Get("since"), 10, 0) - if err != nil { - since = 0 - } + w.Header().Set("Content-Type", "application/json") - wf := utils.NewWriteFlusher(w) - wf.Flush() - if since != 0 { - // If since, send previous events that happened after the timestamp - for _, event := range srv.GetEvents() { - if event.Time >= since { - err := sendEvent(wf, &event) - if err != nil && err.Error() == "JSON error" { - continue - } - if err != nil { - return err - } - } - } + var job = eng.Job("events", r.RemoteAddr) + job.Stdout.Add(utils.NewWriteFlusher(w)) + job.Setenv("since", r.Form.Get("since")) + return job.Run() +} + +func getImagesHistory(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") } - for event := range listener { - err := sendEvent(wf, &event) - if err != nil && err.Error() == "JSON error" { - continue - } - if err != nil { - return err - } + + var job = eng.Job("history", vars["name"]) + job.Stdout.Add(w) + + if err := job.Run(); err != nil { + return err } return nil } -func getImagesHistory(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func getContainersChanges(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } - name := vars["name"] - outs, err := srv.ImageHistory(name) - if err != nil { - return err - } + var job = eng.Job("changes", 
vars["name"]) + job.Stdout.Add(w) - return writeJSON(w, http.StatusOK, outs) + return job.Run() } -func getContainersChanges(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - name := vars["name"] - changesStr, err := srv.ContainerChanges(name) - if err != nil { - return err - } - - return writeJSON(w, http.StatusOK, changesStr) -} - -func getContainersTop(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func getContainersTop(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if version < 1.4 { return fmt.Errorf("top was improved a lot since 1.3, Please upgrade your docker client.") } @@ -316,46 +297,50 @@ func getContainersTop(srv *Server, version float64, w http.ResponseWriter, r *ht if err := parseForm(r); err != nil { return err } - procsStr, err := srv.ContainerTop(vars["name"], r.Form.Get("ps_args")) - if err != nil { - return err - } - return writeJSON(w, http.StatusOK, procsStr) + + job := eng.Job("top", vars["name"], r.Form.Get("ps_args")) + job.Stdout.Add(w) + return job.Run() } -func getContainersJSON(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func getContainersJSON(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } - all, err := getBoolParam(r.Form.Get("all")) - if err != nil { + var ( + err error + outs *engine.Table + job = eng.Job("containers") + ) + + job.Setenv("all", r.Form.Get("all")) + job.Setenv("size", r.Form.Get("size")) + job.Setenv("since", r.Form.Get("since")) + job.Setenv("before", r.Form.Get("before")) + job.Setenv("limit", r.Form.Get("limit")) + + if version >= 1.5 { + job.Stdout.Add(w) + } else if outs, err = job.Stdout.AddTable(); err != nil { 
return err } - size, err := getBoolParam(r.Form.Get("size")) - if err != nil { + if err = job.Run(); err != nil { return err } - since := r.Form.Get("since") - before := r.Form.Get("before") - n, err := strconv.Atoi(r.Form.Get("limit")) - if err != nil { - n = -1 - } - - outs := srv.Containers(all, size, n, since, before) - - if version < 1.5 { - outs2 := []APIContainersOld{} - for _, ctnr := range outs { - outs2 = append(outs2, *ctnr.ToLegacy()) + if version < 1.5 { // Convert to legacy format + for _, out := range outs.Data { + ports := engine.NewTable("", 0) + ports.ReadListFrom([]byte(out.Get("Ports"))) + out.Set("Ports", displayablePorts(ports)) + } + if _, err = outs.WriteListTo(w); err != nil { + return err } - - return writeJSON(w, http.StatusOK, outs2) } - return writeJSON(w, http.StatusOK, outs) + return nil } -func postImagesTag(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func postImagesTag(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } @@ -363,7 +348,7 @@ func postImagesTag(srv *Server, version float64, w http.ResponseWriter, r *http. return fmt.Errorf("Missing parameter") } - job := srv.Eng.Job("tag", vars["name"], r.Form.Get("repo"), r.Form.Get("tag")) + job := eng.Job("tag", vars["name"], r.Form.Get("repo"), r.Form.Get("tag")) job.Setenv("force", r.Form.Get("force")) if err := job.Run(); err != nil { return err @@ -372,42 +357,45 @@ func postImagesTag(srv *Server, version float64, w http.ResponseWriter, r *http. 
return nil } -func postCommit(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func postCommit(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } - config := &Config{} - if err := json.NewDecoder(r.Body).Decode(config); err != nil && err != io.EOF { + var ( + config engine.Env + env engine.Env + job = eng.Job("commit", r.Form.Get("container")) + ) + if err := config.Import(r.Body); err != nil { utils.Errorf("%s", err) } - job := srv.Eng.Job("commit", r.Form.Get("container")) job.Setenv("repo", r.Form.Get("repo")) job.Setenv("tag", r.Form.Get("tag")) job.Setenv("author", r.Form.Get("author")) job.Setenv("comment", r.Form.Get("comment")) - job.SetenvJson("config", config) + job.SetenvSubEnv("config", &config) var id string job.Stdout.AddString(&id) if err := job.Run(); err != nil { return err } - - return writeJSON(w, http.StatusCreated, &APIID{id}) + env.Set("Id", id) + return writeJSON(w, http.StatusCreated, env) } // Creates an image from Pull or from Import -func postImagesCreate(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func postImagesCreate(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } - src := r.Form.Get("fromSrc") - image := r.Form.Get("fromImage") - tag := r.Form.Get("tag") - repo := r.Form.Get("repo") - + var ( + image = r.Form.Get("fromImage") + tag = r.Form.Get("tag") + job *engine.Job + ) authEncoded := r.Header.Get("X-Registry-Auth") authConfig := &auth.AuthConfig{} if authEncoded != "" { @@ -421,7 +409,6 @@ func postImagesCreate(srv *Server, version float64, w http.ResponseWriter, r *ht if version > 1.0 { w.Header().Set("Content-Type", "application/json") } - sf := utils.NewStreamFormatter(version > 1.0) if image != "" { 
//pull metaHeaders := map[string][]string{} for k, v := range r.Header { @@ -429,67 +416,90 @@ func postImagesCreate(srv *Server, version float64, w http.ResponseWriter, r *ht metaHeaders[k] = v } } - if err := srv.ImagePull(image, tag, w, sf, authConfig, metaHeaders, version > 1.3); err != nil { - if sf.Used() { - w.Write(sf.FormatError(err)) - return nil - } - return err - } + job = eng.Job("pull", r.Form.Get("fromImage"), tag) + job.SetenvBool("parallel", version > 1.3) + job.SetenvJson("metaHeaders", metaHeaders) + job.SetenvJson("authConfig", authConfig) } else { //import - if err := srv.ImageImport(src, repo, tag, r.Body, w, sf); err != nil { - if sf.Used() { - w.Write(sf.FormatError(err)) - return nil - } + job = eng.Job("import", r.Form.Get("fromSrc"), r.Form.Get("repo"), tag) + job.Stdin.Add(r.Body) + } + + job.SetenvBool("json", version > 1.0) + job.Stdout.Add(utils.NewWriteFlusher(w)) + if err := job.Run(); err != nil { + if !job.Stdout.Used() { return err } + sf := utils.NewStreamFormatter(version > 1.0) + w.Write(sf.FormatError(err)) } + return nil } -func getImagesSearch(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func getImagesSearch(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } + var ( + authEncoded = r.Header.Get("X-Registry-Auth") + authConfig = &auth.AuthConfig{} + metaHeaders = map[string][]string{} + ) - term := r.Form.Get("term") - outs, err := srv.ImagesSearch(term) - if err != nil { - return err + if authEncoded != "" { + authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { + // for a search it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + authConfig = &auth.AuthConfig{} + } + } + for k, v := range 
r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } } - return writeJSON(w, http.StatusOK, outs) + var job = eng.Job("search", r.Form.Get("term")) + job.SetenvJson("metaHeaders", metaHeaders) + job.SetenvJson("authConfig", authConfig) + job.Stdout.Add(w) + + return job.Run() } -func postImagesInsert(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func postImagesInsert(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } - - url := r.Form.Get("url") - path := r.Form.Get("path") if vars == nil { return fmt.Errorf("Missing parameter") } - name := vars["name"] if version > 1.0 { w.Header().Set("Content-Type", "application/json") } - sf := utils.NewStreamFormatter(version > 1.0) - err := srv.ImageInsert(name, url, path, w, sf) - if err != nil { - if sf.Used() { - w.Write(sf.FormatError(err)) - return nil + + job := eng.Job("insert", vars["name"], r.Form.Get("url"), r.Form.Get("path")) + job.SetenvBool("json", version > 1.0) + job.Stdout.Add(w) + if err := job.Run(); err != nil { + if !job.Stdout.Used() { + return err } - return err + sf := utils.NewStreamFormatter(version > 1.0) + w.Write(sf.FormatError(err)) } return nil } -func postImagesPush(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func postImagesPush(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + metaHeaders := map[string][]string{} for k, v := range r.Header { if strings.HasPrefix(k, "X-Meta-") { @@ -514,60 +524,62 @@ func postImagesPush(srv *Server, version float64, w http.ResponseWriter, r *http if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil { return err } - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - 
name := vars["name"] if version > 1.0 { w.Header().Set("Content-Type", "application/json") } - sf := utils.NewStreamFormatter(version > 1.0) - if err := srv.ImagePush(name, w, sf, authConfig, metaHeaders); err != nil { - if sf.Used() { - w.Write(sf.FormatError(err)) - return nil + job := eng.Job("push", vars["name"]) + job.SetenvJson("metaHeaders", metaHeaders) + job.SetenvJson("authConfig", authConfig) + job.SetenvBool("json", version > 1.0) + job.Stdout.Add(utils.NewWriteFlusher(w)) + + if err := job.Run(); err != nil { + if !job.Stdout.Used() { + return err } - return err + sf := utils.NewStreamFormatter(version > 1.0) + w.Write(sf.FormatError(err)) } return nil } -func getImagesGet(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - name := vars["name"] +func getImagesGet(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } if version > 1.0 { w.Header().Set("Content-Type", "application/x-tar") } - return srv.ImageExport(name, w) + job := eng.Job("image_export", vars["name"]) + job.Stdout.Add(w) + return job.Run() } -func postImagesLoad(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - return srv.ImageLoad(r.Body) +func postImagesLoad(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + job := eng.Job("load") + job.Stdin.Add(r.Body) + return job.Run() } -func postContainersCreate(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func postContainersCreate(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return nil } - out := &APIRun{} - job := srv.Eng.Job("create", r.Form.Get("name")) + var ( + out engine.Env + job = eng.Job("create", 
r.Form.Get("name")) + outWarnings []string + outId string + warnings = bytes.NewBuffer(nil) + ) if err := job.DecodeEnv(r.Body); err != nil { return err } - resolvConf, err := utils.GetResolvConf() - if err != nil { - return err - } - if !job.GetenvBool("NetworkDisabled") && len(job.Getenv("Dns")) == 0 && len(srv.runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) { - out.Warnings = append(out.Warnings, fmt.Sprintf("Docker detected local DNS server on resolv.conf. Using default external servers: %v", defaultDns)) - job.SetenvList("Dns", defaultDns) - } // Read container ID from the first line of stdout - job.Stdout.AddString(&out.ID) + job.Stdout.AddString(&outId) // Read warnings from stderr - warnings := &bytes.Buffer{} job.Stderr.Add(warnings) if err := job.Run(); err != nil { return err @@ -575,100 +587,69 @@ func postContainersCreate(srv *Server, version float64, w http.ResponseWriter, r // Parse warnings from stderr scanner := bufio.NewScanner(warnings) for scanner.Scan() { - out.Warnings = append(out.Warnings, scanner.Text()) + outWarnings = append(outWarnings, scanner.Text()) } - if job.GetenvInt("Memory") > 0 && !srv.runtime.capabilities.MemoryLimit { - log.Println("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.") - out.Warnings = append(out.Warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.") - } - if job.GetenvInt("Memory") > 0 && !srv.runtime.capabilities.SwapLimit { - log.Println("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.") - out.Warnings = append(out.Warnings, "Your kernel does not support memory swap capabilities. 
Limitation discarded.") - } - - if !job.GetenvBool("NetworkDisabled") && srv.runtime.capabilities.IPv4ForwardingDisabled { - log.Println("Warning: IPv4 forwarding is disabled.") - out.Warnings = append(out.Warnings, "IPv4 forwarding is disabled.") - } - + out.Set("Id", outId) + out.SetList("Warnings", outWarnings) return writeJSON(w, http.StatusCreated, out) } -func postContainersRestart(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - t, err := strconv.Atoi(r.Form.Get("t")) - if err != nil || t < 0 { - t = 10 - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - name := vars["name"] - if err := srv.ContainerRestart(name, t); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func deleteContainers(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func postContainersRestart(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if vars == nil { return fmt.Errorf("Missing parameter") } - name := vars["name"] - - removeVolume, err := getBoolParam(r.Form.Get("v")) - if err != nil { - return err - } - removeLink, err := getBoolParam(r.Form.Get("link")) - if err != nil { - return err - } - - if err := srv.ContainerDestroy(name, removeVolume, removeLink); err != nil { + job := eng.Job("restart", vars["name"]) + job.Setenv("t", r.Form.Get("t")) + if err := job.Run(); err != nil { return err } w.WriteHeader(http.StatusNoContent) return nil } -func deleteImages(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func deleteContainers(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if vars == nil { 
return fmt.Errorf("Missing parameter") } - name := vars["name"] - imgs, err := srv.ImageDelete(name, version > 1.1) - if err != nil { + job := eng.Job("container_delete", vars["name"]) + job.Setenv("removeVolume", r.Form.Get("v")) + job.Setenv("removeLink", r.Form.Get("link")) + if err := job.Run(); err != nil { return err } - if imgs != nil { - if len(imgs) != 0 { - return writeJSON(w, http.StatusOK, imgs) - } - return fmt.Errorf("Conflict, %s wasn't deleted", name) - } w.WriteHeader(http.StatusNoContent) return nil } -func postContainersStart(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func deleteImages(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + var job = eng.Job("image_delete", vars["name"]) + job.Stdout.Add(w) + job.SetenvBool("autoPrune", version > 1.1) + + return job.Run() +} + +func postContainersStart(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } name := vars["name"] - job := srv.Eng.Job("start", name) + job := eng.Job("start", name) // allow a nil body for backwards compatibility if r.Body != nil { - if matchesContentType(r.Header.Get("Content-Type"), "application/json") { + if MatchesContentType(r.Header.Get("Content-Type"), "application/json") { if err := job.DecodeEnv(r.Body); err != nil { return err } @@ -681,14 +662,14 @@ func postContainersStart(srv *Server, version float64, w http.ResponseWriter, r return nil } -func postContainersStop(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func postContainersStop(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := 
parseForm(r); err != nil { return err } if vars == nil { return fmt.Errorf("Missing parameter") } - job := srv.Eng.Job("stop", vars["name"]) + job := eng.Job("stop", vars["name"]) job.Setenv("t", r.Form.Get("t")) if err := job.Run(); err != nil { return err @@ -697,71 +678,59 @@ func postContainersStop(srv *Server, version float64, w http.ResponseWriter, r * return nil } -func postContainersWait(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func postContainersWait(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } - job := srv.Eng.Job("wait", vars["name"]) - var statusStr string - job.Stdout.AddString(&statusStr) + var ( + env engine.Env + status string + job = eng.Job("wait", vars["name"]) + ) + job.Stdout.AddString(&status) if err := job.Run(); err != nil { return err } // Parse a 16-bit encoded integer to map typical unix exit status. 
- status, err := strconv.ParseInt(statusStr, 10, 16) + _, err := strconv.ParseInt(status, 10, 16) if err != nil { return err } - return writeJSON(w, http.StatusOK, &APIWait{StatusCode: int(status)}) + env.Set("StatusCode", status) + return writeJSON(w, http.StatusOK, env) } -func postContainersResize(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func postContainersResize(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if vars == nil { return fmt.Errorf("Missing parameter") } - if err := srv.Eng.Job("resize", vars["name"], r.Form.Get("h"), r.Form.Get("w")).Run(); err != nil { + if err := eng.Job("resize", vars["name"], r.Form.Get("h"), r.Form.Get("w")).Run(); err != nil { return err } return nil } -func postContainersAttach(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func postContainersAttach(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } - logs, err := getBoolParam(r.Form.Get("logs")) - if err != nil { - return err - } - stream, err := getBoolParam(r.Form.Get("stream")) - if err != nil { - return err - } - stdin, err := getBoolParam(r.Form.Get("stdin")) - if err != nil { - return err - } - stdout, err := getBoolParam(r.Form.Get("stdout")) - if err != nil { - return err - } - stderr, err := getBoolParam(r.Form.Get("stderr")) - if err != nil { - return err - } - if vars == nil { return fmt.Errorf("Missing parameter") } - name := vars["name"] - c, err := srv.ContainerInspect(name) + var ( + job = eng.Job("inspect", vars["name"], "container") + c, err = job.Stdout.AddEnv() + ) if err != nil { return err } + if err = job.Run(); err != nil { + return err + } inStream, outStream, err := hijackServer(w) if err != nil { @@ -786,58 +755,53 @@ 
func postContainersAttach(srv *Server, version float64, w http.ResponseWriter, r fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") - if !c.Config.Tty && version >= 1.6 { + if c.GetSubEnv("Config") != nil && !c.GetSubEnv("Config").GetBool("Tty") && version >= 1.6 { errStream = utils.NewStdWriter(outStream, utils.Stderr) outStream = utils.NewStdWriter(outStream, utils.Stdout) } else { errStream = outStream } - if err := srv.ContainerAttach(name, logs, stream, stdin, stdout, stderr, inStream, outStream, errStream); err != nil { + job = eng.Job("attach", vars["name"]) + job.Setenv("logs", r.Form.Get("logs")) + job.Setenv("stream", r.Form.Get("stream")) + job.Setenv("stdin", r.Form.Get("stdin")) + job.Setenv("stdout", r.Form.Get("stdout")) + job.Setenv("stderr", r.Form.Get("stderr")) + job.Stdin.Add(inStream) + job.Stdout.Add(outStream) + job.Stderr.Set(errStream) + if err := job.Run(); err != nil { fmt.Fprintf(outStream, "Error: %s\n", err) + } return nil } -func wsContainersAttach(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - +func wsContainersAttach(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } - logs, err := getBoolParam(r.Form.Get("logs")) - if err != nil { - return err - } - stream, err := getBoolParam(r.Form.Get("stream")) - if err != nil { - return err - } - stdin, err := getBoolParam(r.Form.Get("stdin")) - if err != nil { - return err - } - stdout, err := getBoolParam(r.Form.Get("stdout")) - if err != nil { - return err - } - stderr, err := getBoolParam(r.Form.Get("stderr")) - if err != nil { - return err - } - if vars == nil { return fmt.Errorf("Missing parameter") } - name := vars["name"] - if _, err := srv.ContainerInspect(name); err != nil { + if err := eng.Job("inspect", vars["name"], "container").Run(); err != nil { return err } h := 
websocket.Handler(func(ws *websocket.Conn) { defer ws.Close() - - if err := srv.ContainerAttach(name, logs, stream, stdin, stdout, stderr, ws, ws, ws); err != nil { + job := eng.Job("attach", vars["name"]) + job.Setenv("logs", r.Form.Get("logs")) + job.Setenv("stream", r.Form.Get("stream")) + job.Setenv("stdin", r.Form.Get("stdin")) + job.Setenv("stdout", r.Form.Get("stdout")) + job.Setenv("stderr", r.Form.Get("stderr")) + job.Stdin.Add(ws) + job.Stdout.Add(ws) + job.Stderr.Set(ws) + if err := job.Run(); err != nil { utils.Errorf("Error: %s", err) } }) @@ -846,63 +810,43 @@ func wsContainersAttach(srv *Server, version float64, w http.ResponseWriter, r * return nil } -func getContainersByName(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func getContainersByName(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } - name := vars["name"] - - container, err := srv.ContainerInspect(name) - if err != nil { - return err - } - - _, err = srv.ImageInspect(name) - if err == nil { - return fmt.Errorf("Conflict between containers and images") - } - - container.readHostConfig() - c := APIContainer{container, container.hostConfig} - - return writeJSON(w, http.StatusOK, c) + var job = eng.Job("inspect", vars["name"], "container") + job.Stdout.Add(w) + job.SetenvBool("conflict", true) //conflict=true to detect conflict between containers and images in the job + return job.Run() } -func getImagesByName(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func getImagesByName(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } - name := vars["name"] - - image, err := srv.ImageInspect(name) - if err != nil { - return err - } - - _, err = 
srv.ContainerInspect(name) - if err == nil { - return fmt.Errorf("Conflict between containers and images") - } - - return writeJSON(w, http.StatusOK, image) + var job = eng.Job("inspect", vars["name"], "image") + job.Stdout.Add(w) + job.SetenvBool("conflict", true) //conflict=true to detect conflict between containers and images in the job + return job.Run() } -func postBuild(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func postBuild(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if version < 1.3 { return fmt.Errorf("Multipart upload for build is no longer supported. Please upgrade your docker client.") } var ( - remoteURL = r.FormValue("remote") - repoName = r.FormValue("t") - rawSuppressOutput = r.FormValue("q") - rawNoCache = r.FormValue("nocache") - rawRm = r.FormValue("rm") authEncoded = r.Header.Get("X-Registry-Auth") authConfig = &auth.AuthConfig{} - tag string + configFileEncoded = r.Header.Get("X-Registry-Config") + configFile = &auth.ConfigFile{} + job = eng.Job("build") ) - repoName, tag = utils.ParseRepositoryTag(repoName) - if authEncoded != "" { + + // This block can be removed when API versions prior to 1.9 are deprecated. + // Both headers will be parsed and sent along to the daemon, but if a non-empty + // ConfigFile is present, any value provided as an AuthConfig directly will + // be overridden. See BuildFile::CmdFrom for details. 
+ if version < 1.9 && authEncoded != "" { authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { // for a pull it is not an error if no auth was given @@ -911,118 +855,69 @@ func postBuild(srv *Server, version float64, w http.ResponseWriter, r *http.Requ } } - var context io.Reader - - if remoteURL == "" { - context = r.Body - } else if utils.IsGIT(remoteURL) { - if !strings.HasPrefix(remoteURL, "git://") { - remoteURL = "https://" + remoteURL + if configFileEncoded != "" { + configFileJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(configFileEncoded)) + if err := json.NewDecoder(configFileJson).Decode(configFile); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + configFile = &auth.ConfigFile{} } - root, err := ioutil.TempDir("", "docker-build-git") - if err != nil { - return err - } - defer os.RemoveAll(root) - - if output, err := exec.Command("git", "clone", remoteURL, root).CombinedOutput(); err != nil { - return fmt.Errorf("Error trying to use git: %s (%s)", err, output) - } - - c, err := archive.Tar(root, archive.Uncompressed) - if err != nil { - return err - } - context = c - } else if utils.IsURL(remoteURL) { - f, err := utils.Download(remoteURL) - if err != nil { - return err - } - defer f.Body.Close() - dockerFile, err := ioutil.ReadAll(f.Body) - if err != nil { - return err - } - c, err := MkBuildContext(string(dockerFile), nil) - if err != nil { - return err - } - context = c - } - - suppressOutput, err := getBoolParam(rawSuppressOutput) - if err != nil { - return err - } - noCache, err := getBoolParam(rawNoCache) - if err != nil { - return err - } - rm, err := getBoolParam(rawRm) - if err != nil { - return err } if version >= 1.8 { w.Header().Set("Content-Type", "application/json") + job.SetenvBool("json", true) } - sf := 
utils.NewStreamFormatter(version >= 1.8) - b := NewBuildFile(srv, - &StdoutFormater{ - Writer: utils.NewWriteFlusher(w), - StreamFormatter: sf, - }, - &StderrFormater{ - Writer: utils.NewWriteFlusher(w), - StreamFormatter: sf, - }, - !suppressOutput, !noCache, rm, utils.NewWriteFlusher(w), sf, authConfig) - id, err := b.Build(context) - if err != nil { - if sf.Used() { - w.Write(sf.FormatError(err)) - return nil + + job.Stdout.Add(utils.NewWriteFlusher(w)) + job.Stdin.Add(r.Body) + job.Setenv("remote", r.FormValue("remote")) + job.Setenv("t", r.FormValue("t")) + job.Setenv("q", r.FormValue("q")) + job.Setenv("nocache", r.FormValue("nocache")) + job.Setenv("rm", r.FormValue("rm")) + + if err := job.Run(); err != nil { + if !job.Stdout.Used() { + return err } - return fmt.Errorf("Error build: %s", err) - } - if repoName != "" { - srv.runtime.repositories.Set(repoName, tag, id, false) + sf := utils.NewStreamFormatter(version >= 1.8) + w.Write(sf.FormatError(err)) } return nil } -func postContainersCopy(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func postContainersCopy(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } - name := vars["name"] - copyData := &APICopy{} - contentType := r.Header.Get("Content-Type") - if contentType == "application/json" { - if err := json.NewDecoder(r.Body).Decode(copyData); err != nil { + var copyData engine.Env + + if contentType := r.Header.Get("Content-Type"); contentType == "application/json" { + if err := copyData.Decode(r.Body); err != nil { return err } } else { return fmt.Errorf("Content-Type not supported: %s", contentType) } - if copyData.Resource == "" { + if copyData.Get("Resource") == "" { return fmt.Errorf("Path cannot be empty") } - if copyData.Resource[0] == '/' { - copyData.Resource = copyData.Resource[1:] + if copyData.Get("Resource")[0] == '/' { + 
copyData.Set("Resource", copyData.Get("Resource")[1:]) } - if err := srv.ContainerCopy(name, copyData.Resource, w); err != nil { + job := eng.Job("container_copy", vars["name"], copyData.Get("Resource")) + job.Stdout.Add(w) + if err := job.Run(); err != nil { utils.Errorf("%s", err.Error()) - return err } return nil } -func optionsHandler(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func optionsHandler(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.WriteHeader(http.StatusOK) return nil } @@ -1032,7 +927,7 @@ func writeCorsHeaders(w http.ResponseWriter, r *http.Request) { w.Header().Add("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS") } -func makeHttpHandler(srv *Server, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc) http.HandlerFunc { +func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, enableCors bool, dockerVersion string) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { // log the request utils.Debugf("Calling %s %s", localMethod, localRoute) @@ -1043,24 +938,24 @@ func makeHttpHandler(srv *Server, logging bool, localMethod string, localRoute s if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") { userAgent := strings.Split(r.Header.Get("User-Agent"), "/") - if len(userAgent) == 2 && userAgent[1] != VERSION { - utils.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], VERSION) + if len(userAgent) == 2 && userAgent[1] != dockerVersion { + utils.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion) } } version, err := strconv.ParseFloat(mux.Vars(r)["version"], 64) if err != nil { version = APIVERSION } - if srv.runtime.config.EnableCors { + if enableCors { writeCorsHeaders(w, r) 
} if version == 0 || version > APIVERSION { - w.WriteHeader(http.StatusNotFound) + http.Error(w, fmt.Errorf("client and server don't have same version (client : %g, server: %g)", version, APIVERSION).Error(), http.StatusNotFound) return } - if err := handlerFunc(srv, version, w, r, mux.Vars(r)); err != nil { + if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil { utils.Errorf("Error: %s", err) httpError(w, err) } @@ -1093,7 +988,7 @@ func AttachProfiler(router *mux.Router) { router.HandleFunc("/debug/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP) } -func createRouter(srv *Server, logging bool) (*mux.Router, error) { +func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion string) (*mux.Router, error) { r := mux.NewRouter() if os.Getenv("DEBUG") != "" { AttachProfiler(r) @@ -1154,7 +1049,7 @@ func createRouter(srv *Server, logging bool) (*mux.Router, error) { localMethod := method // build the handler function - f := makeHttpHandler(srv, logging, localMethod, localRoute, localFct) + f := makeHttpHandler(eng, logging, localMethod, localRoute, localFct, enableCors, dockerVersion) // add the new route if localRoute == "" { @@ -1172,8 +1067,8 @@ func createRouter(srv *Server, logging bool) (*mux.Router, error) { // ServeRequest processes a single http request to the docker remote api. // FIXME: refactor this to be part of Server and not require re-creating a new // router each time. This requires first moving ListenAndServe into Server. 
-func ServeRequest(srv *Server, apiversion float64, w http.ResponseWriter, req *http.Request) error { - router, err := createRouter(srv, false) +func ServeRequest(eng *engine.Engine, apiversion float64, w http.ResponseWriter, req *http.Request) error { + router, err := createRouter(eng, false, true, "") if err != nil { return err } @@ -1183,16 +1078,66 @@ func ServeRequest(srv *Server, apiversion float64, w http.ResponseWriter, req *h return nil } -func ListenAndServe(proto, addr string, srv *Server, logging bool) error { - r, err := createRouter(srv, logging) - if err != nil { - return err - } - l, e := net.Listen(proto, addr) +// ServeFD creates an http.Server and sets it up to serve given a socket activated +// argument. +func ServeFd(addr string, handle http.Handler) error { + ls, e := systemd.ListenFD(addr) if e != nil { return e } + + chErrors := make(chan error, len(ls)) + + // Since ListenFD will return one or more sockets we have + // to create a go func to spawn off multiple serves + for i := range ls { + listener := ls[i] + go func() { + httpSrv := http.Server{Handler: handle} + chErrors <- httpSrv.Serve(listener) + }() + } + + for i := 0; i < len(ls); i += 1 { + err := <-chErrors + if err != nil { + return err + } + } + + return nil +} + +// ListenAndServe sets up the required http.Server and gets it listening for +// each addr passed in and does protocol specific checking. 
+func ListenAndServe(proto, addr string, eng *engine.Engine, logging, enableCors bool, dockerVersion string) error { + r, err := createRouter(eng, logging, enableCors, dockerVersion) + if err != nil { + return err + } + + if proto == "fd" { + return ServeFd(addr, r) + } + if proto == "unix" { + if err := syscall.Unlink(addr); err != nil && !os.IsNotExist(err) { + return err + } + } + + l, err := net.Listen(proto, addr) + if err != nil { + return err + } + + // Basic error and sanity checking + switch proto { + case "tcp": + if !strings.HasPrefix(addr, "127.0.0.1") { + log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") + } + case "unix": if err := os.Chmod(addr, 0660); err != nil { return err } @@ -1212,11 +1157,37 @@ func ListenAndServe(proto, addr string, srv *Server, logging bool) error { return err } } + default: + return fmt.Errorf("Invalid protocol format.") } - httpSrv := http.Server{Addr: addr, Handler: r} - log.Printf("Listening for HTTP on %s (%s)\n", addr, proto) - // Tell the init daemon we are accepting requests - go systemd.SdNotify("READY=1") + httpSrv := http.Server{Addr: addr, Handler: r} return httpSrv.Serve(l) } + +// ServeApi loops through all of the protocols sent in to docker and spawns +// off a go routine to setup a serving http.Server for each. 
+func ServeApi(job *engine.Job) engine.Status { + protoAddrs := job.Args + chErrors := make(chan error, len(protoAddrs)) + + for _, protoAddr := range protoAddrs { + protoAddrParts := strings.SplitN(protoAddr, "://", 2) + go func() { + log.Printf("Listening for HTTP on %s (%s)\n", protoAddrParts[0], protoAddrParts[1]) + chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version")) + }() + } + + for i := 0; i < len(protoAddrs); i += 1 { + err := <-chErrors + if err != nil { + return job.Error(err) + } + } + + // Tell the init daemon we are accepting requests + go systemd.SdNotify("READY=1") + + return engine.StatusOK +} diff --git a/http_test.go b/api/api_unit_test.go similarity index 82% rename from http_test.go rename to api/api_unit_test.go index b9ecd6a203..2b3e76e75c 100644 --- a/http_test.go +++ b/api/api_unit_test.go @@ -1,4 +1,4 @@ -package docker +package api import ( "fmt" @@ -7,6 +7,20 @@ import ( "testing" ) +func TestJsonContentType(t *testing.T) { + if !MatchesContentType("application/json", "application/json") { + t.Fail() + } + + if !MatchesContentType("application/json; charset=utf-8", "application/json") { + t.Fail() + } + + if MatchesContentType("dockerapplication/json", "application/json") { + t.Fail() + } +} + func TestGetBoolParam(t *testing.T) { if ret, err := getBoolParam("true"); err != nil || !ret { t.Fatalf("true -> true, nil | got %t %s", ret, err) diff --git a/api_params.go b/api_params.go deleted file mode 100644 index fa9eab0c15..0000000000 --- a/api_params.go +++ /dev/null @@ -1,131 +0,0 @@ -package docker - -import "strings" - -type ( - APIHistory struct { - ID string `json:"Id"` - Tags []string `json:",omitempty"` - Created int64 - CreatedBy string `json:",omitempty"` - Size int64 - } - - APIImages struct { - ID string `json:"Id"` - RepoTags []string `json:",omitempty"` - Created int64 - Size int64 - VirtualSize int64 - ParentId string 
`json:",omitempty"` - } - - APIImagesOld struct { - Repository string `json:",omitempty"` - Tag string `json:",omitempty"` - ID string `json:"Id"` - Created int64 - Size int64 - VirtualSize int64 - } - - APITop struct { - Titles []string - Processes [][]string - } - - APIRmi struct { - Deleted string `json:",omitempty"` - Untagged string `json:",omitempty"` - } - - APIContainers struct { - ID string `json:"Id"` - Image string - Command string - Created int64 - Status string - Ports []APIPort - SizeRw int64 - SizeRootFs int64 - Names []string - } - - APIContainersOld struct { - ID string `json:"Id"` - Image string - Command string - Created int64 - Status string - Ports string - SizeRw int64 - SizeRootFs int64 - } - - APIID struct { - ID string `json:"Id"` - } - - APIRun struct { - ID string `json:"Id"` - Warnings []string `json:",omitempty"` - } - - APIPort struct { - PrivatePort int64 - PublicPort int64 - Type string - IP string - } - - APIWait struct { - StatusCode int - } - - APIAuth struct { - Status string - } - - APIImageConfig struct { - ID string `json:"Id"` - *Config - } - - APICopy struct { - Resource string - HostPath string - } - APIContainer struct { - *Container - HostConfig *HostConfig - } -) - -func (api APIImages) ToLegacy() []APIImagesOld { - outs := []APIImagesOld{} - for _, repotag := range api.RepoTags { - components := strings.SplitN(repotag, ":", 2) - outs = append(outs, APIImagesOld{ - ID: api.ID, - Repository: components[0], - Tag: components[1], - Created: api.Created, - Size: api.Size, - VirtualSize: api.VirtualSize, - }) - } - return outs -} - -func (api APIContainers) ToLegacy() *APIContainersOld { - return &APIContainersOld{ - ID: api.ID, - Image: api.Image, - Command: api.Command, - Created: api.Created, - Status: api.Status, - Ports: displayablePorts(api.Ports), - SizeRw: api.SizeRw, - SizeRootFs: api.SizeRootFs, - } -} diff --git a/api_unit_test.go b/api_unit_test.go deleted file mode 100644 index 82095bd8b5..0000000000 --- 
a/api_unit_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package docker - -import ( - "testing" -) - -func TestJsonContentType(t *testing.T) { - if !matchesContentType("application/json", "application/json") { - t.Fail() - } - - if !matchesContentType("application/json; charset=utf-8", "application/json") { - t.Fail() - } - - if matchesContentType("dockerapplication/json", "application/json") { - t.Fail() - } -} diff --git a/archive/archive.go b/archive/archive.go index 4dd5f006ef..b1400c2210 100644 --- a/archive/archive.go +++ b/archive/archive.go @@ -13,6 +13,8 @@ import ( "os/exec" "path" "path/filepath" + "strings" + "syscall" ) type Archive io.Reader @@ -21,10 +23,7 @@ type Compression int type TarOptions struct { Includes []string - Excludes []string - Recursive bool Compression Compression - CreateFiles []string } const ( @@ -64,7 +63,7 @@ func DetectCompression(source []byte) Compression { func xzDecompress(archive io.Reader) (io.Reader, error) { args := []string{"xz", "-d", "-c", "-q"} - return CmdStream(exec.Command(args[0], args[1:]...), archive, nil) + return CmdStream(exec.Command(args[0], args[1:]...), archive) } func DecompressStream(archive io.Reader) (io.Reader, error) { @@ -98,16 +97,20 @@ func DecompressStream(archive io.Reader) (io.Reader, error) { } } -func (compression *Compression) Flag() string { - switch *compression { - case Bzip2: - return "j" +func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) { + + switch compression { + case Uncompressed: + return dest, nil case Gzip: - return "z" - case Xz: - return "J" + return gzip.NewWriter(dest), nil + case Bzip2, Xz: + // archive/bzip2 does not support writing, and there is no xz support at all + // However, this is not a problem as docker only currently generates gzipped tars + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) 
} - return "" } func (compression *Compression) Extension() string { @@ -124,10 +127,145 @@ func (compression *Compression) Extension() string { return "" } +func addTarFile(path, name string, tw *tar.Writer) error { + fi, err := os.Lstat(path) + if err != nil { + return err + } + + link := "" + if fi.Mode()&os.ModeSymlink != 0 { + if link, err = os.Readlink(path); err != nil { + return err + } + } + + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return err + } + + if fi.IsDir() && !strings.HasSuffix(name, "/") { + name = name + "/" + } + + hdr.Name = name + + stat, ok := fi.Sys().(*syscall.Stat_t) + if ok { + // Currently go does not fill in the major/minors + if stat.Mode&syscall.S_IFBLK == syscall.S_IFBLK || + stat.Mode&syscall.S_IFCHR == syscall.S_IFCHR { + hdr.Devmajor = int64(major(uint64(stat.Rdev))) + hdr.Devminor = int64(minor(uint64(stat.Rdev))) + } + } + + if err := tw.WriteHeader(hdr); err != nil { + return err + } + + if hdr.Typeflag == tar.TypeReg { + if file, err := os.Open(path); err != nil { + return err + } else { + _, err := io.Copy(tw, file) + if err != nil { + return err + } + file.Close() + } + } + + return nil +} + +func createTarFile(path, extractDir string, hdr *tar.Header, reader *tar.Reader) error { + switch hdr.Typeflag { + case tar.TypeDir: + // Create directory unless it exists as a directory already. 
+ // In that case we just want to merge the two + if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { + if err := os.Mkdir(path, os.FileMode(hdr.Mode)); err != nil { + return err + } + } + + case tar.TypeReg, tar.TypeRegA: + // Source is regular file + file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, os.FileMode(hdr.Mode)) + if err != nil { + return err + } + if _, err := io.Copy(file, reader); err != nil { + file.Close() + return err + } + file.Close() + + case tar.TypeBlock, tar.TypeChar, tar.TypeFifo: + mode := uint32(hdr.Mode & 07777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= syscall.S_IFBLK + case tar.TypeChar: + mode |= syscall.S_IFCHR + case tar.TypeFifo: + mode |= syscall.S_IFIFO + } + + if err := syscall.Mknod(path, mode, int(mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { + return err + } + + case tar.TypeLink: + if err := os.Link(filepath.Join(extractDir, hdr.Linkname), path); err != nil { + return err + } + + case tar.TypeSymlink: + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + + case tar.TypeXGlobalHeader: + utils.Debugf("PAX Global Extended Headers found and ignored") + return nil + + default: + return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) + } + + if err := syscall.Lchown(path, hdr.Uid, hdr.Gid); err != nil { + return err + } + + // There is no LChmod, so ignore mode for symlink. 
Also, this + // must happen after chown, as that can modify the file mode + if hdr.Typeflag != tar.TypeSymlink { + if err := syscall.Chmod(path, uint32(hdr.Mode&07777)); err != nil { + return err + } + } + + ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} + // syscall.UtimesNano doesn't support a NOFOLLOW flag atm, and + if hdr.Typeflag != tar.TypeSymlink { + if err := syscall.UtimesNano(path, ts); err != nil { + return err + } + } else { + if err := LUtimesNano(path, ts); err != nil { + return err + } + } + return nil +} + // Tar creates an archive from the directory at `path`, and returns it as a // stream of bytes. func Tar(path string, compression Compression) (io.Reader, error) { - return TarFilter(path, &TarOptions{Recursive: true, Compression: compression}) + return TarFilter(path, &TarOptions{Compression: compression}) } func escapeName(name string) string { @@ -148,57 +286,55 @@ func escapeName(name string) string { // Tar creates an archive from the directory at `path`, only including files whose relative // paths are included in `filter`. If `filter` is nil, then all files are included. 
-func TarFilter(path string, options *TarOptions) (io.Reader, error) { - args := []string{"tar", "--numeric-owner", "-f", "-", "-C", path, "-T", "-"} - if options.Includes == nil { - options.Includes = []string{"."} - } - args = append(args, "-c"+options.Compression.Flag()) +func TarFilter(srcPath string, options *TarOptions) (io.Reader, error) { + pipeReader, pipeWriter := io.Pipe() - for _, exclude := range options.Excludes { - args = append(args, fmt.Sprintf("--exclude=%s", exclude)) + compressWriter, err := CompressStream(pipeWriter, options.Compression) + if err != nil { + return nil, err } - if !options.Recursive { - args = append(args, "--no-recursion") - } + tw := tar.NewWriter(compressWriter) - files := "" - for _, f := range options.Includes { - files = files + escapeName(f) + "\n" - } + go func() { + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this - tmpDir := "" - - if options.CreateFiles != nil { - var err error // Can't use := here or we override the outer tmpDir - tmpDir, err = ioutil.TempDir("", "docker-tar") - if err != nil { - return nil, err + if options.Includes == nil { + options.Includes = []string{"."} } - files = files + "-C" + tmpDir + "\n" - for _, f := range options.CreateFiles { - path := filepath.Join(tmpDir, f) - err := os.MkdirAll(filepath.Dir(path), 0600) - if err != nil { - return nil, err - } + for _, include := range options.Includes { + filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error { + if err != nil { + utils.Debugf("Tar: Can't stat file %s to tar: %s\n", srcPath, err) + return nil + } - if file, err := os.OpenFile(path, os.O_CREATE, 0600); err != nil { - return nil, err - } else { - file.Close() - } - files = files + escapeName(f) + "\n" - } - } + relFilePath, err := filepath.Rel(srcPath, filePath) + if err != nil { + return nil + } 
- return CmdStream(exec.Command(args[0], args[1:]...), bytes.NewBufferString(files), func() { - if tmpDir != "" { - _ = os.RemoveAll(tmpDir) + if err := addTarFile(filePath, relFilePath, tw); err != nil { + utils.Debugf("Can't add file %s to tar: %s\n", srcPath, err) + } + return nil + }) } - }) + + // Make sure to check the error on Close. + if err := tw.Close(); err != nil { + utils.Debugf("Can't close tar writer: %s\n", err) + } + if err := compressWriter.Close(); err != nil { + utils.Debugf("Can't close compress writer: %s\n", err) + } + }() + + return pipeReader, nil } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, @@ -206,54 +342,88 @@ func TarFilter(path string, options *TarOptions) (io.Reader, error) { // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. // FIXME: specify behavior when target path exists vs. doesn't exist. -func Untar(archive io.Reader, path string, options *TarOptions) error { +func Untar(archive io.Reader, dest string, options *TarOptions) error { if archive == nil { return fmt.Errorf("Empty archive") } - buf := make([]byte, 10) - totalN := 0 - for totalN < 10 { - n, err := archive.Read(buf[totalN:]) + archive, err := DecompressStream(archive) + if err != nil { + return err + } + + tr := tar.NewReader(archive) + + var dirs []*tar.Header + + // Iterate through the files in the archive. 
+ for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } if err != nil { - if err == io.EOF { - return fmt.Errorf("Tarball too short") - } return err } - totalN += n - utils.Debugf("[tar autodetect] n: %d", n) - } - compression := DetectCompression(buf) + // Normalize name, for safety and for a simple is-root check + hdr.Name = filepath.Clean(hdr.Name) - utils.Debugf("Archive compression detected: %s", compression.Extension()) - args := []string{"--numeric-owner", "-f", "-", "-C", path, "-x" + compression.Flag()} + if !strings.HasSuffix(hdr.Name, "/") { + // Not the root directory, ensure that the parent directory exists + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = os.MkdirAll(parentPath, 600) + if err != nil { + return err + } + } + } - if options != nil { - for _, exclude := range options.Excludes { - args = append(args, fmt.Sprintf("--exclude=%s", exclude)) + path := filepath.Join(dest, hdr.Name) + + // If path exits we almost always just want to remove and replace it + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return err + } + } + } + + if err := createTarFile(path, dest, hdr, tr); err != nil { + return err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) } } - cmd := exec.Command("tar", args...) - cmd.Stdin = io.MultiReader(bytes.NewReader(buf), archive) - // Hardcode locale environment for predictable outcome regardless of host configuration. 
- // (see https://github.com/dotcloud/docker/issues/355) - cmd.Env = []string{"LANG=en_US.utf-8", "LC_ALL=en_US.utf-8"} - output, err := cmd.CombinedOutput() - if err != nil { - return fmt.Errorf("%s: %s", err, output) + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} + if err := syscall.UtimesNano(path, ts); err != nil { + return err + } } + return nil } // TarUntar is a convenience function which calls Tar and Untar, with // the output of one piped into the other. If either Tar or Untar fails, // TarUntar aborts and returns the error. -func TarUntar(src string, filter []string, dst string) error { - utils.Debugf("TarUntar(%s %s %s)", src, filter, dst) - archive, err := TarFilter(src, &TarOptions{Compression: Uncompressed, Includes: filter, Recursive: true}) +func TarUntar(src string, dst string) error { + utils.Debugf("TarUntar(%s %s)", src, dst) + archive, err := TarFilter(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err } @@ -290,7 +460,7 @@ func CopyWithTar(src, dst string) error { return err } utils.Debugf("Calling TarUntar(%s, %s)", src, dst) - return TarUntar(src, nil, dst) + return TarUntar(src, dst) } // CopyFileWithTar emulates the behavior of the 'cp' command-line @@ -353,13 +523,10 @@ func CopyFileWithTar(src, dst string) (err error) { // CmdStream executes a command, and returns its stdout as a stream. // If the command fails to run or doesn't complete successfully, an error // will be returned, including anything written on stderr. 
-func CmdStream(cmd *exec.Cmd, input io.Reader, atEnd func()) (io.Reader, error) { +func CmdStream(cmd *exec.Cmd, input io.Reader) (io.Reader, error) { if input != nil { stdin, err := cmd.StdinPipe() if err != nil { - if atEnd != nil { - atEnd() - } return nil, err } // Write stdin if any @@ -370,16 +537,10 @@ func CmdStream(cmd *exec.Cmd, input io.Reader, atEnd func()) (io.Reader, error) } stdout, err := cmd.StdoutPipe() if err != nil { - if atEnd != nil { - atEnd() - } return nil, err } stderr, err := cmd.StderrPipe() if err != nil { - if atEnd != nil { - atEnd() - } return nil, err } pipeR, pipeW := io.Pipe() @@ -404,9 +565,6 @@ func CmdStream(cmd *exec.Cmd, input io.Reader, atEnd func()) (io.Reader, error) } else { pipeW.Close() } - if atEnd != nil { - atEnd() - } }() // Run the command and return the pipe if err := cmd.Start(); err != nil { diff --git a/archive/archive_test.go b/archive/archive_test.go index 684d99dc14..891f977dcf 100644 --- a/archive/archive_test.go +++ b/archive/archive_test.go @@ -1,6 +1,7 @@ package archive import ( + "archive/tar" "bytes" "fmt" "io" @@ -14,7 +15,7 @@ import ( func TestCmdStreamLargeStderr(t *testing.T) { cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") - out, err := CmdStream(cmd, nil, nil) + out, err := CmdStream(cmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } @@ -35,7 +36,7 @@ func TestCmdStreamLargeStderr(t *testing.T) { func TestCmdStreamBad(t *testing.T) { badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") - out, err := CmdStream(badCmd, nil, nil) + out, err := CmdStream(badCmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } @@ -50,7 +51,7 @@ func TestCmdStreamBad(t *testing.T) { func TestCmdStreamGood(t *testing.T) { cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0") - out, err := CmdStream(cmd, nil, nil) + out, err := CmdStream(cmd, nil) if err != 
nil { t.Fatal(err) } @@ -89,6 +90,16 @@ func tarUntar(t *testing.T, origin string, compression Compression) error { if _, err := os.Stat(tmp); err != nil { return err } + + changes, err := ChangesDirs(origin, tmp) + if err != nil { + return err + } + + if len(changes) != 0 { + t.Fatalf("Unexpected differences after tarUntar: %v", changes) + } + return nil } @@ -108,11 +119,20 @@ func TestTarUntar(t *testing.T) { for _, c := range []Compression{ Uncompressed, Gzip, - Bzip2, - Xz, } { if err := tarUntar(t, origin, c); err != nil { t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) } } } + +// Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz +// use PAX Global Extended Headers. +// Failing prevents the archives from being uncompressed during ADD +func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { + hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} + err := createTarFile("pax_global_header", "some_dir", &hdr, nil) + if err != nil { + t.Fatal(err) + } +} diff --git a/archive/changes.go b/archive/changes.go index 8fe9ff2233..25406f5cec 100644 --- a/archive/changes.go +++ b/archive/changes.go @@ -1,7 +1,10 @@ package archive import ( + "archive/tar" "fmt" + "github.com/dotcloud/docker/utils" + "io" "os" "path/filepath" "strings" @@ -310,24 +313,51 @@ func ChangesSize(newDir string, changes []Change) int64 { return size } -func ExportChanges(dir string, changes []Change) (Archive, error) { - files := make([]string, 0) - deletions := make([]string, 0) - for _, change := range changes { - if change.Kind == ChangeModify || change.Kind == ChangeAdd { - files = append(files, change.Path) - } - if change.Kind == ChangeDelete { - base := filepath.Base(change.Path) - dir := filepath.Dir(change.Path) - deletions = append(deletions, filepath.Join(dir, ".wh."+base)) - } - } - // FIXME: Why do we create whiteout files inside Tar code ? 
- return TarFilter(dir, &TarOptions{ - Compression: Uncompressed, - Includes: files, - Recursive: false, - CreateFiles: deletions, - }) +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +func ExportChanges(dir string, changes []Change) (Archive, error) { + reader, writer := io.Pipe() + tw := tar.NewWriter(writer) + + go func() { + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + for _, change := range changes { + if change.Kind == ChangeDelete { + whiteOutDir := filepath.Dir(change.Path) + whiteOutBase := filepath.Base(change.Path) + whiteOut := filepath.Join(whiteOutDir, ".wh."+whiteOutBase) + hdr := &tar.Header{ + Name: whiteOut[1:], + Size: 0, + ModTime: time.Now(), + AccessTime: time.Now(), + ChangeTime: time.Now(), + } + if err := tw.WriteHeader(hdr); err != nil { + utils.Debugf("Can't write whiteout header: %s\n", err) + } + } else { + path := filepath.Join(dir, change.Path) + if err := addTarFile(path, change.Path[1:], tw); err != nil { + utils.Debugf("Can't add file %s to tar: %s\n", path, err) + } + } + } + + // Make sure to check the error on Close. + if err := tw.Close(); err != nil { + utils.Debugf("Can't close layer: %s\n", err) + } + writer.Close() + }() + return reader, nil } diff --git a/archive/diff.go b/archive/diff.go index 464d57a742..cdf06dd055 100644 --- a/archive/diff.go +++ b/archive/diff.go @@ -2,7 +2,6 @@ package archive import ( "archive/tar" - "github.com/dotcloud/docker/utils" "io" "os" "path/filepath" @@ -89,95 +88,22 @@ func ApplyLayer(dest string, layer Archive) error { // The only exception is when it is a directory *and* the file from // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). 
- hasDir := false if fi, err := os.Lstat(path); err == nil { - if fi.IsDir() && hdr.Typeflag == tar.TypeDir { - hasDir = true - } else { + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return err } } } - switch hdr.Typeflag { - case tar.TypeDir: - if !hasDir { - err = os.Mkdir(path, os.FileMode(hdr.Mode)) - if err != nil { - return err - } - } - dirs = append(dirs, hdr) - - case tar.TypeReg, tar.TypeRegA: - // Source is regular file - file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, os.FileMode(hdr.Mode)) - if err != nil { - return err - } - if _, err := io.Copy(file, tr); err != nil { - file.Close() - return err - } - file.Close() - - case tar.TypeBlock, tar.TypeChar, tar.TypeFifo: - mode := uint32(hdr.Mode & 07777) - switch hdr.Typeflag { - case tar.TypeBlock: - mode |= syscall.S_IFBLK - case tar.TypeChar: - mode |= syscall.S_IFCHR - case tar.TypeFifo: - mode |= syscall.S_IFIFO - } - - if err := syscall.Mknod(path, mode, int(mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { - return err - } - - case tar.TypeLink: - if err := os.Link(filepath.Join(dest, hdr.Linkname), path); err != nil { - return err - } - - case tar.TypeSymlink: - if err := os.Symlink(hdr.Linkname, path); err != nil { - return err - } - - default: - utils.Debugf("unhandled type %d\n", hdr.Typeflag) - } - - if err = syscall.Lchown(path, hdr.Uid, hdr.Gid); err != nil { + if err := createTarFile(path, dest, hdr, tr); err != nil { return err } - // There is no LChmod, so ignore mode for symlink. 
Also, this - // must happen after chown, as that can modify the file mode - if hdr.Typeflag != tar.TypeSymlink { - err = syscall.Chmod(path, uint32(hdr.Mode&07777)) - if err != nil { - return err - } - } - - // Directories must be handled at the end to avoid further - // file creation in them to modify the mtime - if hdr.Typeflag != tar.TypeDir { - ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} - // syscall.UtimesNano doesn't support a NOFOLLOW flag atm, and - if hdr.Typeflag != tar.TypeSymlink { - if err := syscall.UtimesNano(path, ts); err != nil { - return err - } - } else { - if err := LUtimesNano(path, ts); err != nil { - return err - } - } + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) } } } diff --git a/archive/stat_darwin.go b/archive/stat_darwin.go index e041783ec6..32203299dd 100644 --- a/archive/stat_darwin.go +++ b/archive/stat_darwin.go @@ -1,3 +1,5 @@ +// +build !linux !amd64 + package archive import "syscall" diff --git a/buildfile.go b/buildfile.go index 2b6d40c15d..2a4b163bec 100644 --- a/buildfile.go +++ b/buildfile.go @@ -8,6 +8,7 @@ import ( "fmt" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/auth" + "github.com/dotcloud/docker/registry" "github.com/dotcloud/docker/utils" "io" "io/ioutil" @@ -47,6 +48,7 @@ type buildFile struct { rm bool authConfig *auth.AuthConfig + configFile *auth.ConfigFile tmpContainers map[string]struct{} tmpImages map[string]struct{} @@ -72,7 +74,22 @@ func (b *buildFile) CmdFrom(name string) error { if err != nil { if b.runtime.graph.IsNotExist(err) { remote, tag := utils.ParseRepositoryTag(name) - if err := b.srv.ImagePull(remote, tag, b.outOld, b.sf, b.authConfig, nil, true); err != nil { + pullRegistryAuth := b.authConfig + if len(b.configFile.Configs) > 0 { + // The request came with a full auth config file, we prefer to use 
that + endpoint, _, err := registry.ResolveRepositoryName(remote) + if err != nil { + return err + } + resolvedAuth := b.configFile.ResolveAuthConfig(endpoint) + pullRegistryAuth = &resolvedAuth + } + job := b.srv.Eng.Job("pull", remote, tag) + job.SetenvBool("json", b.sf.Json()) + job.SetenvBool("parallel", true) + job.SetenvJson("authConfig", pullRegistryAuth) + job.Stdout.Add(b.outOld) + if err := job.Run(); err != nil { return err } image, err = b.runtime.repositories.LookupImage(name) @@ -91,9 +108,26 @@ func (b *buildFile) CmdFrom(name string) error { if b.config.Env == nil || len(b.config.Env) == 0 { b.config.Env = append(b.config.Env, "HOME=/", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin") } + // Process ONBUILD triggers if they exist + if nTriggers := len(b.config.OnBuild); nTriggers != 0 { + fmt.Fprintf(b.errStream, "# Executing %d build triggers\n", nTriggers) + } + for n, step := range b.config.OnBuild { + if err := b.BuildStep(fmt.Sprintf("onbuild-%d", n), step); err != nil { + return err + } + } + b.config.OnBuild = []string{} return nil } +// The ONBUILD command declares a build instruction to be executed in any future build +// using the current image as a base. 
+func (b *buildFile) CmdOnbuild(trigger string) error { + b.config.OnBuild = append(b.config.OnBuild, trigger) + return b.commit("", b.config.Cmd, fmt.Sprintf("ONBUILD %s", trigger)) +} + func (b *buildFile) CmdMaintainer(name string) error { b.maintainer = name return b.commit("", b.config.Cmd, fmt.Sprintf("MAINTAINER %s", name)) @@ -124,7 +158,7 @@ func (b *buildFile) CmdRun(args string) error { if b.image == "" { return fmt.Errorf("Please provide a source image with `from` prior to run") } - config, _, _, err := ParseRun([]string{b.image, "/bin/sh", "-c", args}, nil) + config, _, _, err := ParseRun(append([]string{b.image}, b.buildCmdFromJson(args)...), nil) if err != nil { return err } @@ -311,7 +345,7 @@ func (b *buildFile) checkPathForAddition(orig string) error { func (b *buildFile) addContext(container *Container, orig, dest string) error { var ( origPath = path.Join(b.contextPath, orig) - destPath = path.Join(container.RootfsPath(), dest) + destPath = path.Join(container.BasefsPath(), dest) ) // Preserve the trailing '/' if strings.HasSuffix(dest, "/") { @@ -476,7 +510,7 @@ func (b *buildFile) CmdAdd(args string) error { } b.tmpContainers[container.ID] = struct{}{} - if err := container.EnsureMounted(); err != nil { + if err := container.Mount(); err != nil { return err } defer container.Unmount() @@ -598,7 +632,7 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error { b.tmpContainers[container.ID] = struct{}{} fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(container.ID)) id = container.ID - if err := container.EnsureMounted(); err != nil { + if err := container.Mount(); err != nil { return err } defer container.Unmount() @@ -630,7 +664,13 @@ func (b *buildFile) Build(context io.Reader) (string, error) { if err != nil { return "", err } - b.context = &utils.TarSum{Reader: context, DisableCompression: true} + + decompressedStream, err := archive.DecompressStream(context) + if err != nil { + return "", err + } + 
+ b.context = &utils.TarSum{Reader: decompressedStream, DisableCompression: true} if err := archive.Untar(b.context, tmpdirPath, nil); err != nil { return "", err } @@ -657,28 +697,11 @@ func (b *buildFile) Build(context io.Reader) (string, error) { if len(line) == 0 || line[0] == '#' { continue } - tmp := strings.SplitN(line, " ", 2) - if len(tmp) != 2 { - return "", fmt.Errorf("Invalid Dockerfile format") + if err := b.BuildStep(fmt.Sprintf("%d", stepN), line); err != nil { + return "", err } - instruction := strings.ToLower(strings.Trim(tmp[0], " ")) - arguments := strings.Trim(tmp[1], " ") - - method, exists := reflect.TypeOf(b).MethodByName("Cmd" + strings.ToUpper(instruction[:1]) + strings.ToLower(instruction[1:])) - if !exists { - fmt.Fprintf(b.errStream, "# Skipping unknown instruction %s\n", strings.ToUpper(instruction)) - continue - } - stepN += 1 - fmt.Fprintf(b.outStream, "Step %d : %s %s\n", stepN, strings.ToUpper(instruction), arguments) - ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(arguments)})[0].Interface() - if ret != nil { - return "", ret.(error) - } - - fmt.Fprintf(b.outStream, " ---> %s\n", utils.TruncateID(b.image)) } if b.image != "" { fmt.Fprintf(b.outStream, "Successfully built %s\n", utils.TruncateID(b.image)) @@ -690,7 +713,32 @@ func (b *buildFile) Build(context io.Reader) (string, error) { return "", fmt.Errorf("No image was generated. This may be because the Dockerfile does not, like, do anything.\n") } -func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *auth.AuthConfig) BuildFile { +// BuildStep parses a single build step from `instruction` and executes it in the current context. 
+func (b *buildFile) BuildStep(name, expression string) error { + fmt.Fprintf(b.outStream, "Step %s : %s\n", name, expression) + tmp := strings.SplitN(expression, " ", 2) + if len(tmp) != 2 { + return fmt.Errorf("Invalid Dockerfile format") + } + instruction := strings.ToLower(strings.Trim(tmp[0], " ")) + arguments := strings.Trim(tmp[1], " ") + + method, exists := reflect.TypeOf(b).MethodByName("Cmd" + strings.ToUpper(instruction[:1]) + strings.ToLower(instruction[1:])) + if !exists { + fmt.Fprintf(b.errStream, "# Skipping unknown instruction %s\n", strings.ToUpper(instruction)) + return nil + } + + ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(arguments)})[0].Interface() + if ret != nil { + return ret.(error) + } + + fmt.Fprintf(b.outStream, " ---> %s\n", utils.TruncateID(b.image)) + return nil +} + +func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *auth.AuthConfig, authConfigFile *auth.ConfigFile) BuildFile { return &buildFile{ runtime: srv.runtime, srv: srv, @@ -704,6 +752,7 @@ func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeC rm: rm, sf: sf, authConfig: auth, + configFile: authConfigFile, outOld: outOld, } } diff --git a/cgroups/cgroups.go b/cgroups/cgroups.go deleted file mode 100644 index 30de8d4d1e..0000000000 --- a/cgroups/cgroups.go +++ /dev/null @@ -1,101 +0,0 @@ -package cgroups - -import ( - "bufio" - "fmt" - "github.com/dotcloud/docker/mount" - "io" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" -) - -// https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt - -func FindCgroupMountpoint(subsystem string) (string, error) { - mounts, err := mount.GetMounts() - if err != nil { - return "", err - } - - for _, mount := range mounts { - if mount.Fstype == "cgroup" { - for _, opt := range strings.Split(mount.VfsOpts, ",") { - if opt == subsystem { - return mount.Mountpoint, nil - } 
- } - } - } - - return "", fmt.Errorf("cgroup mountpoint not found for %s", subsystem) -} - -// Returns the relative path to the cgroup docker is running in. -func getThisCgroupDir(subsystem string) (string, error) { - f, err := os.Open("/proc/self/cgroup") - if err != nil { - return "", err - } - defer f.Close() - - return parseCgroupFile(subsystem, f) -} - -func parseCgroupFile(subsystem string, r io.Reader) (string, error) { - s := bufio.NewScanner(r) - - for s.Scan() { - if err := s.Err(); err != nil { - return "", err - } - text := s.Text() - parts := strings.Split(text, ":") - if parts[1] == subsystem { - return parts[2], nil - } - } - return "", fmt.Errorf("cgroup '%s' not found in /proc/self/cgroup", subsystem) -} - -// Returns a list of pids for the given container. -func GetPidsForContainer(id string) ([]int, error) { - pids := []int{} - - // memory is chosen randomly, any cgroup used by docker works - subsystem := "memory" - - cgroupRoot, err := FindCgroupMountpoint(subsystem) - if err != nil { - return pids, err - } - - cgroupDir, err := getThisCgroupDir(subsystem) - if err != nil { - return pids, err - } - - filename := filepath.Join(cgroupRoot, cgroupDir, id, "tasks") - if _, err := os.Stat(filename); os.IsNotExist(err) { - // With more recent lxc versions use, cgroup will be in lxc/ - filename = filepath.Join(cgroupRoot, cgroupDir, "lxc", id, "tasks") - } - - output, err := ioutil.ReadFile(filename) - if err != nil { - return pids, err - } - for _, p := range strings.Split(string(output), "\n") { - if len(p) == 0 { - continue - } - pid, err := strconv.Atoi(p) - if err != nil { - return pids, fmt.Errorf("Invalid pid '%s': %s", p, err) - } - pids = append(pids, pid) - } - return pids, nil -} diff --git a/commands.go b/commands.go index 10d3697c19..15d4507030 100644 --- a/commands.go +++ b/commands.go @@ -7,11 +7,13 @@ import ( "encoding/base64" "encoding/json" "errors" - "flag" "fmt" + "github.com/dotcloud/docker/api" 
"github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/auth" "github.com/dotcloud/docker/engine" + flag "github.com/dotcloud/docker/pkg/mflag" + "github.com/dotcloud/docker/pkg/sysinfo" "github.com/dotcloud/docker/pkg/term" "github.com/dotcloud/docker/registry" "github.com/dotcloud/docker/utils" @@ -78,7 +80,7 @@ func (cli *DockerCli) CmdHelp(args ...string) error { return nil } } - help := fmt.Sprintf("Usage: docker [OPTIONS] COMMAND [arg...]\n -H=[unix://%s]: tcp://host:port to bind/connect to or unix://path/to/socket to use\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n", DEFAULTUNIXSOCKET) + help := fmt.Sprintf("Usage: docker [OPTIONS] COMMAND [arg...]\n -H=[unix://%s]: tcp://host:port to bind/connect to or unix://path/to/socket to use\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n", api.DEFAULTUNIXSOCKET) for _, command := range [][]string{ {"attach", "Attach to a running container"}, {"build", "Build a container from a Dockerfile"}, @@ -164,10 +166,10 @@ func MkBuildContext(dockerfile string, files [][2]string) (archive.Archive, erro func (cli *DockerCli) CmdBuild(args ...string) error { cmd := cli.Subcmd("build", "[OPTIONS] PATH | URL | -", "Build a new container image from the source code at PATH") - tag := cmd.String("t", "", "Repository name (and optionally a tag) to be applied to the resulting image in case of success") - suppressOutput := cmd.Bool("q", false, "Suppress verbose build output") - noCache := cmd.Bool("no-cache", false, "Do not use cache when building the image") - rm := cmd.Bool("rm", false, "Remove intermediate containers after a successful build") + tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) to be applied to the resulting image in case of success") + suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress verbose build output") + noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image") + 
rm := cmd.Bool([]string{"#rm", "-rm"}, false, "Remove intermediate containers after a successful build") if err := cmd.Parse(args); err != nil { return nil } @@ -226,12 +228,14 @@ func (cli *DockerCli) CmdBuild(args ...string) error { v.Set("rm", "1") } + cli.LoadConfigFile() + headers := http.Header(make(map[string][]string)) buf, err := json.Marshal(cli.configFile) if err != nil { return err } - headers.Add("X-Registry-Auth", base64.URLEncoding.EncodeToString(buf)) + headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) if context != nil { headers.Set("Content-Type", "application/tar") @@ -253,9 +257,9 @@ func (cli *DockerCli) CmdLogin(args ...string) error { var username, password, email string - cmd.StringVar(&username, "u", "", "username") - cmd.StringVar(&password, "p", "", "password") - cmd.StringVar(&email, "e", "", "email") + cmd.StringVar(&username, []string{"u", "-username"}, "", "username") + cmd.StringVar(&password, []string{"p", "-password"}, "", "password") + cmd.StringVar(&email, []string{"e", "-email"}, "", "email") err := cmd.Parse(args) if err != nil { return nil @@ -332,7 +336,7 @@ func (cli *DockerCli) CmdLogin(args ...string) error { authconfig.ServerAddress = serverAddress cli.configFile.Configs[serverAddress] = authconfig - body, statusCode, err := cli.call("POST", "/auth", cli.configFile.Configs[serverAddress]) + stream, statusCode, err := cli.call("POST", "/auth", cli.configFile.Configs[serverAddress], false) if statusCode == 401 { delete(cli.configFile.Configs, serverAddress) auth.SaveConfig(cli.configFile) @@ -341,16 +345,15 @@ func (cli *DockerCli) CmdLogin(args ...string) error { if err != nil { return err } - - var out2 APIAuth - err = json.Unmarshal(body, &out2) + var out2 engine.Env + err = out2.Decode(stream) if err != nil { cli.configFile, _ = auth.LoadConfig(os.Getenv("HOME")) return err } auth.SaveConfig(cli.configFile) - if out2.Status != "" { - fmt.Fprintf(cli.out, "%s\n", out2.Status) + if 
out2.Get("Status") != "" { + fmt.Fprintf(cli.out, "%s\n", out2.Get("Status")) } return nil } @@ -397,7 +400,7 @@ func (cli *DockerCli) CmdVersion(args ...string) error { fmt.Fprintf(cli.out, "Git commit (client): %s\n", GITCOMMIT) } - body, _, err := cli.call("GET", "/version", nil) + body, _, err := readBody(cli.call("GET", "/version", nil, false)) if err != nil { return err } @@ -438,7 +441,7 @@ func (cli *DockerCli) CmdInfo(args ...string) error { return nil } - body, _, err := cli.call("GET", "/info", nil) + body, _, err := readBody(cli.call("GET", "/info", nil, false)) if err != nil { return err } @@ -470,7 +473,7 @@ func (cli *DockerCli) CmdInfo(args ...string) error { fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "") fmt.Fprintf(cli.out, "Fds: %d\n", remoteInfo.GetInt("NFd")) fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines")) - fmt.Fprintf(cli.out, "LXC Version: %s\n", remoteInfo.Get("LXCVersion")) + fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver")) fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener")) fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion")) @@ -504,7 +507,7 @@ func (cli *DockerCli) CmdInfo(args ...string) error { func (cli *DockerCli) CmdStop(args ...string) error { cmd := cli.Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container (Send SIGTERM, and then SIGKILL after grace period)") - nSeconds := cmd.Int("t", 10, "Number of seconds to wait for the container to stop before killing it.") + nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to wait for the container to stop before killing it.") if err := cmd.Parse(args); err != nil { return nil } @@ -518,7 +521,7 @@ func (cli *DockerCli) CmdStop(args ...string) error { var encounteredError error for _, name := range cmd.Args() { - _, _, err := cli.call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil) + _, _, err := 
readBody(cli.call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil, false)) if err != nil { fmt.Fprintf(cli.err, "%s\n", err) encounteredError = fmt.Errorf("Error: failed to stop one or more containers") @@ -531,7 +534,7 @@ func (cli *DockerCli) CmdStop(args ...string) error { func (cli *DockerCli) CmdRestart(args ...string) error { cmd := cli.Subcmd("restart", "[OPTIONS] CONTAINER [CONTAINER...]", "Restart a running container") - nSeconds := cmd.Int("t", 10, "Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10") + nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10") if err := cmd.Parse(args); err != nil { return nil } @@ -545,7 +548,7 @@ func (cli *DockerCli) CmdRestart(args ...string) error { var encounteredError error for _, name := range cmd.Args() { - _, _, err := cli.call("POST", "/containers/"+name+"/restart?"+v.Encode(), nil) + _, _, err := readBody(cli.call("POST", "/containers/"+name+"/restart?"+v.Encode(), nil, false)) if err != nil { fmt.Fprintf(cli.err, "%s\n", err) encounteredError = fmt.Errorf("Error: failed to restart one or more containers") @@ -564,7 +567,7 @@ func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { if s == syscall.SIGCHLD { continue } - if _, _, err := cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%d", cid, s), nil); err != nil { + if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%d", cid, s), nil, false)); err != nil { utils.Debugf("Error sending signal: %s", err) } } @@ -574,8 +577,8 @@ func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { func (cli *DockerCli) CmdStart(args ...string) error { cmd := cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Restart a stopped container") - attach := cmd.Bool("a", false, "Attach container's stdout/stderr and forward all signals to the 
process") - openStdin := cmd.Bool("i", false, "Attach container's stdin") + attach := cmd.Bool([]string{"a", "-attach"}, false, "Attach container's stdout/stderr and forward all signals to the process") + openStdin := cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's stdin") if err := cmd.Parse(args); err != nil { return nil } @@ -591,7 +594,7 @@ func (cli *DockerCli) CmdStart(args ...string) error { return fmt.Errorf("Impossible to start and attach multiple containers at once.") } - body, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil) + body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false)) if err != nil { return err } @@ -627,7 +630,7 @@ func (cli *DockerCli) CmdStart(args ...string) error { var encounteredError error for _, name := range cmd.Args() { - _, _, err := cli.call("POST", "/containers/"+name+"/start", nil) + _, _, err := readBody(cli.call("POST", "/containers/"+name+"/start", nil, false)) if err != nil { if !*attach || !*openStdin { fmt.Fprintf(cli.err, "%s\n", err) @@ -660,7 +663,7 @@ func (cli *DockerCli) CmdStart(args ...string) error { func (cli *DockerCli) CmdInspect(args ...string) error { cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container/image") - tmplStr := cmd.String("format", "", "Format the output using the given go template.") + tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template.") if err := cmd.Parse(args); err != nil { return nil } @@ -684,9 +687,9 @@ func (cli *DockerCli) CmdInspect(args ...string) error { status := 0 for _, name := range cmd.Args() { - obj, _, err := cli.call("GET", "/containers/"+name+"/json", nil) + obj, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false)) if err != nil { - obj, _, err = cli.call("GET", "/images/"+name+"/json", nil) + obj, _, err = readBody(cli.call("GET", "/images/"+name+"/json", nil, 
false)) if err != nil { if strings.Contains(err.Error(), "No such") { fmt.Fprintf(cli.err, "Error: No such image or container: %s\n", name) @@ -752,18 +755,21 @@ func (cli *DockerCli) CmdTop(args ...string) error { val.Set("ps_args", strings.Join(cmd.Args()[1:], " ")) } - body, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/top?"+val.Encode(), nil) + stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/top?"+val.Encode(), nil, false) if err != nil { return err } - procs := APITop{} - err = json.Unmarshal(body, &procs) - if err != nil { + var procs engine.Env + if err := procs.Decode(stream); err != nil { return err } w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) - fmt.Fprintln(w, strings.Join(procs.Titles, "\t")) - for _, proc := range procs.Processes { + fmt.Fprintln(w, strings.Join(procs.GetList("Titles"), "\t")) + processes := [][]string{} + if err := procs.GetJson("Processes", &processes); err != nil { + return err + } + for _, proc := range processes { fmt.Fprintln(w, strings.Join(proc, "\t")) } w.Flush() @@ -787,7 +793,7 @@ func (cli *DockerCli) CmdPort(args ...string) error { port = parts[0] proto = parts[1] } - body, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil) + body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false)) if err != nil { return err } @@ -820,23 +826,22 @@ func (cli *DockerCli) CmdRmi(args ...string) error { var encounteredError error for _, name := range cmd.Args() { - body, _, err := cli.call("DELETE", "/images/"+name, nil) + body, _, err := readBody(cli.call("DELETE", "/images/"+name, nil, false)) if err != nil { fmt.Fprintf(cli.err, "%s\n", err) encounteredError = fmt.Errorf("Error: failed to remove one or more images") } else { - var outs []APIRmi - err = json.Unmarshal(body, &outs) - if err != nil { + outs := engine.NewTable("Created", 0) + if _, err := outs.ReadListFrom(body); err != nil { fmt.Fprintf(cli.err, "%s\n", err) encounteredError = fmt.Errorf("Error: 
failed to remove one or more images") continue } - for _, out := range outs { - if out.Deleted != "" { - fmt.Fprintf(cli.out, "Deleted: %s\n", out.Deleted) + for _, out := range outs.Data { + if out.Get("Deleted") != "" { + fmt.Fprintf(cli.out, "Deleted: %s\n", out.Get("Deleted")) } else { - fmt.Fprintf(cli.out, "Untagged: %s\n", out.Untagged) + fmt.Fprintf(cli.out, "Untagged: %s\n", out.Get("Untagged")) } } } @@ -846,8 +851,8 @@ func (cli *DockerCli) CmdRmi(args ...string) error { func (cli *DockerCli) CmdHistory(args ...string) error { cmd := cli.Subcmd("history", "[OPTIONS] IMAGE", "Show the history of an image") - quiet := cmd.Bool("q", false, "only show numeric IDs") - noTrunc := cmd.Bool("notrunc", false, "Don't truncate output") + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "only show numeric IDs") + noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") if err := cmd.Parse(args); err != nil { return nil @@ -857,14 +862,13 @@ func (cli *DockerCli) CmdHistory(args ...string) error { return nil } - body, _, err := cli.call("GET", "/images/"+cmd.Arg(0)+"/history", nil) + body, _, err := readBody(cli.call("GET", "/images/"+cmd.Arg(0)+"/history", nil, false)) if err != nil { return err } - var outs []APIHistory - err = json.Unmarshal(body, &outs) - if err != nil { + outs := engine.NewTable("Created", 0) + if _, err := outs.ReadListFrom(body); err != nil { return err } @@ -873,27 +877,28 @@ func (cli *DockerCli) CmdHistory(args ...string) error { fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE") } - for _, out := range outs { + for _, out := range outs.Data { + outID := out.Get("Id") if !*quiet { if *noTrunc { - fmt.Fprintf(w, "%s\t", out.ID) + fmt.Fprintf(w, "%s\t", outID) } else { - fmt.Fprintf(w, "%s\t", utils.TruncateID(out.ID)) + fmt.Fprintf(w, "%s\t", utils.TruncateID(outID)) } - fmt.Fprintf(w, "%s ago\t", utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.Created, 0)))) + fmt.Fprintf(w, "%s ago\t", 
utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0)))) if *noTrunc { - fmt.Fprintf(w, "%s\t", out.CreatedBy) + fmt.Fprintf(w, "%s\t", out.Get("CreatedBy")) } else { - fmt.Fprintf(w, "%s\t", utils.Trunc(out.CreatedBy, 45)) + fmt.Fprintf(w, "%s\t", utils.Trunc(out.Get("CreatedBy"), 45)) } - fmt.Fprintf(w, "%s\n", utils.HumanSize(out.Size)) + fmt.Fprintf(w, "%s\n", utils.HumanSize(out.GetInt64("Size"))) } else { if *noTrunc { - fmt.Fprintln(w, out.ID) + fmt.Fprintln(w, outID) } else { - fmt.Fprintln(w, utils.TruncateID(out.ID)) + fmt.Fprintln(w, utils.TruncateID(outID)) } } } @@ -903,8 +908,8 @@ func (cli *DockerCli) CmdHistory(args ...string) error { func (cli *DockerCli) CmdRm(args ...string) error { cmd := cli.Subcmd("rm", "[OPTIONS] CONTAINER [CONTAINER...]", "Remove one or more containers") - v := cmd.Bool("v", false, "Remove the volumes associated to the container") - link := cmd.Bool("link", false, "Remove the specified link and not the underlying container") + v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated to the container") + link := cmd.Bool([]string{"l", "#link", "-link"}, false, "Remove the specified link and not the underlying container") if err := cmd.Parse(args); err != nil { return nil @@ -923,7 +928,7 @@ func (cli *DockerCli) CmdRm(args ...string) error { var encounteredError error for _, name := range cmd.Args() { - _, _, err := cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil) + _, _, err := readBody(cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil, false)) if err != nil { fmt.Fprintf(cli.err, "%s\n", err) encounteredError = fmt.Errorf("Error: failed to remove one or more containers") @@ -936,7 +941,9 @@ func (cli *DockerCli) CmdRm(args ...string) error { // 'docker kill NAME' kills a running container func (cli *DockerCli) CmdKill(args ...string) error { - cmd := cli.Subcmd("kill", "CONTAINER [CONTAINER...]", "Kill a running container (send SIGKILL)") + cmd := 
cli.Subcmd("kill", "[OPTIONS] CONTAINER [CONTAINER...]", "Kill a running container (send SIGKILL, or specified signal)") + signal := cmd.String([]string{"s", "-signal"}, "KILL", "Signal to send to the container") + if err := cmd.Parse(args); err != nil { return nil } @@ -946,8 +953,8 @@ func (cli *DockerCli) CmdKill(args ...string) error { } var encounteredError error - for _, name := range args { - if _, _, err := cli.call("POST", "/containers/"+name+"/kill", nil); err != nil { + for _, name := range cmd.Args() { + if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", name, *signal), nil, false)); err != nil { fmt.Fprintf(cli.err, "%s\n", err) encounteredError = fmt.Errorf("Error: failed to kill one or more containers") } else { @@ -1040,7 +1047,7 @@ func (cli *DockerCli) CmdPush(args ...string) error { } if err := push(authConfig); err != nil { - if err.Error() == registry.ErrLoginRequired.Error() { + if strings.Contains(err.Error(), "Status 401") { fmt.Fprintln(cli.out, "\nPlease login prior to push:") if err := cli.CmdLogin(endpoint); err != nil { return err @@ -1055,7 +1062,7 @@ func (cli *DockerCli) CmdPush(args ...string) error { func (cli *DockerCli) CmdPull(args ...string) error { cmd := cli.Subcmd("pull", "NAME", "Pull an image or a repository from the registry") - tag := cmd.String("t", "", "Download tagged image in repository") + tag := cmd.String([]string{"t", "-tag"}, "", "Download tagged image in repository") if err := cmd.Parse(args); err != nil { return nil } @@ -1099,7 +1106,7 @@ func (cli *DockerCli) CmdPull(args ...string) error { } if err := pull(authConfig); err != nil { - if err.Error() == registry.ErrLoginRequired.Error() { + if strings.Contains(err.Error(), "Status 401") { fmt.Fprintln(cli.out, "\nPlease login prior to pull:") if err := cli.CmdLogin(endpoint); err != nil { return err @@ -1115,11 +1122,11 @@ func (cli *DockerCli) CmdPull(args ...string) error { func (cli *DockerCli) CmdImages(args ...string) 
error { cmd := cli.Subcmd("images", "[OPTIONS] [NAME]", "List images") - quiet := cmd.Bool("q", false, "only show numeric IDs") - all := cmd.Bool("a", false, "show all images (by default filter out the intermediate images used to build)") - noTrunc := cmd.Bool("notrunc", false, "Don't truncate output") - flViz := cmd.Bool("viz", false, "output graph in graphviz format") - flTree := cmd.Bool("tree", false, "output graph in tree format") + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "only show numeric IDs") + all := cmd.Bool([]string{"a", "-all"}, false, "show all images (by default filter out the intermediate images used to build)") + noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") + flViz := cmd.Bool([]string{"v", "#viz", "-viz"}, false, "output graph in graphviz format") + flTree := cmd.Bool([]string{"t", "#tree", "-tree"}, false, "output graph in tree format") if err := cmd.Parse(args); err != nil { return nil @@ -1132,41 +1139,42 @@ func (cli *DockerCli) CmdImages(args ...string) error { filter := cmd.Arg(0) if *flViz || *flTree { - body, _, err := cli.call("GET", "/images/json?all=1", nil) + body, _, err := readBody(cli.call("GET", "/images/json?all=1", nil, false)) if err != nil { return err } - var outs []APIImages - if err := json.Unmarshal(body, &outs); err != nil { + outs := engine.NewTable("Created", 0) + if _, err := outs.ReadListFrom(body); err != nil { return err } var ( - printNode func(cli *DockerCli, noTrunc bool, image APIImages, prefix string) - startImage APIImages + printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string) + startImage *engine.Env - roots []APIImages - byParent = make(map[string][]APIImages) + roots = engine.NewTable("Created", outs.Len()) + byParent = make(map[string]*engine.Table) ) - for _, image := range outs { - if image.ParentId == "" { - roots = append(roots, image) + for _, image := range outs.Data { + if image.Get("ParentId") == "" { + roots.Add(image) } 
else { - if children, exists := byParent[image.ParentId]; exists { - byParent[image.ParentId] = append(children, image) + if children, exists := byParent[image.Get("ParentId")]; exists { + children.Add(image) } else { - byParent[image.ParentId] = []APIImages{image} + byParent[image.Get("ParentId")] = engine.NewTable("Created", 1) + byParent[image.Get("ParentId")].Add(image) } } if filter != "" { - if filter == image.ID || filter == utils.TruncateID(image.ID) { + if filter == image.Get("Id") || filter == utils.TruncateID(image.Get("Id")) { startImage = image } - for _, repotag := range image.RepoTags { + for _, repotag := range image.GetList("RepoTags") { if repotag == filter { startImage = image } @@ -1181,10 +1189,12 @@ func (cli *DockerCli) CmdImages(args ...string) error { printNode = (*DockerCli).printTreeNode } - if startImage.ID != "" { - cli.WalkTree(*noTrunc, &[]APIImages{startImage}, byParent, "", printNode) + if startImage != nil { + root := engine.NewTable("Created", 1) + root.Add(startImage) + cli.WalkTree(*noTrunc, root, byParent, "", printNode) } else if filter == "" { - cli.WalkTree(*noTrunc, &roots, byParent, "", printNode) + cli.WalkTree(*noTrunc, roots, byParent, "", printNode) } if *flViz { fmt.Fprintf(cli.out, " base [style=invisible]\n}\n") @@ -1198,14 +1208,14 @@ func (cli *DockerCli) CmdImages(args ...string) error { v.Set("all", "1") } - body, _, err := cli.call("GET", "/images/json?"+v.Encode(), nil) + body, _, err := readBody(cli.call("GET", "/images/json?"+v.Encode(), nil, false)) + if err != nil { return err } - var outs []APIImages - err = json.Unmarshal(body, &outs) - if err != nil { + outs := engine.NewTable("Created", 0) + if _, err := outs.ReadListFrom(body); err != nil { return err } @@ -1214,19 +1224,19 @@ func (cli *DockerCli) CmdImages(args ...string) error { fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE") } - for _, out := range outs { - for _, repotag := range out.RepoTags { + for _, out := range outs.Data 
{ + for _, repotag := range out.GetList("RepoTags") { repo, tag := utils.ParseRepositoryTag(repotag) - + outID := out.Get("Id") if !*noTrunc { - out.ID = utils.TruncateID(out.ID) + outID = utils.TruncateID(outID) } if !*quiet { - fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, out.ID, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.Created, 0))), utils.HumanSize(out.VirtualSize)) + fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, outID, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), utils.HumanSize(out.GetInt64("VirtualSize"))) } else { - fmt.Fprintln(w, out.ID) + fmt.Fprintln(w, outID) } } } @@ -1238,78 +1248,78 @@ func (cli *DockerCli) CmdImages(args ...string) error { return nil } -func (cli *DockerCli) WalkTree(noTrunc bool, images *[]APIImages, byParent map[string][]APIImages, prefix string, printNode func(cli *DockerCli, noTrunc bool, image APIImages, prefix string)) { - length := len(*images) +func (cli *DockerCli) WalkTree(noTrunc bool, images *engine.Table, byParent map[string]*engine.Table, prefix string, printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string)) { + length := images.Len() if length > 1 { - for index, image := range *images { + for index, image := range images.Data { if index+1 == length { printNode(cli, noTrunc, image, prefix+"└─") - if subimages, exists := byParent[image.ID]; exists { - cli.WalkTree(noTrunc, &subimages, byParent, prefix+" ", printNode) + if subimages, exists := byParent[image.Get("Id")]; exists { + cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode) } } else { - printNode(cli, noTrunc, image, prefix+"├─") - if subimages, exists := byParent[image.ID]; exists { - cli.WalkTree(noTrunc, &subimages, byParent, prefix+"│ ", printNode) + printNode(cli, noTrunc, image, prefix+"\u251C─") + if subimages, exists := byParent[image.Get("Id")]; exists { + cli.WalkTree(noTrunc, subimages, byParent, prefix+"\u2502 ", printNode) } } } } else { - for 
_, image := range *images { + for _, image := range images.Data { printNode(cli, noTrunc, image, prefix+"└─") - if subimages, exists := byParent[image.ID]; exists { - cli.WalkTree(noTrunc, &subimages, byParent, prefix+" ", printNode) + if subimages, exists := byParent[image.Get("Id")]; exists { + cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode) } } } } -func (cli *DockerCli) printVizNode(noTrunc bool, image APIImages, prefix string) { +func (cli *DockerCli) printVizNode(noTrunc bool, image *engine.Env, prefix string) { var ( imageID string parentID string ) if noTrunc { - imageID = image.ID - parentID = image.ParentId + imageID = image.Get("Id") + parentID = image.Get("ParentId") } else { - imageID = utils.TruncateID(image.ID) - parentID = utils.TruncateID(image.ParentId) + imageID = utils.TruncateID(image.Get("Id")) + parentID = utils.TruncateID(image.Get("ParentId")) } - if image.ParentId == "" { + if parentID == "" { fmt.Fprintf(cli.out, " base -> \"%s\" [style=invis]\n", imageID) } else { fmt.Fprintf(cli.out, " \"%s\" -> \"%s\"\n", parentID, imageID) } - if image.RepoTags[0] != ":" { + if image.GetList("RepoTags")[0] != ":" { fmt.Fprintf(cli.out, " \"%s\" [label=\"%s\\n%s\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n", - imageID, imageID, strings.Join(image.RepoTags, "\\n")) + imageID, imageID, strings.Join(image.GetList("RepoTags"), "\\n")) } } -func (cli *DockerCli) printTreeNode(noTrunc bool, image APIImages, prefix string) { +func (cli *DockerCli) printTreeNode(noTrunc bool, image *engine.Env, prefix string) { var imageID string if noTrunc { - imageID = image.ID + imageID = image.Get("Id") } else { - imageID = utils.TruncateID(image.ID) + imageID = utils.TruncateID(image.Get("Id")) } - fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, utils.HumanSize(image.VirtualSize)) - if image.RepoTags[0] != ":" { - fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.RepoTags, ", ")) + fmt.Fprintf(cli.out, "%s%s 
Virtual Size: %s", prefix, imageID, utils.HumanSize(image.GetInt64("VirtualSize"))) + if image.GetList("RepoTags")[0] != ":" { + fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.GetList("RepoTags"), ", ")) } else { fmt.Fprint(cli.out, "\n") } } -func displayablePorts(ports []APIPort) string { +func displayablePorts(ports *engine.Table) string { result := []string{} - for _, port := range ports { - if port.IP == "" { - result = append(result, fmt.Sprintf("%d/%s", port.PublicPort, port.Type)) + for _, port := range ports.Data { + if port.Get("IP") == "" { + result = append(result, fmt.Sprintf("%d/%s", port.GetInt("PublicPort"), port.Get("Type"))) } else { - result = append(result, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type)) + result = append(result, fmt.Sprintf("%s:%d->%d/%s", port.Get("IP"), port.GetInt("PublicPort"), port.GetInt("PrivatePort"), port.Get("Type"))) } } sort.Strings(result) @@ -1318,14 +1328,14 @@ func displayablePorts(ports []APIPort) string { func (cli *DockerCli) CmdPs(args ...string) error { cmd := cli.Subcmd("ps", "[OPTIONS]", "List containers") - quiet := cmd.Bool("q", false, "Only display numeric IDs") - size := cmd.Bool("s", false, "Display sizes") - all := cmd.Bool("a", false, "Show all containers. 
Only running containers are shown by default.") - noTrunc := cmd.Bool("notrunc", false, "Don't truncate output") - nLatest := cmd.Bool("l", false, "Show only the latest created container, include non-running ones.") - since := cmd.String("sinceId", "", "Show only containers created since Id, include non-running ones.") - before := cmd.String("beforeId", "", "Show only container created before Id, include non-running ones.") - last := cmd.Int("n", -1, "Show n last created containers, include non-running ones.") + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs") + size := cmd.Bool([]string{"s", "-size"}, false, "Display sizes") + all := cmd.Bool([]string{"a", "-all"}, false, "Show all containers. Only running containers are shown by default.") + noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") + nLatest := cmd.Bool([]string{"l", "-latest"}, false, "Show only the latest created container, include non-running ones.") + since := cmd.String([]string{"#sinceId", "-since-id"}, "", "Show only containers created since Id, include non-running ones.") + before := cmd.String([]string{"#beforeId", "-before-id"}, "", "Show only container created before Id, include non-running ones.") + last := cmd.Int([]string{"n"}, -1, "Show n last created containers, include non-running ones.") if err := cmd.Parse(args); err != nil { return nil @@ -1350,14 +1360,13 @@ func (cli *DockerCli) CmdPs(args ...string) error { v.Set("size", "1") } - body, _, err := cli.call("GET", "/containers/json?"+v.Encode(), nil) + body, _, err := readBody(cli.call("GET", "/containers/json?"+v.Encode(), nil, false)) if err != nil { return err } - var outs []APIContainers - err = json.Unmarshal(body, &outs) - if err != nil { + outs := engine.NewTable("Created", 0) + if _, err := outs.ReadListFrom(body); err != nil { return err } w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) @@ -1370,32 +1379,42 @@ func (cli *DockerCli) CmdPs(args ...string) 
error { } } - for _, out := range outs { + for _, out := range outs.Data { + var ( + outID = out.Get("Id") + outNames = out.GetList("Names") + ) + if !*noTrunc { - out.ID = utils.TruncateID(out.ID) + outID = utils.TruncateID(outID) } // Remove the leading / from the names - for i := 0; i < len(out.Names); i++ { - out.Names[i] = out.Names[i][1:] + for i := 0; i < len(outNames); i++ { + outNames[i] = outNames[i][1:] } if !*quiet { + var ( + outCommand = out.Get("Command") + ports = engine.NewTable("", 0) + ) if !*noTrunc { - out.Command = utils.Trunc(out.Command, 20) + outCommand = utils.Trunc(outCommand, 20) } - fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", out.ID, out.Image, out.Command, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.Created, 0))), out.Status, displayablePorts(out.Ports), strings.Join(out.Names, ",")) + ports.ReadListFrom([]byte(out.Get("Ports"))) + fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", outID, out.Get("Image"), outCommand, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), out.Get("Status"), displayablePorts(ports), strings.Join(outNames, ",")) if *size { - if out.SizeRootFs > 0 { - fmt.Fprintf(w, "%s (virtual %s)\n", utils.HumanSize(out.SizeRw), utils.HumanSize(out.SizeRootFs)) + if out.GetInt("SizeRootFs") > 0 { + fmt.Fprintf(w, "%s (virtual %s)\n", utils.HumanSize(out.GetInt64("SizeRw")), utils.HumanSize(out.GetInt64("SizeRootFs"))) } else { - fmt.Fprintf(w, "%s\n", utils.HumanSize(out.SizeRw)) + fmt.Fprintf(w, "%s\n", utils.HumanSize(out.GetInt64("SizeRw"))) } } else { fmt.Fprint(w, "\n") } } else { - fmt.Fprintln(w, out.ID) + fmt.Fprintln(w, outID) } } @@ -1407,9 +1426,9 @@ func (cli *DockerCli) CmdPs(args ...string) error { func (cli *DockerCli) CmdCommit(args ...string) error { cmd := cli.Subcmd("commit", "[OPTIONS] CONTAINER [REPOSITORY[:TAG]]", "Create a new image from a container's changes") - flComment := cmd.String("m", "", "Commit message") - flAuthor := cmd.String("author", "", 
"Author (eg. \"John Hannibal Smith \"") - flConfig := cmd.String("run", "", "Config automatically applied when the image is run. "+`(ex: -run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')`) + flComment := cmd.String([]string{"m", "-message"}, "", "Commit message") + flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (eg. \"John Hannibal Smith \"") + flConfig := cmd.String([]string{"#run", "-run"}, "", "Config automatically applied when the image is run. "+`(ex: -run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')`) if err := cmd.Parse(args); err != nil { return nil } @@ -1435,31 +1454,31 @@ func (cli *DockerCli) CmdCommit(args ...string) error { v.Set("tag", tag) v.Set("comment", *flComment) v.Set("author", *flAuthor) - var config *Config + var ( + config *Config + env engine.Env + ) if *flConfig != "" { config = &Config{} if err := json.Unmarshal([]byte(*flConfig), config); err != nil { return err } } - body, _, err := cli.call("POST", "/commit?"+v.Encode(), config) + stream, _, err := cli.call("POST", "/commit?"+v.Encode(), config, false) if err != nil { return err } - - apiID := &APIID{} - err = json.Unmarshal(body, apiID) - if err != nil { + if err := env.Decode(stream); err != nil { return err } - fmt.Fprintf(cli.out, "%s\n", apiID.ID) + fmt.Fprintf(cli.out, "%s\n", env.Get("Id")) return nil } func (cli *DockerCli) CmdEvents(args ...string) error { cmd := cli.Subcmd("events", "[OPTIONS]", "Get real time events from the server") - since := cmd.String("since", "", "Show previously created events and then stream.") + since := cmd.String([]string{"#since", "-since"}, "", "Show previously created events and then stream.") if err := cmd.Parse(args); err != nil { return nil } @@ -1517,25 +1536,34 @@ func (cli *DockerCli) CmdDiff(args ...string) error { return nil } - body, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/changes", nil) + body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/changes", nil, false)) + if 
err != nil { return err } - changes := []Change{} - err = json.Unmarshal(body, &changes) - if err != nil { + outs := engine.NewTable("", 0) + if _, err := outs.ReadListFrom(body); err != nil { return err } - for _, change := range changes { - fmt.Fprintf(cli.out, "%s\n", change.String()) + for _, change := range outs.Data { + var kind string + switch change.GetInt("Kind") { + case archive.ChangeModify: + kind = "C" + case archive.ChangeAdd: + kind = "A" + case archive.ChangeDelete: + kind = "D" + } + fmt.Fprintf(cli.out, "%s %s\n", kind, change.Get("Path")) } return nil } func (cli *DockerCli) CmdLogs(args ...string) error { cmd := cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container") - follow := cmd.Bool("f", false, "Follow log output") + follow := cmd.Bool([]string{"f", "-follow"}, false, "Follow log output") if err := cmd.Parse(args); err != nil { return nil } @@ -1544,7 +1572,7 @@ func (cli *DockerCli) CmdLogs(args ...string) error { return nil } name := cmd.Arg(0) - body, _, err := cli.call("GET", "/containers/"+name+"/json", nil) + body, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false)) if err != nil { return err } @@ -1571,8 +1599,8 @@ func (cli *DockerCli) CmdLogs(args ...string) error { func (cli *DockerCli) CmdAttach(args ...string) error { cmd := cli.Subcmd("attach", "[OPTIONS] CONTAINER", "Attach to a running container") - noStdin := cmd.Bool("nostdin", false, "Do not attach stdin") - proxy := cmd.Bool("sig-proxy", true, "Proxify all received signal to the process (even in non-tty mode)") + noStdin := cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach stdin") + proxy := cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)") if err := cmd.Parse(args); err != nil { return nil } @@ -1581,7 +1609,7 @@ func (cli *DockerCli) CmdAttach(args ...string) error { return nil } name := cmd.Arg(0) - body, _, err := cli.call("GET", 
"/containers/"+name+"/json", nil) + body, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false)) if err != nil { return err } @@ -1635,9 +1663,9 @@ func (cli *DockerCli) CmdAttach(args ...string) error { func (cli *DockerCli) CmdSearch(args ...string) error { cmd := cli.Subcmd("search", "TERM", "Search the docker index for images") - noTrunc := cmd.Bool("notrunc", false, "Don't truncate output") - trusted := cmd.Bool("trusted", false, "Only show trusted builds") - stars := cmd.Int("stars", 0, "Only displays with at least xxx stars") + noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") + trusted := cmd.Bool([]string{"t", "#trusted", "-trusted"}, false, "Only show trusted builds") + stars := cmd.Int([]string{"s", "#stars", "-stars"}, 0, "Only displays with at least xxx stars") if err := cmd.Parse(args); err != nil { return nil } @@ -1648,34 +1676,34 @@ func (cli *DockerCli) CmdSearch(args ...string) error { v := url.Values{} v.Set("term", cmd.Arg(0)) - body, _, err := cli.call("GET", "/images/search?"+v.Encode(), nil) + + body, _, err := readBody(cli.call("GET", "/images/search?"+v.Encode(), nil, true)) + if err != nil { return err } - - outs := []registry.SearchResult{} - err = json.Unmarshal(body, &outs) - if err != nil { + outs := engine.NewTable("star_count", 0) + if _, err := outs.ReadListFrom(body); err != nil { return err } w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0) fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tTRUSTED\n") - for _, out := range outs { - if (*trusted && !out.IsTrusted) || (*stars > out.StarCount) { + for _, out := range outs.Data { + if (*trusted && !out.GetBool("is_trusted")) || (*stars > out.GetInt("star_count")) { continue } - desc := strings.Replace(out.Description, "\n", " ", -1) + desc := strings.Replace(out.Get("description"), "\n", " ", -1) desc = strings.Replace(desc, "\r", " ", -1) if !*noTrunc && len(desc) > 45 { desc = utils.Trunc(desc, 42) + "..." 
} - fmt.Fprintf(w, "%s\t%s\t%d\t", out.Name, desc, out.StarCount) - if out.IsOfficial { + fmt.Fprintf(w, "%s\t%s\t%d\t", out.Get("name"), desc, out.GetInt("star_count")) + if out.GetBool("is_official") { fmt.Fprint(w, "[OK]") } fmt.Fprint(w, "\t") - if out.IsTrusted { + if out.GetBool("is_trusted") { fmt.Fprint(w, "[OK]") } fmt.Fprint(w, "\n") @@ -1689,7 +1717,7 @@ type ports []int func (cli *DockerCli) CmdTag(args ...string) error { cmd := cli.Subcmd("tag", "[OPTIONS] IMAGE REPOSITORY[:TAG]", "Tag an image into a repository") - force := cmd.Bool("f", false, "Force") + force := cmd.Bool([]string{"f", "#force", "-force"}, false, "Force") if err := cmd.Parse(args); err != nil { return nil } @@ -1715,21 +1743,21 @@ func (cli *DockerCli) CmdTag(args ...string) error { v.Set("force", "1") } - if _, _, err := cli.call("POST", "/images/"+cmd.Arg(0)+"/tag?"+v.Encode(), nil); err != nil { + if _, _, err := readBody(cli.call("POST", "/images/"+cmd.Arg(0)+"/tag?"+v.Encode(), nil, false)); err != nil { return err } return nil } //FIXME Only used in tests -func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) { +func ParseRun(args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) { cmd := flag.NewFlagSet("run", flag.ContinueOnError) cmd.SetOutput(ioutil.Discard) cmd.Usage = nil - return parseRun(cmd, args, capabilities) + return parseRun(cmd, args, sysInfo) } -func parseRun(cmd *flag.FlagSet, args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) { +func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) { var ( // FIXME: use utils.ListOpts for attach and volumes? 
flAttach = NewListOpts(ValidateAttach) @@ -1743,43 +1771,43 @@ func parseRun(cmd *flag.FlagSet, args []string, capabilities *Capabilities) (*Co flVolumesFrom ListOpts flLxcOpts ListOpts - flAutoRemove = cmd.Bool("rm", false, "Automatically remove the container when it exits (incompatible with -d)") - flDetach = cmd.Bool("d", false, "Detached mode: Run container in the background, print new container id") - flNetwork = cmd.Bool("n", true, "Enable networking for this container") - flPrivileged = cmd.Bool("privileged", false, "Give extended privileges to this container") - flPublishAll = cmd.Bool("P", false, "Publish all exposed ports to the host interfaces") - flStdin = cmd.Bool("i", false, "Keep stdin open even if not attached") - flTty = cmd.Bool("t", false, "Allocate a pseudo-tty") - flContainerIDFile = cmd.String("cidfile", "", "Write the container ID to the file") - flEntrypoint = cmd.String("entrypoint", "", "Overwrite the default entrypoint of the image") - flHostname = cmd.String("h", "", "Container host name") - flMemoryString = cmd.String("m", "", "Memory limit (format: , where unit = b, k, m or g)") - flUser = cmd.String("u", "", "Username or UID") - flWorkingDir = cmd.String("w", "", "Working directory inside the container") - flCpuShares = cmd.Int64("c", 0, "CPU shares (relative weight)") + flAutoRemove = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits (incompatible with -d)") + flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: Run container in the background, print new container id") + flNetwork = cmd.Bool([]string{"n", "-networking"}, true, "Enable networking for this container") + flPrivileged = cmd.Bool([]string{"#privileged", "-privileged"}, false, "Give extended privileges to this container") + flPublishAll = cmd.Bool([]string{"P", "-publish-all"}, false, "Publish all exposed ports to the host interfaces") + flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep stdin open even if 
not attached") + flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-tty") + flContainerIDFile = cmd.String([]string{"#cidfile", "-cidfile"}, "", "Write the container ID to the file") + flEntrypoint = cmd.String([]string{"#entrypoint", "-entrypoint"}, "", "Overwrite the default entrypoint of the image") + flHostname = cmd.String([]string{"h", "-hostname"}, "", "Container host name") + flMemoryString = cmd.String([]string{"m", "-memory"}, "", "Memory limit (format: , where unit = b, k, m or g)") + flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID") + flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container") + flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") // For documentation purpose - _ = cmd.Bool("sig-proxy", true, "Proxify all received signal to the process (even in non-tty mode)") - _ = cmd.String("name", "", "Assign a name to the container") + _ = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)") + _ = cmd.String([]string{"#name", "-name"}, "", "Assign a name to the container") ) - cmd.Var(&flAttach, "a", "Attach to stdin, stdout or stderr.") - cmd.Var(&flVolumes, "v", "Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)") - cmd.Var(&flLinks, "link", "Add link to another container (name:alias)") - cmd.Var(&flEnv, "e", "Set environment variables") + cmd.Var(&flAttach, []string{"a", "-attach"}, "Attach to stdin, stdout or stderr.") + cmd.Var(&flVolumes, []string{"v", "-volume"}, "Bind mount a volume (e.g. 
from the host: -v /host:/container, from docker: -v /container)") + cmd.Var(&flLinks, []string{"#link", "-link"}, "Add link to another container (name:alias)") + cmd.Var(&flEnv, []string{"e", "-env"}, "Set environment variables") - cmd.Var(&flPublish, "p", fmt.Sprintf("Publish a container's port to the host (format: %s) (use 'docker port' to see the actual mapping)", PortSpecTemplateFormat)) - cmd.Var(&flExpose, "expose", "Expose a port from the container without publishing it to your host") - cmd.Var(&flDns, "dns", "Set custom dns servers") - cmd.Var(&flVolumesFrom, "volumes-from", "Mount volumes from the specified container(s)") - cmd.Var(&flLxcOpts, "lxc-conf", "Add custom lxc options -lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"") + cmd.Var(&flPublish, []string{"p", "-publish"}, fmt.Sprintf("Publish a container's port to the host (format: %s) (use 'docker port' to see the actual mapping)", PortSpecTemplateFormat)) + cmd.Var(&flExpose, []string{"#expose", "-expose"}, "Expose a port from the container without publishing it to your host") + cmd.Var(&flDns, []string{"#dns", "-dns"}, "Set custom dns servers") + cmd.Var(&flVolumesFrom, []string{"#volumes-from", "-volumes-from"}, "Mount volumes from the specified container(s)") + cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "Add custom lxc options -lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"") if err := cmd.Parse(args); err != nil { return nil, nil, cmd, err } // Check if the kernel supports memory limit cgroup. 
- if capabilities != nil && *flMemoryString != "" && !capabilities.MemoryLimit { + if sysInfo != nil && *flMemoryString != "" && !sysInfo.MemoryLimit { *flMemoryString = "" } @@ -1869,7 +1897,7 @@ func parseRun(cmd *flag.FlagSet, args []string, capabilities *Capabilities) (*Co // Merge in exposed ports to the map of published ports for _, e := range flExpose.GetAll() { if strings.Contains(e, ":") { - return nil, nil, cmd, fmt.Errorf("Invalid port format for -expose: %s", e) + return nil, nil, cmd, fmt.Errorf("Invalid port format for --expose: %s", e) } p := NewPort(splitProtoPort(e)) if _, exists := ports[p]; !exists { @@ -1911,7 +1939,7 @@ func parseRun(cmd *flag.FlagSet, args []string, capabilities *Capabilities) (*Co PublishAllPorts: *flPublishAll, } - if capabilities != nil && flMemory > 0 && !capabilities.SwapLimit { + if sysInfo != nil && flMemory > 0 && !sysInfo.SwapLimit { //fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n") config.MemorySwap = -1 } @@ -1964,7 +1992,7 @@ func (cli *DockerCli) CmdRun(args ...string) error { } //create the container - body, statusCode, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), config) + stream, statusCode, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), config, false) //if image not found try to pull it if statusCode == 404 { _, tag := utils.ParseRepositoryTag(config.Image) @@ -2001,30 +2029,30 @@ func (cli *DockerCli) CmdRun(args ...string) error { if err = cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.err, map[string][]string{"X-Registry-Auth": registryAuthHeader}); err != nil { return err } - if body, _, err = cli.call("POST", "/containers/create?"+containerValues.Encode(), config); err != nil { + if stream, _, err = cli.call("POST", "/containers/create?"+containerValues.Encode(), config, false); err != nil { return err } } else if err != nil { return err } - var runResult APIRun - if err := 
json.Unmarshal(body, &runResult); err != nil { + var runResult engine.Env + if err := runResult.Decode(stream); err != nil { return err } - for _, warning := range runResult.Warnings { + for _, warning := range runResult.GetList("Warnings") { fmt.Fprintf(cli.err, "WARNING: %s\n", warning) } if len(hostConfig.ContainerIDFile) > 0 { - if _, err = containerIDFile.Write([]byte(runResult.ID)); err != nil { + if _, err = containerIDFile.Write([]byte(runResult.Get("Id"))); err != nil { return fmt.Errorf("failed to write the container ID to the file: %s", err) } } if sigProxy { - sigc := cli.forwardAllSignals(runResult.ID) + sigc := cli.forwardAllSignals(runResult.Get("Id")) defer utils.StopCatch(sigc) } @@ -2038,7 +2066,7 @@ func (cli *DockerCli) CmdRun(args ...string) error { waitDisplayId = make(chan struct{}) go func() { defer close(waitDisplayId) - fmt.Fprintf(cli.out, "%s\n", runResult.ID) + fmt.Fprintf(cli.out, "%s\n", runResult.Get("Id")) }() } @@ -2080,7 +2108,7 @@ func (cli *DockerCli) CmdRun(args ...string) error { } errCh = utils.Go(func() error { - return cli.hijack("POST", "/containers/"+runResult.ID+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked) + return cli.hijack("POST", "/containers/"+runResult.Get("Id")+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked) }) } else { close(hijacked) @@ -2102,12 +2130,12 @@ func (cli *DockerCli) CmdRun(args ...string) error { } //start the container - if _, _, err = cli.call("POST", "/containers/"+runResult.ID+"/start", hostConfig); err != nil { + if _, _, err = readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/start", hostConfig, false)); err != nil { return err } if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminal { - if err := cli.monitorTtySize(runResult.ID); err != nil { + if err := cli.monitorTtySize(runResult.Get("Id")); err != nil { utils.Errorf("Error monitoring TTY size: %s\n", err) } } @@ -2132,19 +2160,28 @@ func (cli 
*DockerCli) CmdRun(args ...string) error { if autoRemove { // Autoremove: wait for the container to finish, retrieve // the exit code and remove the container - if _, _, err := cli.call("POST", "/containers/"+runResult.ID+"/wait", nil); err != nil { + if _, _, err := readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/wait", nil, false)); err != nil { return err } - if _, status, err = getExitCode(cli, runResult.ID); err != nil { + if _, status, err = getExitCode(cli, runResult.Get("Id")); err != nil { return err } - if _, _, err := cli.call("DELETE", "/containers/"+runResult.ID, nil); err != nil { + if _, _, err := readBody(cli.call("DELETE", "/containers/"+runResult.Get("Id")+"?v=1", nil, false)); err != nil { return err } } else { - // No Autoremove: Simply retrieve the exit code - if _, status, err = getExitCode(cli, runResult.ID); err != nil { - return err + if !config.Tty { + // In non-tty mode, we can't detach, so we know we need to wait. + if status, err = waitForExit(cli, runResult.Get("Id")); err != nil { + return err + } + } else { + // In TTY mode, there is a race. If the process dies too slowly, the state can be updated after the getExitCode call + // and result in a wrong exit code. 
+ // No Autoremove: Simply retrieve the exit code + if _, status, err = getExitCode(cli, runResult.Get("Id")); err != nil { + return err + } } } if status != 0 { @@ -2164,24 +2201,26 @@ func (cli *DockerCli) CmdCp(args ...string) error { return nil } - var copyData APICopy + var copyData engine.Env info := strings.Split(cmd.Arg(0), ":") if len(info) != 2 { return fmt.Errorf("Error: Path not specified") } - copyData.Resource = info[1] - copyData.HostPath = cmd.Arg(1) + copyData.Set("Resource", info[1]) + copyData.Set("HostPath", cmd.Arg(1)) - data, statusCode, err := cli.call("POST", "/containers/"+info[0]+"/copy", copyData) + stream, statusCode, err := cli.call("POST", "/containers/"+info[0]+"/copy", copyData, false) + if stream != nil { + defer stream.Close() + } if err != nil { return err } if statusCode == 200 { - r := bytes.NewReader(data) - if err := archive.Untar(r, copyData.HostPath, nil); err != nil { + if err := archive.Untar(stream, copyData.Get("HostPath"), nil); err != nil { return err } } @@ -2223,24 +2262,51 @@ func (cli *DockerCli) CmdLoad(args ...string) error { return nil } -func (cli *DockerCli) call(method, path string, data interface{}) ([]byte, int, error) { - var params io.Reader +func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo bool) (io.ReadCloser, int, error) { + params := bytes.NewBuffer(nil) if data != nil { - buf, err := json.Marshal(data) - if err != nil { - return nil, -1, err + if env, ok := data.(engine.Env); ok { + if err := env.Encode(params); err != nil { + return nil, -1, err + } + } else { + buf, err := json.Marshal(data) + if err != nil { + return nil, -1, err + } + if _, err := params.Write(buf); err != nil { + return nil, -1, err + } } - params = bytes.NewBuffer(buf) } - // fixme: refactor client to support redirect re := regexp.MustCompile("/+") path = re.ReplaceAllString(path, "/") - req, err := http.NewRequest(method, fmt.Sprintf("/v%g%s", APIVERSION, path), params) + req, err := 
http.NewRequest(method, fmt.Sprintf("/v%g%s", api.APIVERSION, path), params) if err != nil { return nil, -1, err } + if passAuthInfo { + cli.LoadConfigFile() + // Resolve the Auth config relevant for this server + authConfig := cli.configFile.ResolveAuthConfig(auth.IndexServerAddress()) + getHeaders := func(authConfig auth.AuthConfig) (map[string][]string, error) { + buf, err := json.Marshal(authConfig) + if err != nil { + return nil, err + } + registryAuthHeader := []string{ + base64.URLEncoding.EncodeToString(buf), + } + return map[string][]string{"X-Registry-Auth": registryAuthHeader}, nil + } + if headers, err := getHeaders(authConfig); err == nil && headers != nil { + for k, v := range headers { + req.Header[k] = v + } + } + } req.Header.Set("User-Agent", "Docker-Client/"+VERSION) req.Host = cli.addr if data != nil { @@ -2257,26 +2323,32 @@ func (cli *DockerCli) call(method, path string, data interface{}) ([]byte, int, } clientconn := httputil.NewClientConn(dial, nil) resp, err := clientconn.Do(req) - defer clientconn.Close() if err != nil { + clientconn.Close() if strings.Contains(err.Error(), "connection refused") { return nil, -1, ErrConnectionRefused } return nil, -1, err } - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, -1, err - } if resp.StatusCode < 200 || resp.StatusCode >= 400 { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, -1, err + } if len(body) == 0 { - return nil, resp.StatusCode, fmt.Errorf("Error: %s", http.StatusText(resp.StatusCode)) + return nil, resp.StatusCode, fmt.Errorf("Error: %s", http.StatusText(resp.StatusCode)) } return nil, resp.StatusCode, fmt.Errorf("Error: %s", bytes.TrimSpace(body)) } - return body, resp.StatusCode, nil + + wrapper := utils.NewReadCloserWrapper(resp.Body, func() error { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + return clientconn.Close() + }) + return wrapper, resp.StatusCode, nil } func (cli *DockerCli) 
stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) error { @@ -2288,7 +2360,7 @@ func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, h re := regexp.MustCompile("/+") path = re.ReplaceAllString(path, "/") - req, err := http.NewRequest(method, fmt.Sprintf("/v%g%s", APIVERSION, path), in) + req, err := http.NewRequest(method, fmt.Sprintf("/v%g%s", api.APIVERSION, path), in) if err != nil { return err } @@ -2333,7 +2405,7 @@ func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, h return fmt.Errorf("Error: %s", bytes.TrimSpace(body)) } - if matchesContentType(resp.Header.Get("Content-Type"), "application/json") { + if api.MatchesContentType(resp.Header.Get("Content-Type"), "application/json") { return utils.DisplayJSONMessagesStream(resp.Body, out, cli.terminalFd, cli.isTerminal) } if _, err := io.Copy(out, resp.Body); err != nil { @@ -2352,7 +2424,7 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea re := regexp.MustCompile("/+") path = re.ReplaceAllString(path, "/") - req, err := http.NewRequest(method, fmt.Sprintf("/v%g%s", APIVERSION, path), nil) + req, err := http.NewRequest(method, fmt.Sprintf("/v%g%s", api.APIVERSION, path), nil) if err != nil { return err } @@ -2471,7 +2543,7 @@ func (cli *DockerCli) resizeTty(id string) { v := url.Values{} v.Set("h", strconv.Itoa(height)) v.Set("w", strconv.Itoa(width)) - if _, _, err := cli.call("POST", "/containers/"+id+"/resize?"+v.Encode(), nil); err != nil { + if _, _, err := readBody(cli.call("POST", "/containers/"+id+"/resize?"+v.Encode(), nil, false)); err != nil { utils.Errorf("Error resize: %s", err) } } @@ -2508,22 +2580,22 @@ func (cli *DockerCli) LoadConfigFile() (err error) { } func waitForExit(cli *DockerCli, containerId string) (int, error) { - body, _, err := cli.call("POST", "/containers/"+containerId+"/wait", nil) + stream, _, err := cli.call("POST", "/containers/"+containerId+"/wait", nil, 
false) if err != nil { return -1, err } - var out APIWait - if err := json.Unmarshal(body, &out); err != nil { + var out engine.Env + if err := out.Decode(stream); err != nil { return -1, err } - return out.StatusCode, nil + return out.GetInt("StatusCode"), nil } // getExitCode perform an inspect on the container. It returns // the running state and the exit code. func getExitCode(cli *DockerCli, containerId string) (bool, int, error) { - body, _, err := cli.call("GET", "/containers/"+containerId+"/json", nil) + body, _, err := readBody(cli.call("GET", "/containers/"+containerId+"/json", nil, false)) if err != nil { // If we can't connect, then the daemon probably died. if err != ErrConnectionRefused { @@ -2538,6 +2610,20 @@ func getExitCode(cli *DockerCli, containerId string) (bool, int, error) { return c.State.IsRunning(), c.State.GetExitCode(), nil } +func readBody(stream io.ReadCloser, statusCode int, err error) ([]byte, int, error) { + if stream != nil { + defer stream.Close() + } + if err != nil { + return nil, statusCode, err + } + body, err := ioutil.ReadAll(stream) + if err != nil { + return nil, -1, err + } + return body, statusCode, nil +} + func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string) *DockerCli { var ( isTerminal = false diff --git a/config.go b/config.go index 5a6de7a873..fc04c9ff16 100644 --- a/config.go +++ b/config.go @@ -1,8 +1,15 @@ package docker import ( - "github.com/dotcloud/docker/engine" "net" + + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/networkdriver" +) + +const ( + defaultNetworkMtu = 1500 + DisableNetworkBridge = "none" ) // FIXME: separate runtime configuration from http api configuration @@ -10,42 +17,48 @@ type DaemonConfig struct { Pidfile string Root string AutoRestart bool - EnableCors bool Dns []string EnableIptables bool - BridgeIface string - BridgeIp string + EnableIpForward bool DefaultIp net.IP + BridgeIface string + BridgeIP string InterContainerCommunication bool 
GraphDriver string Mtu int + DisableNetwork bool } // ConfigFromJob creates and returns a new DaemonConfig object // by parsing the contents of a job's environment. -func ConfigFromJob(job *engine.Job) *DaemonConfig { - var config DaemonConfig - config.Pidfile = job.Getenv("Pidfile") - config.Root = job.Getenv("Root") - config.AutoRestart = job.GetenvBool("AutoRestart") - config.EnableCors = job.GetenvBool("EnableCors") +func DaemonConfigFromJob(job *engine.Job) *DaemonConfig { + config := &DaemonConfig{ + Pidfile: job.Getenv("Pidfile"), + Root: job.Getenv("Root"), + AutoRestart: job.GetenvBool("AutoRestart"), + EnableIptables: job.GetenvBool("EnableIptables"), + EnableIpForward: job.GetenvBool("EnableIpForward"), + BridgeIP: job.Getenv("BridgeIP"), + DefaultIp: net.ParseIP(job.Getenv("DefaultIp")), + InterContainerCommunication: job.GetenvBool("InterContainerCommunication"), + GraphDriver: job.Getenv("GraphDriver"), + } if dns := job.GetenvList("Dns"); dns != nil { config.Dns = dns } - config.EnableIptables = job.GetenvBool("EnableIptables") - if br := job.Getenv("BridgeIface"); br != "" { - config.BridgeIface = br - } else { - config.BridgeIface = DefaultNetworkBridge - } - config.BridgeIp = job.Getenv("BridgeIp") - config.DefaultIp = net.ParseIP(job.Getenv("DefaultIp")) - config.InterContainerCommunication = job.GetenvBool("InterContainerCommunication") - config.GraphDriver = job.Getenv("GraphDriver") - if mtu := job.GetenvInt("Mtu"); mtu != -1 { + if mtu := job.GetenvInt("Mtu"); mtu != 0 { config.Mtu = mtu } else { - config.Mtu = DefaultNetworkMtu + config.Mtu = GetDefaultNetworkMtu() } - return &config + config.DisableNetwork = job.Getenv("BridgeIface") == DisableNetworkBridge + + return config +} + +func GetDefaultNetworkMtu() int { + if iface, err := networkdriver.GetDefaultRouteIface(); err == nil { + return iface.MTU + } + return defaultNetworkMtu } diff --git a/container.go b/container.go index 9e4495890a..81e8749d2a 100644 --- a/container.go +++ 
b/container.go @@ -1,24 +1,23 @@ package docker import ( - "bytes" "encoding/json" "errors" "fmt" "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/execdriver" "github.com/dotcloud/docker/graphdriver" - "github.com/dotcloud/docker/mount" + "github.com/dotcloud/docker/pkg/mount" "github.com/dotcloud/docker/pkg/term" "github.com/dotcloud/docker/utils" "github.com/kr/pty" "io" "io/ioutil" "log" - "net" "os" - "os/exec" "path" - "strconv" + "path/filepath" "strings" "sync" "syscall" @@ -33,7 +32,7 @@ var ( type Container struct { sync.Mutex root string // Path to the "home" of the container, including metadata. - rootfs string // Path to the root filesystem of the container. + basefs string // Path to the graphdriver mountpoint ID string @@ -46,7 +45,6 @@ type Container struct { State State Image string - network *NetworkInterface NetworkSettings *NetworkSettings ResolvConfPath string @@ -55,7 +53,7 @@ type Container struct { Name string Driver string - cmd *exec.Cmd + command *execdriver.Command stdout *utils.WriteBroadcaster stderr *utils.WriteBroadcaster stdin io.ReadCloser @@ -101,6 +99,47 @@ type Config struct { WorkingDir string Entrypoint []string NetworkDisabled bool + OnBuild []string +} + +func ContainerConfigFromJob(job *engine.Job) *Config { + config := &Config{ + Hostname: job.Getenv("Hostname"), + Domainname: job.Getenv("Domainname"), + User: job.Getenv("User"), + Memory: job.GetenvInt64("Memory"), + MemorySwap: job.GetenvInt64("MemorySwap"), + CpuShares: job.GetenvInt64("CpuShares"), + AttachStdin: job.GetenvBool("AttachStdin"), + AttachStdout: job.GetenvBool("AttachStdout"), + AttachStderr: job.GetenvBool("AttachStderr"), + Tty: job.GetenvBool("Tty"), + OpenStdin: job.GetenvBool("OpenStdin"), + StdinOnce: job.GetenvBool("StdinOnce"), + Image: job.Getenv("Image"), + VolumesFrom: job.Getenv("VolumesFrom"), + WorkingDir: job.Getenv("WorkingDir"), + NetworkDisabled: job.GetenvBool("NetworkDisabled"), + 
} + job.GetenvJson("ExposedPorts", &config.ExposedPorts) + job.GetenvJson("Volumes", &config.Volumes) + if PortSpecs := job.GetenvList("PortSpecs"); PortSpecs != nil { + config.PortSpecs = PortSpecs + } + if Env := job.GetenvList("Env"); Env != nil { + config.Env = Env + } + if Cmd := job.GetenvList("Cmd"); Cmd != nil { + config.Cmd = Cmd + } + if Dns := job.GetenvList("Dns"); Dns != nil { + config.Dns = Dns + } + if Entrypoint := job.GetenvList("Entrypoint"); Entrypoint != nil { + config.Entrypoint = Entrypoint + } + + return config } type HostConfig struct { @@ -113,6 +152,24 @@ type HostConfig struct { PublishAllPorts bool } +func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { + hostConfig := &HostConfig{ + ContainerIDFile: job.Getenv("ContainerIDFile"), + Privileged: job.GetenvBool("Privileged"), + PublishAllPorts: job.GetenvBool("PublishAllPorts"), + } + job.GetenvJson("LxcConf", &hostConfig.LxcConf) + job.GetenvJson("PortBindings", &hostConfig.PortBindings) + if Binds := job.GetenvList("Binds"); Binds != nil { + hostConfig.Binds = Binds + } + if Links := job.GetenvList("Links"); Links != nil { + hostConfig.Links = Links + } + + return hostConfig +} + type BindMap struct { SrcPath string DstPath string @@ -175,39 +232,39 @@ type NetworkSettings struct { Ports map[Port][]PortBinding } -func (settings *NetworkSettings) PortMappingAPI() []APIPort { - var mapping []APIPort +func (settings *NetworkSettings) PortMappingAPI() *engine.Table { + var outs = engine.NewTable("", 0) for port, bindings := range settings.Ports { p, _ := parsePort(port.Port()) if len(bindings) == 0 { - mapping = append(mapping, APIPort{ - PublicPort: int64(p), - Type: port.Proto(), - }) + out := &engine.Env{} + out.SetInt("PublicPort", p) + out.Set("Type", port.Proto()) + outs.Add(out) continue } for _, binding := range bindings { - p, _ := parsePort(port.Port()) + out := &engine.Env{} h, _ := parsePort(binding.HostPort) - mapping = append(mapping, APIPort{ - PrivatePort: int64(p), 
- PublicPort: int64(h), - Type: port.Proto(), - IP: binding.HostIp, - }) + out.SetInt("PrivatePort", p) + out.SetInt("PublicPort", h) + out.Set("Type", port.Proto()) + out.Set("IP", binding.HostIp) + outs.Add(out) } } - return mapping + return outs } // Inject the io.Reader at the given path. Note: do not close the reader func (container *Container) Inject(file io.Reader, pth string) error { - if err := container.EnsureMounted(); err != nil { + if err := container.Mount(); err != nil { return fmt.Errorf("inject: error mounting container %s: %s", container.ID, err) } + defer container.Unmount() // Return error if path exists - destPath := path.Join(container.RootfsPath(), pth) + destPath := path.Join(container.basefs, pth) if _, err := os.Stat(destPath); err == nil { // Since err is nil, the path could be stat'd and it exists return fmt.Errorf("%s exists", pth) @@ -219,7 +276,7 @@ func (container *Container) Inject(file io.Reader, pth string) error { } // Make sure the directory exists - if err := os.MkdirAll(path.Join(container.RootfsPath(), path.Dir(pth)), 0755); err != nil { + if err := os.MkdirAll(path.Join(container.basefs, path.Dir(pth)), 0755); err != nil { return err } @@ -235,10 +292,6 @@ func (container *Container) Inject(file io.Reader, pth string) error { return nil } -func (container *Container) Cmd() *exec.Cmd { - return container.cmd -} - func (container *Container) When() time.Time { return container.Created } @@ -305,23 +358,14 @@ func (container *Container) generateEnvConfig(env []string) error { return nil } -func (container *Container) generateLXCConfig() error { - fo, err := os.Create(container.lxcConfigPath()) - if err != nil { - return err - } - defer fo.Close() - return LxcTemplateCompiled.Execute(fo, container) -} - -func (container *Container) startPty() error { +func (container *Container) setupPty() error { ptyMaster, ptySlave, err := pty.Open() if err != nil { return err } container.ptyMaster = ptyMaster - container.cmd.Stdout = ptySlave 
- container.cmd.Stderr = ptySlave + container.command.Stdout = ptySlave + container.command.Stderr = ptySlave // Copy the PTYs to our broadcasters go func() { @@ -333,8 +377,8 @@ func (container *Container) startPty() error { // stdin if container.Config.OpenStdin { - container.cmd.Stdin = ptySlave - container.cmd.SysProcAttr.Setctty = true + container.command.Stdin = ptySlave + container.command.SysProcAttr.Setctty = true go func() { defer container.stdin.Close() utils.Debugf("startPty: begin of stdin pipe") @@ -342,18 +386,14 @@ func (container *Container) startPty() error { utils.Debugf("startPty: end of stdin pipe") }() } - if err := container.cmd.Start(); err != nil { - return err - } - ptySlave.Close() return nil } -func (container *Container) start() error { - container.cmd.Stdout = container.stdout - container.cmd.Stderr = container.stderr +func (container *Container) setupStd() error { + container.command.Stdout = container.stdout + container.command.Stderr = container.stderr if container.Config.OpenStdin { - stdin, err := container.cmd.StdinPipe() + stdin, err := container.command.StdinPipe() if err != nil { return err } @@ -364,7 +404,7 @@ func (container *Container) start() error { utils.Debugf("start: end of stdin pipe") }() } - return container.cmd.Start() + return nil } func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error { @@ -384,12 +424,14 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s if container.Config.StdinOnce && !container.Config.Tty { defer cStdin.Close() } else { - if cStdout != nil { - defer cStdout.Close() - } - if cStderr != nil { - defer cStderr.Close() - } + defer func() { + if cStdout != nil { + cStdout.Close() + } + if cStderr != nil { + cStderr.Close() + } + }() } if container.Config.Tty { _, err = utils.CopyEscapable(cStdin, stdin) @@ -485,12 +527,15 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser 
io.Closer, s } return utils.Go(func() error { - if cStdout != nil { - defer cStdout.Close() - } - if cStderr != nil { - defer cStderr.Close() - } + defer func() { + if cStdout != nil { + cStdout.Close() + } + if cStderr != nil { + cStderr.Close() + } + }() + // FIXME: how to clean up the stdin goroutine without the unwanted side effect // of closing the passed stdin? Add an intermediary io.Pipe? for i := 0; i < nJobs; i += 1 { @@ -506,6 +551,50 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s }) } +func populateCommand(c *Container) { + var ( + en *execdriver.Network + driverConfig []string + ) + + if !c.Config.NetworkDisabled { + network := c.NetworkSettings + en = &execdriver.Network{ + Gateway: network.Gateway, + Bridge: network.Bridge, + IPAddress: network.IPAddress, + IPPrefixLen: network.IPPrefixLen, + Mtu: c.runtime.config.Mtu, + } + } + + if lxcConf := c.hostConfig.LxcConf; lxcConf != nil { + for _, pair := range lxcConf { + driverConfig = append(driverConfig, fmt.Sprintf("%s = %s", pair.Key, pair.Value)) + } + } + resources := &execdriver.Resources{ + Memory: c.Config.Memory, + MemorySwap: c.Config.MemorySwap, + CpuShares: c.Config.CpuShares, + } + c.command = &execdriver.Command{ + ID: c.ID, + Privileged: c.hostConfig.Privileged, + Rootfs: c.RootfsPath(), + InitPath: "/.dockerinit", + Entrypoint: c.Path, + Arguments: c.Args, + WorkingDir: c.Config.WorkingDir, + Network: en, + Tty: c.Config.Tty, + User: c.Config.User, + Config: driverConfig, + Resources: resources, + } + c.command.SysProcAttr = &syscall.SysProcAttr{Setsid: true} +} + func (container *Container) Start() (err error) { container.Lock() defer container.Unlock() @@ -513,15 +602,18 @@ func (container *Container) Start() (err error) { if container.State.IsRunning() { return fmt.Errorf("The container %s is already running.", container.ID) } + defer func() { if err != nil { container.cleanup() } }() - if err := container.EnsureMounted(); err != nil { + + if err := 
container.Mount(); err != nil { return err } - if container.runtime.networkManager.disabled { + + if container.runtime.config.DisableNetwork { container.Config.NetworkDisabled = true container.buildHostnameAndHostsFiles("127.0.1.1") } else { @@ -532,16 +624,16 @@ func (container *Container) Start() (err error) { } // Make sure the config is compatible with the current kernel - if container.Config.Memory > 0 && !container.runtime.capabilities.MemoryLimit { + if container.Config.Memory > 0 && !container.runtime.sysInfo.MemoryLimit { log.Printf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n") container.Config.Memory = 0 } - if container.Config.Memory > 0 && !container.runtime.capabilities.SwapLimit { + if container.Config.Memory > 0 && !container.runtime.sysInfo.SwapLimit { log.Printf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n") container.Config.MemorySwap = -1 } - if container.runtime.capabilities.IPv4ForwardingDisabled { + if container.runtime.sysInfo.IPv4ForwardingDisabled { log.Printf("WARNING: IPv4 forwarding is disabled. 
Networking will not work") } @@ -559,38 +651,6 @@ func (container *Container) Start() (err error) { return err } - if err := container.generateLXCConfig(); err != nil { - return err - } - - var lxcStart string = "lxc-start" - if container.hostConfig.Privileged && container.runtime.capabilities.AppArmor { - lxcStart = path.Join(container.runtime.config.Root, "lxc-start-unconfined") - } - - params := []string{ - lxcStart, - "-n", container.ID, - "-f", container.lxcConfigPath(), - "--", - "/.dockerinit", - } - - // Networking - if !container.Config.NetworkDisabled { - network := container.NetworkSettings - params = append(params, - "-g", network.Gateway, - "-i", fmt.Sprintf("%s/%d", network.IPAddress, network.IPPrefixLen), - "-mtu", strconv.Itoa(container.runtime.config.Mtu), - ) - } - - // User - if container.Config.User != "" { - params = append(params, "-u", container.Config.User) - } - // Setup environment env := []string{ "HOME=/", @@ -602,10 +662,6 @@ func (container *Container) Start() (err error) { env = append(env, "TERM=xterm") } - if container.hostConfig.Privileged { - params = append(params, "-privileged") - } - // Init any links between the parent and children runtime := container.runtime @@ -627,7 +683,7 @@ func (container *Container) Start() (err error) { } for p, child := range children { - link, err := NewLink(container, child, p, runtime.networkManager.bridgeIface) + link, err := NewLink(container, child, p, runtime.eng) if err != nil { rollback() return err @@ -654,44 +710,34 @@ func (container *Container) Start() (err error) { } if container.Config.WorkingDir != "" { - workingDir := path.Clean(container.Config.WorkingDir) - utils.Debugf("[working dir] working dir is %s", workingDir) - - if err := os.MkdirAll(path.Join(container.RootfsPath(), workingDir), 0755); err != nil { + container.Config.WorkingDir = path.Clean(container.Config.WorkingDir) + if err := os.MkdirAll(path.Join(container.basefs, container.Config.WorkingDir), 0755); err != nil { 
return nil } - - params = append(params, - "-w", workingDir, - ) } - // Program - params = append(params, "--", container.Path) - params = append(params, container.Args...) - - if RootIsShared() { - // lxc-start really needs / to be non-shared, or all kinds of stuff break - // when lxc-start unmount things and those unmounts propagate to the main - // mount namespace. - // What we really want is to clone into a new namespace and then - // mount / MS_REC|MS_SLAVE, but since we can't really clone or fork - // without exec in go we have to do this horrible shell hack... - shellString := - "mount --make-rslave /; exec " + - utils.ShellQuoteArguments(params) - - params = []string{ - "unshare", "-m", "--", "/bin/sh", "-c", shellString, - } - } - - root := container.RootfsPath() envPath, err := container.EnvConfigPath() if err != nil { return err } + // Setup the root fs as a bind mount of the base fs + root := container.RootfsPath() + if err := os.MkdirAll(root, 0755); err != nil && !os.IsExist(err) { + return nil + } + + // Create a bind mount of the base fs as a place where we can add mounts + // without affecting the ability to access the base fs + if err := mount.Mount(container.basefs, root, "none", "bind,rw"); err != nil { + return err + } + + // Make sure the root fs is private so the mounts here don't propagate to basefs + if err := mount.ForceMount(root, root, "none", "private"); err != nil { + return err + } + // Mount docker specific files into the containers root fs if err := mount.Mount(runtime.sysInitPath, path.Join(root, "/.dockerinit"), "none", "bind,ro"); err != nil { return err @@ -713,19 +759,25 @@ func (container *Container) Start() (err error) { } // Mount user specified volumes - for r, v := range container.Volumes { mountAs := "ro" if container.VolumesRW[r] { mountAs = "rw" } - if err := mount.Mount(v, path.Join(root, r), "none", fmt.Sprintf("bind,%s", mountAs)); err != nil { + r = path.Join(root, r) + if p, err := utils.FollowSymlinkInScope(r, 
root); err != nil { + return err + } else { + r = p + } + + if err := mount.Mount(v, r, "none", fmt.Sprintf("bind,%s", mountAs)); err != nil { return err } } - container.cmd = exec.Command(params[0], params[1:]...) + populateCommand(container) // Setup logging of stdout and stderr to disk if err := container.runtime.LogToDisk(container.stdout, container.logPath("json"), "stdout"); err != nil { @@ -734,59 +786,47 @@ func (container *Container) Start() (err error) { if err := container.runtime.LogToDisk(container.stderr, container.logPath("json"), "stderr"); err != nil { return err } - - container.cmd.SysProcAttr = &syscall.SysProcAttr{Setsid: true} - - if container.Config.Tty { - err = container.startPty() - } else { - err = container.start() - } - if err != nil { - return err - } - // FIXME: save state on disk *first*, then converge - // this way disk state is used as a journal, eg. we can restore after crash etc. - container.State.SetRunning(container.cmd.Process.Pid) - - // Init the lock container.waitLock = make(chan struct{}) - container.ToDisk() - go container.monitor() + // Setuping pipes and/or Pty + var setup func() error + if container.Config.Tty { + setup = container.setupPty + } else { + setup = container.setupStd + } + if err := setup(); err != nil { + return err + } - defer utils.Debugf("Container running: %v", container.State.IsRunning()) - // We wait for the container to be fully running. - // Timeout after 5 seconds. In case of broken pipe, just retry. 
- // Note: The container can run and finish correctly before - // the end of this loop - for now := time.Now(); time.Since(now) < 5*time.Second; { - // If the container dies while waiting for it, just return - if !container.State.IsRunning() { - return nil - } - output, err := exec.Command("lxc-info", "-s", "-n", container.ID).CombinedOutput() - if err != nil { - utils.Debugf("Error with lxc-info: %s (%s)", err, output) - - output, err = exec.Command("lxc-info", "-s", "-n", container.ID).CombinedOutput() - if err != nil { - utils.Debugf("Second Error with lxc-info: %s (%s)", err, output) - return err + callbackLock := make(chan struct{}) + callback := func(command *execdriver.Command) { + container.State.SetRunning(command.Pid()) + if command.Tty { + // The callback is called after the process Start() + // so we are in the parent process. In TTY mode, stdin/out/err is the PtySlace + // which we close here. + if c, ok := command.Stdout.(io.Closer); ok { + c.Close() } - } - if strings.Contains(string(output), "RUNNING") { - return nil + if err := container.ToDisk(); err != nil { + utils.Debugf("%s", err) } - utils.Debugf("Waiting for the container to start (running: %v): %s", container.State.IsRunning(), bytes.TrimSpace(output)) - time.Sleep(50 * time.Millisecond) + close(callbackLock) } - if container.State.IsRunning() { - return ErrContainerStartTimeout + // We use a callback here instead of a goroutine and an chan for + // syncronization purposes + cErr := utils.Go(func() error { return container.monitor(callback) }) + + // Start should not return until the process is actually running + select { + case <-callbackLock: + case err := <-cErr: + return err } - return ErrContainerStart + return nil } func (container *Container) getBindMap() (map[string]BindMap, error) { @@ -852,7 +892,7 @@ func (container *Container) createVolumes() error { if strings.ToLower(bindMap.Mode) == "rw" { srcRW = true } - if stat, err := os.Lstat(bindMap.SrcPath); err != nil { + if stat, err 
:= os.Stat(bindMap.SrcPath); err != nil { return err } else { volIsDir = stat.IsDir() @@ -873,12 +913,19 @@ func (container *Container) createVolumes() error { } srcRW = true // RW by default } + + if p, err := filepath.EvalSymlinks(srcPath); err != nil { + return err + } else { + srcPath = p + } + container.Volumes[volPath] = srcPath container.VolumesRW[volPath] = srcRW // Create the mountpoint - volPath = path.Join(container.RootfsPath(), volPath) - rootVolPath, err := utils.FollowSymlinkInScope(volPath, container.RootfsPath()) + volPath = path.Join(container.basefs, volPath) + rootVolPath, err := utils.FollowSymlinkInScope(volPath, container.basefs) if err != nil { return err } @@ -928,7 +975,7 @@ func (container *Container) createVolumes() error { return err } // Change the source volume's ownership if it differs from the root - // files that where just copied + // files that were just copied if stat.Uid != srcStat.Uid || stat.Gid != srcStat.Gid { if err := os.Chown(srcPath, int(stat.Uid), int(stat.Gid)); err != nil { return err @@ -956,7 +1003,7 @@ func (container *Container) applyExternalVolumes() error { mountRW = false case "rw": // mountRW is already true default: - return fmt.Errorf("Malformed volumes-from speficication: %s", containerSpec) + return fmt.Errorf("Malformed volumes-from specification: %s", containerSpec) } } c := container.runtime.Get(specParts[0]) @@ -967,7 +1014,7 @@ func (container *Container) applyExternalVolumes() error { if _, exists := container.Volumes[volPath]; exists { continue } - if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil { + if err := os.MkdirAll(path.Join(container.basefs, volPath), 0755); err != nil { return err } container.Volumes[volPath] = id @@ -1057,33 +1104,40 @@ func (container *Container) allocateNetwork() error { } var ( - iface *NetworkInterface - err error + env *engine.Env + err error + eng = container.runtime.eng ) + if container.State.IsGhost() { - if manager := 
container.runtime.networkManager; manager.disabled { - iface = &NetworkInterface{disabled: true} + if container.runtime.config.DisableNetwork { + env = &engine.Env{} } else { - iface = &NetworkInterface{ - IPNet: net.IPNet{IP: net.ParseIP(container.NetworkSettings.IPAddress), Mask: manager.bridgeNetwork.Mask}, - Gateway: manager.bridgeNetwork.IP, - manager: manager, + currentIP := container.NetworkSettings.IPAddress + + job := eng.Job("allocate_interface", container.ID) + if currentIP != "" { + job.Setenv("RequestIP", currentIP) } - if iface != nil && iface.IPNet.IP != nil { - ipNum := ipToInt(iface.IPNet.IP) - manager.ipAllocator.inUse[ipNum] = struct{}{} - } else { - iface, err = container.runtime.networkManager.Allocate() - if err != nil { - return err - } + + env, err = job.Stdout.AddEnv() + if err != nil { + return err + } + + if err := job.Run(); err != nil { + return err } } } else { - iface, err = container.runtime.networkManager.Allocate() + job := eng.Job("allocate_interface", container.ID) + env, err = job.Stdout.AddEnv() if err != nil { return err } + if err := job.Run(); err != nil { + return err + } } if container.Config.PortSpecs != nil { @@ -1125,81 +1179,69 @@ func (container *Container) allocateNetwork() error { if container.hostConfig.PublishAllPorts && len(binding) == 0 { binding = append(binding, PortBinding{}) } + for i := 0; i < len(binding); i++ { b := binding[i] - nat, err := iface.AllocatePort(port, b) + + portJob := eng.Job("allocate_port", container.ID) + portJob.Setenv("HostIP", b.HostIp) + portJob.Setenv("HostPort", b.HostPort) + portJob.Setenv("Proto", port.Proto()) + portJob.Setenv("ContainerPort", port.Port()) + + portEnv, err := portJob.Stdout.AddEnv() if err != nil { - iface.Release() return err } - utils.Debugf("Allocate port: %s:%s->%s", nat.Binding.HostIp, port, nat.Binding.HostPort) - binding[i] = nat.Binding + if err := portJob.Run(); err != nil { + eng.Job("release_interface", container.ID).Run() + return err + } + b.HostIp 
= portEnv.Get("HostIP") + b.HostPort = portEnv.Get("HostPort") + + binding[i] = b } bindings[port] = binding } container.writeHostConfig() container.NetworkSettings.Ports = bindings - container.network = iface - container.NetworkSettings.Bridge = container.runtime.networkManager.bridgeIface - container.NetworkSettings.IPAddress = iface.IPNet.IP.String() - container.NetworkSettings.IPPrefixLen, _ = iface.IPNet.Mask.Size() - container.NetworkSettings.Gateway = iface.Gateway.String() + container.NetworkSettings.Bridge = env.Get("Bridge") + container.NetworkSettings.IPAddress = env.Get("IP") + container.NetworkSettings.IPPrefixLen = env.GetInt("IPPrefixLen") + container.NetworkSettings.Gateway = env.Get("Gateway") return nil } func (container *Container) releaseNetwork() { - if container.Config.NetworkDisabled || container.network == nil { + if container.Config.NetworkDisabled { return } - container.network.Release() - container.network = nil + eng := container.runtime.eng + + eng.Job("release_interface", container.ID).Run() container.NetworkSettings = &NetworkSettings{} } -// FIXME: replace this with a control socket within dockerinit -func (container *Container) waitLxc() error { - for { - output, err := exec.Command("lxc-info", "-n", container.ID).CombinedOutput() - if err != nil { - return err - } - if !strings.Contains(string(output), "RUNNING") { - return nil - } - time.Sleep(500 * time.Millisecond) - } -} +func (container *Container) monitor(callback execdriver.StartCallback) error { + var ( + err error + exitCode int + ) -func (container *Container) monitor() { - // Wait for the program to exit - - // If the command does not exist, try to wait via lxc - // (This probably happens only for ghost containers, i.e. 
containers that were running when Docker started) - if container.cmd == nil { - utils.Debugf("monitor: waiting for container %s using waitLxc", container.ID) - if err := container.waitLxc(); err != nil { - utils.Errorf("monitor: while waiting for container %s, waitLxc had a problem: %s", container.ID, err) - } + if container.command == nil { + // This happends when you have a GHOST container with lxc + populateCommand(container) + err = container.runtime.RestoreCommand(container) } else { - utils.Debugf("monitor: waiting for container %s using cmd.Wait", container.ID) - if err := container.cmd.Wait(); err != nil { - // Since non-zero exit status and signal terminations will cause err to be non-nil, - // we have to actually discard it. Still, log it anyway, just in case. - utils.Debugf("monitor: cmd.Wait reported exit status %s for container %s", err, container.ID) - } - } - utils.Debugf("monitor: container %s finished", container.ID) - - exitCode := -1 - if container.cmd != nil { - exitCode = container.cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + exitCode, err = container.runtime.Run(container, callback) } - if container.runtime != nil && container.runtime.srv != nil { - container.runtime.srv.LogEvent("die", container.ID, container.runtime.repositories.ImageName(container.Image)) + if err != nil { + utils.Errorf("Error running container: %s", err) } // Cleanup @@ -1210,21 +1252,24 @@ func (container *Container) monitor() { container.stdin, container.stdinPipe = io.Pipe() } - // Report status back container.State.SetStopped(exitCode) - // Release the lock + if container.runtime != nil && container.runtime.srv != nil { + container.runtime.srv.LogEvent("die", container.ID, container.runtime.repositories.ImageName(container.Image)) + } + close(container.waitLock) - if err := container.ToDisk(); err != nil { - // FIXME: there is a race condition here which causes this to fail during the unit tests. 
- // If another goroutine was waiting for Wait() to return before removing the container's root - // from the filesystem... At this point it may already have done so. - // This is because State.setStopped() has already been called, and has caused Wait() - // to return. - // FIXME: why are we serializing running state to disk in the first place? - //log.Printf("%s: Failed to dump configuration to the disk: %s", container.ID, err) - } + // FIXME: there is a race condition here which causes this to fail during the unit tests. + // If another goroutine was waiting for Wait() to return before removing the container's root + // from the filesystem... At this point it may already have done so. + // This is because State.setStopped() has already been called, and has caused Wait() + // to return. + // FIXME: why are we serializing running state to disk in the first place? + //log.Printf("%s: Failed to dump configuration to the disk: %s", container.ID, err) + container.ToDisk() + + return err } func (container *Container) cleanup() { @@ -1255,6 +1300,30 @@ func (container *Container) cleanup() { } } + var ( + root = container.RootfsPath() + mounts = []string{ + root, + path.Join(root, "/.dockerinit"), + path.Join(root, "/.dockerenv"), + path.Join(root, "/etc/resolv.conf"), + } + ) + + if container.HostnamePath != "" && container.HostsPath != "" { + mounts = append(mounts, path.Join(root, "/etc/hostname"), path.Join(root, "/etc/hosts")) + } + + for r := range container.Volumes { + mounts = append(mounts, path.Join(root, r)) + } + + for i := len(mounts) - 1; i >= 0; i-- { + if lastError := mount.Unmount(mounts[i]); lastError != nil { + log.Printf("Failed to umount %v: %v", mounts[i], lastError) + } + } + if err := container.Unmount(); err != nil { log.Printf("%v: Failed to umount filesystem: %v", container.ID, err) } @@ -1267,13 +1336,7 @@ func (container *Container) kill(sig int) error { if !container.State.IsRunning() { return nil } - - if output, err := 
exec.Command("lxc-kill", "-n", container.ID, strconv.Itoa(sig)).CombinedOutput(); err != nil { - log.Printf("error killing container %s (%s, %s)", utils.TruncateID(container.ID), output, err) - return err - } - - return nil + return container.runtime.Kill(container, sig) } func (container *Container) Kill() error { @@ -1288,11 +1351,11 @@ func (container *Container) Kill() error { // 2. Wait for the process to die, in last resort, try to kill the process directly if err := container.WaitTimeout(10 * time.Second); err != nil { - if container.cmd == nil { + if container.command == nil { return fmt.Errorf("lxc-kill failed, impossible to kill the container %s", utils.TruncateID(container.ID)) } log.Printf("Container %s failed to exit within 10 seconds of lxc-kill %s - trying direct SIGKILL", "SIGKILL", utils.TruncateID(container.ID)) - if err := container.cmd.Process.Kill(); err != nil { + if err := container.runtime.Kill(container, 9); err != nil { return err } } @@ -1348,21 +1411,31 @@ func (container *Container) Resize(h, w int) error { } func (container *Container) ExportRw() (archive.Archive, error) { - if err := container.EnsureMounted(); err != nil { + if err := container.Mount(); err != nil { return nil, err } if container.runtime == nil { return nil, fmt.Errorf("Can't load storage driver for unregistered container %s", container.ID) } - - return container.runtime.Diff(container) + archive, err := container.runtime.Diff(container) + if err != nil { + container.Unmount() + return nil, err + } + return EofReader(archive, func() { container.Unmount() }), nil } func (container *Container) Export() (archive.Archive, error) { - if err := container.EnsureMounted(); err != nil { + if err := container.Mount(); err != nil { return nil, err } - return archive.Tar(container.RootfsPath(), archive.Uncompressed) + + archive, err := archive.Tar(container.basefs, archive.Uncompressed) + if err != nil { + container.Unmount() + return nil, err + } + return EofReader(archive, 
func() { container.Unmount() }), nil } func (container *Container) WaitTimeout(timeout time.Duration) error { @@ -1380,12 +1453,6 @@ func (container *Container) WaitTimeout(timeout time.Duration) error { } } -func (container *Container) EnsureMounted() error { - // FIXME: EnsureMounted is deprecated because drivers are now responsible - // for re-entrant mounting in their Get() method. - return container.Mount() -} - func (container *Container) Mount() error { return container.runtime.Mount(container) } @@ -1402,32 +1469,6 @@ func (container *Container) GetImage() (*Image, error) { } func (container *Container) Unmount() error { - var ( - err error - root = container.RootfsPath() - mounts = []string{ - path.Join(root, "/.dockerinit"), - path.Join(root, "/.dockerenv"), - path.Join(root, "/etc/resolv.conf"), - } - ) - - if container.HostnamePath != "" && container.HostsPath != "" { - mounts = append(mounts, path.Join(root, "/etc/hostname"), path.Join(root, "/etc/hosts")) - } - - for r := range container.Volumes { - mounts = append(mounts, path.Join(root, r)) - } - - for _, m := range mounts { - if lastError := mount.Unmount(m); lastError != nil { - err = lastError - } - } - if err != nil { - return err - } return container.runtime.Unmount(container) } @@ -1463,13 +1504,16 @@ func (container *Container) EnvConfigPath() (string, error) { return p, nil } -func (container *Container) lxcConfigPath() string { - return path.Join(container.root, "config.lxc") +// This method must be exported to be used from the lxc template +// This directory is only usable when the container is running +func (container *Container) RootfsPath() string { + return path.Join(container.root, "root") } -// This method must be exported to be used from the lxc template -func (container *Container) RootfsPath() string { - return container.rootfs +// This is the stand-alone version of the root fs, without any additional mounts. 
+// This directory is usable whenever the container is mounted (and not unmounted) +func (container *Container) BasefsPath() string { + return container.basefs } func validateID(id string) error { @@ -1487,10 +1531,11 @@ func (container *Container) GetSize() (int64, int64) { driver = container.runtime.driver ) - if err := container.EnsureMounted(); err != nil { + if err := container.Mount(); err != nil { utils.Errorf("Warning: failed to compute size of container rootfs %s: %s", container.ID, err) return sizeRw, sizeRootfs } + defer container.Unmount() if differ, ok := container.runtime.driver.(graphdriver.Differ); ok { sizeRw, err = differ.DiffSize(container.ID) @@ -1503,14 +1548,14 @@ func (container *Container) GetSize() (int64, int64) { } else { changes, _ := container.Changes() if changes != nil { - sizeRw = archive.ChangesSize(container.RootfsPath(), changes) + sizeRw = archive.ChangesSize(container.basefs, changes) } else { sizeRw = -1 } } - if _, err = os.Stat(container.RootfsPath()); err != nil { - if sizeRootfs, err = utils.TreeSize(container.RootfsPath()); err != nil { + if _, err = os.Stat(container.basefs); err != nil { + if sizeRootfs, err = utils.TreeSize(container.basefs); err != nil { sizeRootfs = -1 } } @@ -1518,13 +1563,14 @@ func (container *Container) GetSize() (int64, int64) { } func (container *Container) Copy(resource string) (archive.Archive, error) { - if err := container.EnsureMounted(); err != nil { + if err := container.Mount(); err != nil { return nil, err } var filter []string - basePath := path.Join(container.RootfsPath(), resource) + basePath := path.Join(container.basefs, resource) stat, err := os.Stat(basePath) if err != nil { + container.Unmount() return nil, err } if !stat.IsDir() { @@ -1535,11 +1581,15 @@ func (container *Container) Copy(resource string) (archive.Archive, error) { filter = []string{path.Base(basePath)} basePath = path.Dir(basePath) } - return archive.TarFilter(basePath, &archive.TarOptions{ + + archive, err := 
archive.TarFilter(basePath, &archive.TarOptions{ Compression: archive.Uncompressed, Includes: filter, - Recursive: true, }) + if err != nil { + return nil, err + } + return EofReader(archive, func() { container.Unmount() }), nil } // Returns true if the container exposes a certain port diff --git a/contrib/completion/bash/docker b/contrib/completion/bash/docker index f1a515d00a..1449330986 100755 --- a/contrib/completion/bash/docker +++ b/contrib/completion/bash/docker @@ -21,64 +21,88 @@ # If the docker daemon is using a unix socket for communication your user # must have access to the socket for the completions to function correctly +__docker_q() { + docker 2>/dev/null "$@" +} + __docker_containers_all() { - local containers - containers="$( docker ps -a -q )" - names="$( docker inspect -format '{{.Name}}' $containers | sed 's,^/,,' )" + local containers="$( __docker_q ps -a -q )" + local names="$( __docker_q inspect --format '{{.Name}}' $containers | sed 's,^/,,' )" COMPREPLY=( $( compgen -W "$names $containers" -- "$cur" ) ) } __docker_containers_running() { - local containers - containers="$( docker ps -q )" - names="$( docker inspect -format '{{.Name}}' $containers | sed 's,^/,,' )" + local containers="$( __docker_q ps -q )" + local names="$( __docker_q inspect --format '{{.Name}}' $containers | sed 's,^/,,' )" COMPREPLY=( $( compgen -W "$names $containers" -- "$cur" ) ) } __docker_containers_stopped() { - local containers - containers="$( comm -13 <(docker ps -q | sort -u) <(docker ps -a -q | sort -u) )" - names="$( docker inspect -format '{{.Name}}' $containers | sed 's,^/,,' )" + local containers="$( { __docker_q ps -a -q; __docker_q ps -q; } | sort | uniq -u )" + local names="$( __docker_q inspect --format '{{.Name}}' $containers | sed 's,^/,,' )" COMPREPLY=( $( compgen -W "$names $containers" -- "$cur" ) ) } __docker_image_repos() { - local repos - repos="$( docker images | awk 'NR>1{print $1}' )" + local repos="$( __docker_q images | awk 'NR>1{print 
$1}' | grep -v '^$' )" COMPREPLY=( $( compgen -W "$repos" -- "$cur" ) ) } -__docker_images() -{ - local images - images="$( docker images | awk 'NR>1{print $1":"$2}' )" - COMPREPLY=( $( compgen -W "$images" -- "$cur" ) ) - __ltrim_colon_completions "$cur" -} - __docker_image_repos_and_tags() { - local repos images - repos="$( docker images | awk 'NR>1{print $1}' )" - images="$( docker images | awk 'NR>1{print $1":"$2}' )" + local repos="$( __docker_q images | awk 'NR>1{print $1}' | grep -v '^$' )" + local images="$( __docker_q images | awk 'NR>1{print $1":"$2}' | grep -v '^:' )" COMPREPLY=( $( compgen -W "$repos $images" -- "$cur" ) ) __ltrim_colon_completions "$cur" } +__docker_image_repos_and_tags_and_ids() +{ + local repos="$( __docker_q images | awk 'NR>1{print $1}' | grep -v '^$' )" + local images="$( __docker_q images | awk 'NR>1{print $1":"$2}' | grep -v '^:' )" + local ids="$( __docker_q images -a -q )" + COMPREPLY=( $( compgen -W "$repos $images $ids" -- "$cur" ) ) + __ltrim_colon_completions "$cur" +} + __docker_containers_and_images() { - local containers images - containers="$( docker ps -a -q )" - names="$( docker inspect -format '{{.Name}}' $containers | sed 's,^/,,' )" - images="$( docker images | awk 'NR>1{print $1":"$2}' )" - COMPREPLY=( $( compgen -W "$images $names $containers" -- "$cur" ) ) + local containers="$( __docker_q ps -a -q )" + local names="$( __docker_q inspect --format '{{.Name}}' $containers | sed 's,^/,,' )" + local repos="$( __docker_q images | awk 'NR>1{print $1}' | grep -v '^$' )" + local images="$( __docker_q images | awk 'NR>1{print $1":"$2}' | grep -v '^:' )" + local ids="$( __docker_q images -a -q )" + COMPREPLY=( $( compgen -W "$containers $names $repos $images $ids" -- "$cur" ) ) __ltrim_colon_completions "$cur" } +__docker_pos_first_nonflag() +{ + local argument_flags=$1 + + local counter=$cpos + while [ $counter -le $cword ]; do + if [ -n "$argument_flags" ] && eval "case '${words[$counter]}' in $argument_flags) true ;; 
*) false ;; esac"; then + (( counter++ )) + else + case "${words[$counter]}" in + -*) + ;; + *) + break + ;; + esac + fi + (( counter++ )) + done + + echo $counter +} + _docker_docker() { case "$prev" in @@ -101,15 +125,24 @@ _docker_docker() _docker_attach() { - if [ $cpos -eq $cword ]; then - __docker_containers_running - fi + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--no-stdin --sig-proxy" -- "$cur" ) ) + ;; + *) + local counter="$(__docker_pos_first_nonflag)" + if [ $cword -eq $counter ]; then + __docker_containers_running + fi + ;; + esac } _docker_build() { case "$prev" in - -t) + -t|--tag) + __docker_image_repos_and_tags return ;; *) @@ -118,10 +151,13 @@ _docker_build() case "$cur" in -*) - COMPREPLY=( $( compgen -W "-no-cache -t -q -rm" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "-t --tag -q --quiet --no-cache --rm" -- "$cur" ) ) ;; *) - _filedir + local counter="$(__docker_pos_first_nonflag '-t|--tag')" + if [ $cword -eq $counter ]; then + _filedir + fi ;; esac } @@ -129,7 +165,7 @@ _docker_build() _docker_commit() { case "$prev" in - -author|-m|-run) + -m|--message|-a|--author|--run) return ;; *) @@ -138,26 +174,20 @@ _docker_commit() case "$cur" in -*) - COMPREPLY=( $( compgen -W "-author -m -run" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "-m --message -a --author --run" -- "$cur" ) ) ;; *) - local counter=$cpos - while [ $counter -le $cword ]; do - case "${words[$counter]}" in - -author|-m|-run) - (( counter++ )) - ;; - -*) - ;; - *) - break - ;; - esac - (( counter++ )) - done + local counter=$(__docker_pos_first_nonflag '-m|--message|-a|--author|--run') - if [ $counter -eq $cword ]; then + if [ $cword -eq $counter ]; then __docker_containers_all + return + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then + __docker_image_repos_and_tags + return fi ;; esac @@ -165,16 +195,32 @@ _docker_commit() _docker_cp() { - if [ $cpos -eq $cword ]; then - __docker_containers_all - else + local counter=$(__docker_pos_first_nonflag) + if [ $cword 
-eq $counter ]; then + case "$cur" in + *:) + return + ;; + *) + __docker_containers_all + COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) + compopt -o nospace + return + ;; + esac + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then _filedir + return fi } _docker_diff() { - if [ $cpos -eq $cword ]; then + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then __docker_containers_all fi } @@ -182,7 +228,7 @@ _docker_diff() _docker_events() { case "$prev" in - -since) + --since) return ;; *) @@ -191,7 +237,7 @@ _docker_events() case "$cur" in -*) - COMPREPLY=( $( compgen -W "-since" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "--since" -- "$cur" ) ) ;; *) ;; @@ -200,45 +246,44 @@ _docker_events() _docker_export() { - if [ $cpos -eq $cword ]; then + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then __docker_containers_all fi } _docker_help() { - if [ $cpos -eq $cword ]; then + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then COMPREPLY=( $( compgen -W "$commands" -- "$cur" ) ) fi } _docker_history() { - if [ $cpos -eq $cword ]; then - __docker_image_repos_and_tags - fi + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-q --quiet --no-trunc" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_image_repos_and_tags_and_ids + fi + ;; + esac } _docker_images() { case "$cur" in -*) - COMPREPLY=( $( compgen -W "-a -notrunc -q -viz" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "-q --quiet -a --all --no-trunc -v --viz -t --tree" -- "$cur" ) ) ;; *) - local counter=$cpos - while [ $counter -le $cword ]; do - case "${words[$counter]}" in - -*) - ;; - *) - break - ;; - esac - (( counter++ )) - done - - if [ $counter -eq $cword ]; then + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then __docker_image_repos fi ;; @@ -247,7 +292,16 @@ _docker_images() _docker_import() { - return + local 
counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + return + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then + __docker_image_repos_and_tags + return + fi } _docker_info() @@ -257,25 +311,16 @@ _docker_info() _docker_insert() { - if [ $cpos -eq $cword ]; then - __docker_image_repos_and_tags + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_image_repos_and_tags_and_ids fi } _docker_inspect() -{ - __docker_containers_and_images -} - -_docker_kill() -{ - __docker_containers_running -} - -_docker_login() { case "$prev" in - -e|-p|-u) + -f|--format) return ;; *) @@ -284,7 +329,37 @@ _docker_login() case "$cur" in -*) - COMPREPLY=( $( compgen -W "-e -p -u" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "-f --format" -- "$cur" ) ) + ;; + *) + __docker_containers_and_images + ;; + esac +} + +_docker_kill() +{ + __docker_containers_running +} + +_docker_load() +{ + return +} + +_docker_login() +{ + case "$prev" in + -u|--username|-p|--password|-e|--email) + return + ;; + *) + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-u --username -p --password -e --email" -- "$cur" ) ) ;; *) ;; @@ -293,14 +368,23 @@ _docker_login() _docker_logs() { - if [ $cpos -eq $cword ]; then - __docker_containers_all - fi + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-f --follow" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_containers_all + fi + ;; + esac } _docker_port() { - if [ $cpos -eq $cword ]; then + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then __docker_containers_all fi } @@ -308,7 +392,13 @@ _docker_port() _docker_ps() { case "$prev" in - -beforeId|-n|-sinceId) + --since-id|--before-id) + COMPREPLY=( $( compgen -W "$( __docker_q ps -a -q )" -- "$cur" ) ) + # TODO replace this with __docker_containers_all + # see https://github.com/dotcloud/docker/issues/3565 + return + ;; + -n) return ;; *) @@ 
-317,7 +407,7 @@ _docker_ps() case "$cur" in -*) - COMPREPLY=( $( compgen -W "-a -beforeId -l -n -notrunc -q -s -sinceId" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "-q --quiet -s --size -a --all --no-trunc -l --latest --since-id --before-id -n" -- "$cur" ) ) ;; *) ;; @@ -327,7 +417,7 @@ _docker_ps() _docker_pull() { case "$prev" in - -t) + -t|--tag) return ;; *) @@ -336,22 +426,31 @@ _docker_pull() case "$cur" in -*) - COMPREPLY=( $( compgen -W "-t" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "-t --tag" -- "$cur" ) ) ;; *) + local counter=$(__docker_pos_first_nonflag '-t|--tag') + if [ $cword -eq $counter ]; then + __docker_image_repos_and_tags + fi ;; esac } _docker_push() { - __docker_image_repos + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_image_repos + # TODO replace this with __docker_image_repos_and_tags + # see https://github.com/dotcloud/docker/issues/3411 + fi } _docker_restart() { case "$prev" in - -t) + -t|--time) return ;; *) @@ -360,7 +459,7 @@ _docker_restart() case "$cur" in -*) - COMPREPLY=( $( compgen -W "-t" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "-t --time" -- "$cur" ) ) ;; *) __docker_containers_all @@ -372,7 +471,7 @@ _docker_rm() { case "$cur" in -*) - COMPREPLY=( $( compgen -W "-v" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "-v --volumes -l --link" -- "$cur" ) ) ;; *) __docker_containers_stopped @@ -382,19 +481,27 @@ _docker_rm() _docker_rmi() { - __docker_image_repos_and_tags + __docker_image_repos_and_tags_and_ids } _docker_run() { case "$prev" in - -cidfile) + --cidfile) _filedir ;; - -volumes-from) + --volumes-from) __docker_containers_all ;; - -a|-c|-dns|-e|-entrypoint|-h|-lxc-conf|-m|-p|-u|-v|-w) + -v|--volume) + # TODO something magical with colons and _filedir ? 
+ return + ;; + -e|--env) + COMPREPLY=( $( compgen -e -- "$cur" ) ) + return + ;; + --entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf) return ;; *) @@ -403,45 +510,30 @@ _docker_run() case "$cur" in -*) - COMPREPLY=( $( compgen -W "-a -c -cidfile -d -dns -e -entrypoint -h -i -lxc-conf -m -n -p -privileged -t -u -v -volumes-from -w" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "--rm -d --detach -n --networking --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir -c --cpu-shares --sig-proxy --name -a --attach -v --volume --link -e --env -p --publish --expose --dns --volumes-from --lxc-conf" -- "$cur" ) ) ;; *) - local counter=$cpos - while [ $counter -le $cword ]; do - case "${words[$counter]}" in - -a|-c|-cidfile|-dns|-e|-entrypoint|-h|-lxc-conf|-m|-p|-u|-v|-volumes-from|-w) - (( counter++ )) - ;; - -*) - ;; - *) - break - ;; - esac - (( counter++ )) - done + local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf') - if [ $counter -eq $cword ]; then - __docker_image_repos_and_tags + if [ $cword -eq $counter ]; then + __docker_image_repos_and_tags_and_ids fi ;; esac } +_docker_save() +{ + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_image_repos_and_tags_and_ids + fi +} + _docker_search() -{ - COMPREPLY=( $( compgen -W "-notrunc" "-stars" "-trusted" -- "$cur" ) ) -} - -_docker_start() -{ - __docker_containers_stopped -} - -_docker_stop() { case "$prev" in - -t) + -s|--stars) return ;; *) @@ -450,7 +542,38 @@ _docker_stop() case "$cur" in -*) - COMPREPLY=( $( compgen -W "-t" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "--no-trunc -t --trusted -s --stars" -- "$cur" ) ) + ;; + *) + ;; + 
esac +} + +_docker_start() +{ + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-a --attach -i --interactive" -- "$cur" ) ) + ;; + *) + __docker_containers_stopped + ;; + esac +} + +_docker_stop() +{ + case "$prev" in + -t|--time) + return + ;; + *) + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-t --time" -- "$cur" ) ) ;; *) __docker_containers_running @@ -460,12 +583,31 @@ _docker_stop() _docker_tag() { - COMPREPLY=( $( compgen -W "-f" -- "$cur" ) ) + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-f --force" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + + if [ $cword -eq $counter ]; then + __docker_image_repos_and_tags + return + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then + __docker_image_repos_and_tags + return + fi + ;; + esac } _docker_top() { - if [ $cpos -eq $cword ]; then + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then __docker_containers_running fi } @@ -482,7 +624,6 @@ _docker_wait() _docker() { - local cur prev words cword command="docker" counter=1 word cpos local commands=" attach build @@ -498,6 +639,7 @@ _docker() insert inspect kill + load login logs port @@ -508,6 +650,7 @@ _docker() rm rmi run + save search start stop @@ -518,18 +661,20 @@ _docker() " COMPREPLY=() + local cur prev words cword _get_comp_words_by_ref -n : cur prev words cword + local command='docker' + local counter=1 while [ $counter -lt $cword ]; do - word="${words[$counter]}" - case "$word" in + case "${words[$counter]}" in -H) (( counter++ )) ;; -*) ;; *) - command="$word" + command="${words[$counter]}" cpos=$counter (( cpos++ )) break diff --git a/contrib/completion/zsh/_docker b/contrib/completion/zsh/_docker index 92acdb13dd..8b50bac01b 100755 --- a/contrib/completion/zsh/_docker +++ b/contrib/completion/zsh/_docker @@ -174,7 +174,7 @@ __docker_subcommand () { (ps) _arguments '-a[Show all containers. 
Only running containers are shown by default]' \ '-h[Show help]' \ - '-beforeId=-[Show only container created before Id, include non-running one]:containers:__docker_containers' \ + '-before-id=-[Show only container created before Id, include non-running one]:containers:__docker_containers' \ '-n=-[Show n last created containers, include non-running one]:n:(1 5 10 25 50)' ;; (tag) @@ -189,9 +189,9 @@ __docker_subcommand () { '-a=-[Attach to stdin, stdout or stderr]:toggle:(true false)' \ '-c=-[CPU shares (relative weight)]:CPU shares: ' \ '-d[Detached mode: leave the container running in the background]' \ - '*-dns=[Set custom dns servers]:dns server: ' \ + '*--dns=[Set custom dns servers]:dns server: ' \ '*-e=[Set environment variables]:environment variable: ' \ - '-entrypoint=-[Overwrite the default entrypoint of the image]:entry point: ' \ + '--entrypoint=-[Overwrite the default entrypoint of the image]:entry point: ' \ '-h=-[Container host name]:hostname:_hosts' \ '-i[Keep stdin open even if not attached]' \ '-m=-[Memory limit (in bytes)]:limit: ' \ @@ -199,7 +199,7 @@ __docker_subcommand () { '-t=-[Allocate a pseudo-tty]:toggle:(true false)' \ '-u=-[Username or UID]:user:_users' \ '*-v=-[Bind mount a volume (e.g. 
from the host: -v /host:/container, from docker: -v /container)]:volume: '\ - '-volumes-from=-[Mount volumes from the specified container]:volume: ' \ + '--volumes-from=-[Mount volumes from the specified container]:volume: ' \ '(-):images:__docker_images' \ '(-):command: _command_names -e' \ '*::arguments: _normal' diff --git a/contrib/init/systemd/docker.service b/contrib/init/systemd/docker.service index aae7b6daf9..387be2eb1c 100644 --- a/contrib/init/systemd/docker.service +++ b/contrib/init/systemd/docker.service @@ -1,11 +1,11 @@ [Unit] -Description=Docker Application Container Engine +Description=Docker Application Container Engine Documentation=http://docs.docker.io After=network.target [Service] -ExecStartPre=/bin/mount --make-rprivate / ExecStart=/usr/bin/docker -d +Restart=on-failure [Install] WantedBy=multi-user.target diff --git a/contrib/init/systemd/socket-activation/docker.service b/contrib/init/systemd/socket-activation/docker.service new file mode 100644 index 0000000000..c795f9c3b4 --- /dev/null +++ b/contrib/init/systemd/socket-activation/docker.service @@ -0,0 +1,11 @@ +[Unit] +Description=Docker Application Container Engine +Documentation=http://docs.docker.io +After=network.target + +[Service] +ExecStart=/usr/bin/docker -d -H fd:// +Restart=on-failure + +[Install] +WantedBy=multi-user.target diff --git a/contrib/init/systemd/socket-activation/docker.socket b/contrib/init/systemd/socket-activation/docker.socket new file mode 100644 index 0000000000..3635c89385 --- /dev/null +++ b/contrib/init/systemd/socket-activation/docker.socket @@ -0,0 +1,8 @@ +[Unit] +Description=Docker Socket for the API + +[Socket] +ListenStream=/var/run/docker.sock + +[Install] +WantedBy=sockets.target diff --git a/contrib/mkimage-arch-pacman.conf b/contrib/mkimage-arch-pacman.conf new file mode 100644 index 0000000000..45fe03dc96 --- /dev/null +++ b/contrib/mkimage-arch-pacman.conf @@ -0,0 +1,92 @@ +# +# /etc/pacman.conf +# +# See the pacman.conf(5) manpage for option 
and repository directives + +# +# GENERAL OPTIONS +# +[options] +# The following paths are commented out with their default values listed. +# If you wish to use different paths, uncomment and update the paths. +#RootDir = / +#DBPath = /var/lib/pacman/ +#CacheDir = /var/cache/pacman/pkg/ +#LogFile = /var/log/pacman.log +#GPGDir = /etc/pacman.d/gnupg/ +HoldPkg = pacman glibc +#XferCommand = /usr/bin/curl -C - -f %u > %o +#XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u +#CleanMethod = KeepInstalled +#UseDelta = 0.7 +Architecture = auto + +# Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup +#IgnorePkg = +#IgnoreGroup = + +#NoUpgrade = +#NoExtract = + +# Misc options +#UseSyslog +#Color +#TotalDownload +# We cannot check disk space from within a chroot environment +#CheckSpace +#VerbosePkgLists + +# By default, pacman accepts packages signed by keys that its local keyring +# trusts (see pacman-key and its man page), as well as unsigned packages. +SigLevel = Required DatabaseOptional +LocalFileSigLevel = Optional +#RemoteFileSigLevel = Required + +# NOTE: You must run `pacman-key --init` before first using pacman; the local +# keyring can then be populated with the keys of all official Arch Linux +# packagers with `pacman-key --populate archlinux`. + +# +# REPOSITORIES +# - can be defined here or included from another file +# - pacman will search repositories in the order defined here +# - local/custom mirrors can be added here or in separate files +# - repositories listed first will take precedence when packages +# have identical names, regardless of version number +# - URLs will have $repo replaced by the name of the current repo +# - URLs will have $arch replaced by the name of the architecture +# +# Repository entries are of the format: +# [repo-name] +# Server = ServerName +# Include = IncludePath +# +# The header [repo-name] is crucial - it must be present and +# uncommented to enable the repo. 
+# + +# The testing repositories are disabled by default. To enable, uncomment the +# repo name header and Include lines. You can add preferred servers immediately +# after the header, and they will be used before the default mirrors. + +#[testing] +#Include = /etc/pacman.d/mirrorlist + +[core] +Include = /etc/pacman.d/mirrorlist + +[extra] +Include = /etc/pacman.d/mirrorlist + +#[community-testing] +#Include = /etc/pacman.d/mirrorlist + +[community] +Include = /etc/pacman.d/mirrorlist + +# An example of a custom package repository. See the pacman manpage for +# tips on creating your own repositories. +#[custom] +#SigLevel = Optional TrustAll +#Server = file:///home/custompkgs + diff --git a/contrib/mkimage-arch.sh b/contrib/mkimage-arch.sh index db14e8674e..d178a1df3d 100755 --- a/contrib/mkimage-arch.sh +++ b/contrib/mkimage-arch.sh @@ -1,30 +1,29 @@ -#!/bin/bash +#!/usr/bin/env bash # Generate a minimal filesystem for archlinux and load it into the local # docker as "archlinux" # requires root set -e -PACSTRAP=$(which pacstrap) -[ "$PACSTRAP" ] || { +hash pacstrap &>/dev/null || { echo "Could not find pacstrap. Run pacman -S arch-install-scripts" exit 1 } -EXPECT=$(which expect) -[ "$EXPECT" ] || { + +hash expect &>/dev/null || { echo "Could not find expect. 
Run pacman -S expect" exit 1 } -ROOTFS=~/rootfs-arch-$$-$RANDOM -mkdir $ROOTFS +ROOTFS=$(mktemp -d /tmp/rootfs-archlinux-XXXXXXXXXX) +chmod 755 $ROOTFS -#packages to ignore for space savings +# packages to ignore for space savings PKGIGNORE=linux,jfsutils,lvm2,cryptsetup,groff,man-db,man-pages,mdadm,pciutils,pcmciautils,reiserfsprogs,s-nail,xfsprogs - + expect < $ROOTFS/etc/locale.gen < $ROOTFS/etc/locale.gen arch-chroot $ROOTFS locale-gen arch-chroot $ROOTFS /bin/sh -c 'echo "Server = http://mirrors.kernel.org/archlinux/\$repo/os/\$arch" > /etc/pacman.d/mirrorlist' # udev doesn't work in containers, rebuild /dev -DEV=${ROOTFS}/dev -mv ${DEV} ${DEV}.old -mkdir -p ${DEV} -mknod -m 666 ${DEV}/null c 1 3 -mknod -m 666 ${DEV}/zero c 1 5 -mknod -m 666 ${DEV}/random c 1 8 -mknod -m 666 ${DEV}/urandom c 1 9 -mkdir -m 755 ${DEV}/pts -mkdir -m 1777 ${DEV}/shm -mknod -m 666 ${DEV}/tty c 5 0 -mknod -m 600 ${DEV}/console c 5 1 -mknod -m 666 ${DEV}/tty0 c 4 0 -mknod -m 666 ${DEV}/full c 1 7 -mknod -m 600 ${DEV}/initctl p -mknod -m 666 ${DEV}/ptmx c 5 2 +DEV=$ROOTFS/dev +rm -rf $DEV +mkdir -p $DEV +mknod -m 666 $DEV/null c 1 3 +mknod -m 666 $DEV/zero c 1 5 +mknod -m 666 $DEV/random c 1 8 +mknod -m 666 $DEV/urandom c 1 9 +mkdir -m 755 $DEV/pts +mkdir -m 1777 $DEV/shm +mknod -m 666 $DEV/tty c 5 0 +mknod -m 600 $DEV/console c 5 1 +mknod -m 666 $DEV/tty0 c 4 0 +mknod -m 666 $DEV/full c 1 7 +mknod -m 600 $DEV/initctl p +mknod -m 666 $DEV/ptmx c 5 2 tar --numeric-owner -C $ROOTFS -c . | docker import - archlinux docker run -i -t archlinux echo Success. diff --git a/contrib/mkimage-busybox.sh b/contrib/mkimage-busybox.sh index 7eed0c800c..c1bb88c350 100755 --- a/contrib/mkimage-busybox.sh +++ b/contrib/mkimage-busybox.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Generate a very minimal filesystem based on busybox-static, # and load it into the local docker under the name "busybox". 
diff --git a/contrib/mkimage-crux.sh b/contrib/mkimage-crux.sh new file mode 100755 index 0000000000..074c334bba --- /dev/null +++ b/contrib/mkimage-crux.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash +# Generate a minimal filesystem for CRUX/Linux and load it into the local +# docker as "cruxlinux" +# requires root and the crux iso (http://crux.nu) + +set -e + +die () { + echo >&2 "$@" + exit 1 +} + +[ "$#" -eq 1 ] || die "1 argument(s) required, $# provided. Usage: ./mkimage-crux.sh /path/to/iso" + +ISO=${1} + +ROOTFS=$(mktemp -d /tmp/rootfs-crux-XXXXXXXXXX) +CRUX=$(mktemp -d /tmp/crux-XXXXXXXXXX) +TMP=$(mktemp -d /tmp/XXXXXXXXXX) + +VERSION=$(basename --suffix=.iso $ISO | sed 's/[^0-9.]*\([0-9.]*\).*/\1/') + +# Mount the ISO +mount -o ro,loop $ISO $CRUX + +# Extract pkgutils +tar -C $TMP -xf $CRUX/tools/pkgutils#*.pkg.tar.gz + +# Put pkgadd in the $PATH +export PATH="$TMP/usr/bin:$PATH" + +# Install core packages +mkdir -p $ROOTFS/var/lib/pkg +touch $ROOTFS/var/lib/pkg/db +for pkg in $CRUX/crux/core/*; do + pkgadd -r $ROOTFS $pkg +done + +# Remove agetty and inittab config +if (grep agetty ${ROOTFS}/etc/inittab 2>&1 > /dev/null); then + echo "Removing agetty from /etc/inittab ..." + chroot ${ROOTFS} sed -i -e "/agetty/d" /etc/inittab + chroot ${ROOTFS} sed -i -e "/shutdown/d" /etc/inittab + chroot ${ROOTFS} sed -i -e "/^$/N;/^\n$/d" /etc/inittab +fi + +# Remove kernel source +rm -rf $ROOTFS/usr/src/* + +# udev doesn't work in containers, rebuild /dev +DEV=$ROOTFS/dev +rm -rf $DEV +mkdir -p $DEV +mknod -m 666 $DEV/null c 1 3 +mknod -m 666 $DEV/zero c 1 5 +mknod -m 666 $DEV/random c 1 8 +mknod -m 666 $DEV/urandom c 1 9 +mkdir -m 755 $DEV/pts +mkdir -m 1777 $DEV/shm +mknod -m 666 $DEV/tty c 5 0 +mknod -m 600 $DEV/console c 5 1 +mknod -m 666 $DEV/tty0 c 4 0 +mknod -m 666 $DEV/full c 1 7 +mknod -m 600 $DEV/initctl p +mknod -m 666 $DEV/ptmx c 5 2 + +IMAGE_ID=$(tar --numeric-owner -C $ROOTFS -c . 
| docker import - crux:$VERSION) +docker tag $IMAGE_ID crux:latest +docker run -i -t crux echo Success. + +# Cleanup +umount $CRUX +rm -rf $ROOTFS +rm -rf $CRUX +rm -rf $TMP diff --git a/contrib/mkimage-debootstrap.sh b/contrib/mkimage-debootstrap.sh index 3f268b52da..1c18c120ef 100755 --- a/contrib/mkimage-debootstrap.sh +++ b/contrib/mkimage-debootstrap.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e variant='minbase' @@ -117,6 +117,11 @@ target="/tmp/docker-rootfs-debootstrap-$suite-$$-$RANDOM" cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" returnTo="$(pwd -P)" +if [ "$suite" = 'lucid' ]; then + # lucid fails and doesn't include gpgv in minbase; "apt-get update" fails + include+=',gpgv' +fi + set -x # bootstrap @@ -138,18 +143,26 @@ if [ -z "$strictDebootstrap" ]; then # shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB) sudo chroot . apt-get clean - # while we're at it, apt is unnecessarily slow inside containers - # this forces dpkg not to call sync() after package extraction and speeds up install - # the benefit is huge on spinning disks, and the penalty is nonexistent on SSD or decent server virtualization - echo 'force-unsafe-io' | sudo tee etc/dpkg/dpkg.cfg.d/02apt-speedup > /dev/null - # we want to effectively run "apt-get clean" after every install to keep images small (see output of "apt-get clean -s" for context) + if strings usr/bin/dpkg | grep -q unsafe-io; then + # while we're at it, apt is unnecessarily slow inside containers + # this forces dpkg not to call sync() after package extraction and speeds up install + # the benefit is huge on spinning disks, and the penalty is nonexistent on SSD or decent server virtualization + echo 'force-unsafe-io' | sudo tee etc/dpkg/dpkg.cfg.d/02apt-speedup > /dev/null + # we have this wrapped up in an "if" because the "force-unsafe-io" + # option was added in dpkg 1.15.8.6 + # (see http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=584254#82), + # and ubuntu lucid/10.04 only has 
1.15.5.6 + fi + + # we want to effectively run "apt-get clean" after every install to keep images small (see output of "apt-get clean -s" for context) { aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";' echo "DPkg::Post-Invoke { ${aptGetClean} };" echo "APT::Update::Post-Invoke { ${aptGetClean} };" echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";' } | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null - # and remove the translations, too + + # and remove the translations, too echo 'Acquire::Languages "none";' | sudo tee etc/apt/apt.conf.d/no-languages > /dev/null # helpful undo lines for each the above tweaks (for lack of a better home to keep track of them): @@ -190,6 +203,9 @@ if [ -z "$strictDebootstrap" ]; then ;; esac fi + + # make sure our packages lists are as up to date as we can get them + sudo chroot . apt-get update fi if [ "$justTar" ]; then diff --git a/contrib/mkimage-rinse.sh b/contrib/mkimage-rinse.sh index ff8f173f98..dfe9999d92 100755 --- a/contrib/mkimage-rinse.sh +++ b/contrib/mkimage-rinse.sh @@ -1,4 +1,11 @@ -#!/bin/bash +#!/usr/bin/env bash +# +# Create a base CentOS Docker image. + +# This script is useful on systems with rinse available (e.g., +# building a CentOS image on Debian). See contrib/mkimage-yum.sh for +# a way to build CentOS images on systems with yum installed. + set -e repo="$1" diff --git a/contrib/mkimage-unittest.sh b/contrib/mkimage-unittest.sh index af6488e9b7..a33f238845 100755 --- a/contrib/mkimage-unittest.sh +++ b/contrib/mkimage-unittest.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Generate a very minimal filesystem based on busybox-static, # and load it into the local docker under the name "docker-ut". 
diff --git a/contrib/mkimage-yum.sh b/contrib/mkimage-yum.sh new file mode 100755 index 0000000000..54e99f1f04 --- /dev/null +++ b/contrib/mkimage-yum.sh @@ -0,0 +1,90 @@ +#!/usr/bin/env bash +# +# Create a base CentOS Docker image. +# +# This script is useful on systems with yum installed (e.g., building +# a CentOS image on CentOS). See contrib/mkimage-rinse.sh for a way +# to build CentOS images on other systems. + +usage() { + cat < +OPTIONS: + -y The path to the yum config to install packages from. The + default is /etc/yum.conf. +EOOPTS + exit 1 +} + +# option defaults +yum_config=/etc/yum.conf +while getopts ":y:h" opt; do + case $opt in + y) + yum_config=$OPTARG + ;; + h) + usage + ;; + \?) + echo "Invalid option: -$OPTARG" + usage + ;; + esac +done +shift $((OPTIND - 1)) +name=$1 + +if [[ -z $name ]]; then + usage +fi + +#-------------------- + +target=$(mktemp -d --tmpdir $(basename $0).XXXXXX) + +set -x + +for dev in console null zero urandom; do + /sbin/MAKEDEV -d "$target"/dev -x $dev +done + +yum -c "$yum_config" --installroot="$target" --setopt=tsflags=nodocs \ + --setopt=group_package_types=mandatory -y groupinstall Core +yum -c "$yum_config" --installroot="$mount" -y clean all + +cat > "$target"/etc/sysconfig/network <&2 "warning: cannot autodetect OS version, using '$name' as tag" + version=$name +fi + +tar --numeric-owner -c -C "$target" . 
| docker import - $name:$version +docker run -i -t $name:$version echo success + +rm -rf "$target" diff --git a/contrib/mkseccomp.pl b/contrib/mkseccomp.pl index 44088f952c..5c583cc3d3 100755 --- a/contrib/mkseccomp.pl +++ b/contrib/mkseccomp.pl @@ -41,7 +41,7 @@ use warnings; if( -t ) { print STDERR "Helper script to make seccomp filters for Docker/LXC.\n"; - print STDERR "Usage: mkseccomp.pl [files...]\n"; + print STDERR "Usage: mkseccomp.pl < [files...]\n"; exit 1; } diff --git a/contrib/mkseccomp.sample b/contrib/mkseccomp.sample index 25bf4822dc..7a0c8d1925 100644 --- a/contrib/mkseccomp.sample +++ b/contrib/mkseccomp.sample @@ -195,6 +195,7 @@ shutdown socket // (*) socketcall socketpair +sethostname // (*) // Signal related pause @@ -261,7 +262,7 @@ vmsplice // Process control capget -//capset +capset // (*) clone // (*) execve // (*) exit // (*) @@ -401,7 +402,6 @@ tkill //quotactl //reboot //setdomainname -//sethostname //setns //settimeofday //sgetmask // Obsolete diff --git a/contrib/prepare-commit-msg.hook b/contrib/prepare-commit-msg.hook new file mode 100644 index 0000000000..595f7a783b --- /dev/null +++ b/contrib/prepare-commit-msg.hook @@ -0,0 +1,7 @@ +#!/bin/sh +# Auto sign all commits to allow them to be used by the Docker project. 
+# see https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md#sign-your-work +# +GH_USER=$(git config --get github.user) +SOB=$(git var GIT_AUTHOR_IDENT | sed -n "s/^\(.*>\).*$/Docker-DCO-1.1-Signed-off-by: \1 \(github: $GH_USER\)/p") +grep -qs "^$SOB" "$1" || echo "\n$SOB" >> "$1" diff --git a/docker/docker.go b/docker/docker.go index 2d7e04ce92..d92f4d98ea 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -1,15 +1,17 @@ package main import ( - "flag" "fmt" - "github.com/dotcloud/docker" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/sysinit" - "github.com/dotcloud/docker/utils" "log" "os" "strings" + + "github.com/dotcloud/docker" + "github.com/dotcloud/docker/api" + "github.com/dotcloud/docker/engine" + flag "github.com/dotcloud/docker/pkg/mflag" + "github.com/dotcloud/docker/sysinit" + "github.com/dotcloud/docker/utils" ) var ( @@ -25,25 +27,26 @@ func main() { } var ( - flVersion = flag.Bool("v", false, "Print version information and quit") - flDaemon = flag.Bool("d", false, "Enable daemon mode") - flDebug = flag.Bool("D", false, "Enable debug mode") - flAutoRestart = flag.Bool("r", true, "Restart previously running containers") - bridgeName = flag.String("b", "", "Attach containers to a pre-existing network bridge; use 'none' to disable container networking") - bridgeIp = flag.String("bip", "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b") - pidfile = flag.String("p", "/var/run/docker.pid", "Path to use for daemon PID file") - flRoot = flag.String("g", "/var/lib/docker", "Path to use as the root of the docker runtime") - flEnableCors = flag.Bool("api-enable-cors", false, "Enable CORS headers in the remote API") + flVersion = flag.Bool([]string{"v", "-version"}, false, "Print version information and quit") + flDaemon = flag.Bool([]string{"d", "-daemon"}, false, "Enable daemon mode") + flDebug = flag.Bool([]string{"D", "-debug"}, false, "Enable debug mode") + flAutoRestart = 
flag.Bool([]string{"r", "-restart"}, true, "Restart previously running containers") + bridgeName = flag.String([]string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge; use 'none' to disable container networking") + bridgeIp = flag.String([]string{"#bip", "-bip"}, "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b") + pidfile = flag.String([]string{"p", "-pidfile"}, "/var/run/docker.pid", "Path to use for daemon PID file") + flRoot = flag.String([]string{"g", "-graph"}, "/var/lib/docker", "Path to use as the root of the docker runtime") + flEnableCors = flag.Bool([]string{"#api-enable-cors", "-api-enable-cors"}, false, "Enable CORS headers in the remote API") flDns = docker.NewListOpts(docker.ValidateIp4Address) - flEnableIptables = flag.Bool("iptables", true, "Disable docker's addition of iptables rules") - flDefaultIp = flag.String("ip", "0.0.0.0", "Default IP address to use when binding container ports") - flInterContainerComm = flag.Bool("icc", true, "Enable inter-container communication") - flGraphDriver = flag.String("s", "", "Force the docker runtime to use a specific storage driver") + flEnableIptables = flag.Bool([]string{"#iptables", "-iptables"}, true, "Disable docker's addition of iptables rules") + flEnableIpForward = flag.Bool([]string{"#ip-forward", "-ip-forward"}, true, "Disable enabling of net.ipv4.ip_forward") + flDefaultIp = flag.String([]string{"#ip", "-ip"}, "0.0.0.0", "Default IP address to use when binding container ports") + flInterContainerComm = flag.Bool([]string{"#icc", "-icc"}, true, "Enable inter-container communication") + flGraphDriver = flag.String([]string{"s", "-storage-driver"}, "", "Force the docker runtime to use a specific storage driver") flHosts = docker.NewListOpts(docker.ValidateHost) - flMtu = flag.Int("mtu", docker.DefaultNetworkMtu, "Set the containers network mtu") + flMtu = flag.Int([]string{"#mtu", "-mtu"}, 0, "Set the containers network MTU; if no value 
is provided: default to the default route MTU or 1500 if not default route is available") ) - flag.Var(&flDns, "dns", "Force docker to use specific DNS servers") - flag.Var(&flHosts, "H", "Multiple tcp://host:port or unix://path/to/socket to bind in daemon mode, single connection otherwise") + flag.Var(&flDns, []string{"#dns", "-dns"}, "Force docker to use specific DNS servers") + flag.Var(&flHosts, []string{"H", "-host"}, "tcp://host:port, unix://path/to/socket, fd://* or fd://socketfd to use in daemon mode. Multiple sockets can be specified") flag.Parse() @@ -56,13 +59,13 @@ func main() { if defaultHost == "" || *flDaemon { // If we do not have a host, default to unix socket - defaultHost = fmt.Sprintf("unix://%s", docker.DEFAULTUNIXSOCKET) + defaultHost = fmt.Sprintf("unix://%s", api.DEFAULTUNIXSOCKET) } flHosts.Set(defaultHost) } if *bridgeName != "" && *bridgeIp != "" { - log.Fatal("You specified -b & -bip, mutually exclusive options. Please specify only one.") + log.Fatal("You specified -b & --bip, mutually exclusive options. Please specify only one.") } if *flDebug { @@ -81,15 +84,15 @@ func main() { log.Fatal(err) } // Load plugin: httpapi - job := eng.Job("initapi") + job := eng.Job("initserver") job.Setenv("Pidfile", *pidfile) job.Setenv("Root", *flRoot) job.SetenvBool("AutoRestart", *flAutoRestart) - job.SetenvBool("EnableCors", *flEnableCors) job.SetenvList("Dns", flDns.GetAll()) job.SetenvBool("EnableIptables", *flEnableIptables) + job.SetenvBool("EnableIpForward", *flEnableIpForward) job.Setenv("BridgeIface", *bridgeName) - job.Setenv("BridgeIp", *bridgeIp) + job.Setenv("BridgeIP", *bridgeIp) job.Setenv("DefaultIp", *flDefaultIp) job.SetenvBool("InterContainerCommunication", *flInterContainerComm) job.Setenv("GraphDriver", *flGraphDriver) @@ -100,6 +103,8 @@ func main() { // Serve api job = eng.Job("serveapi", flHosts.GetAll()...) 
job.SetenvBool("Logging", true) + job.SetenvBool("EnableCors", *flEnableCors) + job.Setenv("Version", VERSION) if err := job.Run(); err != nil { log.Fatal(err) } diff --git a/docs/Dockerfile b/docs/Dockerfile index 53a5dfba9c..69aa5cb409 100644 --- a/docs/Dockerfile +++ b/docs/Dockerfile @@ -1,20 +1,19 @@ -from ubuntu:12.04 -maintainer Nick Stinemates +FROM ubuntu:12.04 +MAINTAINER Nick Stinemates # # docker build -t docker:docs . && docker run -p 8000:8000 docker:docs # -run apt-get update -run apt-get install -y python-setuptools make -run easy_install pip -#from docs/requirements.txt, but here to increase cacheability -run pip install Sphinx==1.1.3 -run pip install sphinxcontrib-httpdomain==1.1.9 -add . /docs -run cd /docs; make docs +# TODO switch to http://packages.ubuntu.com/trusty/python-sphinxcontrib-httpdomain once trusty is released -expose 8000 +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq make python-pip python-setuptools +# pip installs from docs/requirements.txt, but here to increase cacheability +RUN pip install Sphinx==1.2.1 +RUN pip install sphinxcontrib-httpdomain==1.2.0 +ADD . 
/docs +RUN make -C /docs clean docs -workdir /docs/_build/html - -entrypoint ["python", "-m", "SimpleHTTPServer"] +WORKDIR /docs/_build/html +CMD ["python", "-m", "SimpleHTTPServer"] +# note, EXPOSE is only last because of https://github.com/dotcloud/docker/issues/3525 +EXPOSE 8000 diff --git a/docs/MAINTAINERS b/docs/MAINTAINERS index d782e43c5e..e816670419 100644 --- a/docs/MAINTAINERS +++ b/docs/MAINTAINERS @@ -1,4 +1,3 @@ Andy Rothfusz (@metalivedev) -Ken Cochrane (@kencochrane) James Turnbull (@jamtur01) Sven Dowideit (@SvenDowideit) diff --git a/docs/requirements.txt b/docs/requirements.txt index 095f7600cd..6f41142a84 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,2 +1,2 @@ -Sphinx==1.1.3 -sphinxcontrib-httpdomain==1.1.9 +Sphinx==1.2.1 +sphinxcontrib-httpdomain==1.2.0 diff --git a/docs/sources/api/MAINTAINERS b/docs/sources/api/MAINTAINERS deleted file mode 100644 index 1887dfc232..0000000000 --- a/docs/sources/api/MAINTAINERS +++ /dev/null @@ -1 +0,0 @@ -Solomon Hykes (@shykes) diff --git a/docs/sources/api/docker_remote_api_v1.8.rst b/docs/sources/api/docker_remote_api_v1.8.rst deleted file mode 100644 index 3fe5cd73e0..0000000000 --- a/docs/sources/api/docker_remote_api_v1.8.rst +++ /dev/null @@ -1,1281 +0,0 @@ -:title: Remote API v1.8 -:description: API Documentation for Docker -:keywords: API, Docker, rcli, REST, documentation - -:orphan: - -====================== -Docker Remote API v1.8 -====================== - -.. contents:: Table of Contents - -1. Brief introduction -===================== - -- The Remote API has replaced rcli -- The daemon listens on ``unix:///var/run/docker.sock``, but you can - :ref:`bind_docker`. -- The API tends to be REST, but for some complex commands, like - ``attach`` or ``pull``, the HTTP connection is hijacked to transport - ``stdout, stdin`` and ``stderr`` - -2. Endpoints -============ - -2.1 Containers --------------- - -List containers -*************** - -.. 
http:get:: /containers/json - - List containers - - **Example request**: - - .. sourcecode:: http - - GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Id": "8dfafdbc3a40", - "Image": "base:latest", - "Command": "echo 1", - "Created": 1367854155, - "Status": "Exit 0", - "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "9cd87474be90", - "Image": "base:latest", - "Command": "echo 222222", - "Created": 1367854155, - "Status": "Exit 0", - "Ports":[], - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "3176a2479c92", - "Image": "base:latest", - "Command": "echo 3333333333333333", - "Created": 1367854154, - "Status": "Exit 0", - "Ports":[], - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "4cb07b47f9fb", - "Image": "base:latest", - "Command": "echo 444444444444444444444444444444444", - "Created": 1367854152, - "Status": "Exit 0", - "Ports":[], - "SizeRw":12288, - "SizeRootFs":0 - } - ] - - :query all: 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default - :query limit: Show ``limit`` last created containers, include non-running ones. - :query since: Show only containers created since Id, include non-running ones. - :query before: Show only containers created before Id, include non-running ones. - :query size: 1/True/true or 0/False/false, Show the containers sizes - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 500: server error - - -Create a container -****************** - -.. http:post:: /containers/create - - Create a container - - **Example request**: - - .. 
sourcecode:: http - - POST /containers/create HTTP/1.1 - Content-Type: application/json - - { - "Hostname":"", - "User":"", - "Memory":0, - "MemorySwap":0, - "AttachStdin":false, - "AttachStdout":true, - "AttachStderr":true, - "PortSpecs":null, - "Tty":false, - "OpenStdin":false, - "StdinOnce":false, - "Env":null, - "Cmd":[ - "date" - ], - "Dns":null, - "Image":"base", - "Volumes":{ - "/tmp": {} - }, - "VolumesFrom":"", - "WorkingDir":"", - "ExposedPorts":{ - "22/tcp": {} - } - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - Content-Type: application/json - - { - "Id":"e90e34656806" - "Warnings":[] - } - - :jsonparam config: the container's configuration - :query name: Assign the specified name to the container. Must match ``/?[a-zA-Z0-9_-]+``. - :statuscode 201: no error - :statuscode 404: no such container - :statuscode 406: impossible to attach (container not running) - :statuscode 500: server error - - -Inspect a container -******************* - -.. http:get:: /containers/(id)/json - - Return low-level information on the container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/json HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", - "Created": "2013-05-07T14:51:42.041847+02:00", - "Path": "date", - "Args": [], - "Config": { - "Hostname": "4fa6e0f0c678", - "User": "", - "Memory": 0, - "MemorySwap": 0, - "AttachStdin": false, - "AttachStdout": true, - "AttachStderr": true, - "PortSpecs": null, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": [ - "date" - ], - "Dns": null, - "Image": "base", - "Volumes": {}, - "VolumesFrom": "", - "WorkingDir":"" - - }, - "State": { - "Running": false, - "Pid": 0, - "ExitCode": 0, - "StartedAt": "2013-05-07T14:51:42.087658+02:01360", - "Ghost": false - }, - "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "NetworkSettings": { - "IpAddress": "", - "IpPrefixLen": 0, - "Gateway": "", - "Bridge": "", - "PortMapping": null - }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", - "ResolvConfPath": "/etc/resolv.conf", - "Volumes": {}, - "HostConfig": { - "Binds": null, - "ContainerIDFile": "", - "LxcConf": [], - "Privileged": false, - "PortBindings": { - "80/tcp": [ - { - "HostIp": "0.0.0.0", - "HostPort": "49153" - } - ] - }, - "Links": null, - "PublishAllPorts": false - } - } - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -List processes running inside a container -***************************************** - -.. http:get:: /containers/(id)/top - - List processes running inside the container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/top HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Titles":[ - "USER", - "PID", - "%CPU", - "%MEM", - "VSZ", - "RSS", - "TTY", - "STAT", - "START", - "TIME", - "COMMAND" - ], - "Processes":[ - ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], - ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] - ] - } - - :query ps_args: ps arguments to use (eg. aux) - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Inspect changes on a container's filesystem -******************************************* - -.. http:get:: /containers/(id)/changes - - Inspect changes on container ``id`` 's filesystem - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/changes HTTP/1.1 - - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Path":"/dev", - "Kind":0 - }, - { - "Path":"/dev/kmsg", - "Kind":1 - }, - { - "Path":"/test", - "Kind":1 - } - ] - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Export a container -****************** - -.. http:get:: /containers/(id)/export - - Export the contents of container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/export HTTP/1.1 - - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/octet-stream - - {{ STREAM }} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Start a container -***************** - -.. http:post:: /containers/(id)/start - - Start the container ``id`` - - **Example request**: - - .. 
sourcecode:: http - - POST /containers/(id)/start HTTP/1.1 - Content-Type: application/json - - { - "Binds":["/tmp:/tmp"], - "LxcConf":{"lxc.utsname":"docker"}, - "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] }, - "PublishAllPorts":false, - "Privileged":false - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 No Content - Content-Type: text/plain - - :jsonparam hostConfig: the container's host configuration (optional) - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Stop a container -**************** - -.. http:post:: /containers/(id)/stop - - Stop the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/stop?t=5 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query t: number of seconds to wait before killing the container - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Restart a container -******************* - -.. http:post:: /containers/(id)/restart - - Restart the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/restart?t=5 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query t: number of seconds to wait before killing the container - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Kill a container -**************** - -.. http:post:: /containers/(id)/kill - - Kill the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/kill HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Attach to a container -********************* - -.. http:post:: /containers/(id)/attach - - Attach to the container ``id`` - - **Example request**: - - .. 
sourcecode:: http - - POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/vnd.docker.raw-stream - - {{ STREAM }} - - :query logs: 1/True/true or 0/False/false, return logs. Default false - :query stream: 1/True/true or 0/False/false, return stream. Default false - :query stdin: 1/True/true or 0/False/false, if stream=true, attach to stdin. Default false - :query stdout: 1/True/true or 0/False/false, if logs=true, return stdout log, if stream=true, attach to stdout. Default false - :query stderr: 1/True/true or 0/False/false, if logs=true, return stderr log, if stream=true, attach to stderr. Default false - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 404: no such container - :statuscode 500: server error - - **Stream details**: - - When using the TTY setting is enabled in - :http:post:`/containers/create`, the stream is the raw data - from the process PTY and client's stdin. When the TTY is - disabled, then the stream is multiplexed to separate stdout - and stderr. - - The format is a **Header** and a **Payload** (frame). - - **HEADER** - - The header will contain the information on which stream write - the stream (stdout or stderr). It also contain the size of - the associated frame encoded on the last 4 bytes (uint32). - - It is encoded on the first 8 bytes like this:: - - header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} - - ``STREAM_TYPE`` can be: - - - 0: stdin (will be writen on stdout) - - 1: stdout - - 2: stderr - - ``SIZE1, SIZE2, SIZE3, SIZE4`` are the 4 bytes of the uint32 size encoded as big endian. - - **PAYLOAD** - - The payload is the raw stream. 
- - **IMPLEMENTATION** - - The simplest way to implement the Attach protocol is the following: - - 1) Read 8 bytes - 2) chose stdout or stderr depending on the first byte - 3) Extract the frame size from the last 4 byets - 4) Read the extracted size and output it on the correct output - 5) Goto 1) - - - -Wait a container -**************** - -.. http:post:: /containers/(id)/wait - - Block until container ``id`` stops, then returns the exit code - - **Example request**: - - .. sourcecode:: http - - POST /containers/16253994b7c4/wait HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"StatusCode":0} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Remove a container -******************* - -.. http:delete:: /containers/(id) - - Remove the container ``id`` from the filesystem - - **Example request**: - - .. sourcecode:: http - - DELETE /containers/16253994b7c4?v=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query v: 1/True/true or 0/False/false, Remove the volumes associated to the container. Default false - :statuscode 204: no error - :statuscode 400: bad parameter - :statuscode 404: no such container - :statuscode 500: server error - - -Copy files or folders from a container -************************************** - -.. http:post:: /containers/(id)/copy - - Copy files or folders of container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/4fa6e0f0c678/copy HTTP/1.1 - Content-Type: application/json - - { - "Resource":"test.txt" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/octet-stream - - {{ STREAM }} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -2.2 Images ----------- - -List Images -*********** - -.. http:get:: /images/json - - **Example request**: - - .. 
sourcecode:: http - - GET /images/json?all=0 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "RepoTag": [ - "ubuntu:12.04", - "ubuntu:precise", - "ubuntu:latest" - ], - "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", - "Created": 1365714795, - "Size": 131506275, - "VirtualSize": 131506275 - }, - { - "RepoTag": [ - "ubuntu:12.10", - "ubuntu:quantal" - ], - "ParentId": "27cf784147099545", - "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "Created": 1364102658, - "Size": 24653, - "VirtualSize": 180116135 - } - ] - - -Create an image -*************** - -.. http:post:: /images/create - - Create an image, either by pull it from the registry or by importing it - - **Example request**: - - .. sourcecode:: http - - POST /images/create?fromImage=base HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Pulling..."} - {"status":"Pulling", "progress":"1 B/ 100 B", "progressDetail":{"current":1, "total":100}} - {"error":"Invalid..."} - ... - - When using this endpoint to pull an image from the registry, - the ``X-Registry-Auth`` header can be used to include a - base64-encoded AuthConfig object. - - :query fromImage: name of the image to pull - :query fromSrc: source to import, - means stdin - :query repo: repository - :query tag: tag - :query registry: the registry to pull from - :reqheader X-Registry-Auth: base64-encoded AuthConfig object - :statuscode 200: no error - :statuscode 500: server error - - - -Insert a file in an image -************************* - -.. http:post:: /images/(name)/insert - - Insert a file from ``url`` in the image ``name`` at ``path`` - - **Example request**: - - .. sourcecode:: http - - POST /images/test/insert?path=/usr&url=myurl HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Inserting..."} - {"status":"Inserting", "progress":"1/? (n/a)", "progressDetail":{"current":1}} - {"error":"Invalid..."} - ... - - :statuscode 200: no error - :statuscode 500: server error - - -Inspect an image -**************** - -.. http:get:: /images/(name)/json - - Return low-level information on the image ``name`` - - **Example request**: - - .. sourcecode:: http - - GET /images/base/json HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "parent":"27cf784147099545", - "created":"2013-03-23T22:24:18.818426-07:00", - "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", - "container_config": - { - "Hostname":"", - "User":"", - "Memory":0, - "MemorySwap":0, - "AttachStdin":false, - "AttachStdout":false, - "AttachStderr":false, - "PortSpecs":null, - "Tty":true, - "OpenStdin":true, - "StdinOnce":false, - "Env":null, - "Cmd": ["/bin/bash"] - ,"Dns":null, - "Image":"base", - "Volumes":null, - "VolumesFrom":"", - "WorkingDir":"" - }, - "Size": 6824592 - } - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Get the history of an image -*************************** - -.. http:get:: /images/(name)/history - - Return the history of the image ``name`` - - **Example request**: - - .. sourcecode:: http - - GET /images/base/history HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Id":"b750fe79269d", - "Created":1364102658, - "CreatedBy":"/bin/bash" - }, - { - "Id":"27cf78414709", - "Created":1364068391, - "CreatedBy":"" - } - ] - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Push an image on the registry -***************************** - -.. 
http:post:: /images/(name)/push - - Push the image ``name`` on the registry - - **Example request**: - - .. sourcecode:: http - - POST /images/test/push HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Pushing..."} - {"status":"Pushing", "progress":"1/? (n/a)", "progressDetail":{"current":1}}} - {"error":"Invalid..."} - ... - - :query registry: the registry you wan to push, optional - :reqheader X-Registry-Auth: include a base64-encoded AuthConfig object. - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Tag an image into a repository -****************************** - -.. http:post:: /images/(name)/tag - - Tag the image ``name`` into a repository - - **Example request**: - - .. sourcecode:: http - - POST /images/test/tag?repo=myrepo&force=0 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - :query repo: The repository to tag in - :query force: 1/True/true or 0/False/false, default false - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 404: no such image - :statuscode 409: conflict - :statuscode 500: server error - - -Remove an image -*************** - -.. http:delete:: /images/(name) - - Remove the image ``name`` from the filesystem - - **Example request**: - - .. sourcecode:: http - - DELETE /images/test HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-type: application/json - - [ - {"Untagged":"3e2f21a89f"}, - {"Deleted":"3e2f21a89f"}, - {"Deleted":"53b4f83ac9"} - ] - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 409: conflict - :statuscode 500: server error - - -Search images -************* - -.. http:get:: /images/search - - Search for an image in the docker index. - - .. note:: - - The response keys have changed from API v1.6 to reflect the JSON - sent by the registry server to the docker daemon's request. 
- - **Example request**: - - .. sourcecode:: http - - GET /images/search?term=sshd HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "description": "", - "is_official": false, - "is_trusted": false, - "name": "wma55/u1210sshd", - "star_count": 0 - }, - { - "description": "", - "is_official": false, - "is_trusted": false, - "name": "jdswinbank/sshd", - "star_count": 0 - }, - { - "description": "", - "is_official": false, - "is_trusted": false, - "name": "vgauthier/sshd", - "star_count": 0 - } - ... - ] - - :query term: term to search - :statuscode 200: no error - :statuscode 500: server error - - -2.3 Misc --------- - -Build an image from Dockerfile via stdin -**************************************** - -.. http:post:: /build - - Build an image from Dockerfile via stdin - - **Example request**: - - .. sourcecode:: http - - POST /build HTTP/1.1 - - {{ STREAM }} - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"stream":"Step 1..."} - {"stream":"..."} - {"error":"Error...", "errorDetail":{"code": 123, "message": "Error..."}} - - - The stream must be a tar archive compressed with one of the - following algorithms: identity (no compression), gzip, bzip2, - xz. - - The archive must include a file called ``Dockerfile`` at its - root. It may include any number of other files, which will be - accessible in the build context (See the :ref:`ADD build command - `). - - :query t: repository name (and optionally a tag) to be applied to the resulting image in case of success - :query q: suppress verbose build output - :query nocache: do not use the cache when building the image - :reqheader Content-type: should be set to ``"application/tar"``. - :reqheader X-Registry-Auth: base64-encoded AuthConfig object - :statuscode 200: no error - :statuscode 500: server error - - - -Check auth configuration -************************ - -.. 
http:post:: /auth - - Get the default username and email - - **Example request**: - - .. sourcecode:: http - - POST /auth HTTP/1.1 - Content-Type: application/json - - { - "username":"hannibal", - "password:"xxxx", - "email":"hannibal@a-team.com", - "serveraddress":"https://index.docker.io/v1/" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - :statuscode 200: no error - :statuscode 204: no error - :statuscode 500: server error - - -Display system-wide information -******************************* - -.. http:get:: /info - - Display system-wide information - - **Example request**: - - .. sourcecode:: http - - GET /info HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Containers":11, - "Images":16, - "Debug":false, - "NFd": 11, - "NGoroutines":21, - "MemoryLimit":true, - "SwapLimit":false, - "IPv4Forwarding":true - } - - :statuscode 200: no error - :statuscode 500: server error - - -Show the docker version information -*********************************** - -.. http:get:: /version - - Show the docker version information - - **Example request**: - - .. sourcecode:: http - - GET /version HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Version":"0.2.2", - "GitCommit":"5a2a5cc+CHANGES", - "GoVersion":"go1.0.3" - } - - :statuscode 200: no error - :statuscode 500: server error - - -Create a new image from a container's changes -********************************************* - -.. http:post:: /commit - - Create a new image from a container's changes - - **Example request**: - - .. sourcecode:: http - - POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 201 OK - Content-Type: application/vnd.docker.raw-stream - - {"Id":"596069db4bf5"} - - :query container: source container - :query repo: repository - :query tag: tag - :query m: commit message - :query author: author (eg. "John Hannibal Smith ") - :query run: config automatically applied when the image is run. (ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]}) - :statuscode 201: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Monitor Docker's events -*********************** - -.. http:get:: /events - - Get events from docker, either in real time via streaming, or via polling (using `since`) - - **Example request**: - - .. sourcecode:: http - - GET /events?since=1374067924 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924} - {"status":"start","id":"dfdf82bd3881","from":"base:latest","time":1374067924} - {"status":"stop","id":"dfdf82bd3881","from":"base:latest","time":1374067966} - {"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970} - - :query since: timestamp used for polling - :statuscode 200: no error - :statuscode 500: server error - -Get a tarball containing all images and tags in a repository -************************************************************ - -.. http:get:: /images/(name)/get - - Get a tarball containing all images and metadata for the repository specified by ``name``. - - **Example request** - - .. sourcecode:: http - - GET /images/ubuntu/get - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/x-tar - - Binary data stream - :statuscode 200: no error - :statuscode 500: server error - -Load a tarball with a set of images and tags into docker -******************************************************** - -.. 
http:post:: /images/load - - Load a set of images and tags into the docker repository. - - **Example request** - - .. sourcecode:: http - - POST /images/load - - Tarball in body - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - :statuscode 200: no error - :statuscode 500: server error - -3. Going further -================ - -3.1 Inside 'docker run' ------------------------ - -Here are the steps of 'docker run' : - -* Create the container -* If the status code is 404, it means the image doesn't exists: - * Try to pull it - * Then retry to create the container -* Start the container -* If you are not in detached mode: - * Attach to the container, using logs=1 (to have stdout and stderr from the container's start) and stream=1 -* If in detached mode or only stdin is attached: - * Display the container's id - - -3.2 Hijacking -------------- - -In this version of the API, /attach, uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future. - -3.3 CORS Requests ------------------ - -To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode. - -.. code-block:: bash - - docker -d -H="192.168.1.9:4243" -api-enable-cors - diff --git a/docs/sources/use/baseimages.rst b/docs/sources/articles/baseimages.rst similarity index 88% rename from docs/sources/use/baseimages.rst rename to docs/sources/articles/baseimages.rst index 51a51e2f93..6fd1823f8d 100644 --- a/docs/sources/use/baseimages.rst +++ b/docs/sources/articles/baseimages.rst @@ -34,10 +34,13 @@ It can be as simple as this to create an Ubuntu base image:: DISTRIB_DESCRIPTION="Ubuntu 13.04" There are more example scripts for creating base images in the -Docker Github Repo: +Docker GitHub Repo: * `BusyBox `_ -* `CentOS / Scientific Linux CERN (SLC) +* CentOS / Scientific Linux CERN (SLC) `on Debian/Ubuntu `_ + or + `on CentOS/RHEL/SLC/etc. 
+ `_ * `Debian / Ubuntu `_ diff --git a/docs/sources/articles/index.rst b/docs/sources/articles/index.rst new file mode 100644 index 0000000000..75c0cd3fa9 --- /dev/null +++ b/docs/sources/articles/index.rst @@ -0,0 +1,15 @@ +:title: Docker articles +:description: various articles related to Docker +:keywords: docker, articles + +.. _articles_list: + +Articles +======== + +.. toctree:: + :maxdepth: 1 + + security + baseimages + runmetrics diff --git a/docs/sources/articles/runmetrics.rst b/docs/sources/articles/runmetrics.rst new file mode 100644 index 0000000000..afb7f82e39 --- /dev/null +++ b/docs/sources/articles/runmetrics.rst @@ -0,0 +1,463 @@ +:title: Runtime Metrics +:description: Measure the behavior of running containers +:keywords: docker, metrics, CPU, memory, disk, IO, run, runtime + +.. _run_metrics: + + +Runtime Metrics +=============== + +Linux Containers rely on `control groups +`_ which +not only track groups of processes, but also expose metrics about CPU, +memory, and block I/O usage. You can access those metrics and obtain +network usage metrics as well. This is relevant for "pure" LXC +containers, as well as for Docker containers. + +Control Groups +-------------- + +Control groups are exposed through a pseudo-filesystem. In recent +distros, you should find this filesystem under +``/sys/fs/cgroup``. Under that directory, you will see multiple +sub-directories, called devices, freezer, blkio, etc.; each +sub-directory actually corresponds to a different cgroup hierarchy. + +On older systems, the control groups might be mounted on ``/cgroup``, +without distinct hierarchies. In that case, instead of seeing the +sub-directories, you will see a bunch of files in that directory, and +possibly some directories corresponding to existing containers. + +To figure out where your control groups are mounted, you can run: + +:: + + grep cgroup /proc/mounts + +.. 
_run_findpid: + +Enumerating Cgroups +------------------- + +You can look into ``/proc/cgroups`` to see the different control group +subsystems known to the system, the hierarchy they belong to, and how +many groups they contain. + +You can also look at ``/proc//cgroup`` to see which control +groups a process belongs to. The control group will be shown as a path +relative to the root of the hierarchy mountpoint; e.g. ``/`` means +“this process has not been assigned into a particular group”, while +``/lxc/pumpkin`` means that the process is likely to be a member of a +container named ``pumpkin``. + +Finding the Cgroup for a Given Container +---------------------------------------- + +For each container, one cgroup will be created in each hierarchy. On +older systems with older versions of the LXC userland tools, the name +of the cgroup will be the name of the container. With more recent +versions of the LXC tools, the cgroup will be ``lxc/.`` + +For Docker containers using cgroups, the container name will be the +full ID or long ID of the container. If a container shows up as +ae836c95b4c3 in ``docker ps``, its long ID might be something like +``ae836c95b4c3c9e9179e0e91015512da89fdec91612f63cebae57df9a5444c79``. You +can look it up with ``docker inspect`` or ``docker ps -notrunc``. + +Putting everything together to look at the memory metrics for a Docker +container, take a look at ``/sys/fs/cgroup/memory/lxc//``. + +Metrics from Cgroups: Memory, CPU, Block IO +------------------------------------------- + +For each subsystem (memory, CPU, and block I/O), you will find one or +more pseudo-files containing statistics. + +Memory Metrics: ``memory.stat`` +............................... + +Memory metrics are found in the "memory" cgroup. Note that the memory +control group adds a little overhead, because it does very +fine-grained accounting of the memory usage on your host. Therefore, +many distros chose to not enable it by default. 
Generally, to enable +it, all you have to do is to add some kernel command-line parameters: +``cgroup_enable=memory swapaccount=1``. + +The metrics are in the pseudo-file ``memory.stat``. Here is what it +will look like: + +:: + + cache 11492564992 + rss 1930993664 + mapped_file 306728960 + pgpgin 406632648 + pgpgout 403355412 + swap 0 + pgfault 728281223 + pgmajfault 1724 + inactive_anon 46608384 + active_anon 1884520448 + inactive_file 7003344896 + active_file 4489052160 + unevictable 32768 + hierarchical_memory_limit 9223372036854775807 + hierarchical_memsw_limit 9223372036854775807 + total_cache 11492564992 + total_rss 1930993664 + total_mapped_file 306728960 + total_pgpgin 406632648 + total_pgpgout 403355412 + total_swap 0 + total_pgfault 728281223 + total_pgmajfault 1724 + total_inactive_anon 46608384 + total_active_anon 1884520448 + total_inactive_file 7003344896 + total_active_file 4489052160 + total_unevictable 32768 + +The first half (without the ``total_`` prefix) contains statistics +relevant to the processes within the cgroup, excluding +sub-cgroups. The second half (with the ``total_`` prefix) includes +sub-cgroups as well. + +Some metrics are "gauges", i.e. values that can increase or decrease +(e.g. swap, the amount of swap space used by the members of the +cgroup). Some others are "counters", i.e. values that can only go up, +because they represent occurrences of a specific event (e.g. pgfault, +which indicates the number of page faults which happened since the +creation of the cgroup; this number can never decrease). + +cache + the amount of memory used by the processes of this control group + that can be associated precisely with a block on a block + device. When you read from and write to files on disk, this amount + will increase. This will be the case if you use "conventional" I/O + (``open``, ``read``, ``write`` syscalls) as well as mapped files + (with ``mmap``). 
It also accounts for the memory used by ``tmpfs`` + mounts, though the reasons are unclear. + +rss + the amount of memory that *doesn't* correspond to anything on + disk: stacks, heaps, and anonymous memory maps. + +mapped_file + indicates the amount of memory mapped by the processes in the + control group. It doesn't give you information about *how much* + memory is used; it rather tells you *how* it is used. + +pgfault and pgmajfault + indicate the number of times that a process of the cgroup triggered + a "page fault" and a "major fault", respectively. A page fault + happens when a process accesses a part of its virtual memory space + which is nonexistent or protected. The former can happen if the + process is buggy and tries to access an invalid address (it will + then be sent a ``SIGSEGV`` signal, typically killing it with the + famous ``Segmentation fault`` message). The latter can happen when + the process reads from a memory zone which has been swapped out, or + which corresponds to a mapped file: in that case, the kernel will + load the page from disk, and let the CPU complete the memory + access. It can also happen when the process writes to a + copy-on-write memory zone: likewise, the kernel will preempt the + process, duplicate the memory page, and resume the write operation + on the process' own copy of the page. "Major" faults happen when the + kernel actually has to read the data from disk. When it just has to + duplicate an existing page, or allocate an empty page, it's a + regular (or "minor") fault. + +swap + the amount of swap currently used by the processes in this cgroup. + +active_anon and inactive_anon + the amount of *anonymous* memory that has been identified as + respectively *active* and *inactive* by the kernel. "Anonymous" + memory is the memory that is *not* linked to disk pages. In other + words, that's the equivalent of the rss counter described above. 
In + fact, the very definition of the rss counter is **active_anon** + + **inactive_anon** - **tmpfs** (where tmpfs is the amount of memory + used up by ``tmpfs`` filesystems mounted by this control + group). Now, what's the difference between "active" and "inactive"? + Pages are initially "active"; and at regular intervals, the kernel + sweeps over the memory, and tags some pages as "inactive". Whenever + they are accessed again, they are immediately retagged + "active". When the kernel is almost out of memory, and time comes to + swap out to disk, the kernel will swap "inactive" pages. + +active_file and inactive_file + cache memory, with *active* and *inactive* similar to the *anon* + memory above. The exact formula is cache = **active_file** + + **inactive_file** + **tmpfs**. The exact rules used by the kernel to + move memory pages between active and inactive sets are different + from the ones used for anonymous memory, but the general principle + is the same. Note that when the kernel needs to reclaim memory, it + is cheaper to reclaim a clean (=non modified) page from this pool, + since it can be reclaimed immediately (while anonymous pages and + dirty/modified pages have to be written to disk first). + +unevictable + the amount of memory that cannot be reclaimed; generally, it will + account for memory that has been "locked" with ``mlock``. It is + often used by crypto frameworks to make sure that secret keys and + other sensitive material never gets swapped out to disk. + +memory and memsw limits + These are not really metrics, but a reminder of the limits applied + to this cgroup. The first one indicates the maximum amount of + physical memory that can be used by the processes of this control + group; the second one indicates the maximum amount of RAM+swap. + +Accounting for memory in the page cache is very complex. 
If two +processes in different control groups both read the same file +(ultimately relying on the same blocks on disk), the corresponding +memory charge will be split between the control groups. It's nice, but +it also means that when a cgroup is terminated, it could increase the +memory usage of another cgroup, because they are not splitting the +cost anymore for those memory pages. + +CPU metrics: ``cpuacct.stat`` +............................. + +Now that we've covered memory metrics, everything else will look very +simple in comparison. CPU metrics will be found in the ``cpuacct`` +controller. + +For each container, you will find a pseudo-file ``cpuacct.stat``, +containing the CPU usage accumulated by the processes of the +container, broken down between ``user`` and ``system`` time. If you're +not familiar with the distinction, ``user`` is the time during which +the processes were in direct control of the CPU (i.e. executing +process code), and ``system`` is the time during which the CPU was +executing system calls on behalf of those processes. + +Those times are expressed in ticks of 1/100th of a second. Actually, +they are expressed in "user jiffies". There are ``USER_HZ`` +*"jiffies"* per second, and on x86 systems, ``USER_HZ`` is 100. This +used to map exactly to the number of scheduler "ticks" per second; but +with the advent of higher frequency scheduling, as well as `tickless +kernels `_, the number of kernel +ticks wasn't relevant anymore. It stuck around anyway, mainly for +legacy and compatibility reasons. + +Block I/O metrics +................. + +Block I/O is accounted in the ``blkio`` controller. Different metrics +are scattered across different files. While you can find in-depth +details in the `blkio-controller +`_ +file in the kernel documentation, here is a short list of the most +relevant ones: + +blkio.sectors + contain the number of 512-bytes sectors read and written by the + processes member of the cgroup, device by device. 
Reads and writes + are merged in a single counter. + +blkio.io_service_bytes + indicates the number of bytes read and written by the cgroup. It has + 4 counters per device, because for each device, it differentiates + between synchronous vs. asynchronous I/O, and reads vs. writes. + +blkio.io_serviced + the number of I/O operations performed, regardless of their size. It + also has 4 counters per device. + +blkio.io_queued + indicates the number of I/O operations currently queued for this + cgroup. In other words, if the cgroup isn't doing any I/O, this will + be zero. Note that the opposite is not true. In other words, if + there is no I/O queued, it does not mean that the cgroup is idle + (I/O-wise). It could be doing purely synchronous reads on an + otherwise quiescent device, which is therefore able to handle them + immediately, without queuing. Also, while it is helpful to figure + out which cgroup is putting stress on the I/O subsystem, keep in + mind that it is a relative quantity. Even if a process group does + not perform more I/O, its queue size can increase just because the + device load increases because of other devices. + +Network Metrics +--------------- + +Network metrics are not exposed directly by control groups. There is a +good explanation for that: network interfaces exist within the context +of *network namespaces*. The kernel could probably accumulate metrics +about packets and bytes sent and received by a group of processes, but +those metrics wouldn't be very useful. You want per-interface metrics +(because traffic happening on the local ``lo`` interface doesn't +really count). But since processes in a single cgroup can belong to +multiple network namespaces, those metrics would be harder to +interpret: multiple network namespaces means multiple ``lo`` +interfaces, potentially multiple ``eth0`` interfaces, etc.; so this is +why there is no easy way to gather network metrics with control +groups. 
+ +Instead we can gather network metrics from other sources: + +IPtables +........ + +IPtables (or rather, the netfilter framework for which iptables is +just an interface) can do some serious accounting. + +For instance, you can setup a rule to account for the outbound HTTP +traffic on a web server: + +:: + + iptables -I OUTPUT -p tcp --sport 80 + + +There is no ``-j`` or ``-g`` flag, so the rule will just count matched +packets and go to the following rule. + +Later, you can check the values of the counters, with: + +:: + + iptables -nxvL OUTPUT + +Technically, ``-n`` is not required, but it will prevent iptables from +doing DNS reverse lookups, which are probably useless in this +scenario. + +Counters include packets and bytes. If you want to setup metrics for +container traffic like this, you could execute a ``for`` loop to add +two ``iptables`` rules per container IP address (one in each +direction), in the ``FORWARD`` chain. This will only meter traffic +going through the NAT layer; you will also have to add traffic going +through the userland proxy. + +Then, you will need to check those counters on a regular basis. If you +happen to use ``collectd``, there is a nice plugin to automate +iptables counters collection. + +Interface-level counters +........................ + +Since each container has a virtual Ethernet interface, you might want +to check directly the TX and RX counters of this interface. You will +notice that each container is associated to a virtual Ethernet +interface in your host, with a name like ``vethKk8Zqi``. Figuring out +which interface corresponds to which container is, unfortunately, +difficult. + +But for now, the best way is to check the metrics *from within the +containers*. To accomplish this, you can run an executable from the +host environment within the network namespace of a container using +**ip-netns magic**. 
+ +The ``ip-netns exec`` command will let you execute any program +(present in the host system) within any network namespace visible to +the current process. This means that your host will be able to enter +the network namespace of your containers, but your containers won't be +able to access the host, nor their sibling containers. Containers will +be able to “see” and affect their sub-containers, though. + +The exact format of the command is:: + + ip netns exec + +For example:: + + ip netns exec mycontainer netstat -i + +``ip netns`` finds the "mycontainer" container by using namespaces +pseudo-files. Each process belongs to one network namespace, one PID +namespace, one ``mnt`` namespace, etc., and those namespaces are +materialized under ``/proc//ns/``. For example, the network +namespace of PID 42 is materialized by the pseudo-file +``/proc/42/ns/net``. + +When you run ``ip netns exec mycontainer ...``, it expects +``/var/run/netns/mycontainer`` to be one of those +pseudo-files. (Symlinks are accepted.) + +In other words, to execute a command within the network namespace of a +container, we need to: + +* Find out the PID of any process within the container that we want to + investigate; +* Create a symlink from ``/var/run/netns/`` to + ``/proc//ns/net`` +* Execute ``ip netns exec ....`` + +Please review :ref:`run_findpid` to learn how to find the cgroup of a +process running in the container of which you want to measure network +usage. From there, you can examine the pseudo-file named ``tasks``, +which contains the PIDs that are in the control group (i.e. in the +container). Pick any one of them. 
+ +Putting everything together, if the "short ID" of a container is held +in the environment variable ``$CID``, then you can do this:: + + TASKS=/sys/fs/cgroup/devices/$CID*/tasks + PID=$(head -n 1 $TASKS) + mkdir -p /var/run/netns + ln -sf /proc/$PID/ns/net /var/run/netns/$CID + ip netns exec $CID netstat -i + + +Tips for high-performance metric collection +------------------------------------------- + +Note that running a new process each time you want to update metrics +is (relatively) expensive. If you want to collect metrics at high +resolutions, and/or over a large number of containers (think 1000 +containers on a single host), you do not want to fork a new process +each time. + +Here is how to collect metrics from a single process. You will have to +write your metric collector in C (or any language that lets you do +low-level system calls). You need to use a special system call, +``setns()``, which lets the current process enter any arbitrary +namespace. It requires, however, an open file descriptor to the +namespace pseudo-file (remember: that’s the pseudo-file in +``/proc//ns/net``). + +However, there is a catch: you must not keep this file descriptor +open. If you do, when the last process of the control group exits, the +namespace will not be destroyed, and its network resources (like the +virtual interface of the container) will stay around for ever (or +until you close that file descriptor). + +The right approach would be to keep track of the first PID of each +container, and re-open the namespace pseudo-file each time. + +Collecting metrics when a container exits +----------------------------------------- + +Sometimes, you do not care about real time metric collection, but when +a container exits, you want to know how much CPU, memory, etc. it has +used. + +Docker makes this difficult because it relies on ``lxc-start``, which +carefully cleans up after itself, but it is still possible. 
It is +usually easier to collect metrics at regular intervals (e.g. every +minute, with the collectd LXC plugin) and rely on that instead. + +But, if you'd still like to gather the stats when a container stops, +here is how: + +For each container, start a collection process, and move it to the +control groups that you want to monitor by writing its PID to the +tasks file of the cgroup. The collection process should periodically +re-read the tasks file to check if it's the last process of the +control group. (If you also want to collect network statistics as +explained in the previous section, you should also move the process to +the appropriate network namespace.) + +When the container exits, ``lxc-start`` will try to delete the control +groups. It will fail, since the control group is still in use; but +that’s fine. Your process should now detect that it is the only one +remaining in the group. Now is the right time to collect all the +metrics you need! + +Finally, your process should move itself back to the root control +group, and remove the container control group. To remove a control +group, just ``rmdir`` its directory. It's counter-intuitive to +``rmdir`` a directory as it still contains files; but remember that +this is a pseudo-filesystem, so usual rules don't apply. After the +cleanup is done, the collection process can exit safely. + diff --git a/docs/sources/installation/security.rst b/docs/sources/articles/security.rst similarity index 100% rename from docs/sources/installation/security.rst rename to docs/sources/articles/security.rst diff --git a/docs/sources/conf.py b/docs/sources/conf.py index a143e821be..12f5b57841 100644 --- a/docs/sources/conf.py +++ b/docs/sources/conf.py @@ -62,7 +62,7 @@ master_doc = 'toctree' # General information about the project. project = u'Docker' -copyright = u'2013, Team Docker' +copyright = u'2014 Docker, Inc.' 
# The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -175,7 +175,7 @@ html_show_sourcelink = False #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True +html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the @@ -235,8 +235,10 @@ latex_documents = [ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ - ('commandline/cli', 'docker', u'Docker Documentation', - [u'Team Docker'], 1) + ('reference/commandline/cli', 'docker', u'Docker CLI Documentation', + [u'Team Docker'], 1), + ('reference/builder', 'Dockerfile', u'Dockerfile Documentation', + [u'Team Docker'], 5), ] # If true, show URL addresses after external links. diff --git a/docs/sources/examples/couchdb_data_volumes.rst b/docs/sources/examples/couchdb_data_volumes.rst index 1f6b4b7910..6cf3fab68c 100644 --- a/docs/sources/examples/couchdb_data_volumes.rst +++ b/docs/sources/examples/couchdb_data_volumes.rst @@ -41,7 +41,7 @@ This time, we're requesting shared access to ``$COUCH1``'s volumes. .. code-block:: bash - COUCH2=$(sudo docker run -d -p 5984 -volumes-from $COUCH1 shykes/couchdb:2013-05-03) + COUCH2=$(sudo docker run -d -p 5984 --volumes-from $COUCH1 shykes/couchdb:2013-05-03) Browse data on the second database ---------------------------------- diff --git a/docs/sources/examples/hello_world.rst b/docs/sources/examples/hello_world.rst index aeb95881d2..fd5d6421c3 100644 --- a/docs/sources/examples/hello_world.rst +++ b/docs/sources/examples/hello_world.rst @@ -9,25 +9,23 @@ Hello World .. 
_running_examples: -Running the Examples -==================== +Check your Docker install +------------------------- -All the examples assume your machine is running the ``docker`` daemon. To -run the ``docker`` daemon in the background, simply type: +This guide assumes you have a working installation of Docker. To check +your Docker install, run the following command: .. code-block:: bash - sudo docker -d & + # Check that you have a working install + docker info -Now you can run Docker in client mode: by default all commands will be -forwarded to the ``docker`` daemon via a protected Unix socket, so you -must run as the ``root`` or via the ``sudo`` command. +If you get ``docker: command not found`` or something like +``/var/lib/docker/repositories: permission denied`` you may have an incomplete +Docker installation or insufficient privileges to access docker on your machine. -.. code-block:: bash +Please refer to :ref:`installation_list` for installation instructions. - sudo docker help - ----- .. _hello_world: @@ -72,10 +70,12 @@ See the example in action .. raw:: html -
- -
- + ---- @@ -88,9 +88,7 @@ Hello World Daemon And now for the most boring daemon ever written! -This example assumes you have Docker installed and the Ubuntu -image already imported with ``docker pull ubuntu``. We will use the Ubuntu -image to run a simple hello world daemon that will just print hello +We will use the Ubuntu image to run a simple hello world daemon that will just print hello world to standard out every second. It will continue to do this until we stop it. @@ -167,9 +165,12 @@ See the example in action .. raw:: html -
- -
+ The next example in the series is a :ref:`python_web_app` example, or you could skip to any of the other examples: diff --git a/docs/sources/examples/python_web_app.rst b/docs/sources/examples/python_web_app.rst index 3034bf980a..f31b31b7d2 100644 --- a/docs/sources/examples/python_web_app.rst +++ b/docs/sources/examples/python_web_app.rst @@ -43,7 +43,7 @@ container. The ``BUILD_JOB`` environment variable will be set with the new conta [...] While this container is running, we can attach to the new container to -see what is going on. The flag ``-sig-proxy`` set as ``false`` allows you to connect and +see what is going on. The flag ``--sig-proxy`` set as ``false`` allows you to connect and disconnect (Ctrl-C) to it without stopping the container. .. code-block:: bash @@ -107,8 +107,11 @@ See the example in action .. raw:: html -
- -
+ Continue to :ref:`running_ssh_service`. diff --git a/docs/sources/examples/running_redis_service.rst b/docs/sources/examples/running_redis_service.rst index 886f473ef2..9687f0cfa8 100644 --- a/docs/sources/examples/running_redis_service.rst +++ b/docs/sources/examples/running_redis_service.rst @@ -44,7 +44,7 @@ use a container link to provide access to our Redis database. .. code-block:: bash - sudo docker run -name redis -d /redis + sudo docker run --name redis -d /redis Create your web application container ------------------------------------- @@ -56,7 +56,7 @@ Redis instance running inside that container to only this container. .. code-block:: bash - sudo docker run -link redis:db -i -t ubuntu:12.10 /bin/bash + sudo docker run --link redis:db -i -t ubuntu:12.10 /bin/bash Once inside our freshly created container we need to install Redis to get the ``redis-cli`` binary to test our connection. diff --git a/docs/sources/examples/running_ssh_service.rst b/docs/sources/examples/running_ssh_service.rst index a0ce532d8d..52fe1f5914 100644 --- a/docs/sources/examples/running_ssh_service.rst +++ b/docs/sources/examples/running_ssh_service.rst @@ -25,9 +25,12 @@ smooth, but it gives you a good idea. .. raw:: html -
- -
+ You can also get this sshd container by using: diff --git a/docs/sources/examples/using_supervisord.rst b/docs/sources/examples/using_supervisord.rst index c32ba0cc0b..eed063292d 100644 --- a/docs/sources/examples/using_supervisord.rst +++ b/docs/sources/examples/using_supervisord.rst @@ -70,7 +70,7 @@ Let's see what is inside our ``supervisord.conf`` file. command=/usr/sbin/sshd -D [program:apache2] - command=/bin/bash -c "source /etc/apache2/envvars && /usr/sbin/apache2 -DFOREGROUND" + command=/bin/bash -c "source /etc/apache2/envvars && exec /usr/sbin/apache2 -DFOREGROUND" The ``supervisord.conf`` configuration file contains directives that configure Supervisor and the processes it manages. The first block ``[supervisord]`` diff --git a/docs/sources/faq.rst b/docs/sources/faq.rst index e2e16c362b..037f4d797b 100644 --- a/docs/sources/faq.rst +++ b/docs/sources/faq.rst @@ -26,7 +26,7 @@ Does Docker run on Mac OS X or Windows? Not at this time, Docker currently only runs on Linux, but you can use VirtualBox to run Docker in a virtual machine on your box, and get the best of both worlds. Check out the - :ref:`install_using_vagrant` and :ref:`windows` installation + :ref:`macosx` and :ref:`windows` installation guides. How do containers compare to virtual machines? @@ -172,8 +172,9 @@ Linux: - Fedora 19/20+ - RHEL 6.5+ - Centos 6+ -- Gento +- Gentoo - ArchLinux +- openSUSE 12.3+ Cloud: @@ -195,7 +196,7 @@ Where can I find more answers? * `Docker user mailinglist`_ * `Docker developer mailinglist`_ * `IRC, docker on freenode`_ - * `Github`_ + * `GitHub`_ * `Ask questions on Stackoverflow`_ * `Join the conversation on Twitter`_ diff --git a/docs/sources/index.rst b/docs/sources/index.rst index 1fb82f3bec..346a6619c5 100644 --- a/docs/sources/index.rst +++ b/docs/sources/index.rst @@ -5,26 +5,21 @@ Introduction ------------ -``docker``, the Linux Container Runtime, runs Unix processes with -strong guarantees of isolation across servers. 
Your software runs -repeatably everywhere because its :ref:`container_def` includes any -dependencies. +Docker is an open-source engine to easily create lightweight, portable, +self-sufficient containers from any application. The same container that a +developer builds and tests on a laptop can run at scale, in production, on +VMs, bare metal, OpenStack clusters, or any major infrastructure provider. -``docker`` runs three ways: +Common use cases for Docker include: -* as a daemon to manage LXC containers on your :ref:`Linux host - ` (``sudo docker -d``) -* as a :ref:`CLI ` which talks to the daemon's `REST API - `_ (``docker run ...``) -* as a client of :ref:`Repositories ` - that let you share what you've built (``docker pull, docker - commit``). +- Automating the packaging and deployment of web applications. +- Automated testing and continuous integration/deployment. +- Deploying and scaling databases and backend services in a service-oriented environment. +- Building custom PaaS environments, either from scratch or as an extension of off-the-shelf platforms like OpenShift or Cloud Foundry. -Each use of ``docker`` is documented here. The features of Docker are -currently in active development, so this documentation will change -frequently. +Please note Docker is currently under heavy development. It should not be used in production (yet). -For an overview of Docker, please see the `Introduction +For a high-level overview of Docker, please see the `Introduction `_. 
When you're ready to start working with Docker, we have a `quick start `_ and a more in-depth guide to :ref:`ubuntu_linux` and other diff --git a/docs/sources/installation/archlinux.rst b/docs/sources/installation/archlinux.rst index 3859317c44..2d823bfd46 100644 --- a/docs/sources/installation/archlinux.rst +++ b/docs/sources/installation/archlinux.rst @@ -71,21 +71,3 @@ To start on system boot: :: sudo systemctl enable docker - -Network Configuration ---------------------- - -IPv4 packet forwarding is disabled by default on Arch, so internet access from inside -the container may not work. - -To enable the forwarding, run as root on the host system: - -:: - - sysctl net.ipv4.ip_forward=1 - -And, to make it persistent across reboots, enable it on the host's **/etc/sysctl.d/docker.conf**: - -:: - - net.ipv4.ip_forward=1 diff --git a/docs/sources/installation/binaries.rst b/docs/sources/installation/binaries.rst index f06a8d6c5f..976e94e344 100644 --- a/docs/sources/installation/binaries.rst +++ b/docs/sources/installation/binaries.rst @@ -12,19 +12,37 @@ Binaries **This instruction set is meant for hackers who want to try out Docker on a variety of environments.** -Before following these directions, you should really check if a packaged version -of Docker is already available for your distribution. We have packages for many -distributions, and more keep showing up all the time! +Before following these directions, you should really check if a +packaged version of Docker is already available for your distribution. +We have packages for many distributions, and more keep showing up all +the time! -Check Your Kernel ------------------ -Your host's Linux kernel must meet the Docker :ref:`kernel` - -Check for User Space Tools +Check runtime dependencies -------------------------- -You must have a working installation of the `lxc `_ utilities and library. +.. 
DOC COMMENT: this should be kept in sync with + https://github.com/dotcloud/docker/blob/master/hack/PACKAGERS.md#runtime-dependencies + +To run properly, docker needs the following software to be installed at runtime: + +- iproute2 version 3.5 or later (build after 2012-05-21), and + specifically the "ip" utility +- iptables version 1.4 or later +- The LXC utility scripts (http://lxc.sourceforge.net) version 0.8 or later +- Git version 1.7 or later +- XZ Utils 4.9 or later + + +Check kernel dependencies +------------------------- + +Docker in daemon mode has specific kernel requirements. For details, +check your distribution in :ref:`installation_list`. + +Note that Docker also has a client mode, which can run on virtually +any linux kernel (it even builds on OSX!). + Get the docker binary: ---------------------- @@ -44,6 +62,40 @@ Run the docker daemon sudo ./docker -d & +.. _dockergroup: + +Giving non-root access +---------------------- + +The ``docker`` daemon always runs as the root user, and since Docker +version 0.5.2, the ``docker`` daemon binds to a Unix socket instead of +a TCP port. By default that Unix socket is owned by the user *root*, +and so, by default, you can access it with ``sudo``. + +Starting in version 0.5.3, if you (or your Docker installer) create a +Unix group called *docker* and add users to it, then the ``docker`` +daemon will make the ownership of the Unix socket read/writable by the +*docker* group when the daemon starts. The ``docker`` daemon must +always run as the root user, but if you run the ``docker`` client as a +user in the *docker* group then you don't need to add ``sudo`` to all +the client commands. + +.. warning:: The *docker* group is root-equivalent. + + +Upgrades +-------- + +To upgrade your manual installation of Docker, first kill the docker +daemon: + +.. code-block:: bash + + killall docker + +Then follow the regular installation steps. + + Run your first container! 
------------------------- diff --git a/docs/sources/installation/frugalware.rst b/docs/sources/installation/frugalware.rst index cda6c4bfc4..de2b92ae10 100644 --- a/docs/sources/installation/frugalware.rst +++ b/docs/sources/installation/frugalware.rst @@ -60,21 +60,3 @@ To start on system boot: :: sudo systemctl enable lxc-docker - -Network Configuration ---------------------- - -IPv4 packet forwarding is disabled by default on FrugalWare, so Internet access from inside -the container may not work. - -To enable packet forwarding, run the following command as the ``root`` user on the host system: - -:: - - sysctl net.ipv4.ip_forward=1 - -And, to make it persistent across reboots, add the following to a file named **/etc/sysctl.d/docker.conf**: - -:: - - net.ipv4.ip_forward=1 diff --git a/docs/sources/installation/gentoolinux.rst b/docs/sources/installation/gentoolinux.rst index 0e8809f7b5..421af0a1e7 100644 --- a/docs/sources/installation/gentoolinux.rst +++ b/docs/sources/installation/gentoolinux.rst @@ -82,19 +82,3 @@ To start on system boot: .. code-block:: bash sudo systemctl enable docker.service - -Network Configuration -^^^^^^^^^^^^^^^^^^^^^ - -IPv4 packet forwarding is disabled by default, so internet access from inside -the container will not work unless ``net.ipv4.ip_forward`` is enabled: - -.. code-block:: bash - - sudo sysctl -w net.ipv4.ip_forward=1 - -Or, to enable it more permanently: - -.. code-block:: bash - - echo net.ipv4.ip_forward = 1 | sudo tee /etc/sysctl.d/docker.conf diff --git a/docs/sources/installation/google.rst b/docs/sources/installation/google.rst index 62af581fea..88118778a2 100644 --- a/docs/sources/installation/google.rst +++ b/docs/sources/installation/google.rst @@ -43,21 +43,14 @@ $ gcutil ssh docker-playground docker-playground:~$ -5. Enable IP forwarding: - -.. code-block:: bash - - docker-playground:~$ echo net.ipv4.ip_forward=1 | sudo tee /etc/sysctl.d/99-docker.conf - docker-playground:~$ sudo sysctl --system - -6. 
Install the latest Docker release and configure it to start when the instance boots: +5. Install the latest Docker release and configure it to start when the instance boots: .. code-block:: bash docker-playground:~$ curl get.docker.io | bash docker-playground:~$ sudo update-rc.d docker defaults -7. If running in zones: ``us-central1-a``, ``europe-west1-1``, and ``europe-west1-b``, the docker daemon must be started with the ``-mtu`` flag. Without the flag, you may experience intermittent network pauses. +6. If running in zones: ``us-central1-a``, ``europe-west1-1``, and ``europe-west1-b``, the docker daemon must be started with the ``-mtu`` flag. Without the flag, you may experience intermittent network pauses. `See this issue `_ for more details. .. code-block:: bash @@ -65,7 +58,7 @@ docker-playground:~$ echo 'DOCKER_OPTS="$DOCKER_OPTS -mtu 1460"' | sudo tee -a /etc/default/docker docker-playground:~$ sudo service docker restart -8. Start a new container: +7. Start a new container: .. code-block:: bash diff --git a/docs/sources/installation/index.rst b/docs/sources/installation/index.rst index 9026b1f7f4..04c155d885 100644 --- a/docs/sources/installation/index.rst +++ b/docs/sources/installation/index.rst @@ -22,13 +22,11 @@ Contents: fedora archlinux gentoolinux + openSUSE frugalware - vagrant + mac windows amazon rackspace google - kernel binaries - security - upgrading diff --git a/docs/sources/installation/kernel.rst b/docs/sources/installation/kernel.rst deleted file mode 100644 index 8338cfdc88..0000000000 --- a/docs/sources/installation/kernel.rst +++ /dev/null @@ -1,152 +0,0 @@ -:title: Kernel Requirements -:description: Kernel supports -:keywords: kernel requirements, kernel support, docker, installation, cgroups, namespaces - -.. _kernel: - -Kernel Requirements -=================== - -In short, Docker has the following kernel requirements: - -- Linux version 3.8 or above. - -- Cgroups and namespaces must be enabled. 
- -*Note: as of 0.7 docker no longer requires aufs. AUFS support is still available as an optional driver.* - -The officially supported kernel is the one recommended by the -:ref:`ubuntu_linux` installation path. It is the one that most developers -will use, and the one that receives the most attention from the core -contributors. If you decide to go with a different kernel and hit a bug, -please try to reproduce it with the official kernels first. - -If you cannot or do not want to use the "official" kernels, -here is some technical background about the features (both optional and -mandatory) that docker needs to run successfully. - - -Linux version 3.8 or above --------------------------- - -Kernel versions 3.2 to 3.5 are not stable when used with docker. -In some circumstances, you will experience kernel "oopses", or even crashes. -The symptoms include: - -- a container being killed in the middle of an operation (e.g. an ``apt-get`` - command doesn't complete); -- kernel messages including mentioning calls to ``mntput`` or - ``d_hash_and_lookup``; -- kernel crash causing the machine to freeze for a few minutes, or even - completely. - -Additionally, kernels prior 3.4 did not implement ``reboot_pid_ns``, -which means that the ``reboot()`` syscall could reboot the host machine, -instead of terminating the container. To work around that problem, -LXC userland tools (since version 0.8) automatically drop the ``SYS_BOOT`` -capability when necessary. Still, if you run a pre-3.4 kernel with pre-0.8 -LXC tools, be aware that containers can reboot the whole host! This is -not something that Docker wants to address in the short term, since you -shouldn't use kernels prior 3.8 with Docker anyway. - -While it is still possible to use older kernels for development, it is -really not advised to do so. - -Docker checks the kernel version when it starts, and emits a warning if it -detects something older than 3.8. - -See issue `#407 `_ for details. 
- - -Cgroups and namespaces ----------------------- - -You need to enable namespaces and cgroups, to the extent of what is needed -to run LXC containers. Technically, while namespaces have been introduced -in the early 2.6 kernels, we do not advise to try any kernel before 2.6.32 -to run LXC containers. Note that 2.6.32 has some documented issues regarding -network namespace setup and teardown; those issues are not a risk if you -run containers in a private environment, but can lead to denial-of-service -attacks if you want to run untrusted code in your containers. For more details, -see `LP#720095 `_. - -Kernels 2.6.38, and every version since 3.2, have been deployed successfully -to run containerized production workloads. Feature-wise, there is no huge -improvement between 2.6.38 and up to 3.6 (as far as docker is concerned!). - - - - -Extra Cgroup Controllers ------------------------- - -Most control groups can be enabled or disabled individually. For instance, -you can decide that you do not want to compile support for the CPU or memory -controller. In some cases, the feature can be enabled or disabled at boot -time. It is worth mentioning that some distributions (like Debian) disable -"expensive" features, like the memory controller, because they can have -a significant performance impact. - -In the specific case of the memory cgroup, docker will detect if the cgroup -is available or not. If it's not, it will print a warning, and it won't -use the feature. If you want to enable that feature -- read on! - - -Memory and Swap Accounting on Debian/Ubuntu -------------------------------------------- - -If you use Debian or Ubuntu kernels, and want to enable memory and swap -accounting, you must add the following command-line parameters to your kernel:: - - cgroup_enable=memory swapaccount=1 - -On Debian or Ubuntu systems, if you use the default GRUB bootloader, you can -add those parameters by editing ``/etc/default/grub`` and extending -``GRUB_CMDLINE_LINUX``. 
Look for the following line:: - - GRUB_CMDLINE_LINUX="" - -And replace it by the following one:: - - GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1" - -Then run ``update-grub``, and reboot. - -Details -------- - -To automatically check some of the requirements below, you can run `lxc-checkconfig`. - -Networking: - -- CONFIG_BRIDGE -- CONFIG_NETFILTER_XT_MATCH_ADDRTYPE -- CONFIG_NF_NAT -- CONFIG_NF_NAT_IPV4 -- CONFIG_NF_NAT_NEEDED - -LVM: - -- CONFIG_BLK_DEV_DM -- CONFIG_DM_THIN_PROVISIONING -- CONFIG_EXT4_FS - -Namespaces: - -- CONFIG_NAMESPACES -- CONFIG_UTS_NS -- CONFIG_IPC_NS -- CONFIG_UID_NS -- CONFIG_PID_NS -- CONFIG_NET_NS - -Cgroups: - -- CONFIG_CGROUPS - -Cgroup controllers (optional but highly recommended): - -- CONFIG_CGROUP_CPUACCT -- CONFIG_BLK_CGROUP -- CONFIG_MEMCG -- CONFIG_MEMCG_SWAP diff --git a/docs/sources/installation/mac.rst b/docs/sources/installation/mac.rst new file mode 100644 index 0000000000..6fc5ed10bf --- /dev/null +++ b/docs/sources/installation/mac.rst @@ -0,0 +1,160 @@ +:title: Requirements and Installation on Mac OS X 10.6 Snow Leopard +:description: Please note this project is currently under heavy development. It should not be used in production. +:keywords: Docker, Docker documentation, requirements, virtualbox, ssh, linux, os x, osx, mac + +.. _macosx: + +======== +Mac OS X +======== + +.. note:: + + These instructions are available with the new release of Docker + (version 0.8). However, they are subject to change. + +.. include:: install_header.inc + +Docker is supported on Mac OS X 10.6 "Snow Leopard" or newer. + +How To Install Docker On Mac OS X +================================= + +VirtualBox +---------- + +Docker on OS X needs VirtualBox to run. To begin with, head over to +`VirtualBox Download Page`_ and get the tool for ``OS X hosts x86/amd64``. + +.. _VirtualBox Download Page: https://www.virtualbox.org/wiki/Downloads + +Once the download is complete, open the disk image, run the set up file +(i.e. 
``VirtualBox.pkg``) and install VirtualBox. Do not simply copy the +package without running the installer. + +boot2docker +----------- + +`boot2docker`_ provides a handy script to easily manage the VM running the +``docker`` daemon. It also takes care of the installation for the OS image +that is used for the job. + +.. _GitHub page: https://github.com/steeve/boot2docker + +Open up a new terminal window, if you have not already. + +Run the following commands to get boot2docker: + +.. code-block:: bash + + # Enter the installation directory + cd ~/bin + + # Get the file + curl https://raw.github.com/steeve/boot2docker/master/boot2docker > boot2docker + + # Mark it executable + chmod +x boot2docker + +Docker OS X Client +------------------ + +The ``docker`` daemon is accessed using the ``docker`` client. + +Run the following commands to get it downloaded and set up: + +.. code-block:: bash + + # Get the file + curl -o docker http://get.docker.io/builds/Darwin/x86_64/docker-latest + + # Mark it executable + chmod +x docker + + # Set the environment variable for the docker daemon + export DOCKER_HOST=tcp:// + + # Copy the executable file + sudo cp docker /usr/local/bin/ + +And that’s it! Let’s check out how to use it. + +How To Use Docker On Mac OS X +============================= + +The ``docker`` daemon (via boot2docker) +--------------------------------------- + +Inside the ``~/bin`` directory, run the following commands: + +.. code-block:: bash + + # Initiate the VM + ./boot2docker init + + # Run the VM (the docker daemon) + ./boot2docker up + + # To see all available commands: + ./boot2docker + + # Usage ./boot2docker {init|start|up|pause|stop|restart|status|info|delete|ssh|download} + +The ``docker`` client +--------------------- + +Once the VM with the ``docker`` daemon is up, you can use the ``docker`` +client just like any other application. + +.. 
code-block:: bash + + docker version + # Client version: 0.7.6 + # Go version (client): go1.2 + # Git commit (client): bc3b2ec + # Server version: 0.7.5 + # Git commit (server): c348c04 + # Go version (server): go1.2 + +SSH-ing The VM +-------------- + +If you feel the need to connect to the VM, you can simply run: + +.. code-block:: bash + + ./boot2docker ssh + + # User: docker + # Pwd: tcuser + +You can now continue with the :ref:`hello_world` example. + +Learn More +========== + +boot2docker: +------------ + +See the GitHub page for `boot2docker`_. + +.. _boot2docker: https://github.com/steeve/boot2docker + +If SSH complains about keys: +---------------------------- + +.. code-block:: bash + + ssh-keygen -R '[localhost]:2022' + +About the way Docker works on Mac OS X: +--------------------------------------- + +Docker has two key components: the ``docker`` daemon and the ``docker`` +client. The tool works by client commanding the daemon. In order to +work and do its magic, the daemon makes use of some Linux Kernel +features (e.g. LXC, name spaces etc.), which are not supported by OS X. +Therefore, the solution of getting Docker to run on OS X consists of +running it inside a lightweight virtual machine. In order to simplify +things, Docker comes with a bash script to make this whole process as +easy as possible (i.e. boot2docker). diff --git a/docs/sources/installation/openSUSE.rst b/docs/sources/installation/openSUSE.rst new file mode 100644 index 0000000000..ded5de44a4 --- /dev/null +++ b/docs/sources/installation/openSUSE.rst @@ -0,0 +1,73 @@ +:title: Installation on openSUSE +:description: Docker installation on openSUSE. +:keywords: openSUSE, virtualbox, docker, documentation, installation + +.. _openSUSE: + +openSUSE +======== + +.. include:: install_header.inc + +.. include:: install_unofficial.inc + +Docker is available in **openSUSE 12.3 and later**. 
Please note that due to the +current Docker limitations Docker is able to run only on the **64 bit** +architecture. + +Installation +------------ + +The ``docker`` package from the `Virtualization project`_ on `OBS`_ provides +Docker on openSUSE. + + +To proceed with Docker installation please add the right Virtualization +repository. + +.. code-block:: bash + + # openSUSE 12.3 + sudo zypper ar -f http://download.opensuse.org/repositories/Virtualization/openSUSE_12.3/ Virtualization + + # openSUSE 13.1 + sudo zypper ar -f http://download.opensuse.org/repositories/Virtualization/openSUSE_13.1/ Virtualization + + +Install the Docker package. + +.. code-block:: bash + + sudo zypper in docker + +It's also possible to install Docker using openSUSE's 1-click install. Just +visit `this`_ page, select your openSUSE version and click on the installation +link. This will add the right repository to your system and it will +also install the `docker` package. + +Now that it's installed, let's start the Docker daemon. + +.. code-block:: bash + + sudo systemctl start docker + +If we want Docker to start at boot, we should also: + +.. code-block:: bash + + sudo systemctl enable docker + +The `docker` package creates a new group named `docker`. Users, other than +`root` user, need to be part of this group in order to interact with the +Docker daemon. + +.. code-block:: bash + + sudo usermod -G docker + + +**Done!**, now continue with the :ref:`hello_world` example. + +.. _Virtualization project: https://build.opensuse.org/project/show/Virtualization +.. _OBS: https://build.opensuse.org/ +.. _this: http://software.opensuse.org/package/docker diff --git a/docs/sources/installation/ubuntulinux.rst b/docs/sources/installation/ubuntulinux.rst index 8480979099..3d6ee6415d 100644 --- a/docs/sources/installation/ubuntulinux.rst +++ b/docs/sources/installation/ubuntulinux.rst @@ -35,7 +35,7 @@ Dependencies **Linux kernel 3.8** -Due to a bug in LXC, docker works best on the 3.8 kernel. 
Precise +Due to a bug in LXC, Docker works best on the 3.8 kernel. Precise comes with a 3.2 kernel, so we need to upgrade it. The kernel you'll install when following these steps comes with AUFS built in. We also include the generic headers to enable packages that depend on them, @@ -167,13 +167,73 @@ Type ``exit`` to exit **Done!**, now continue with the :ref:`hello_world` example. + +Giving non-root access +---------------------- + +The ``docker`` daemon always runs as the root user, and since Docker version +0.5.2, the ``docker`` daemon binds to a Unix socket instead of a TCP port. By +default that Unix socket is owned by the user *root*, and so, by default, you +can access it with ``sudo``. + +Starting in version 0.5.3, if you (or your Docker installer) create a +Unix group called *docker* and add users to it, then the ``docker`` +daemon will make the ownership of the Unix socket read/writable by the +*docker* group when the daemon starts. The ``docker`` daemon must +always run as the root user, but if you run the ``docker`` client as a user in +the *docker* group then you don't need to add ``sudo`` to all the +client commands. + +.. warning:: The *docker* group is root-equivalent. + +**Example:** + +.. code-block:: bash + + # Add the docker group if it doesn't already exist. + sudo groupadd docker + + # Add the connected user "${USER}" to the docker group. + # Change the user name to match your preferred user. + # You may have to logout and log back in again for + # this to take effect. + sudo gpasswd -a ${USER} docker + + # Restart the Docker daemon. + sudo service docker restart + + +Upgrade +-------- + +To install the latest version of docker, use the standard ``apt-get`` method: + + +.. code-block:: bash + + # update your sources list + sudo apt-get update + + # install the latest + sudo apt-get install lxc-docker + +Troubleshooting +^^^^^^^^^^^^^^^ + +On Linux Mint, the ``cgroups-lite`` package is not installed by default. 
+Before Docker will work correctly, you will need to install this via: + +.. code-block:: bash + + sudo apt-get update && sudo apt-get install cgroups-lite + .. _ufw: Docker and UFW ^^^^^^^^^^^^^^ Docker uses a bridge to manage container networking. By default, UFW drops all -`forwarding` traffic. As a result will you need to enable UFW forwarding: +`forwarding` traffic. As a result you will need to enable UFW forwarding: .. code-block:: bash diff --git a/docs/sources/installation/upgrading.rst b/docs/sources/installation/upgrading.rst deleted file mode 100644 index c760115545..0000000000 --- a/docs/sources/installation/upgrading.rst +++ /dev/null @@ -1,73 +0,0 @@ -:title: Upgrading -:description: These instructions are for upgrading Docker -:keywords: Docker, Docker documentation, upgrading docker, upgrade - -.. _upgrading: - -Upgrading -========= - -The technique for upgrading ``docker`` to a newer version depends on -how you installed ``docker``. - -.. versionadded:: 0.5.3 - You may wish to add a ``docker`` group to your system to avoid using sudo with ``docker``. (see :ref:`dockergroup`) - - -After ``apt-get`` ------------------ - -If you installed Docker using ``apt-get`` or Vagrant, then you should -use ``apt-get`` to upgrade. - -.. versionadded:: 0.6 - Add Docker repository information to your system first. - -.. code-block:: bash - - # Add the Docker repository key to your local keychain - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 - - # Add the Docker repository to your apt sources list. - sudo sh -c "echo deb https://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list" - - # update your sources list - sudo apt-get update - - # install the latest - sudo apt-get install lxc-docker - - -After manual installation -------------------------- - -If you installed the Docker :ref:`binaries` then follow these steps: - - -.. 
code-block:: bash - - # kill the running docker daemon - killall docker - - -.. code-block:: bash - - # get the latest binary - wget http://get.docker.io/builds/Linux/x86_64/docker-latest -O docker - - # make it executable - chmod +x docker - - -Start docker in daemon mode (``-d``) and disconnect, running the -daemon in the background (``&``). Starting as ``./docker`` guarantees -to run the version in your current directory rather than a version -which might reside in your path. - -.. code-block:: bash - - # start the new version - sudo ./docker -d & - - -Alternatively you can replace the docker binary in ``/usr/local/bin``. diff --git a/docs/sources/installation/vagrant.rst b/docs/sources/installation/vagrant.rst deleted file mode 100644 index 81acf76f5e..0000000000 --- a/docs/sources/installation/vagrant.rst +++ /dev/null @@ -1,80 +0,0 @@ -:title: Using Vagrant (Mac, Linux) -:description: This guide will setup a new virtualbox virtual machine with docker installed on your computer. -:keywords: Docker, Docker documentation, virtualbox, vagrant, git, ssh, putty, cygwin - -.. _install_using_vagrant: - -Using Vagrant (Mac, Linux) -========================== - -This guide will setup a new virtualbox virtual machine with docker -installed on your computer. This works on most operating systems, -including MacOSX, Windows, Linux, FreeBSD and others. If you can -install these and have at least 400MB RAM to spare you should be good. - -Install Vagrant and Virtualbox ------------------------------- - -.. include:: install_header.inc - -.. include:: install_unofficial.inc - -#. Install virtualbox from https://www.virtualbox.org/ (or use your - package manager) -#. Install vagrant from http://www.vagrantup.com/ (or use your package - manager) -#. Install git if you had not installed it before, check if it is - installed by running ``git`` in a terminal window - - -Spin it up ----------- - -1. Fetch the docker sources (this includes the ``Vagrantfile`` for - machine setup). 
- - .. code-block:: bash - - git clone https://github.com/dotcloud/docker.git - -2. Change directory to docker - - .. code-block:: bash - - cd docker - -3. Run vagrant from the sources directory - - .. code-block:: bash - - vagrant up - - Vagrant will: - - * Download the 'official' Precise64 base ubuntu virtual machine image from vagrantup.com - * Boot this image in virtualbox - * Follow official :ref:`ubuntu_linux` installation path - - You now have a Ubuntu Virtual Machine running with docker pre-installed. - -Connect -------- - -To access the VM and use Docker, Run ``vagrant ssh`` from the same directory as where you ran -``vagrant up``. Vagrant will connect you to the correct VM. - -.. code-block:: bash - - vagrant ssh - -Run ------ - -Now you are in the VM, run docker - -.. code-block:: bash - - sudo docker - - -Continue with the :ref:`hello_world` example. diff --git a/docs/sources/installation/windows.rst b/docs/sources/installation/windows.rst index b487606a48..c980a32df9 100644 --- a/docs/sources/installation/windows.rst +++ b/docs/sources/installation/windows.rst @@ -4,8 +4,8 @@ .. _windows: -Using Vagrant (Windows) -======================= +Installing Docker on Windows +============================ Docker can run on Windows using a VM like VirtualBox. You then run Linux within the VM. diff --git a/docs/sources/api/README.md b/docs/sources/reference/api/README.md similarity index 100% rename from docs/sources/api/README.md rename to docs/sources/reference/api/README.md diff --git a/docs/sources/api/docker_remote_api.rst b/docs/sources/reference/api/docker_remote_api.rst similarity index 94% rename from docs/sources/api/docker_remote_api.rst rename to docs/sources/reference/api/docker_remote_api.rst index b6615ad7d6..f7cd7faf4f 100644 --- a/docs/sources/api/docker_remote_api.rst +++ b/docs/sources/reference/api/docker_remote_api.rst @@ -26,15 +26,33 @@ Docker Remote API 2. 
Versions =========== -The current version of the API is 1.8 +The current version of the API is 1.9 Calling /images//insert is the same as calling -/v1.8/images//insert +/v1.9/images//insert You can still call an old version of the api using /v1.0/images//insert +v1.9 +**** + +Full Documentation +------------------ + +:doc:`docker_remote_api_v1.9` + +What's new +---------- + +.. http:post:: /build + + **New!** This endpoint now takes a serialized ConfigFile which it uses to + resolve the proper registry auth credentials for pulling the base image. + Clients which previously implemented the version accepting an AuthConfig + object must be updated. + v1.8 **** @@ -139,7 +157,7 @@ What's new [ { - "RepoTag": [ + "RepoTags": [ "ubuntu:12.04", "ubuntu:precise", "ubuntu:latest" @@ -150,7 +168,7 @@ What's new "VirtualSize": 131506275 }, { - "RepoTag": [ + "RepoTags": [ "ubuntu:12.10", "ubuntu:quantal" ], diff --git a/docs/sources/api/docker_remote_api_v1.0.rst b/docs/sources/reference/api/docker_remote_api_v1.0.rst similarity index 100% rename from docs/sources/api/docker_remote_api_v1.0.rst rename to docs/sources/reference/api/docker_remote_api_v1.0.rst diff --git a/docs/sources/api/docker_remote_api_v1.1.rst b/docs/sources/reference/api/docker_remote_api_v1.1.rst similarity index 100% rename from docs/sources/api/docker_remote_api_v1.1.rst rename to docs/sources/reference/api/docker_remote_api_v1.1.rst diff --git a/docs/sources/api/docker_remote_api_v1.2.rst b/docs/sources/reference/api/docker_remote_api_v1.2.rst similarity index 100% rename from docs/sources/api/docker_remote_api_v1.2.rst rename to docs/sources/reference/api/docker_remote_api_v1.2.rst diff --git a/docs/sources/api/docker_remote_api_v1.3.rst b/docs/sources/reference/api/docker_remote_api_v1.3.rst similarity index 100% rename from docs/sources/api/docker_remote_api_v1.3.rst rename to docs/sources/reference/api/docker_remote_api_v1.3.rst diff --git a/docs/sources/api/docker_remote_api_v1.4.rst 
b/docs/sources/reference/api/docker_remote_api_v1.4.rst similarity index 100% rename from docs/sources/api/docker_remote_api_v1.4.rst rename to docs/sources/reference/api/docker_remote_api_v1.4.rst diff --git a/docs/sources/api/docker_remote_api_v1.5.rst b/docs/sources/reference/api/docker_remote_api_v1.5.rst similarity index 100% rename from docs/sources/api/docker_remote_api_v1.5.rst rename to docs/sources/reference/api/docker_remote_api_v1.5.rst diff --git a/docs/sources/api/docker_remote_api_v1.6.rst b/docs/sources/reference/api/docker_remote_api_v1.6.rst similarity index 100% rename from docs/sources/api/docker_remote_api_v1.6.rst rename to docs/sources/reference/api/docker_remote_api_v1.6.rst diff --git a/docs/sources/api/docker_remote_api_v1.7.rst b/docs/sources/reference/api/docker_remote_api_v1.7.rst similarity index 99% rename from docs/sources/api/docker_remote_api_v1.7.rst rename to docs/sources/reference/api/docker_remote_api_v1.7.rst index d47f672df0..28c5ba30f2 100644 --- a/docs/sources/api/docker_remote_api_v1.7.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.7.rst @@ -643,7 +643,7 @@ List Images [ { - "RepoTag": [ + "RepoTags": [ "ubuntu:12.04", "ubuntu:precise", "ubuntu:latest" @@ -654,7 +654,7 @@ List Images "VirtualSize": 131506275 }, { - "RepoTag": [ + "RepoTags": [ "ubuntu:12.10", "ubuntu:quantal" ], diff --git a/docs/sources/reference/api/docker_remote_api_v1.8.rst b/docs/sources/reference/api/docker_remote_api_v1.8.rst new file mode 100644 index 0000000000..6ccc6eca94 --- /dev/null +++ b/docs/sources/reference/api/docker_remote_api_v1.8.rst @@ -0,0 +1,1281 @@ +:title: Remote API v1.8 +:description: API Documentation for Docker +:keywords: API, Docker, rcli, REST, documentation + +:orphan: + +====================== +Docker Remote API v1.8 +====================== + +.. contents:: Table of Contents + +1. 
Brief introduction +===================== + +- The Remote API has replaced rcli +- The daemon listens on ``unix:///var/run/docker.sock``, but you can + :ref:`bind_docker`. +- The API tends to be REST, but for some complex commands, like + ``attach`` or ``pull``, the HTTP connection is hijacked to transport + ``stdout, stdin`` and ``stderr`` + +2. Endpoints +============ + +2.1 Containers +-------------- + +List containers +*************** + +.. http:get:: /containers/json + + List containers + + **Example request**: + + .. sourcecode:: http + + GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Image": "base:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "9cd87474be90", + "Image": "base:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "3176a2479c92", + "Image": "base:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Image": "base:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + } + ] + + :query all: 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default + :query limit: Show ``limit`` last created containers, include non-running ones. + :query since: Show only containers created since Id, include non-running ones. + :query before: Show only containers created before Id, include non-running ones. 
+ :query size: 1/True/true or 0/False/false, Show the containers sizes + :statuscode 200: no error + :statuscode 400: bad parameter + :statuscode 500: server error + + +Create a container +****************** + +.. http:post:: /containers/create + + Create a container + + **Example request**: + + .. sourcecode:: http + + POST /containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "date" + ], + "Dns":null, + "Image":"base", + "Volumes":{ + "/tmp": {} + }, + "VolumesFrom":"", + "WorkingDir":"", + "ExposedPorts":{ + "22/tcp": {} + } + } + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 201 OK + Content-Type: application/json + + { + "Id":"e90e34656806" + "Warnings":[] + } + + :jsonparam config: the container's configuration + :query name: Assign the specified name to the container. Must match ``/?[a-zA-Z0-9_-]+``. + :statuscode 201: no error + :statuscode 404: no such container + :statuscode 406: impossible to attach (container not running) + :statuscode 500: server error + + +Inspect a container +******************* + +.. http:get:: /containers/(id)/json + + Return low-level information on the container ``id`` + + **Example request**: + + .. sourcecode:: http + + GET /containers/4fa6e0f0c678/json HTTP/1.1 + + **Example response**: + + .. 
sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Created": "2013-05-07T14:51:42.041847+02:00", + "Path": "date", + "Args": [], + "Config": { + "Hostname": "4fa6e0f0c678", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Dns": null, + "Image": "base", + "Volumes": {}, + "VolumesFrom": "", + "WorkingDir":"" + + }, + "State": { + "Running": false, + "Pid": 0, + "ExitCode": 0, + "StartedAt": "2013-05-07T14:51:42.087658+02:01360", + "Ghost": false + }, + "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "NetworkSettings": { + "IpAddress": "", + "IpPrefixLen": 0, + "Gateway": "", + "Bridge": "", + "PortMapping": null + }, + "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", + "ResolvConfPath": "/etc/resolv.conf", + "Volumes": {}, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "LxcConf": [], + "Privileged": false, + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "49153" + } + ] + }, + "Links": null, + "PublishAllPorts": false + } + } + + :statuscode 200: no error + :statuscode 404: no such container + :statuscode 500: server error + + +List processes running inside a container +***************************************** + +.. http:get:: /containers/(id)/top + + List processes running inside the container ``id`` + + **Example request**: + + .. sourcecode:: http + + GET /containers/4fa6e0f0c678/top HTTP/1.1 + + **Example response**: + + .. 
sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles":[ + "USER", + "PID", + "%CPU", + "%MEM", + "VSZ", + "RSS", + "TTY", + "STAT", + "START", + "TIME", + "COMMAND" + ], + "Processes":[ + ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], + ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] + ] + } + + :query ps_args: ps arguments to use (eg. aux) + :statuscode 200: no error + :statuscode 404: no such container + :statuscode 500: server error + + +Inspect changes on a container's filesystem +******************************************* + +.. http:get:: /containers/(id)/changes + + Inspect changes on container ``id`` 's filesystem + + **Example request**: + + .. sourcecode:: http + + GET /containers/4fa6e0f0c678/changes HTTP/1.1 + + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path":"/dev", + "Kind":0 + }, + { + "Path":"/dev/kmsg", + "Kind":1 + }, + { + "Path":"/test", + "Kind":1 + } + ] + + :statuscode 200: no error + :statuscode 404: no such container + :statuscode 500: server error + + +Export a container +****************** + +.. http:get:: /containers/(id)/export + + Export the contents of container ``id`` + + **Example request**: + + .. sourcecode:: http + + GET /containers/4fa6e0f0c678/export HTTP/1.1 + + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ STREAM }} + + :statuscode 200: no error + :statuscode 404: no such container + :statuscode 500: server error + + +Start a container +***************** + +.. http:post:: /containers/(id)/start + + Start the container ``id`` + + **Example request**: + + .. 
sourcecode:: http + + POST /containers/(id)/start HTTP/1.1 + Content-Type: application/json + + { + "Binds":["/tmp:/tmp"], + "LxcConf":{"lxc.utsname":"docker"}, + "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts":false, + "Privileged":false + } + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 204 No Content + Content-Type: text/plain + + :jsonparam hostConfig: the container's host configuration (optional) + :statuscode 204: no error + :statuscode 404: no such container + :statuscode 500: server error + + +Stop a container +**************** + +.. http:post:: /containers/(id)/stop + + Stop the container ``id`` + + **Example request**: + + .. sourcecode:: http + + POST /containers/e90e34656806/stop?t=5 HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 204 OK + + :query t: number of seconds to wait before killing the container + :statuscode 204: no error + :statuscode 404: no such container + :statuscode 500: server error + + +Restart a container +******************* + +.. http:post:: /containers/(id)/restart + + Restart the container ``id`` + + **Example request**: + + .. sourcecode:: http + + POST /containers/e90e34656806/restart?t=5 HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 204 OK + + :query t: number of seconds to wait before killing the container + :statuscode 204: no error + :statuscode 404: no such container + :statuscode 500: server error + + +Kill a container +**************** + +.. http:post:: /containers/(id)/kill + + Kill the container ``id`` + + **Example request**: + + .. sourcecode:: http + + POST /containers/e90e34656806/kill HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 204 OK + + :statuscode 204: no error + :statuscode 404: no such container + :statuscode 500: server error + + +Attach to a container +********************* + +.. http:post:: /containers/(id)/attach + + Attach to the container ``id`` + + **Example request**: + + .. 
sourcecode:: http + + POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + + :query logs: 1/True/true or 0/False/false, return logs. Default false + :query stream: 1/True/true or 0/False/false, return stream. Default false + :query stdin: 1/True/true or 0/False/false, if stream=true, attach to stdin. Default false + :query stdout: 1/True/true or 0/False/false, if logs=true, return stdout log, if stream=true, attach to stdout. Default false + :query stderr: 1/True/true or 0/False/false, if logs=true, return stderr log, if stream=true, attach to stderr. Default false + :statuscode 200: no error + :statuscode 400: bad parameter + :statuscode 404: no such container + :statuscode 500: server error + + **Stream details**: + + When the TTY setting is enabled in + :http:post:`/containers/create`, the stream is the raw data + from the process PTY and client's stdin. When the TTY is + disabled, then the stream is multiplexed to separate stdout + and stderr. + + The format is a **Header** and a **Payload** (frame). + + **HEADER** + + The header will contain the information on which stream the + payload is written (stdout or stderr). It also contains the size of + the associated frame encoded on the last 4 bytes (uint32). + + It is encoded on the first 8 bytes like this:: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + + ``STREAM_TYPE`` can be: + + - 0: stdin (will be written on stdout) + - 1: stdout + - 2: stderr + + ``SIZE1, SIZE2, SIZE3, SIZE4`` are the 4 bytes of the uint32 size encoded as big endian. + + **PAYLOAD** + + The payload is the raw stream. 
+ + **IMPLEMENTATION** + + The simplest way to implement the Attach protocol is the following: + + 1) Read 8 bytes + 2) choose stdout or stderr depending on the first byte + 3) Extract the frame size from the last 4 bytes + 4) Read the extracted size and output it on the correct output + 5) Goto 1) + + + +Wait a container +**************** + +.. http:post:: /containers/(id)/wait + + Block until container ``id`` stops, then returns the exit code + + **Example request**: + + .. sourcecode:: http + + POST /containers/16253994b7c4/wait HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode":0} + + :statuscode 200: no error + :statuscode 404: no such container + :statuscode 500: server error + + +Remove a container +******************* + +.. http:delete:: /containers/(id) + + Remove the container ``id`` from the filesystem + + **Example request**: + + .. sourcecode:: http + + DELETE /containers/16253994b7c4?v=1 HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 204 OK + + :query v: 1/True/true or 0/False/false, Remove the volumes associated with the container. Default false + :statuscode 204: no error + :statuscode 400: bad parameter + :statuscode 404: no such container + :statuscode 500: server error + + +Copy files or folders from a container +************************************** + +.. http:post:: /containers/(id)/copy + + Copy files or folders of container ``id`` + + **Example request**: + + .. sourcecode:: http + + POST /containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource":"test.txt" + } + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ STREAM }} + + :statuscode 200: no error + :statuscode 404: no such container + :statuscode 500: server error + + +2.2 Images +---------- + +List Images +*********** + +.. http:get:: /images/json + + **Example request**: + + .. 
sourcecode:: http + + GET /images/json?all=0 HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275 + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135 + } + ] + + +Create an image +*************** + +.. http:post:: /images/create + + Create an image, either by pull it from the registry or by importing it + + **Example request**: + + .. sourcecode:: http + + POST /images/create?fromImage=base HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Pulling..."} + {"status":"Pulling", "progress":"1 B/ 100 B", "progressDetail":{"current":1, "total":100}} + {"error":"Invalid..."} + ... + + When using this endpoint to pull an image from the registry, + the ``X-Registry-Auth`` header can be used to include a + base64-encoded AuthConfig object. + + :query fromImage: name of the image to pull + :query fromSrc: source to import, - means stdin + :query repo: repository + :query tag: tag + :query registry: the registry to pull from + :reqheader X-Registry-Auth: base64-encoded AuthConfig object + :statuscode 200: no error + :statuscode 500: server error + + + +Insert a file in an image +************************* + +.. http:post:: /images/(name)/insert + + Insert a file from ``url`` in the image ``name`` at ``path`` + + **Example request**: + + .. sourcecode:: http + + POST /images/test/insert?path=/usr&url=myurl HTTP/1.1 + + **Example response**: + + .. 
sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Inserting..."} + {"status":"Inserting", "progress":"1/? (n/a)", "progressDetail":{"current":1}} + {"error":"Invalid..."} + ... + + :statuscode 200: no error + :statuscode 500: server error + + +Inspect an image +**************** + +.. http:get:: /images/(name)/json + + Return low-level information on the image ``name`` + + **Example request**: + + .. sourcecode:: http + + GET /images/base/json HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "parent":"27cf784147099545", + "created":"2013-03-23T22:24:18.818426-07:00", + "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "container_config": + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":false, + "AttachStderr":false, + "PortSpecs":null, + "Tty":true, + "OpenStdin":true, + "StdinOnce":false, + "Env":null, + "Cmd": ["/bin/bash"] + ,"Dns":null, + "Image":"base", + "Volumes":null, + "VolumesFrom":"", + "WorkingDir":"" + }, + "Size": 6824592 + } + + :statuscode 200: no error + :statuscode 404: no such image + :statuscode 500: server error + + +Get the history of an image +*************************** + +.. http:get:: /images/(name)/history + + Return the history of the image ``name`` + + **Example request**: + + .. sourcecode:: http + + GET /images/base/history HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id":"b750fe79269d", + "Created":1364102658, + "CreatedBy":"/bin/bash" + }, + { + "Id":"27cf78414709", + "Created":1364068391, + "CreatedBy":"" + } + ] + + :statuscode 200: no error + :statuscode 404: no such image + :statuscode 500: server error + + +Push an image on the registry +***************************** + +.. 
http:post:: /images/(name)/push + + Push the image ``name`` on the registry + + **Example request**: + + .. sourcecode:: http + + POST /images/test/push HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Pushing..."} + {"status":"Pushing", "progress":"1/? (n/a)", "progressDetail":{"current":1}} + {"error":"Invalid..."} + ... + + :query registry: the registry you want to push, optional + :reqheader X-Registry-Auth: include a base64-encoded AuthConfig object. + :statuscode 200: no error + :statuscode 404: no such image + :statuscode 500: server error + + +Tag an image into a repository +****************************** + +.. http:post:: /images/(name)/tag + + Tag the image ``name`` into a repository + + **Example request**: + + .. sourcecode:: http + + POST /images/test/tag?repo=myrepo&force=0 HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + + :query repo: The repository to tag in + :query force: 1/True/true or 0/False/false, default false + :statuscode 200: no error + :statuscode 400: bad parameter + :statuscode 404: no such image + :statuscode 409: conflict + :statuscode 500: server error + + +Remove an image +*************** + +.. http:delete:: /images/(name) + + Remove the image ``name`` from the filesystem + + **Example request**: + + .. sourcecode:: http + + DELETE /images/test HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged":"3e2f21a89f"}, + {"Deleted":"3e2f21a89f"}, + {"Deleted":"53b4f83ac9"} + ] + + :statuscode 200: no error + :statuscode 404: no such image + :statuscode 409: conflict + :statuscode 500: server error + + +Search images +************* + +.. http:get:: /images/search + + Search for an image in the docker index. + + .. note:: + + The response keys have changed from API v1.6 to reflect the JSON + sent by the registry server to the docker daemon's request. 
+ + **Example request**: + + .. sourcecode:: http + + GET /images/search?term=sshd HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_trusted": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_trusted": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_trusted": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + + :query term: term to search + :statuscode 200: no error + :statuscode 500: server error + + +2.3 Misc +-------- + +Build an image from Dockerfile via stdin +**************************************** + +.. http:post:: /build + + Build an image from Dockerfile via stdin + + **Example request**: + + .. sourcecode:: http + + POST /build HTTP/1.1 + + {{ STREAM }} + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream":"Step 1..."} + {"stream":"..."} + {"error":"Error...", "errorDetail":{"code": 123, "message": "Error..."}} + + + The stream must be a tar archive compressed with one of the + following algorithms: identity (no compression), gzip, bzip2, + xz. + + The archive must include a file called ``Dockerfile`` at its + root. It may include any number of other files, which will be + accessible in the build context (See the :ref:`ADD build command + `). + + :query t: repository name (and optionally a tag) to be applied to the resulting image in case of success + :query q: suppress verbose build output + :query nocache: do not use the cache when building the image + :reqheader Content-type: should be set to ``"application/tar"``. + :reqheader X-Registry-Auth: base64-encoded AuthConfig object + :statuscode 200: no error + :statuscode 500: server error + + + +Check auth configuration +************************ + +.. 
http:post:: /auth + + Get the default username and email + + **Example request**: + + .. sourcecode:: http + + POST /auth HTTP/1.1 + Content-Type: application/json + + { + "username":"hannibal", + "password":"xxxx", + "email":"hannibal@a-team.com", + "serveraddress":"https://index.docker.io/v1/" + } + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + + :statuscode 200: no error + :statuscode 204: no error + :statuscode 500: server error + + +Display system-wide information +******************************* + +.. http:get:: /info + + Display system-wide information + + **Example request**: + + .. sourcecode:: http + + GET /info HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers":11, + "Images":16, + "Debug":false, + "NFd": 11, + "NGoroutines":21, + "MemoryLimit":true, + "SwapLimit":false, + "IPv4Forwarding":true + } + + :statuscode 200: no error + :statuscode 500: server error + + +Show the docker version information +*********************************** + +.. http:get:: /version + + Show the docker version information + + **Example request**: + + .. sourcecode:: http + + GET /version HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version":"0.2.2", + "GitCommit":"5a2a5cc+CHANGES", + "GoVersion":"go1.0.3" + } + + :statuscode 200: no error + :statuscode 500: server error + + +Create a new image from a container's changes +********************************************* + +.. http:post:: /commit + + Create a new image from a container's changes + + **Example request**: + + .. sourcecode:: http + + POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 + + **Example response**: + + .. 
sourcecode:: http + + HTTP/1.1 201 OK + Content-Type: application/vnd.docker.raw-stream + + {"Id":"596069db4bf5"} + + :query container: source container + :query repo: repository + :query tag: tag + :query m: commit message + :query author: author (eg. "John Hannibal Smith ") + :query run: config automatically applied when the image is run. (ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]}) + :statuscode 201: no error + :statuscode 404: no such container + :statuscode 500: server error + + +Monitor Docker's events +*********************** + +.. http:get:: /events + + Get events from docker, either in real time via streaming, or via polling (using `since`) + + **Example request**: + + .. sourcecode:: http + + GET /events?since=1374067924 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924} + {"status":"start","id":"dfdf82bd3881","from":"base:latest","time":1374067924} + {"status":"stop","id":"dfdf82bd3881","from":"base:latest","time":1374067966} + {"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970} + + :query since: timestamp used for polling + :statuscode 200: no error + :statuscode 500: server error + +Get a tarball containing all images and tags in a repository +************************************************************ + +.. http:get:: /images/(name)/get + + Get a tarball containing all images and metadata for the repository specified by ``name``. + + **Example request** + + .. sourcecode:: http + + GET /images/ubuntu/get + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + + :statuscode 200: no error + :statuscode 500: server error + +Load a tarball with a set of images and tags into docker +******************************************************** + +.. 
http:post:: /images/load + + Load a set of images and tags into the docker repository. + + **Example request** + + .. sourcecode:: http + + POST /images/load + + Tarball in body + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + + :statuscode 200: no error + :statuscode 500: server error + +3. Going further +================ + +3.1 Inside 'docker run' +----------------------- + +Here are the steps of 'docker run' : + +* Create the container +* If the status code is 404, it means the image doesn't exists: + * Try to pull it + * Then retry to create the container +* Start the container +* If you are not in detached mode: + * Attach to the container, using logs=1 (to have stdout and stderr from the container's start) and stream=1 +* If in detached mode or only stdin is attached: + * Display the container's id + + +3.2 Hijacking +------------- + +In this version of the API, /attach, uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future. + +3.3 CORS Requests +----------------- + +To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode. + +.. code-block:: bash + + docker -d -H="192.168.1.9:4243" -api-enable-cors diff --git a/docs/sources/reference/api/docker_remote_api_v1.9.rst b/docs/sources/reference/api/docker_remote_api_v1.9.rst new file mode 100644 index 0000000000..cb406da82b --- /dev/null +++ b/docs/sources/reference/api/docker_remote_api_v1.9.rst @@ -0,0 +1,1281 @@ +:title: Remote API v1.9 +:description: API Documentation for Docker +:keywords: API, Docker, rcli, REST, documentation + +:orphan: + +====================== +Docker Remote API v1.9 +====================== + +.. contents:: Table of Contents + +1. Brief introduction +===================== + +- The Remote API has replaced rcli +- The daemon listens on ``unix:///var/run/docker.sock``, but you can + :ref:`bind_docker`. 
+- The API tends to be REST, but for some complex commands, like + ``attach`` or ``pull``, the HTTP connection is hijacked to transport + ``stdout, stdin`` and ``stderr`` + +2. Endpoints +============ + +2.1 Containers +-------------- + +List containers +*************** + +.. http:get:: /containers/json + + List containers + + **Example request**: + + .. sourcecode:: http + + GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Image": "base:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "9cd87474be90", + "Image": "base:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "3176a2479c92", + "Image": "base:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Image": "base:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + } + ] + + :query all: 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default + :query limit: Show ``limit`` last created containers, include non-running ones. + :query since: Show only containers created since Id, include non-running ones. + :query before: Show only containers created before Id, include non-running ones. + :query size: 1/True/true or 0/False/false, Show the containers sizes + :statuscode 200: no error + :statuscode 400: bad parameter + :statuscode 500: server error + + +Create a container +****************** + +.. 
http:post:: /containers/create + + Create a container + + **Example request**: + + .. sourcecode:: http + + POST /containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "date" + ], + "Dns":null, + "Image":"base", + "Volumes":{ + "/tmp": {} + }, + "VolumesFrom":"", + "WorkingDir":"", + "ExposedPorts":{ + "22/tcp": {} + } + } + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 201 OK + Content-Type: application/json + + { + "Id":"e90e34656806" + "Warnings":[] + } + + :jsonparam config: the container's configuration + :query name: Assign the specified name to the container. Must match ``/?[a-zA-Z0-9_-]+``. + :statuscode 201: no error + :statuscode 404: no such container + :statuscode 406: impossible to attach (container not running) + :statuscode 500: server error + + +Inspect a container +******************* + +.. http:get:: /containers/(id)/json + + Return low-level information on the container ``id`` + + **Example request**: + + .. sourcecode:: http + + GET /containers/4fa6e0f0c678/json HTTP/1.1 + + **Example response**: + + .. 
sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Created": "2013-05-07T14:51:42.041847+02:00", + "Path": "date", + "Args": [], + "Config": { + "Hostname": "4fa6e0f0c678", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Dns": null, + "Image": "base", + "Volumes": {}, + "VolumesFrom": "", + "WorkingDir":"" + + }, + "State": { + "Running": false, + "Pid": 0, + "ExitCode": 0, + "StartedAt": "2013-05-07T14:51:42.087658+02:01360", + "Ghost": false + }, + "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "NetworkSettings": { + "IpAddress": "", + "IpPrefixLen": 0, + "Gateway": "", + "Bridge": "", + "PortMapping": null + }, + "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", + "ResolvConfPath": "/etc/resolv.conf", + "Volumes": {}, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "LxcConf": [], + "Privileged": false, + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "49153" + } + ] + }, + "Links": null, + "PublishAllPorts": false + } + } + + :statuscode 200: no error + :statuscode 404: no such container + :statuscode 500: server error + + +List processes running inside a container +***************************************** + +.. http:get:: /containers/(id)/top + + List processes running inside the container ``id`` + + **Example request**: + + .. sourcecode:: http + + GET /containers/4fa6e0f0c678/top HTTP/1.1 + + **Example response**: + + .. 
sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles":[ + "USER", + "PID", + "%CPU", + "%MEM", + "VSZ", + "RSS", + "TTY", + "STAT", + "START", + "TIME", + "COMMAND" + ], + "Processes":[ + ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], + ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] + ] + } + + :query ps_args: ps arguments to use (eg. aux) + :statuscode 200: no error + :statuscode 404: no such container + :statuscode 500: server error + + +Inspect changes on a container's filesystem +******************************************* + +.. http:get:: /containers/(id)/changes + + Inspect changes on container ``id`` 's filesystem + + **Example request**: + + .. sourcecode:: http + + GET /containers/4fa6e0f0c678/changes HTTP/1.1 + + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path":"/dev", + "Kind":0 + }, + { + "Path":"/dev/kmsg", + "Kind":1 + }, + { + "Path":"/test", + "Kind":1 + } + ] + + :statuscode 200: no error + :statuscode 404: no such container + :statuscode 500: server error + + +Export a container +****************** + +.. http:get:: /containers/(id)/export + + Export the contents of container ``id`` + + **Example request**: + + .. sourcecode:: http + + GET /containers/4fa6e0f0c678/export HTTP/1.1 + + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ STREAM }} + + :statuscode 200: no error + :statuscode 404: no such container + :statuscode 500: server error + + +Start a container +***************** + +.. http:post:: /containers/(id)/start + + Start the container ``id`` + + **Example request**: + + .. 
sourcecode:: http + + POST /containers/(id)/start HTTP/1.1 + Content-Type: application/json + + { + "Binds":["/tmp:/tmp"], + "LxcConf":{"lxc.utsname":"docker"}, + "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts":false, + "Privileged":false + } + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 204 No Content + Content-Type: text/plain + + :jsonparam hostConfig: the container's host configuration (optional) + :statuscode 204: no error + :statuscode 404: no such container + :statuscode 500: server error + + +Stop a container +**************** + +.. http:post:: /containers/(id)/stop + + Stop the container ``id`` + + **Example request**: + + .. sourcecode:: http + + POST /containers/e90e34656806/stop?t=5 HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 204 OK + + :query t: number of seconds to wait before killing the container + :statuscode 204: no error + :statuscode 404: no such container + :statuscode 500: server error + + +Restart a container +******************* + +.. http:post:: /containers/(id)/restart + + Restart the container ``id`` + + **Example request**: + + .. sourcecode:: http + + POST /containers/e90e34656806/restart?t=5 HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 204 OK + + :query t: number of seconds to wait before killing the container + :statuscode 204: no error + :statuscode 404: no such container + :statuscode 500: server error + + +Kill a container +**************** + +.. http:post:: /containers/(id)/kill + + Kill the container ``id`` + + **Example request**: + + .. sourcecode:: http + + POST /containers/e90e34656806/kill HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 204 OK + + :statuscode 204: no error + :statuscode 404: no such container + :statuscode 500: server error + + +Attach to a container +********************* + +.. http:post:: /containers/(id)/attach + + Attach to the container ``id`` + + **Example request**: + + .. 
sourcecode:: http
+
+           POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1
+
+        **Example response**:
+
+        .. sourcecode:: http
+
+           HTTP/1.1 200 OK
+           Content-Type: application/vnd.docker.raw-stream
+
+           {{ STREAM }}
+
+        :query logs: 1/True/true or 0/False/false, return logs. Default false
+        :query stream: 1/True/true or 0/False/false, return stream. Default false
+        :query stdin: 1/True/true or 0/False/false, if stream=true, attach to stdin. Default false
+        :query stdout: 1/True/true or 0/False/false, if logs=true, return stdout log, if stream=true, attach to stdout. Default false
+        :query stderr: 1/True/true or 0/False/false, if logs=true, return stderr log, if stream=true, attach to stderr. Default false
+        :statuscode 200: no error
+        :statuscode 400: bad parameter
+        :statuscode 404: no such container
+        :statuscode 500: server error
+
+        **Stream details**:
+
+        When the TTY setting is enabled in
+        :http:post:`/containers/create`, the stream is the raw data
+        from the process PTY and client's stdin. When the TTY is
+        disabled, then the stream is multiplexed to separate stdout
+        and stderr.
+
+        The format is a **Header** and a **Payload** (frame).
+
+        **HEADER**
+
+        The header will contain the information on which stream the
+        payload is written (stdout or stderr). It also contains the size of
+        the associated frame encoded in the last 4 bytes (uint32).
+
+        It is encoded on the first 8 bytes like this::
+
+            header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}
+
+        ``STREAM_TYPE`` can be:
+
+        - 0: stdin (will be written on stdout)
+        - 1: stdout
+        - 2: stderr
+
+        ``SIZE1, SIZE2, SIZE3, SIZE4`` are the 4 bytes of the uint32 size encoded as big endian.
+
+        **PAYLOAD**
+
+        The payload is the raw stream.
+
+        **IMPLEMENTATION**
+
+        The simplest way to implement the Attach protocol is the following:
+
+        1) Read 8 bytes
+        2) Choose stdout or stderr depending on the first byte
+        3) Extract the frame size from the last 4 bytes
+        4) Read the extracted size and output it on the correct output
+        5) Goto 1)
+
+
+
+Wait a container
+****************
+
+.. http:post:: /containers/(id)/wait
+
+        Block until container ``id`` stops, then returns the exit code
+
+        **Example request**:
+
+        .. sourcecode:: http
+
+           POST /containers/16253994b7c4/wait HTTP/1.1
+
+        **Example response**:
+
+        .. sourcecode:: http
+
+           HTTP/1.1 200 OK
+           Content-Type: application/json
+
+           {"StatusCode":0}
+
+        :statuscode 200: no error
+        :statuscode 404: no such container
+        :statuscode 500: server error
+
+
+Remove a container
+*******************
+
+.. http:delete:: /containers/(id)
+
+        Remove the container ``id`` from the filesystem
+
+        **Example request**:
+
+        .. sourcecode:: http
+
+           DELETE /containers/16253994b7c4?v=1 HTTP/1.1
+
+        **Example response**:
+
+        .. sourcecode:: http
+
+           HTTP/1.1 204 OK
+
+        :query v: 1/True/true or 0/False/false, Remove the volumes associated to the container. Default false
+        :statuscode 204: no error
+        :statuscode 400: bad parameter
+        :statuscode 404: no such container
+        :statuscode 500: server error
+
+
+Copy files or folders from a container
+**************************************
+
+.. http:post:: /containers/(id)/copy
+
+        Copy files or folders of container ``id``
+
+        **Example request**:
+
+        .. sourcecode:: http
+
+           POST /containers/4fa6e0f0c678/copy HTTP/1.1
+           Content-Type: application/json
+
+           {
+               "Resource":"test.txt"
+           }
+
+        **Example response**:
+
+        .. sourcecode:: http
+
+           HTTP/1.1 200 OK
+           Content-Type: application/octet-stream
+
+           {{ STREAM }}
+
+        :statuscode 200: no error
+        :statuscode 404: no such container
+        :statuscode 500: server error
+
+
+2.2 Images
+----------
+
+List Images
+***********
+
+.. http:get:: /images/json
+
+        **Example request**:
+
+        ..
sourcecode:: http + + GET /images/json?all=0 HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275 + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135 + } + ] + + +Create an image +*************** + +.. http:post:: /images/create + + Create an image, either by pull it from the registry or by importing it + + **Example request**: + + .. sourcecode:: http + + POST /images/create?fromImage=base HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Pulling..."} + {"status":"Pulling", "progress":"1 B/ 100 B", "progressDetail":{"current":1, "total":100}} + {"error":"Invalid..."} + ... + + When using this endpoint to pull an image from the registry, + the ``X-Registry-Auth`` header can be used to include a + base64-encoded AuthConfig object. + + :query fromImage: name of the image to pull + :query fromSrc: source to import, - means stdin + :query repo: repository + :query tag: tag + :query registry: the registry to pull from + :reqheader X-Registry-Auth: base64-encoded AuthConfig object + :statuscode 200: no error + :statuscode 500: server error + + + +Insert a file in an image +************************* + +.. http:post:: /images/(name)/insert + + Insert a file from ``url`` in the image ``name`` at ``path`` + + **Example request**: + + .. sourcecode:: http + + POST /images/test/insert?path=/usr&url=myurl HTTP/1.1 + + **Example response**: + + .. 
sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Inserting..."} + {"status":"Inserting", "progress":"1/? (n/a)", "progressDetail":{"current":1}} + {"error":"Invalid..."} + ... + + :statuscode 200: no error + :statuscode 500: server error + + +Inspect an image +**************** + +.. http:get:: /images/(name)/json + + Return low-level information on the image ``name`` + + **Example request**: + + .. sourcecode:: http + + GET /images/base/json HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "parent":"27cf784147099545", + "created":"2013-03-23T22:24:18.818426-07:00", + "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "container_config": + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":false, + "AttachStderr":false, + "PortSpecs":null, + "Tty":true, + "OpenStdin":true, + "StdinOnce":false, + "Env":null, + "Cmd": ["/bin/bash"] + ,"Dns":null, + "Image":"base", + "Volumes":null, + "VolumesFrom":"", + "WorkingDir":"" + }, + "Size": 6824592 + } + + :statuscode 200: no error + :statuscode 404: no such image + :statuscode 500: server error + + +Get the history of an image +*************************** + +.. http:get:: /images/(name)/history + + Return the history of the image ``name`` + + **Example request**: + + .. sourcecode:: http + + GET /images/base/history HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id":"b750fe79269d", + "Created":1364102658, + "CreatedBy":"/bin/bash" + }, + { + "Id":"27cf78414709", + "Created":1364068391, + "CreatedBy":"" + } + ] + + :statuscode 200: no error + :statuscode 404: no such image + :statuscode 500: server error + + +Push an image on the registry +***************************** + +.. 
http:post:: /images/(name)/push + + Push the image ``name`` on the registry + + **Example request**: + + .. sourcecode:: http + + POST /images/test/push HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Pushing..."} + {"status":"Pushing", "progress":"1/? (n/a)", "progressDetail":{"current":1}}} + {"error":"Invalid..."} + ... + + :query registry: the registry you wan to push, optional + :reqheader X-Registry-Auth: include a base64-encoded AuthConfig object. + :statuscode 200: no error + :statuscode 404: no such image + :statuscode 500: server error + + +Tag an image into a repository +****************************** + +.. http:post:: /images/(name)/tag + + Tag the image ``name`` into a repository + + **Example request**: + + .. sourcecode:: http + + POST /images/test/tag?repo=myrepo&force=0 HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + + :query repo: The repository to tag in + :query force: 1/True/true or 0/False/false, default false + :statuscode 200: no error + :statuscode 400: bad parameter + :statuscode 404: no such image + :statuscode 409: conflict + :statuscode 500: server error + + +Remove an image +*************** + +.. http:delete:: /images/(name) + + Remove the image ``name`` from the filesystem + + **Example request**: + + .. sourcecode:: http + + DELETE /images/test HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged":"3e2f21a89f"}, + {"Deleted":"3e2f21a89f"}, + {"Deleted":"53b4f83ac9"} + ] + + :statuscode 200: no error + :statuscode 404: no such image + :statuscode 409: conflict + :statuscode 500: server error + + +Search images +************* + +.. http:get:: /images/search + + Search for an image in the docker index. + + .. note:: + + The response keys have changed from API v1.6 to reflect the JSON + sent by the registry server to the docker daemon's request. 
+ + **Example request**: + + .. sourcecode:: http + + GET /images/search?term=sshd HTTP/1.1 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_trusted": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_trusted": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_trusted": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + + :query term: term to search + :statuscode 200: no error + :statuscode 500: server error + + +2.3 Misc +-------- + +Build an image from Dockerfile via stdin +**************************************** + +.. http:post:: /build + + Build an image from Dockerfile via stdin + + **Example request**: + + .. sourcecode:: http + + POST /build HTTP/1.1 + + {{ STREAM }} + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream":"Step 1..."} + {"stream":"..."} + {"error":"Error...", "errorDetail":{"code": 123, "message": "Error..."}} + + + The stream must be a tar archive compressed with one of the + following algorithms: identity (no compression), gzip, bzip2, + xz. + + The archive must include a file called ``Dockerfile`` at its + root. It may include any number of other files, which will be + accessible in the build context (See the :ref:`ADD build command + `). + + :query t: repository name (and optionally a tag) to be applied to the resulting image in case of success + :query q: suppress verbose build output + :query nocache: do not use the cache when building the image + :reqheader Content-type: should be set to ``"application/tar"``. + :reqheader X-Registry-Config: base64-encoded ConfigFile object + :statuscode 200: no error + :statuscode 500: server error + + + +Check auth configuration +************************ + +.. 
http:post:: /auth
+
+        Get the default username and email
+
+        **Example request**:
+
+        .. sourcecode:: http
+
+           POST /auth HTTP/1.1
+           Content-Type: application/json
+
+           {
+                "username":"hannibal",
+                "password":"xxxx",
+                "email":"hannibal@a-team.com",
+                "serveraddress":"https://index.docker.io/v1/"
+           }
+
+        **Example response**:
+
+        .. sourcecode:: http
+
+           HTTP/1.1 200 OK
+
+        :statuscode 200: no error
+        :statuscode 204: no error
+        :statuscode 500: server error
+
+
+Display system-wide information
+*******************************
+
+.. http:get:: /info
+
+        Display system-wide information
+
+        **Example request**:
+
+        .. sourcecode:: http
+
+           GET /info HTTP/1.1
+
+        **Example response**:
+
+        .. sourcecode:: http
+
+           HTTP/1.1 200 OK
+           Content-Type: application/json
+
+           {
+                "Containers":11,
+                "Images":16,
+                "Debug":false,
+                "NFd": 11,
+                "NGoroutines":21,
+                "MemoryLimit":true,
+                "SwapLimit":false,
+                "IPv4Forwarding":true
+           }
+
+        :statuscode 200: no error
+        :statuscode 500: server error
+
+
+Show the docker version information
+***********************************
+
+.. http:get:: /version
+
+        Show the docker version information
+
+        **Example request**:
+
+        .. sourcecode:: http
+
+           GET /version HTTP/1.1
+
+        **Example response**:
+
+        .. sourcecode:: http
+
+           HTTP/1.1 200 OK
+           Content-Type: application/json
+
+           {
+                "Version":"0.2.2",
+                "GitCommit":"5a2a5cc+CHANGES",
+                "GoVersion":"go1.0.3"
+           }
+
+        :statuscode 200: no error
+        :statuscode 500: server error
+
+
+Create a new image from a container's changes
+*********************************************
+
+.. http:post:: /commit
+
+        Create a new image from a container's changes
+
+        **Example request**:
+
+        .. sourcecode:: http
+
+           POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1
+
+        **Example response**:
+
+        ..
sourcecode:: http + + HTTP/1.1 201 OK + Content-Type: application/vnd.docker.raw-stream + + {"Id":"596069db4bf5"} + + :query container: source container + :query repo: repository + :query tag: tag + :query m: commit message + :query author: author (eg. "John Hannibal Smith ") + :query run: config automatically applied when the image is run. (ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]}) + :statuscode 201: no error + :statuscode 404: no such container + :statuscode 500: server error + + +Monitor Docker's events +*********************** + +.. http:get:: /events + + Get events from docker, either in real time via streaming, or via polling (using `since`) + + **Example request**: + + .. sourcecode:: http + + GET /events?since=1374067924 + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924} + {"status":"start","id":"dfdf82bd3881","from":"base:latest","time":1374067924} + {"status":"stop","id":"dfdf82bd3881","from":"base:latest","time":1374067966} + {"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970} + + :query since: timestamp used for polling + :statuscode 200: no error + :statuscode 500: server error + +Get a tarball containing all images and tags in a repository +************************************************************ + +.. http:get:: /images/(name)/get + + Get a tarball containing all images and metadata for the repository specified by ``name``. + + **Example request** + + .. sourcecode:: http + + GET /images/ubuntu/get + + **Example response**: + + .. sourcecode:: http + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + + :statuscode 200: no error + :statuscode 500: server error + +Load a tarball with a set of images and tags into docker +******************************************************** + +.. 
http:post:: /images/load
+
+        Load a set of images and tags into the docker repository.
+
+        **Example request**
+
+        .. sourcecode:: http
+
+           POST /images/load
+
+           Tarball in body
+
+        **Example response**:
+
+        .. sourcecode:: http
+
+           HTTP/1.1 200 OK
+
+        :statuscode 200: no error
+        :statuscode 500: server error
+
+3. Going further
+================
+
+3.1 Inside 'docker run'
+-----------------------
+
+Here are the steps of 'docker run':
+
+* Create the container
+* If the status code is 404, it means the image doesn't exist:
+        * Try to pull it
+        * Then retry to create the container
+* Start the container
+* If you are not in detached mode:
+        * Attach to the container, using logs=1 (to have stdout and stderr from the container's start) and stream=1
+* If in detached mode or only stdin is attached:
+        * Display the container's id
+
+
+3.2 Hijacking
+-------------
+
+In this version of the API, /attach uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future.
+
+3.3 CORS Requests
+-----------------
+
+To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode.
+
+..
code-block:: bash + + docker -d -H="192.168.1.9:4243" -api-enable-cors diff --git a/docs/sources/api/index.rst b/docs/sources/reference/api/index.rst similarity index 100% rename from docs/sources/api/index.rst rename to docs/sources/reference/api/index.rst diff --git a/docs/sources/api/index_api.rst b/docs/sources/reference/api/index_api.rst similarity index 100% rename from docs/sources/api/index_api.rst rename to docs/sources/reference/api/index_api.rst diff --git a/docs/sources/api/registry_api.rst b/docs/sources/reference/api/registry_api.rst similarity index 100% rename from docs/sources/api/registry_api.rst rename to docs/sources/reference/api/registry_api.rst diff --git a/docs/sources/api/registry_index_spec.rst b/docs/sources/reference/api/registry_index_spec.rst similarity index 100% rename from docs/sources/api/registry_index_spec.rst rename to docs/sources/reference/api/registry_index_spec.rst diff --git a/docs/sources/api/remote_api_client_libraries.rst b/docs/sources/reference/api/remote_api_client_libraries.rst similarity index 100% rename from docs/sources/api/remote_api_client_libraries.rst rename to docs/sources/reference/api/remote_api_client_libraries.rst diff --git a/docs/sources/use/builder.rst b/docs/sources/reference/builder.rst similarity index 76% rename from docs/sources/use/builder.rst rename to docs/sources/reference/builder.rst index 81145a6ee8..2f71b87a93 100644 --- a/docs/sources/use/builder.rst +++ b/docs/sources/reference/builder.rst @@ -1,12 +1,12 @@ -:title: Build Images (Dockerfile Reference) +:title: Dockerfile Reference :description: Dockerfiles use a simple DSL which allows you to automate the steps you would normally manually take to create an image. :keywords: builder, docker, Dockerfile, automation, image creation .. 
_dockerbuilder: -=================================== -Build Images (Dockerfile Reference) -=================================== +==================== +Dockerfile Reference +==================== **Docker can act as a builder** and read instructions from a text ``Dockerfile`` to automate the steps you would otherwise take manually @@ -40,9 +40,31 @@ build succeeds: ``sudo docker build -t shykes/myapp .`` The Docker daemon will run your steps one-by-one, committing the -result if necessary, before finally outputting the ID of your new -image. The Docker daemon will automatically clean up the context you -sent. +result to a new image if necessary, before finally outputting the +ID of your new image. The Docker daemon will automatically clean +up the context you sent. + +Note that each instruction is run independently, and causes a new image +to be created - so ``RUN cd /tmp`` will not have any effect on the next +instructions. + +Whenever possible, Docker will re-use the intermediate images, +accelerating ``docker build`` significantly (indicated by ``Using cache``: + +.. code-block:: bash + + $ docker build -t SvenDowideit/ambassador . + Uploading context 10.24 kB + Uploading context + Step 1 : FROM docker-ut + ---> cbba202fe96b + Step 2 : MAINTAINER SvenDowideit@home.org.au + ---> Using cache + ---> 51182097be13 + Step 3 : CMD env | grep _TCP= | sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' | sh && top + ---> Using cache + ---> 1a5ffc17324d + Successfully built 1a5ffc17324d When you're done with your build, you're ready to look into :ref:`image_push`. @@ -125,17 +147,23 @@ the generated images. 3.3 RUN ------- - ``RUN `` +RUN has 2 forms: -The ``RUN`` instruction will execute any commands on the current image -and commit the results. The resulting committed image will be used for -the next step in the Dockerfile. 
+* ``RUN `` (the command is run in a shell - ``/bin/sh -c``) +* ``RUN ["executable", "param1", "param2"]`` (*exec* form) + +The ``RUN`` instruction will execute any commands in a new layer on top +of the current image and commit the results. The resulting committed image +will be used for the next step in the Dockerfile. Layering ``RUN`` instructions and generating commits conforms to the core concepts of Docker where commits are cheap and containers can be created from any point in an image's history, much like source control. +The *exec* form makes it possible to avoid shell string munging, and to ``RUN`` +commands using a base image that does not contain ``/bin/sh``. + Known Issues (RUN) .................. @@ -374,6 +402,64 @@ the image. The ``WORKDIR`` instruction sets the working directory in which the command given by ``CMD`` is executed. +3.11 ONBUILD +------------ + + ``ONBUILD [INSTRUCTION]`` + +The ``ONBUILD`` instruction adds to the image a "trigger" instruction to be +executed at a later time, when the image is used as the base for another build. +The trigger will be executed in the context of the downstream build, as if it +had been inserted immediately after the *FROM* instruction in the downstream +Dockerfile. + +Any build instruction can be registered as a trigger. + +This is useful if you are building an image which will be used as a base to build +other images, for example an application build environment or a daemon which may be +customized with user-specific configuration. + +For example, if your image is a reusable python application builder, it will require +application source code to be added in a particular directory, and it might require +a build script to be called *after* that. You can't just call *ADD* and *RUN* now, +because you don't yet have access to the application source code, and it will be +different for each application build. 
You could simply provide application developers +with a boilerplate Dockerfile to copy-paste into their application, but that is +inefficient, error-prone and difficult to update because it mixes with +application-specific code. + +The solution is to use *ONBUILD* to register in advance instructions to run later, +during the next build stage. + +Here's how it works: + +1. When it encounters an *ONBUILD* instruction, the builder adds a trigger to + the metadata of the image being built. + The instruction does not otherwise affect the current build. + +2. At the end of the build, a list of all triggers is stored in the image manifest, + under the key *OnBuild*. They can be inspected with *docker inspect*. + +3. Later the image may be used as a base for a new build, using the *FROM* instruction. + As part of processing the *FROM* instruction, the downstream builder looks for *ONBUILD* + triggers, and executes them in the same order they were registered. If any of the + triggers fail, the *FROM* instruction is aborted which in turn causes the build + to fail. If all triggers succeed, the FROM instruction completes and the build + continues as usual. + +4. Triggers are cleared from the final image after being executed. In other words + they are not inherited by "grand-children" builds. + +For example you might add something like this: + +.. code-block:: bash + + [...] + ONBUILD ADD . /app/src + ONBUILD RUN /usr/local/bin/python-build --dir /app/src + [...] + + .. _dockerfile_examples: 4. Dockerfile Examples diff --git a/docs/sources/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst similarity index 78% rename from docs/sources/commandline/cli.rst rename to docs/sources/reference/commandline/cli.rst index 67c8b06189..ae77080309 100644 --- a/docs/sources/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -18,6 +18,45 @@ To list available commands, either run ``docker`` with no parameters or execute ... +.. 
_cli_options: + +Types of Options +---------------- + +Boolean +~~~~~~~ + +Boolean options look like ``-d=false``. The value you see is the +default value which gets set if you do **not** use the boolean +flag. If you do call ``run -d``, that sets the opposite boolean value, +so in this case, ``true``, and so ``docker run -d`` **will** run in +"detached" mode, in the background. Other boolean options are similar +-- specifying them will set the value to the opposite of the default +value. + +Multi +~~~~~ + +Options like ``-a=[]`` indicate they can be specified multiple times:: + + docker run -a stdin -a stdout -a stderr -i -t ubuntu /bin/bash + +Sometimes this can use a more complex value string, as for ``-v``:: + + docker run -v /host:/container example/mysql + +Strings and Integers +~~~~~~~~~~~~~~~~~~~~ + +Options like ``-name=""`` expect a string, and they can only be +specified once. Options like ``-c=0`` expect an integer, and they can +only be specified once. + +---- + +Commands +-------- + .. _cli_daemon: ``daemon`` @@ -26,22 +65,22 @@ To list available commands, either run ``docker`` with no parameters or execute :: Usage of docker: - -D=false: Enable debug mode - -H=[unix:///var/run/docker.sock]: tcp://[host[:port]] to bind or unix://[/path/to/socket] to use. When host=[0.0.0.0], port=[4243] or path=[/var/run/docker.sock] is omitted, default values are used. 
- -api-enable-cors=false: Enable CORS headers in the remote API - -b="": Attach containers to a pre-existing network bridge; use 'none' to disable container networking - -bip="": Use the provided CIDR notation address for the dynamically created bridge (docker0); Mutually exclusive of -b - -d=false: Enable daemon mode - -dns="": Force docker to use specific DNS servers - -g="/var/lib/docker": Path to use as the root of the docker runtime - -icc=true: Enable inter-container communication - -ip="0.0.0.0": Default IP address to use when binding container ports - -iptables=true: Disable docker's addition of iptables rules - -mtu=1500: Set the containers network mtu - -p="/var/run/docker.pid": Path to use for daemon PID file - -r=true: Restart previously running containers - -s="": Force the docker runtime to use a specific storage driver - -v=false: Print version information and quit + -D, --debug=false: Enable debug mode + -H, --host=[]: Multiple tcp://host:port or unix://path/to/socket to bind in daemon mode, single connection otherwise. systemd socket activation can be used with fd://[socketfd]. 
+ --api-enable-cors=false: Enable CORS headers in the remote API + -b, --bridge="": Attach containers to a pre-existing network bridge; use 'none' to disable container networking + --bip="": Use this CIDR notation address for the network bridge's IP, not compatible with -b + -d, --daemon=false: Enable daemon mode + --dns=[]: Force docker to use specific DNS servers + -g, --graph="/var/lib/docker": Path to use as the root of the docker runtime + --icc=true: Enable inter-container communication + --ip="0.0.0.0": Default IP address to use when binding container ports + --iptables=true: Disable docker's addition of iptables rules + -p, --pidfile="/var/run/docker.pid": Path to use for daemon PID file + -r, --restart=true: Restart previously running containers + -s, --storage-driver="": Force the docker runtime to use a specific storage driver + -v, --version=false: Print version information and quit + -mtu, --mtu=0: Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if not default route is available The Docker daemon is the persistent process that manages containers. Docker uses the same binary for both the daemon and client. To run the daemon you provide the ``-d`` flag. @@ -64,6 +103,11 @@ the ``-H`` flag for the client. # both are equal +To run the daemon with `systemd socket activation `_, use ``docker -d -H fd://``. +Using ``fd://`` will work perfectly for most setups but you can also specify individual sockets too ``docker -d -H fd://3``. +If the specified socket activated files aren't found then docker will exit. +You can find examples of using systemd socket activation with docker and systemd in the `docker source tree `_. + .. _cli_attach: ``attach`` @@ -75,8 +119,8 @@ the ``-H`` flag for the client. Attach to a running container. 
- -nostdin=false: Do not attach stdin - -sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) + --no-stdin=false: Do not attach stdin + --sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) You can detach from the container again (and leave it running) with ``CTRL-c`` (for a quiet exit) or ``CTRL-\`` to get a stacktrace of @@ -135,11 +179,11 @@ Examples: Usage: docker build [OPTIONS] PATH | URL | - Build a new container image from the source code at PATH - -t="": Repository name (and optionally a tag) to be applied + -t, --time="": Repository name (and optionally a tag) to be applied to the resulting image in case of success. - -q=false: Suppress verbose build output. - -no-cache: Do not use the cache when building the image. - -rm: Remove intermediate containers after a successful build + -q, --quiet=false: Suppress verbose build output. + --no-cache: Do not use the cache when building the image. + --rm: Remove intermediate containers after a successful build The files at ``PATH`` or ``URL`` are called the "context" of the build. The build process may refer to any of the files in the context, for example when @@ -233,9 +277,9 @@ by using the ``git://`` schema. Create a new image from a container's changes - -m="": Commit message - -author="": Author (eg. "John Hannibal Smith " - -run="": Configuration to be applied when the image is launched with `docker run`. + -m, --message="": Commit message + -a, --author="": Author (eg. "John Hannibal Smith " + --run="": Configuration to be applied when the image is launched with `docker run`. (ex: -run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}') .. _cli_commit_examples: @@ -279,7 +323,7 @@ run ``ls /etc``. Full -run example ................. 
-The ``-run`` JSON hash changes the ``Config`` section when running ``docker inspect CONTAINERID`` +The ``--run`` JSON hash changes the ``Config`` section when running ``docker inspect CONTAINERID`` or ``config`` when running ``docker inspect IMAGEID``. (Multiline is okay within a single quote ``'``) @@ -379,7 +423,7 @@ For example: Get real time events from the server - -since="": Show previously created events and then stream. + --since="": Show previously created events and then stream. (either seconds since epoch, or date string as below) .. _cli_events_example: @@ -459,8 +503,8 @@ For example: Show the history of an image - -notrunc=false: Don't truncate output - -q=false: only show numeric IDs + --no-trunc=false: Don't truncate output + -q, --quiet=false: only show numeric IDs To see how the ``docker:latest`` image was built: @@ -507,11 +551,11 @@ To see how the ``docker:latest`` image was built: List images - -a=false: show all images (by default filter out the intermediate images used to build) - -notrunc=false: Don't truncate output - -q=false: only show numeric IDs - -tree=false: output graph in tree format - -viz=false: output graph in graphviz format + -a, --all=false: show all images (by default filter out the intermediate images used to build) + --no-trunc=false: Don't truncate output + -q, --quiet=false: only show numeric IDs + --tree=false: output graph in tree format + --viz=false: output graph in graphviz format Listing the most recently created images ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -535,7 +579,7 @@ Listing the full length image IDs .. code-block:: bash - $ sudo docker images -notrunc | head + $ sudo docker images --no-trunc | head REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE 77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182 19 hours ago 1.089 GB committest latest b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f 19 hours ago 1.089 GB @@ -552,7 +596,7 @@ Displaying images visually .. 
code-block:: bash - $ sudo docker images -viz | dot -Tpng -o docker.png + $ sudo docker images --viz | dot -Tpng -o docker.png .. image:: docker_images.gif :alt: Example inheritance graph of Docker images. @@ -563,7 +607,7 @@ Displaying image hierarchy .. code-block:: bash - $ sudo docker images -tree + $ sudo docker images --tree ├─8dbd9e392a96 Size: 131.5 MB (virtual 131.5 MB) Tags: ubuntu:12.04,ubuntu:latest,ubuntu:precise └─27cf78414709 Size: 180.1 MB (virtual 180.1 MB) @@ -702,7 +746,7 @@ Insert file from GitHub Return low-level information on a container/image - -format="": Format the output using the given go template. + -f, --format="": Format the output using the given go template. By default, this will render all results in a JSON array. If a format is specified, the given template will be executed for each result. @@ -721,7 +765,7 @@ fairly straightforward manner. .. code-block:: bash - $ sudo docker inspect -format='{{.NetworkSettings.IPAddress}}' $INSTANCE_ID + $ sudo docker inspect --format='{{.NetworkSettings.IPAddress}}' $INSTANCE_ID List All Port Bindings ...................... @@ -755,17 +799,21 @@ we ask for the ``HostPort`` field to get the public address. :: - Usage: docker kill CONTAINER [CONTAINER...] + Usage: docker kill [OPTIONS] CONTAINER [CONTAINER...] - Kill a running container (Send SIGKILL) + Kill a running container (send SIGKILL, or specified signal) -The main process inside the container will be sent SIGKILL. + -s, --signal="KILL": Signal to send to the container + +The main process inside the container will be sent SIGKILL, or any signal specified with option ``--signal``. Known Issues (kill) ~~~~~~~~~~~~~~~~~~~ * :issue:`197` indicates that ``docker kill`` may leave directories behind and make it difficult to remove the container. +* :issue:`3844` lxc 1.0.0 beta3 removed ``lxc-kill`` which is used by Docker versions before 0.8.0; + see the issue for a workaround. .. 
_cli_load: @@ -790,9 +838,9 @@ Known Issues (kill) Register or Login to the docker registry server - -e="": email - -p="": password - -u="": username + -e, --email="": email + -p, --password="": password + -u, --username="": username If you want to login to a private registry you can specify this by adding the server name. @@ -812,12 +860,14 @@ Known Issues (kill) Fetch the logs of a container + -f, --follow=false: Follow log output + The ``docker logs`` command is a convenience which batch-retrieves whatever logs are present at the time of execution. This does not guarantee execution order when combined with a ``docker run`` (i.e. your run may not have generated any logs at the time you execute ``docker logs``). -The ``docker logs -f`` command combines ``docker logs`` and ``docker attach``: +The ``docker logs --follow`` command combines ``docker logs`` and ``docker attach``: it will first return all logs from the beginning and then continue streaming new output from the container's stdout and stderr. @@ -845,9 +895,9 @@ new output from the container's stdout and stderr. List containers - -a=false: Show all containers. Only running containers are shown by default. - -notrunc=false: Don't truncate output - -q=false: Only display numeric IDs + -a, --all=false: Show all containers. Only running containers are shown by default. + --no-trunc=false: Don't truncate output + -q, --quiet=false: Only display numeric IDs Running ``docker ps`` showing 2 linked containers. @@ -856,7 +906,10 @@ Running ``docker ps`` showing 2 linked containers. 
$ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 4c01db0b339c ubuntu:12.04 bash 17 seconds ago Up 16 seconds webapp - d7886598dbe2 crosbymichael/redis:latest /redis-server --dir 33 minutes ago Up 33 minutes 6379/tcp redis,webapp/db + d7886598dbe2 crosbymichael/redis:latest /redis-server --dir 33 minutes ago Up 33 minutes 6379/tcp redis,webapp/db + fd2645e2e2b5 busybox:latest top 10 days ago Ghost insane_ptolemy + +The last container is marked as a ``Ghost`` container. It is a container that was running when the docker daemon was restarted (upgraded, or ``-H`` settings changed). The container is still running, but as this docker daemon process is not able to manage it, you can't attach to it. To bring them out of ``Ghost`` Status, you need to use ``docker kill`` or ``docker restart``. .. _cli_pull: @@ -903,7 +956,7 @@ Running ``docker ps`` showing 2 linked containers. Usage: docker rm [OPTIONS] CONTAINER Remove one or more containers - -link="": Remove the link instead of the actual container + --link="": Remove the link instead of the actual container Known Issues (rm) ~~~~~~~~~~~~~~~~~ @@ -926,7 +979,7 @@ This will remove the container referenced under the link ``/redis``. .. code-block:: bash - $ sudo docker rm -link /webapp/redis + $ sudo docker rm --link /webapp/redis /webapp/redis @@ -996,31 +1049,31 @@ image is removed. 
Run a command in a new container - -a=map[]: Attach to stdin, stdout or stderr - -c=0: CPU shares (relative weight) - -cidfile="": Write the container ID to the file - -d=false: Detached mode: Run container in the background, print new container id - -e=[]: Set environment variables - -h="": Container host name - -i=false: Keep stdin open even if not attached - -privileged=false: Give extended privileges to this container - -m="": Memory limit (format: , where unit = b, k, m or g) - -n=true: Enable networking for this container - -p=[]: Map a network port to the container - -rm=false: Automatically remove the container when it exits (incompatible with -d) - -t=false: Allocate a pseudo-tty - -u="": Username or UID - -dns=[]: Set custom dns servers for the container - -v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. If "container-dir" is missing, then docker creates a new volume. - -volumes-from="": Mount all volumes from the given container(s) - -entrypoint="": Overwrite the default entrypoint set by the image - -w="": Working directory inside the container - -lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" - -sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) - -expose=[]: Expose a port from the container without publishing it to your host - -link="": Add link to another container (name:alias) - -name="": Assign the specified name to the container. 
If no name is specific docker will generate a random name - -P=false: Publish all exposed ports to the host interfaces + -a, --attach=map[]: Attach to stdin, stdout or stderr + -c, --cpu-shares=0: CPU shares (relative weight) + --cidfile="": Write the container ID to the file + -d, --detach=false: Detached mode: Run container in the background, print new container id + -e, --env=[]: Set environment variables + -h, --host="": Container host name + -i, --interactive=false: Keep stdin open even if not attached + --privileged=false: Give extended privileges to this container + -m, --memory="": Memory limit (format: , where unit = b, k, m or g) + -n, --networking=true: Enable networking for this container + -p, --publish=[]: Map a network port to the container + --rm=false: Automatically remove the container when it exits (incompatible with -d) + -t, --tty=false: Allocate a pseudo-tty + -u, --user="": Username or UID + --dns=[]: Set custom dns servers for the container + -v, --volume=[]: Create a bind mount to a directory or file with: [host-path]:[container-path]:[rw|ro]. If a directory "container-path" is missing, then docker creates a new volume. + --volumes-from="": Mount all volumes from the given container(s) + --entrypoint="": Overwrite the default entrypoint set by the image + -w, --workdir="": Working directory inside the container + --lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" + --sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) + --expose=[]: Expose a port from the container without publishing it to your host + --link="": Add link to another container (name:alias) + --name="": Assign the specified name to the container. 
If no name is specific docker will generate a random name + -P, --publish-all=false: Publish all exposed ports to the host interfaces The ``docker run`` command first ``creates`` a writeable container layer over the specified image, and then ``starts`` it using the specified command. That @@ -1042,7 +1095,7 @@ Examples: .. code-block:: bash - $ sudo docker run -cidfile /tmp/docker_test.cid ubuntu echo "test" + $ sudo docker run --cidfile /tmp/docker_test.cid ubuntu echo "test" This will create a container and print ``test`` to the console. The ``cidfile`` flag makes Docker attempt to create a new file and write the @@ -1051,7 +1104,7 @@ error. Docker will close this file when ``docker run`` exits. .. code-block:: bash - $ sudo docker run -t -i -rm ubuntu bash + $ sudo docker run -t -i --rm ubuntu bash root@bc338942ef20:/# mount -t tmpfs none /mnt mount: permission denied @@ -1063,7 +1116,7 @@ allow it to run: .. code-block:: bash - $ sudo docker run -privileged ubuntu bash + $ sudo docker run --privileged ubuntu bash root@50e3f57e16e6:/# mount -t tmpfs none /mnt root@50e3f57e16e6:/# df -h Filesystem Size Used Avail Use% Mounted on @@ -1096,7 +1149,24 @@ using the container, but inside the current working directory. .. code-block:: bash - $ sudo docker run -p 127.0.0.1:80:8080 ubuntu bash + $ sudo docker run -v /doesnt/exist:/foo -w /foo -i -t ubuntu bash + +When the host directory of a bind-mounted volume doesn't exist, Docker +will automatically create this directory on the host for you. In the +example above, Docker will create the ``/doesnt/exist`` folder before +starting your container. + +.. code-block:: bash + + $ sudo docker run -t -i -v /var/run/docker.sock:/var/run/docker.sock -v ./static-docker:/usr/bin/docker busybox sh + +By bind-mounting the docker unix socket and statically linked docker binary +(such as that provided by https://get.docker.io), you give the container +the full access to create and manipulate the host's docker daemon. + +.. 
code-block:: bash + + $ sudo docker run -p 127.0.0.1:80:8080 ubuntu bash This binds port ``8080`` of the container to port ``80`` on ``127.0.0.1`` of the host machine. :ref:`port_redirection` explains in detail how to manipulate ports @@ -1104,7 +1174,7 @@ in Docker. .. code-block:: bash - $ sudo docker run -expose 80 ubuntu bash + $ sudo docker run --expose 80 ubuntu bash This exposes port ``80`` of the container for use within a link without publishing the port to the host system's interfaces. :ref:`port_redirection` @@ -1112,28 +1182,28 @@ explains in detail how to manipulate ports in Docker. .. code-block:: bash - $ sudo docker run -name console -t -i ubuntu bash + $ sudo docker run --name console -t -i ubuntu bash This will create and run a new container with the container name being ``console``. .. code-block:: bash - $ sudo docker run -link /redis:redis -name console ubuntu bash + $ sudo docker run --link /redis:redis --name console ubuntu bash -The ``-link`` flag will link the container named ``/redis`` into the +The ``--link`` flag will link the container named ``/redis`` into the newly created container with the alias ``redis``. The new container can access the network and environment of the redis container via -environment variables. The ``-name`` flag will assign the name ``console`` +environment variables. The ``--name`` flag will assign the name ``console`` to the newly created container. .. code-block:: bash - $ sudo docker run -volumes-from 777f7dc92da7,ba8c0c54f0f2:ro -i -t ubuntu pwd + $ sudo docker run --volumes-from 777f7dc92da7,ba8c0c54f0f2:ro -i -t ubuntu pwd -The ``-volumes-from`` flag mounts all the defined volumes from the +The ``--volumes-from`` flag mounts all the defined volumes from the referenced containers. Containers can be specified by a comma seperated -list or by repetitions of the ``-volumes-from`` argument. The container +list or by repetitions of the ``--volumes-from`` argument. 
The container ID may be optionally suffixed with ``:ro`` or ``:rw`` to mount the volumes in read-only or read-write mode, respectively. By default, the volumes are mounted in the same mode (read write or read only) as the reference container. @@ -1143,11 +1213,11 @@ A complete example .. code-block:: bash - $ sudo docker run -d -name static static-web-files sh - $ sudo docker run -d -expose=8098 -name riak riakserver - $ sudo docker run -d -m 100m -e DEVELOPMENT=1 -e BRANCH=example-code -v $(pwd):/app/bin:ro -name app appserver - $ sudo docker run -d -p 1443:443 -dns=dns.dev.org -v /var/log/httpd -volumes-from static -link riak -link app -h www.sven.dev.org -name web webserver - $ sudo docker run -t -i -rm -volumes-from web -w /var/log/httpd busybox tail -f access.log + $ sudo docker run -d --name static static-web-files sh + $ sudo docker run -d --expose=8098 --name riak riakserver + $ sudo docker run -d -m 100m -e DEVELOPMENT=1 -e BRANCH=example-code -v $(pwd):/app/bin:ro --name app appserver + $ sudo docker run -d -p 1443:443 --dns=dns.dev.org -v /var/log/httpd --volumes-from static --link riak --link app -h www.sven.dev.org --name web webserver + $ sudo docker run -t -i --rm --volumes-from web -w /var/log/httpd busybox tail -f access.log This example shows 5 containers that might be set up to test a web application change: @@ -1181,9 +1251,9 @@ This example shows 5 containers that might be set up to test a web application c Search the docker index for images - -notrunc=false: Don't truncate output - -stars=0: Only displays with at least xxx stars - -trusted=false: Only show trusted builds + --no-trunc=false: Don't truncate output + -s, --stars=0: Only displays with at least xxx stars + -t, --trusted=false: Only show trusted builds .. 
_cli_start: @@ -1196,8 +1266,8 @@ This example shows 5 containers that might be set up to test a web application c Start a stopped container - -a=false: Attach container's stdout/stderr and forward all signals to the process - -i=false: Attach container's stdin + -a, --attach=false: Attach container's stdout/stderr and forward all signals to the process + -i, --interactive=false: Attach container's stdin .. _cli_stop: @@ -1210,7 +1280,7 @@ This example shows 5 containers that might be set up to test a web application c Stop a running container (Send SIGTERM, and then SIGKILL after grace period) - -t=10: Number of seconds to wait for the container to stop before killing it. + -t, --time=10: Number of seconds to wait for the container to stop before killing it. The main process inside the container will receive SIGTERM, and after a grace period, SIGKILL @@ -1225,7 +1295,7 @@ The main process inside the container will receive SIGTERM, and after a grace pe Tag an image into a repository - -f=false: Force + -f, --force=false: Force .. _cli_top: diff --git a/docs/sources/commandline/docker_images.gif b/docs/sources/reference/commandline/docker_images.gif similarity index 100% rename from docs/sources/commandline/docker_images.gif rename to docs/sources/reference/commandline/docker_images.gif diff --git a/docs/sources/commandline/index.rst b/docs/sources/reference/commandline/index.rst similarity index 100% rename from docs/sources/commandline/index.rst rename to docs/sources/reference/commandline/index.rst diff --git a/docs/sources/reference/index.rst b/docs/sources/reference/index.rst new file mode 100644 index 0000000000..d35a19b93d --- /dev/null +++ b/docs/sources/reference/index.rst @@ -0,0 +1,18 @@ +:title: Docker Reference Manual +:description: References +:keywords: docker, references, api, command line, commands + +.. _references: + +Reference Manual +================ + +Contents: + +.. 
toctree:: + :maxdepth: 1 + + commandline/index + builder + run + api/index diff --git a/docs/sources/reference/run.rst b/docs/sources/reference/run.rst new file mode 100644 index 0000000000..307edace00 --- /dev/null +++ b/docs/sources/reference/run.rst @@ -0,0 +1,419 @@ +:title: Docker Run Reference +:description: Configure containers at runtime +:keywords: docker, run, configure, runtime + +.. _run_docker: + +==================== +Docker Run Reference +==================== + +**Docker runs processes in isolated containers**. When an operator +executes ``docker run``, she starts a process with its own file +system, its own networking, and its own isolated process tree. The +:ref:`image_def` which starts the process may define defaults related +to the binary to run, the networking to expose, and more, but ``docker +run`` gives final control to the operator who starts the container +from the image. That's the main reason :ref:`cli_run` has more options +than any other ``docker`` command. + +Every one of the :ref:`example_list` shows running containers, and so +here we try to give more in-depth guidance. + +.. contents:: Table of Contents + :depth: 2 + +.. _run_running: + +General Form +============ + +As you've seen in the :ref:`example_list`, the basic `run` command +takes this form:: + + docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...] + +To learn how to interpret the types of ``[OPTIONS]``, see +:ref:`cli_options`. + +The list of ``[OPTIONS]`` breaks down into two groups: + +1. Settings exclusive to operators, including: + + * Detached or Foreground running, + * Container Identification, + * Network settings, and + * Runtime Constraints on CPU and Memory + * Privileges and LXC Configuration + +2. Setting shared between operators and developers, where operators + can override defaults developers set in images at build time. 
+ +Together, the ``docker run [OPTIONS]`` give complete control over +runtime behavior to the operator, allowing them to override all +defaults set by the developer during ``docker build`` and nearly all +the defaults set by the Docker runtime itself. + +Operator Exclusive Options +========================== + +Only the operator (the person executing ``docker run``) can set the +following options. + +.. contents:: + :local: + +Detached vs Foreground +---------------------- + +When starting a Docker container, you must first decide if you want to +run the container in the background in a "detached" mode or in the +default foreground mode:: + + -d=false: Detached mode: Run container in the background, print new container id + +Detached (-d) +............. + +In detached mode (``-d=true`` or just ``-d``), all I/O should be done +through network connections or shared volumes because the container is +no longer listening to the commandline where you executed ``docker +run``. You can reattach to a detached container with ``docker`` +:ref:`cli_attach`. If you choose to run a container in the detached +mode, then you cannot use the ``-rm`` option. + +Foreground +.......... + +In foreground mode (the default when ``-d`` is not specified), +``docker run`` can start the process in the container and attach the +console to the process's standard input, output, and standard +error. It can even pretend to be a TTY (this is what most commandline +executables expect) and pass along signals. All of that is +configurable:: + + -a=[] : Attach to ``stdin``, ``stdout`` and/or ``stderr`` + -t=false : Allocate a pseudo-tty + -sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) + -i=false : Keep STDIN open even if not attached + +If you do not specify ``-a`` then Docker will `attach everything +(stdin,stdout,stderr) +`_. 
You +can specify to which of the three standard streams (``stdin``, ``stdout``, +``stderr``) you'd like to connect instead, as in:: + + docker run -a stdin -a stdout -i -t ubuntu /bin/bash + +For interactive processes (like a shell) you will typically want a tty +as well as persistent standard input (``stdin``), so you'll use ``-i +-t`` together in most interactive cases. + +Container Identification +------------------------ + +Name (-name) +............ + +The operator can identify a container in three ways: + +* UUID long identifier ("f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778") +* UUID short identifier ("f78375b1c487") +* Name ("evil_ptolemy") + +The UUID identifiers come from the Docker daemon, and if you do not +assign a name to the container with ``-name`` then the daemon will +also generate a random string name too. The name can become a handy +way to add meaning to a container since you can use this name when +defining :ref:`links ` (or any other place +you need to identify a container). This works for both background and +foreground Docker containers. + +PID Equivalent +.............. + +And finally, to help with automation, you can have Docker write the +container ID out to a file of your choosing. This is similar to how +some programs might write out their process ID to a file (you've seen +them as PID files):: + + -cidfile="": Write the container ID to the file + +Network Settings +---------------- + +:: + -n=true : Enable networking for this container + -dns=[] : Set custom dns servers for the container + +By default, all containers have networking enabled and they can make +any outgoing connections. The operator can completely disable +networking with ``docker run -n`` which disables all incoming and outgoing +networking. In cases like this, you would perform I/O through files or +STDIN/STDOUT only. + +Your container will use the same DNS servers as the host by default, +but you can override this with ``-dns``. 
+ +Clean Up (-rm) +-------------- + +By default a container's file system persists even after the container +exits. This makes debugging a lot easier (since you can inspect the +final state) and you retain all your data by default. But if you are +running short-term **foreground** processes, these container file +systems can really pile up. If instead you'd like Docker to +**automatically clean up the container and remove the file system when +the container exits**, you can add the ``-rm`` flag:: + + -rm=false: Automatically remove the container when it exits (incompatible with -d) + + +Runtime Constraints on CPU and Memory +------------------------------------- + +The operator can also adjust the performance parameters of the container:: + + -m="": Memory limit (format: , where unit = b, k, m or g) + -c=0 : CPU shares (relative weight) + +The operator can constrain the memory available to a container easily +with ``docker run -m``. If the host supports swap memory, then the +``-m`` memory setting can be larger than physical RAM. + +Similarly the operator can increase the priority of this container +with the ``-c`` option. By default, all containers run at the same +priority and get the same proportion of CPU cycles, but you can tell +the kernel to give more shares of CPU time to one or more containers +when you start them via Docker. + +Runtime Privilege and LXC Configuration +--------------------------------------- + +:: + + -privileged=false: Give extended privileges to this container + -lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" + +By default, Docker containers are "unprivileged" and cannot, for +example, run a Docker daemon inside a Docker container. This is +because by default a container is not allowed to access any devices, +but a "privileged" container is given access to all devices (see +lxc-template.go_ and documentation on `cgroups devices +`_). 
+ +When the operator executes ``docker run -privileged``, Docker will +enable to access to all devices on the host as well as set some +configuration in AppArmor to allow the container nearly all the same +access to the host as processes running outside containers on the +host. Additional information about running with ``-privileged`` is +available on the `Docker Blog +`_. + +An operator can also specify LXC options using one or more +``-lxc-conf`` parameters. These can be new parameters or override +existing parameters from the lxc-template.go_. Note that in the +future, a given host's Docker daemon may not use LXC, so this is an +implementation-specific configuration meant for operators already +familiar with using LXC directly. + +.. _lxc-template.go: https://github.com/dotcloud/docker/blob/master/execdriver/lxc/lxc_template.go + + +Overriding ``Dockerfile`` Image Defaults +======================================== + +When a developer builds an image from a :ref:`Dockerfile +` or when she commits it, the developer can set a +number of default parameters that take effect when the image starts up +as a container. + +Four of the ``Dockerfile`` commands cannot be overridden at runtime: +``FROM, MAINTAINER, RUN``, and ``ADD``. Everything else has a +corresponding override in ``docker run``. We'll go through what the +developer might have set in each ``Dockerfile`` instruction and how the +operator can override that setting. + +.. contents:: + :local: + +CMD (Default Command or Options) +-------------------------------- + +Recall the optional ``COMMAND`` in the Docker commandline:: + + docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...] + +This command is optional because the person who created the ``IMAGE`` +may have already provided a default ``COMMAND`` using the ``Dockerfile`` +``CMD``. As the operator (the person running a container from the +image), you can override that ``CMD`` just by specifying a new +``COMMAND``. 
+ +If the image also specifies an ``ENTRYPOINT`` then the ``CMD`` or +``COMMAND`` get appended as arguments to the ``ENTRYPOINT``. + + +ENTRYPOINT (Default Command to Execute at Runtime) +-------------------------------------------------- + +:: + + -entrypoint="": Overwrite the default entrypoint set by the image + +The ENTRYPOINT of an image is similar to a ``COMMAND`` because it +specifies what executable to run when the container starts, but it is +(purposely) more difficult to override. The ``ENTRYPOINT`` gives a +container its default nature or behavior, so that when you set an +``ENTRYPOINT`` you can run the container *as if it were that binary*, +complete with default options, and you can pass in more options via +the ``COMMAND``. But, sometimes an operator may want to run something else +inside the container, so you can override the default ``ENTRYPOINT`` at +runtime by using a string to specify the new ``ENTRYPOINT``. Here is an +example of how to run a shell in a container that has been set up to +automatically run something else (like ``/usr/bin/redis-server``):: + + docker run -i -t -entrypoint /bin/bash example/redis + +or two examples of how to pass more parameters to that ENTRYPOINT:: + + docker run -i -t -entrypoint /bin/bash example/redis -c ls -l + docker run -i -t -entrypoint /usr/bin/redis-cli example/redis --help + + +EXPOSE (Incoming Ports) +----------------------- + +The ``Dockerfile`` doesn't give much control over networking, only +providing the ``EXPOSE`` instruction to give a hint to the operator +about what incoming ports might provide services. 
The following +options work with or override the ``Dockerfile``'s exposed defaults:: + + -expose=[]: Expose a port from the container + without publishing it to your host + -P=false : Publish all exposed ports to the host interfaces + -p=[] : Publish a container's port to the host (format: + ip:hostPort:containerPort | ip::containerPort | + hostPort:containerPort) + (use 'docker port' to see the actual mapping) + -link="" : Add link to another container (name:alias) + +As mentioned previously, ``EXPOSE`` (and ``-expose``) make a port +available **in** a container for incoming connections. The port number +on the inside of the container (where the service listens) does not +need to be the same number as the port exposed on the outside of the +container (where clients connect), so inside the container you might +have an HTTP service listening on port 80 (and so you ``EXPOSE 80`` in +the ``Dockerfile``), but outside the container the port might be 42800. + +To help a new client container reach the server container's internal +port operator ``-expose``'d by the operator or ``EXPOSE``'d by the +developer, the operator has three choices: start the server container +with ``-P`` or ``-p,`` or start the client container with ``-link``. + +If the operator uses ``-P`` or ``-p`` then Docker will make the +exposed port accessible on the host and the ports will be available to +any client that can reach the host. To find the map between the host +ports and the exposed ports, use ``docker port``) + +If the operator uses ``-link`` when starting the new client container, +then the client container can access the exposed port via a private +networking interface. Docker will set some environment variables in +the client container to help indicate which interface and port to use. 
+ +ENV (Environment Variables) +--------------------------- + +The operator can **set any environment variable** in the container by +using one or more ``-e`` flags, even overriding those already defined by the +developer with a Dockerfile ``ENV``:: + + $ docker run -e "deep=purple" -rm ubuntu /bin/bash -c export + declare -x HOME="/" + declare -x HOSTNAME="85bc26a0e200" + declare -x OLDPWD + declare -x PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + declare -x PWD="/" + declare -x SHLVL="1" + declare -x container="lxc" + declare -x deep="purple" + +Similarly the operator can set the **hostname** with ``-h``. + +``-link name:alias`` also sets environment variables, using the +*alias* string to define environment variables within the container +that give the IP and PORT information for connecting to the service +container. Let's imagine we have a container running Redis:: + + # Start the service container, named redis-name + $ docker run -d -name redis-name dockerfiles/redis + 4241164edf6f5aca5b0e9e4c9eccd899b0b8080c64c0cd26efe02166c73208f3 + + # The redis-name container exposed port 6379 + $ docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 4241164edf6f dockerfiles/redis:latest /redis-stable/src/re 5 seconds ago Up 4 seconds 6379/tcp redis-name + + # Note that there are no public ports exposed since we didn't use -p or -P + $ docker port 4241164edf6f 6379 + 2014/01/25 00:55:38 Error: No public port '6379' published for 4241164edf6f + + +Yet we can get information about the Redis container's exposed ports +with ``-link``. Choose an alias that will form a valid environment +variable! 
+ +:: + + $ docker run -rm -link redis-name:redis_alias -entrypoint /bin/bash dockerfiles/redis -c export + declare -x HOME="/" + declare -x HOSTNAME="acda7f7b1cdc" + declare -x OLDPWD + declare -x PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + declare -x PWD="/" + declare -x REDIS_ALIAS_NAME="/distracted_wright/redis" + declare -x REDIS_ALIAS_PORT="tcp://172.17.0.32:6379" + declare -x REDIS_ALIAS_PORT_6379_TCP="tcp://172.17.0.32:6379" + declare -x REDIS_ALIAS_PORT_6379_TCP_ADDR="172.17.0.32" + declare -x REDIS_ALIAS_PORT_6379_TCP_PORT="6379" + declare -x REDIS_ALIAS_PORT_6379_TCP_PROTO="tcp" + declare -x SHLVL="1" + declare -x container="lxc" + +And we can use that information to connect from another container as a client:: + + $ docker run -i -t -rm -link redis-name:redis_alias -entrypoint /bin/bash dockerfiles/redis -c '/redis-stable/src/redis-cli -h $REDIS_ALIAS_PORT_6379_TCP_ADDR -p $REDIS_ALIAS_PORT_6379_TCP_PORT' + 172.17.0.32:6379> + +VOLUME (Shared Filesystems) +--------------------------- + +:: + + -v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. + If "container-dir" is missing, then docker creates a new volume. + -volumes-from="": Mount all volumes from the given container(s) + +The volumes commands are complex enough to have their own +documentation in section :ref:`volume_def`. A developer can define one +or more ``VOLUME``\s associated with an image, but only the operator can +give access from one container to another (or from a container to a +volume mounted on the host). + +USER +---- + +The default user within a container is ``root`` (id = 0), but if the +developer created additional users, those are accessible too. 
The +developer can set a default user to run the first process with the +``Dockerfile USER`` command, but the operator can override it :: + + -u="": Username or UID + +WORKDIR +------- + +The default working directory for running binaries within a container is the root directory (``/``), but the developer can set a different default with the ``Dockerfile WORKDIR`` command. The operator can override this with:: + + -w="": Working directory inside the container + diff --git a/docs/sources/toctree.rst b/docs/sources/toctree.rst index c7c368a3f9..d1f98b6a5d 100644 --- a/docs/sources/toctree.rst +++ b/docs/sources/toctree.rst @@ -14,8 +14,9 @@ This documentation has the following resources: installation/index use/index examples/index - commandline/index + reference/index contributing/index - api/index terms/index + articles/index faq + diff --git a/docs/sources/use/basics.rst b/docs/sources/use/basics.rst index 7d3d8e42af..6bd1f0b7a0 100644 --- a/docs/sources/use/basics.rst +++ b/docs/sources/use/basics.rst @@ -1,26 +1,27 @@ -:title: Learn Basic Commands +:title: First steps with Docker :description: Common usage and commands :keywords: Examples, Usage, basic commands, docker, documentation, examples -Learn Basic Commands -==================== +First steps with Docker +======================= -Starting Docker ---------------- +Check your Docker install +------------------------- -If you have used one of the quick install paths, Docker may have been -installed with upstart, Ubuntu's system for starting processes at boot -time. You should be able to run ``sudo docker help`` and get output. - -If you get ``docker: command not found`` or something like -``/var/lib/docker/repositories: permission denied`` you will need to -specify the path to it and manually start it. +This guide assumes you have a working installation of Docker. To check +your Docker install, run the following command: .. 
code-block:: bash - # Run docker in daemon mode - sudo /docker -d & + # Check that you have a working install + docker info + +If you get ``docker: command not found`` or something like +``/var/lib/docker/repositories: permission denied`` you may have an incomplete +docker installation or insufficient privileges to access Docker on your machine. + +Please refer to :ref:`installation_list` for installation instructions. Download a pre-built image -------------------------- @@ -51,42 +52,6 @@ Running an interactive shell # use the escape sequence Ctrl-p + Ctrl-q sudo docker run -i -t ubuntu /bin/bash -.. _dockergroup: - -The sudo command and the docker Group -------------------------------------- - -The ``docker`` daemon always runs as the root user, and since Docker version -0.5.2, the ``docker`` daemon binds to a Unix socket instead of a TCP port. By -default that Unix socket is owned by the user *root*, and so, by default, you -can access it with ``sudo``. - -Starting in version 0.5.3, if you (or your Docker installer) create a -Unix group called *docker* and add users to it, then the ``docker`` -daemon will make the ownership of the Unix socket read/writable by the -*docker* group when the daemon starts. The ``docker`` daemon must -always run as the root user, but if you run the ``docker`` client as a user in -the *docker* group then you don't need to add ``sudo`` to all the -client commands. - -.. warning:: The *docker* group is root-equivalent. - -**Example:** - -.. code-block:: bash - - # Add the docker group if it doesn't already exist. - sudo groupadd docker - - # Add the connected user "${USER}" to the docker group. - # Change the user name to match your preferred user. - # You may have to logout and log back in again for - # this to take effect. - sudo gpasswd -a ${USER} docker - - # Restart the docker daemon. - sudo service docker restart - .. 
_bind_docker: Bind Docker to another host/port or a Unix socket diff --git a/docs/sources/use/index.rst b/docs/sources/use/index.rst index 7bcd1dd81e..c1b7691cca 100644 --- a/docs/sources/use/index.rst +++ b/docs/sources/use/index.rst @@ -13,9 +13,7 @@ Contents: :maxdepth: 1 basics - builder workingwithrepository - baseimages port_redirection networking host_integration diff --git a/docs/sources/use/networking.rst b/docs/sources/use/networking.rst index 4e75fbc20d..431158cc39 100644 --- a/docs/sources/use/networking.rst +++ b/docs/sources/use/networking.rst @@ -8,7 +8,7 @@ Configure Networking Docker uses Linux bridge capabilities to provide network connectivity to containers. The ``docker0`` bridge interface is managed by Docker -itself for this purpose. Thus, when the Docker daemon starts it : +for this purpose. When the Docker daemon starts it : - creates the ``docker0`` bridge if not present - searches for an IP address range which doesn't overlap with an existing route @@ -31,11 +31,11 @@ itself for this purpose. Thus, when the Docker daemon starts it : At runtime, a :ref:`specific kind of virtual -interface` is given to each containers which is then -bonded to the ``docker0`` bridge. Each containers also receives a +interface` is given to each container which is then +bonded to the ``docker0`` bridge. Each container also receives a dedicated IP address from the same range as ``docker0``. The -``docker0`` IP address is then used as the default gateway for the -containers. +``docker0`` IP address is used as the default gateway for the +container. .. code-block:: bash @@ -55,8 +55,8 @@ which is dedicated to the 52f811c5d3d6 container. How to use a specific IP address range --------------------------------------- -Docker will try hard to find an IP range which is not used by the -host. Even if it works for most cases, it's not bullet-proof and +Docker will try hard to find an IP range that is not used by the +host. 
Even though it works for most cases, it's not bullet-proof and sometimes you need to have more control over the IP addressing scheme. For this purpose, Docker allows you to manage the ``docker0`` bridge @@ -118,25 +118,25 @@ In this scenario: Container intercommunication ------------------------------- -Containers can communicate with each other according to the ``icc`` -parameter value of the Docker daemon. +The value of the Docker daemon's ``icc`` parameter determines whether +containers can communicate with each other over the bridge network. - The default, ``-icc=true`` allows containers to communicate with each other. - ``-icc=false`` means containers are isolated from each other. -Under the hood, ``iptables`` is used by Docker to either accept or +Docker uses ``iptables`` under the hood to either accept or drop communication between containers. .. _vethxxxx-device: -What's about the vethXXXX device? +What is the vethXXXX device? ----------------------------------- Well. Things get complicated here. The ``vethXXXX`` interface is the host side of a point-to-point link -between the host and the corresponding container, the other side of -the link being materialized by the container's ``eth0`` +between the host and the corresponding container; the other side of +the link is the container's ``eth0`` interface. This pair (host ``vethXXX`` and container ``eth0``) are connected like a tube. Everything that comes in one side will come out the other side. diff --git a/docs/sources/use/puppet.rst b/docs/sources/use/puppet.rst index 94de76c30b..4183c14f18 100644 --- a/docs/sources/use/puppet.rst +++ b/docs/sources/use/puppet.rst @@ -39,7 +39,7 @@ download the source. Usage ----- -The module provides a puppet class for installing docker and two defined types +The module provides a puppet class for installing Docker and two defined types for managing images and containers. 
Installation @@ -52,7 +52,7 @@ Installation Images ~~~~~~ -The next step is probably to install a docker image, for this we have a +The next step is probably to install a Docker image. For this, we have a defined type which can be used like so: .. code-block:: ruby @@ -65,10 +65,11 @@ This is equivalent to running: docker pull ubuntu -Note that it will only if the image of that name does not already exist. -This is downloading a large binary so on first run can take a while. -For that reason this define turns off the default 5 minute timeout -for exec. Note that you can also remove images you no longer need with: +Note that it will only be downloaded if an image of that name does +not already exist. This is downloading a large binary so on first +run can take a while. For that reason this define turns off the +default 5 minute timeout for the exec type. Note that you can also +remove images you no longer need with: .. code-block:: ruby @@ -79,8 +80,8 @@ for exec. Note that you can also remove images you no longer need with: Containers ~~~~~~~~~~ -Now you have an image you can run commands within a container managed by -docker. +Now you have an image where you can run commands within a container +managed by Docker. .. code-block:: ruby @@ -103,7 +104,7 @@ Run also contains a number of optional parameters: image => 'ubuntu', command => '/bin/sh -c "while true; do echo hello world; sleep 1; done"', ports => ['4444', '4555'], - volumes => ['/var/lib/counchdb', '/var/log'], + volumes => ['/var/lib/couchdb', '/var/log'], volumes_from => '6446ea52fbc9', memory_limit => 10485760, # bytes username => 'example', diff --git a/docs/sources/use/working_with_volumes.rst b/docs/sources/use/working_with_volumes.rst index 86576b05e4..34728cbd3d 100644 --- a/docs/sources/use/working_with_volumes.rst +++ b/docs/sources/use/working_with_volumes.rst @@ -9,7 +9,7 @@ Share Directories via Volumes .. 
versionadded:: v0.3.0 Data volumes have been available since version 1 of the - :doc:`../api/docker_remote_api` + :doc:`../reference/api/docker_remote_api` A *data volume* is a specially-designated directory within one or more containers that bypasses the :ref:`ufs_def` to provide several useful @@ -73,7 +73,7 @@ data volumes from multiple containers. Interestingly, you can mount the volumes that came from the ``DATA`` container in yet another container via the ``client1`` middleman container:: - $ docker run -t -i -rm -volumes-from client1 ubuntu -name client2 bash + $ docker run -t -i -rm -volumes-from client1 -name client2 ubuntu bash This allows you to abstract the actual data source from users of that data, similar to :ref:`ambassador_pattern_linking `. @@ -89,11 +89,15 @@ Mount a Host Directory as a Container Volume: :: -v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. - If "host-dir" is missing, then docker creates a new volume. -This is not available from a Dockerfile as it makes the built image less portable -or shareable. [host-dir] volumes are 100% host dependent and will break on any -other machine. +If ``host-dir`` is missing from the command, then docker creates a new volume. +If ``host-dir`` is present but points to a non-existent directory on the host, +Docker will automatically create this directory and use it as the source of the +bind-mount. + +Note that this is not available from a Dockerfile, for reasons of +portability and sharing. The ``host-dir`` volumes are entirely host-dependent and +might not work on other machines. 
For example:: diff --git a/engine/engine.go b/engine/engine.go index ad830ce2e4..ec880b9c85 100644 --- a/engine/engine.go +++ b/engine/engine.go @@ -46,7 +46,6 @@ func (eng *Engine) Root() string { } func (eng *Engine) Register(name string, handler Handler) error { - eng.Logf("Register(%s) (handlers=%v)", name, eng.handlers) _, exists := eng.handlers[name] if exists { return fmt.Errorf("Can't overwrite handler for command %s", name) @@ -138,6 +137,9 @@ func (eng *Engine) Job(name string, args ...string) *Job { } func (eng *Engine) Logf(format string, args ...interface{}) (n int, err error) { - prefixedFormat := fmt.Sprintf("[%s] %s\n", eng, strings.TrimRight(format, "\n")) - return fmt.Fprintf(eng.Stderr, prefixedFormat, args...) + if os.Getenv("TEST") == "" { + prefixedFormat := fmt.Sprintf("[%s] %s\n", eng, strings.TrimRight(format, "\n")) + return fmt.Fprintf(eng.Stderr, prefixedFormat, args...) + } + return 0, nil } diff --git a/engine/env.go b/engine/env.go index a65c8438d2..ce8c34bb24 100644 --- a/engine/env.go +++ b/engine/env.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "io" + "sort" "strconv" "strings" ) @@ -59,7 +60,7 @@ func (env *Env) GetInt64(key string) int64 { s := strings.Trim(env.Get(key), " \t") val, err := strconv.ParseInt(s, 10, 64) if err != nil { - return -1 + return 0 } return val } @@ -85,6 +86,28 @@ func (env *Env) GetList(key string) []string { return l } +func (env *Env) GetSubEnv(key string) *Env { + sval := env.Get(key) + if sval == "" { + return nil + } + buf := bytes.NewBufferString(sval) + var sub Env + if err := sub.Decode(buf); err != nil { + return nil + } + return &sub +} + +func (env *Env) SetSubEnv(key string, sub *Env) error { + var buf bytes.Buffer + if err := sub.Encode(&buf); err != nil { + return err + } + env.Set(key, string(buf.Bytes())) + return nil +} + func (env *Env) GetJson(key string, iface interface{}) error { sval := env.Get(key) if sval == "" { @@ -190,24 +213,6 @@ func (env *Env) WriteTo(dst io.Writer) (n 
int64, err error) { return 0, env.Encode(dst) } -func (env *Env) Export(dst interface{}) (err error) { - defer func() { - if err != nil { - err = fmt.Errorf("ExportEnv %s", err) - } - }() - var buf bytes.Buffer - // step 1: encode/marshal the env to an intermediary json representation - if err := env.Encode(&buf); err != nil { - return err - } - // step 2: decode/unmarshal the intermediary json into the destination object - if err := json.NewDecoder(&buf).Decode(dst); err != nil { - return err - } - return nil -} - func (env *Env) Import(src interface{}) (err error) { defer func() { if err != nil { @@ -232,3 +237,135 @@ func (env *Env) Map() map[string]string { } return m } + +type Table struct { + Data []*Env + sortKey string + Chan chan *Env +} + +func NewTable(sortKey string, sizeHint int) *Table { + return &Table{ + make([]*Env, 0, sizeHint), + sortKey, + make(chan *Env), + } +} + +func (t *Table) SetKey(sortKey string) { + t.sortKey = sortKey +} + +func (t *Table) Add(env *Env) { + t.Data = append(t.Data, env) +} + +func (t *Table) Len() int { + return len(t.Data) +} + +func (t *Table) Less(a, b int) bool { + return t.lessBy(a, b, t.sortKey) +} + +func (t *Table) lessBy(a, b int, by string) bool { + keyA := t.Data[a].Get(by) + keyB := t.Data[b].Get(by) + intA, errA := strconv.ParseInt(keyA, 10, 64) + intB, errB := strconv.ParseInt(keyB, 10, 64) + if errA == nil && errB == nil { + return intA < intB + } + return keyA < keyB +} + +func (t *Table) Swap(a, b int) { + tmp := t.Data[a] + t.Data[a] = t.Data[b] + t.Data[b] = tmp +} + +func (t *Table) Sort() { + sort.Sort(t) +} + +func (t *Table) ReverseSort() { + sort.Sort(sort.Reverse(t)) +} + +func (t *Table) WriteListTo(dst io.Writer) (n int64, err error) { + if _, err := dst.Write([]byte{'['}); err != nil { + return -1, err + } + n = 1 + for i, env := range t.Data { + bytes, err := env.WriteTo(dst) + if err != nil { + return -1, err + } + n += bytes + if i != len(t.Data)-1 { + if _, err := dst.Write([]byte{','}); 
err != nil { + return -1, err + } + n += 1 + } + } + if _, err := dst.Write([]byte{']'}); err != nil { + return -1, err + } + return n + 1, nil +} + +func (t *Table) ToListString() (string, error) { + buffer := bytes.NewBuffer(nil) + if _, err := t.WriteListTo(buffer); err != nil { + return "", err + } + return buffer.String(), nil +} + +func (t *Table) WriteTo(dst io.Writer) (n int64, err error) { + for _, env := range t.Data { + bytes, err := env.WriteTo(dst) + if err != nil { + return -1, err + } + n += bytes + } + return n, nil +} + +func (t *Table) ReadListFrom(src []byte) (n int64, err error) { + var array []interface{} + + if err := json.Unmarshal(src, &array); err != nil { + return -1, err + } + + for _, item := range array { + if m, ok := item.(map[string]interface{}); ok { + env := &Env{} + for key, value := range m { + env.SetAuto(key, value) + } + t.Add(env) + } + } + + return int64(len(src)), nil +} + +func (t *Table) ReadFrom(src io.Reader) (n int64, err error) { + decoder := NewDecoder(src) + for { + env, err := decoder.Decode() + if err == io.EOF { + return 0, nil + } else if err != nil { + return -1, err + } + t.Add(env) + } + return 0, nil +} diff --git a/engine/env_test.go b/engine/env_test.go index 24c5992dd0..c7079ff942 100644 --- a/engine/env_test.go +++ b/engine/env_test.go @@ -62,7 +62,7 @@ func TestSetenvInt(t *testing.T) { if val := job.GetenvInt("bar"); val != 42 { t.Fatalf("GetenvInt returns incorrect value: %d", val) } - if val := job.GetenvInt("nonexistent"); val != -1 { + if val := job.GetenvInt("nonexistent"); val != 0 { t.Fatalf("GetenvInt returns incorrect value: %d", val) } } @@ -84,32 +84,6 @@ func TestSetenvList(t *testing.T) { } } -func TestImportEnv(t *testing.T) { - type dummy struct { - DummyInt int - DummyStringArray []string - } - - job := mkJob(t, "dummy") - if err := job.ImportEnv(&dummy{42, []string{"foo", "bar"}}); err != nil { - t.Fatal(err) - } - - dmy := dummy{} - if err := job.ExportEnv(&dmy); err != nil { - 
t.Fatal(err) - } - - if dmy.DummyInt != 42 { - t.Fatalf("Expected 42, got %d", dmy.DummyInt) - } - - if len(dmy.DummyStringArray) != 2 || dmy.DummyStringArray[0] != "foo" || dmy.DummyStringArray[1] != "bar" { - t.Fatalf("Expected {foo, bar}, got %v", dmy.DummyStringArray) - } - -} - func TestEnviron(t *testing.T) { job := mkJob(t, "dummy") job.Setenv("foo", "bar") diff --git a/engine/http.go b/engine/http.go index b115912e2c..c0418bcfb0 100644 --- a/engine/http.go +++ b/engine/http.go @@ -16,8 +16,10 @@ import ( // as the exit status. // func (eng *Engine) ServeHTTP(w http.ResponseWriter, r *http.Request) { - jobName := path.Base(r.URL.Path) - jobArgs, exists := r.URL.Query()["a"] + var ( + jobName = path.Base(r.URL.Path) + jobArgs, exists = r.URL.Query()["a"] + ) if !exists { jobArgs = []string{} } diff --git a/engine/job.go b/engine/job.go index 68b1715d92..1f35ac85ff 100644 --- a/engine/job.go +++ b/engine/job.go @@ -3,6 +3,7 @@ package engine import ( "fmt" "io" + "os" "strings" "time" ) @@ -101,6 +102,10 @@ func (job *Job) String() string { return fmt.Sprintf("%s.%s%s", job.Eng, job.CallString(), job.StatusString()) } +func (job *Job) EnvExists(key string) (value bool) { + return job.env.Exists(key) +} + func (job *Job) Getenv(key string) (value string) { return job.env.Get(key) } @@ -113,6 +118,14 @@ func (job *Job) SetenvBool(key string, value bool) { job.env.SetBool(key, value) } +func (job *Job) GetenvSubEnv(key string) *Env { + return job.env.GetSubEnv(key) +} + +func (job *Job) SetenvSubEnv(key string, value *Env) error { + return job.env.SetSubEnv(key, value) +} + func (job *Job) GetenvInt64(key string) int64 { return job.env.GetInt64(key) } @@ -163,10 +176,6 @@ func (job *Job) EncodeEnv(dst io.Writer) error { return job.env.Encode(dst) } -func (job *Job) ExportEnv(dst interface{}) (err error) { - return job.env.Export(dst) -} - func (job *Job) ImportEnv(src interface{}) (err error) { return job.env.Import(src) } @@ -176,18 +185,23 @@ func (job *Job) 
Environ() map[string]string { } func (job *Job) Logf(format string, args ...interface{}) (n int, err error) { - prefixedFormat := fmt.Sprintf("[%s] %s\n", job, strings.TrimRight(format, "\n")) - return fmt.Fprintf(job.Stderr, prefixedFormat, args...) + if os.Getenv("TEST") == "" { + prefixedFormat := fmt.Sprintf("[%s] %s\n", job, strings.TrimRight(format, "\n")) + return fmt.Fprintf(job.Stderr, prefixedFormat, args...) + } + return 0, nil } func (job *Job) Printf(format string, args ...interface{}) (n int, err error) { return fmt.Fprintf(job.Stdout, format, args...) } -func (job *Job) Errorf(format string, args ...interface{}) (n int, err error) { - return fmt.Fprintf(job.Stderr, format, args...) +func (job *Job) Errorf(format string, args ...interface{}) Status { + fmt.Fprintf(job.Stderr, format, args...) + return StatusErr } -func (job *Job) Error(err error) (int, error) { - return fmt.Fprintf(job.Stderr, "%s", err) +func (job *Job) Error(err error) Status { + fmt.Fprintf(job.Stderr, "%s", err) + return StatusErr } diff --git a/engine/streams.go b/engine/streams.go index 7cd4a60cf7..48f031de8f 100644 --- a/engine/streams.go +++ b/engine/streams.go @@ -5,6 +5,7 @@ import ( "container/ring" "fmt" "io" + "io/ioutil" "sync" ) @@ -12,6 +13,7 @@ type Output struct { sync.Mutex dests []io.Writer tasks sync.WaitGroup + used bool } // NewOutput returns a new Output object with no destinations attached. @@ -20,15 +22,30 @@ func NewOutput() *Output { return &Output{} } +// Return true if something was written on this output +func (o *Output) Used() bool { + o.Lock() + defer o.Unlock() + return o.used +} + // Add attaches a new destination to the Output. Any data subsequently written // to the output will be written to the new destination in addition to all the others. // This method is thread-safe. 
-// FIXME: Add cannot fail -func (o *Output) Add(dst io.Writer) error { - o.Mutex.Lock() - defer o.Mutex.Unlock() +func (o *Output) Add(dst io.Writer) { + o.Lock() + defer o.Unlock() o.dests = append(o.dests, dst) - return nil +} + +// Set closes and remove existing destination and then attaches a new destination to +// the Output. Any data subsequently written to the output will be written to the new +// destination in addition to all the others. This method is thread-safe. +func (o *Output) Set(dst io.Writer) { + o.Close() + o.Lock() + defer o.Unlock() + o.dests = []io.Writer{dst} } // AddPipe creates an in-memory pipe with io.Pipe(), adds its writing end as a destination, @@ -80,8 +97,9 @@ func (o *Output) AddString(dst *string) error { // Write writes the same data to all registered destinations. // This method is thread-safe. func (o *Output) Write(p []byte) (n int, err error) { - o.Mutex.Lock() - defer o.Mutex.Unlock() + o.Lock() + defer o.Unlock() + o.used = true var firstErr error for _, dst := range o.dests { _, err := dst.Write(p) @@ -96,8 +114,8 @@ func (o *Output) Write(p []byte) (n int, err error) { // AddTail and AddString tasks to complete. // The Close method of each destination is called if it exists. func (o *Output) Close() error { - o.Mutex.Lock() - defer o.Mutex.Unlock() + o.Lock() + defer o.Unlock() var firstErr error for _, dst := range o.dests { if closer, ok := dst.(io.WriteCloser); ok { @@ -132,6 +150,17 @@ func (i *Input) Read(p []byte) (n int, err error) { return i.src.Read(p) } +// Closes the src +// Not thread safe on purpose +func (i *Input) Close() error { + if i.src != nil { + if closer, ok := i.src.(io.WriteCloser); ok { + return closer.Close() + } + } + return nil +} + // Add attaches a new source to the input. // Add can only be called once per input. Subsequent calls will // return an error. 
@@ -190,3 +219,39 @@ func (o *Output) AddEnv() (dst *Env, err error) { }() return dst, nil } + +func (o *Output) AddListTable() (dst *Table, err error) { + src, err := o.AddPipe() + if err != nil { + return nil, err + } + dst = NewTable("", 0) + o.tasks.Add(1) + go func() { + defer o.tasks.Done() + content, err := ioutil.ReadAll(src) + if err != nil { + return + } + if _, err := dst.ReadListFrom(content); err != nil { + return + } + }() + return dst, nil +} + +func (o *Output) AddTable() (dst *Table, err error) { + src, err := o.AddPipe() + if err != nil { + return nil, err + } + dst = NewTable("", 0) + o.tasks.Add(1) + go func() { + defer o.tasks.Done() + if _, err := dst.ReadFrom(src); err != nil { + return + } + }() + return dst, nil +} diff --git a/engine/streams_test.go b/engine/streams_test.go index 37720c61ea..30d31d2952 100644 --- a/engine/streams_test.go +++ b/engine/streams_test.go @@ -95,9 +95,7 @@ func TestOutputAddEnv(t *testing.T) { func TestOutputAddClose(t *testing.T) { o := NewOutput() var s sentinelWriteCloser - if err := o.Add(&s); err != nil { - t.Fatal(err) - } + o.Add(&s) if err := o.Close(); err != nil { t.Fatal(err) } diff --git a/engine/table_test.go b/engine/table_test.go new file mode 100644 index 0000000000..3e8e4ff1b3 --- /dev/null +++ b/engine/table_test.go @@ -0,0 +1,28 @@ +package engine + +import ( + "bytes" + "encoding/json" + "testing" +) + +func TestTableWriteTo(t *testing.T) { + table := NewTable("", 0) + e := &Env{} + e.Set("foo", "bar") + table.Add(e) + var buf bytes.Buffer + if _, err := table.WriteTo(&buf); err != nil { + t.Fatal(err) + } + output := make(map[string]string) + if err := json.Unmarshal(buf.Bytes(), &output); err != nil { + t.Fatal(err) + } + if len(output) != 1 { + t.Fatalf("Incorrect output: %v", output) + } + if val, exists := output["foo"]; !exists || val != "bar" { + t.Fatalf("Inccorect output: %v", output) + } +} diff --git a/execdriver/MAINTAINERS b/execdriver/MAINTAINERS new file mode 100644 index 
0000000000..e53d933d47 --- /dev/null +++ b/execdriver/MAINTAINERS @@ -0,0 +1,2 @@ +Michael Crosby (@crosbymichael) +Guillaume Charmes (@creack) diff --git a/execdriver/chroot/driver.go b/execdriver/chroot/driver.go new file mode 100644 index 0000000000..396df87bad --- /dev/null +++ b/execdriver/chroot/driver.go @@ -0,0 +1,101 @@ +package chroot + +import ( + "fmt" + "github.com/dotcloud/docker/execdriver" + "github.com/dotcloud/docker/pkg/mount" + "os" + "os/exec" + "syscall" +) + +const ( + DriverName = "chroot" + Version = "0.1" +) + +func init() { + execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error { + if err := mount.ForceMount("proc", "proc", "proc", ""); err != nil { + return err + } + defer mount.ForceUnmount("proc") + cmd := exec.Command(args.Args[0], args.Args[1:]...) + + cmd.Stderr = os.Stderr + cmd.Stdout = os.Stdout + cmd.Stdin = os.Stdin + + return cmd.Run() + }) +} + +type driver struct { +} + +func NewDriver() (*driver, error) { + return &driver{}, nil +} + +func (d *driver) Run(c *execdriver.Command, startCallback execdriver.StartCallback) (int, error) { + params := []string{ + "chroot", + c.Rootfs, + "/.dockerinit", + "-driver", + DriverName, + } + params = append(params, c.Entrypoint) + params = append(params, c.Arguments...) + + var ( + name = params[0] + arg = params[1:] + ) + aname, err := exec.LookPath(name) + if err != nil { + aname = name + } + c.Path = aname + c.Args = append([]string{name}, arg...) 
+ + if err := c.Start(); err != nil { + return -1, err + } + + if startCallback != nil { + startCallback(c) + } + + err = c.Wait() + return getExitCode(c), err +} + +/// Return the exit code of the process +// if the process has not exited -1 will be returned +func getExitCode(c *execdriver.Command) int { + if c.ProcessState == nil { + return -1 + } + return c.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() +} + +func (d *driver) Kill(p *execdriver.Command, sig int) error { + return p.Process.Kill() +} + +func (d *driver) Restore(c *execdriver.Command) error { + panic("Not Implemented") +} + +func (d *driver) Info(id string) execdriver.Info { + panic("Not implemented") +} + +func (d *driver) Name() string { + return fmt.Sprintf("%s-%s", DriverName, Version) +} + +func (d *driver) GetPidsForContainer(id string) ([]int, error) { + return nil, fmt.Errorf("Not supported") +} diff --git a/execdriver/driver.go b/execdriver/driver.go new file mode 100644 index 0000000000..1ea086075d --- /dev/null +++ b/execdriver/driver.go @@ -0,0 +1,111 @@ +package execdriver + +import ( + "errors" + "os/exec" +) + +var ( + ErrNotRunning = errors.New("Process could not be started") + ErrWaitTimeoutReached = errors.New("Wait timeout reached") + ErrDriverAlreadyRegistered = errors.New("A driver already registered this docker init function") + ErrDriverNotFound = errors.New("The requested docker init has not been found") +) + +var dockerInitFcts map[string]InitFunc + +type ( + StartCallback func(*Command) + InitFunc func(i *InitArgs) error +) + +func RegisterInitFunc(name string, fct InitFunc) error { + if dockerInitFcts == nil { + dockerInitFcts = make(map[string]InitFunc) + } + if _, ok := dockerInitFcts[name]; ok { + return ErrDriverAlreadyRegistered + } + dockerInitFcts[name] = fct + return nil +} + +func GetInitFunc(name string) (InitFunc, error) { + fct, ok := dockerInitFcts[name] + if !ok { + return nil, ErrDriverNotFound + } + return fct, nil +} + +// Args provided to the init 
function for a driver +type InitArgs struct { + User string + Gateway string + Ip string + WorkDir string + Privileged bool + Env []string + Args []string + Mtu int + Driver string +} + +// Driver specific information based on +// processes registered with the driver +type Info interface { + IsRunning() bool +} + +type Driver interface { + Run(c *Command, startCallback StartCallback) (int, error) // Run executes the process and blocks until the process exits and returns the exit code + Kill(c *Command, sig int) error + Restore(c *Command) error // Wait and try to re-attach on an out of process command + Name() string // Driver name + Info(id string) Info // "temporary" hack (until we move state from core to plugins) + GetPidsForContainer(id string) ([]int, error) // Returns a list of pids for the given container. +} + +// Network settings of the container +type Network struct { + Gateway string `json:"gateway"` + IPAddress string `json:"ip"` + Bridge string `json:"bridge"` + IPPrefixLen int `json:"ip_prefix_len"` + Mtu int `json:"mtu"` +} + +type Resources struct { + Memory int64 `json:"memory"` + MemorySwap int64 `json:"memory_swap"` + CpuShares int64 `json:"cpu_shares"` +} + +// Process wrapps an os/exec.Cmd to add more metadata +// TODO: Rename to Command +type Command struct { + exec.Cmd `json:"-"` + + ID string `json:"id"` + Privileged bool `json:"privileged"` + User string `json:"user"` + Rootfs string `json:"rootfs"` // root fs of the container + InitPath string `json:"initpath"` // dockerinit + Entrypoint string `json:"entrypoint"` + Arguments []string `json:"arguments"` + WorkingDir string `json:"working_dir"` + ConfigPath string `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver + Tty bool `json:"tty"` + Network *Network `json:"network"` // if network is nil then networking is disabled + Config []string `json:"config"` // generic values that specific drivers can consume + Resources *Resources 
`json:"resources"` +} + +// Return the pid of the process +// If the process is nil -1 will be returned +func (c *Command) Pid() int { + if c.Process == nil { + return -1 + } + return c.Process.Pid +} diff --git a/execdriver/lxc/driver.go b/execdriver/lxc/driver.go new file mode 100644 index 0000000000..4c3979e718 --- /dev/null +++ b/execdriver/lxc/driver.go @@ -0,0 +1,386 @@ +package lxc + +import ( + "fmt" + "github.com/dotcloud/docker/execdriver" + "github.com/dotcloud/docker/pkg/cgroups" + "github.com/dotcloud/docker/utils" + "io/ioutil" + "log" + "os" + "os/exec" + "path" + "path/filepath" + "strconv" + "strings" + "syscall" + "time" +) + +const DriverName = "lxc" + +func init() { + execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error { + if err := setupHostname(args); err != nil { + return err + } + + if err := setupNetworking(args); err != nil { + return err + } + + if err := setupCapabilities(args); err != nil { + return err + } + + if err := setupWorkingDirectory(args); err != nil { + return err + } + + if err := changeUser(args); err != nil { + return err + } + + path, err := exec.LookPath(args.Args[0]) + if err != nil { + log.Printf("Unable to locate %v", args.Args[0]) + os.Exit(127) + } + if err := syscall.Exec(path, args.Args, os.Environ()); err != nil { + return fmt.Errorf("dockerinit unable to execute %s - %s", path, err) + } + panic("Unreachable") + }) +} + +type driver struct { + root string // root path for the driver to use + apparmor bool + sharedRoot bool +} + +func NewDriver(root string, apparmor bool) (*driver, error) { + // setup unconfined symlink + if err := linkLxcStart(root); err != nil { + return nil, err + } + return &driver{ + apparmor: apparmor, + root: root, + sharedRoot: rootIsShared(), + }, nil +} + +func (d *driver) Name() string { + version := d.version() + return fmt.Sprintf("%s-%s", DriverName, version) +} + +func (d *driver) Run(c *execdriver.Command, startCallback execdriver.StartCallback) (int, 
error) { + configPath, err := d.generateLXCConfig(c) + if err != nil { + return -1, err + } + params := []string{ + "lxc-start", + "-n", c.ID, + "-f", configPath, + "--", + c.InitPath, + "-driver", + DriverName, + } + + if c.Network != nil { + params = append(params, + "-g", c.Network.Gateway, + "-i", fmt.Sprintf("%s/%d", c.Network.IPAddress, c.Network.IPPrefixLen), + "-mtu", strconv.Itoa(c.Network.Mtu), + ) + } + + if c.User != "" { + params = append(params, "-u", c.User) + } + + if c.Privileged { + if d.apparmor { + params[0] = path.Join(d.root, "lxc-start-unconfined") + + } + params = append(params, "-privileged") + } + + if c.WorkingDir != "" { + params = append(params, "-w", c.WorkingDir) + } + + params = append(params, "--", c.Entrypoint) + params = append(params, c.Arguments...) + + if d.sharedRoot { + // lxc-start really needs / to be non-shared, or all kinds of stuff break + // when lxc-start unmount things and those unmounts propagate to the main + // mount namespace. + // What we really want is to clone into a new namespace and then + // mount / MS_REC|MS_SLAVE, but since we can't really clone or fork + // without exec in go we have to do this horrible shell hack... + shellString := + "mount --make-rslave /; exec " + + utils.ShellQuoteArguments(params) + + params = []string{ + "unshare", "-m", "--", "/bin/sh", "-c", shellString, + } + } + + var ( + name = params[0] + arg = params[1:] + ) + aname, err := exec.LookPath(name) + if err != nil { + aname = name + } + c.Path = aname + c.Args = append([]string{name}, arg...) 
+ + if err := c.Start(); err != nil { + return -1, err + } + + var ( + waitErr error + waitLock = make(chan struct{}) + ) + go func() { + if err := c.Wait(); err != nil { + if _, ok := err.(*exec.ExitError); !ok { // Do not propagate the error if it's simply a status code != 0 + waitErr = err + } + } + close(waitLock) + }() + + // Poll lxc for RUNNING status + if err := d.waitForStart(c, waitLock); err != nil { + return -1, err + } + + if startCallback != nil { + startCallback(c) + } + + <-waitLock + + return getExitCode(c), waitErr +} + +/// Return the exit code of the process +// if the process has not exited -1 will be returned +func getExitCode(c *execdriver.Command) int { + if c.ProcessState == nil { + return -1 + } + return c.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() +} + +func (d *driver) Kill(c *execdriver.Command, sig int) error { + return d.kill(c, sig) +} + +func (d *driver) Restore(c *execdriver.Command) error { + for { + output, err := exec.Command("lxc-info", "-n", c.ID).CombinedOutput() + if err != nil { + return err + } + if !strings.Contains(string(output), "RUNNING") { + return nil + } + time.Sleep(500 * time.Millisecond) + } +} + +func (d *driver) version() string { + version := "" + if output, err := exec.Command("lxc-version").CombinedOutput(); err == nil { + outputStr := string(output) + if len(strings.SplitN(outputStr, ":", 2)) == 2 { + version = strings.TrimSpace(strings.SplitN(outputStr, ":", 2)[1]) + } + } + return version +} + +func (d *driver) kill(c *execdriver.Command, sig int) error { + var ( + err error + output []byte + ) + _, err = exec.LookPath("lxc-kill") + if err == nil { + output, err = exec.Command("lxc-kill", "-n", c.ID, strconv.Itoa(sig)).CombinedOutput() + } else { + output, err = exec.Command("lxc-stop", "-k", "-n", c.ID, strconv.Itoa(sig)).CombinedOutput() + } + if err != nil { + return fmt.Errorf("Err: %s Output: %s", err, output) + } + return nil +} + +func (d *driver) waitForStart(c *execdriver.Command, 
waitLock chan struct{}) error { + var ( + err error + output []byte + ) + // We wait for the container to be fully running. + // Timeout after 5 seconds. In case of broken pipe, just retry. + // Note: The container can run and finish correctly before + // the end of this loop + for now := time.Now(); time.Since(now) < 5*time.Second; { + select { + case <-waitLock: + // If the process dies while waiting for it, just return + if c.ProcessState != nil && c.ProcessState.Exited() { + return nil + } + return nil + default: + } + + output, err = d.getInfo(c.ID) + if err != nil { + output, err = d.getInfo(c.ID) + if err != nil { + return err + } + } + if strings.Contains(string(output), "RUNNING") { + return nil + } + time.Sleep(50 * time.Millisecond) + } + return execdriver.ErrNotRunning +} + +func (d *driver) getInfo(id string) ([]byte, error) { + return exec.Command("lxc-info", "-s", "-n", id).CombinedOutput() +} + +type info struct { + ID string + driver *driver +} + +func (i *info) IsRunning() bool { + var running bool + + output, err := i.driver.getInfo(i.ID) + if err != nil { + panic(err) + } + if strings.Contains(string(output), "RUNNING") { + running = true + } + return running +} + +func (d *driver) Info(id string) execdriver.Info { + return &info{ + ID: id, + driver: d, + } +} + +func (d *driver) GetPidsForContainer(id string) ([]int, error) { + pids := []int{} + + // memory is chosen randomly, any cgroup used by docker works + subsystem := "memory" + + cgroupRoot, err := cgroups.FindCgroupMountpoint(subsystem) + if err != nil { + return pids, err + } + + cgroupDir, err := cgroups.GetThisCgroupDir(subsystem) + if err != nil { + return pids, err + } + + filename := filepath.Join(cgroupRoot, cgroupDir, id, "tasks") + if _, err := os.Stat(filename); os.IsNotExist(err) { + // With more recent lxc versions, cgroup will be in lxc/ + filename = filepath.Join(cgroupRoot, cgroupDir, "lxc", id, "tasks") + } + + output, err := ioutil.ReadFile(filename) + if err != nil 
{ + return pids, err + } + for _, p := range strings.Split(string(output), "\n") { + if len(p) == 0 { + continue + } + pid, err := strconv.Atoi(p) + if err != nil { + return pids, fmt.Errorf("Invalid pid '%s': %s", p, err) + } + pids = append(pids, pid) + } + return pids, nil +} + +func linkLxcStart(root string) error { + sourcePath, err := exec.LookPath("lxc-start") + if err != nil { + return err + } + targetPath := path.Join(root, "lxc-start-unconfined") + + if _, err := os.Lstat(targetPath); err != nil && !os.IsNotExist(err) { + return err + } else if err == nil { + if err := os.Remove(targetPath); err != nil { + return err + } + } + return os.Symlink(sourcePath, targetPath) +} + +// TODO: This can be moved to the mountinfo reader in the mount pkg +func rootIsShared() bool { + if data, err := ioutil.ReadFile("/proc/self/mountinfo"); err == nil { + for _, line := range strings.Split(string(data), "\n") { + cols := strings.Split(line, " ") + if len(cols) > 6 && cols[4] == "/" { + return strings.HasPrefix(cols[6], "shared") + } + } + } + + // No idea, probably safe to assume so + return true +} + +func (d *driver) generateLXCConfig(c *execdriver.Command) (string, error) { + root := path.Join(d.root, "containers", c.ID, "config.lxc") + fo, err := os.Create(root) + if err != nil { + return "", err + } + defer fo.Close() + + if err := LxcTemplateCompiled.Execute(fo, struct { + *execdriver.Command + AppArmor bool + }{ + Command: c, + AppArmor: d.apparmor, + }); err != nil { + return "", err + } + return root, nil +} diff --git a/execdriver/lxc/init.go b/execdriver/lxc/init.go new file mode 100644 index 0000000000..7c2b039c50 --- /dev/null +++ b/execdriver/lxc/init.go @@ -0,0 +1,153 @@ +package lxc + +import ( + "fmt" + "github.com/dotcloud/docker/execdriver" + "github.com/dotcloud/docker/pkg/netlink" + "github.com/dotcloud/docker/utils" + "github.com/syndtr/gocapability/capability" + "net" + "os" + "strconv" + "strings" + "syscall" +) + +func setupHostname(args 
*execdriver.InitArgs) error { + hostname := getEnv(args, "HOSTNAME") + if hostname == "" { + return nil + } + return setHostname(hostname) +} + +// Setup networking +func setupNetworking(args *execdriver.InitArgs) error { + if args.Ip != "" { + // eth0 + iface, err := net.InterfaceByName("eth0") + if err != nil { + return fmt.Errorf("Unable to set up networking: %v", err) + } + ip, ipNet, err := net.ParseCIDR(args.Ip) + if err != nil { + return fmt.Errorf("Unable to set up networking: %v", err) + } + if err := netlink.NetworkLinkAddIp(iface, ip, ipNet); err != nil { + return fmt.Errorf("Unable to set up networking: %v", err) + } + if err := netlink.NetworkSetMTU(iface, args.Mtu); err != nil { + return fmt.Errorf("Unable to set MTU: %v", err) + } + if err := netlink.NetworkLinkUp(iface); err != nil { + return fmt.Errorf("Unable to set up networking: %v", err) + } + + // loopback + iface, err = net.InterfaceByName("lo") + if err != nil { + return fmt.Errorf("Unable to set up networking: %v", err) + } + if err := netlink.NetworkLinkUp(iface); err != nil { + return fmt.Errorf("Unable to set up networking: %v", err) + } + } + if args.Gateway != "" { + gw := net.ParseIP(args.Gateway) + if gw == nil { + return fmt.Errorf("Unable to set up networking, %s is not a valid gateway IP", args.Gateway) + } + + if err := netlink.AddDefaultGw(gw); err != nil { + return fmt.Errorf("Unable to set up networking: %v", err) + } + } + + return nil +} + +// Setup working directory +func setupWorkingDirectory(args *execdriver.InitArgs) error { + if args.WorkDir == "" { + return nil + } + if err := syscall.Chdir(args.WorkDir); err != nil { + return fmt.Errorf("Unable to change dir to %v: %v", args.WorkDir, err) + } + return nil +} + +// Takes care of dropping privileges to the desired user +func changeUser(args *execdriver.InitArgs) error { + if args.User == "" { + return nil + } + userent, err := utils.UserLookup(args.User) + if err != nil { + return fmt.Errorf("Unable to find user %v: 
%v", args.User, err) + } + + uid, err := strconv.Atoi(userent.Uid) + if err != nil { + return fmt.Errorf("Invalid uid: %v", userent.Uid) + } + gid, err := strconv.Atoi(userent.Gid) + if err != nil { + return fmt.Errorf("Invalid gid: %v", userent.Gid) + } + + if err := syscall.Setgid(gid); err != nil { + return fmt.Errorf("setgid failed: %v", err) + } + if err := syscall.Setuid(uid); err != nil { + return fmt.Errorf("setuid failed: %v", err) + } + + return nil +} + +func setupCapabilities(args *execdriver.InitArgs) error { + + if args.Privileged { + return nil + } + + drop := []capability.Cap{ + capability.CAP_SETPCAP, + capability.CAP_SYS_MODULE, + capability.CAP_SYS_RAWIO, + capability.CAP_SYS_PACCT, + capability.CAP_SYS_ADMIN, + capability.CAP_SYS_NICE, + capability.CAP_SYS_RESOURCE, + capability.CAP_SYS_TIME, + capability.CAP_SYS_TTY_CONFIG, + capability.CAP_MKNOD, + capability.CAP_AUDIT_WRITE, + capability.CAP_AUDIT_CONTROL, + capability.CAP_MAC_OVERRIDE, + capability.CAP_MAC_ADMIN, + } + + c, err := capability.NewPid(os.Getpid()) + if err != nil { + return err + } + + c.Unset(capability.CAPS|capability.BOUNDS, drop...) 
+ + if err := c.Apply(capability.CAPS | capability.BOUNDS); err != nil { + return err + } + return nil +} + +func getEnv(args *execdriver.InitArgs, key string) string { + for _, kv := range args.Env { + parts := strings.SplitN(kv, "=", 2) + if parts[0] == key && len(parts) == 2 { + return parts[1] + } + } + return "" +} diff --git a/sysinit/sysinit_linux.go b/execdriver/lxc/lxc_init_linux.go similarity index 79% rename from sysinit/sysinit_linux.go rename to execdriver/lxc/lxc_init_linux.go index d18d2fab8b..7288f5877b 100644 --- a/sysinit/sysinit_linux.go +++ b/execdriver/lxc/lxc_init_linux.go @@ -1,4 +1,6 @@ -package sysinit +// +build amd64 + +package lxc import ( "syscall" diff --git a/sysinit/sysinit_darwin.go b/execdriver/lxc/lxc_init_unsupported.go similarity index 68% rename from sysinit/sysinit_darwin.go rename to execdriver/lxc/lxc_init_unsupported.go index 64566afb3c..d68cb91a1e 100644 --- a/sysinit/sysinit_darwin.go +++ b/execdriver/lxc/lxc_init_unsupported.go @@ -1,4 +1,6 @@ -package sysinit +// +build !linux !amd64 + +package lxc func setHostname(hostname string) error { panic("Not supported on darwin") diff --git a/lxc_template.go b/execdriver/lxc/lxc_template.go similarity index 74% rename from lxc_template.go rename to execdriver/lxc/lxc_template.go index f96323b5ea..705bdf5363 100644 --- a/lxc_template.go +++ b/execdriver/lxc/lxc_template.go @@ -1,23 +1,24 @@ -package docker +package lxc import ( + "github.com/dotcloud/docker/execdriver" "strings" "text/template" ) const LxcTemplate = ` -{{if .Config.NetworkDisabled}} -# network is disabled (-n=false) -lxc.network.type = empty -{{else}} +{{if .Network}} # network configuration lxc.network.type = veth -lxc.network.link = {{.NetworkSettings.Bridge}} +lxc.network.link = {{.Network.Bridge}} lxc.network.name = eth0 +{{else}} +# network is disabled (-n=false) +lxc.network.type = empty {{end}} # root filesystem -{{$ROOTFS := .RootfsPath}} +{{$ROOTFS := .Rootfs}} lxc.rootfs = {{$ROOTFS}} # use a dedicated 
pts for the container (and limit the number of pseudo terminal @@ -30,8 +31,8 @@ lxc.console = none # no controlling tty at all lxc.tty = 1 -{{if (getHostConfig .).Privileged}} -lxc.cgroup.devices.allow = a +{{if .Privileged}} +lxc.cgroup.devices.allow = a {{else}} # no implicit access to devices lxc.cgroup.devices.deny = a @@ -81,8 +82,8 @@ lxc.mount.entry = sysfs {{escapeFstabSpaces $ROOTFS}}/sys sysfs nosuid,nodev,noe lxc.mount.entry = devpts {{escapeFstabSpaces $ROOTFS}}/dev/pts devpts newinstance,ptmxmode=0666,nosuid,noexec 0 0 lxc.mount.entry = shm {{escapeFstabSpaces $ROOTFS}}/dev/shm tmpfs size=65536k,nosuid,nodev,noexec 0 0 -{{if (getHostConfig .).Privileged}} -{{if (getCapabilities .).AppArmor}} +{{if .Privileged}} +{{if .AppArmor}} lxc.aa_profile = unconfined {{else}} #lxc.aa_profile = unconfined @@ -90,20 +91,22 @@ lxc.aa_profile = unconfined {{end}} # limits -{{if .Config.Memory}} -lxc.cgroup.memory.limit_in_bytes = {{.Config.Memory}} -lxc.cgroup.memory.soft_limit_in_bytes = {{.Config.Memory}} -{{with $memSwap := getMemorySwap .Config}} +{{if .Resources}} +{{if .Resources.Memory}} +lxc.cgroup.memory.limit_in_bytes = {{.Resources.Memory}} +lxc.cgroup.memory.soft_limit_in_bytes = {{.Resources.Memory}} +{{with $memSwap := getMemorySwap .Resources}} lxc.cgroup.memory.memsw.limit_in_bytes = {{$memSwap}} {{end}} {{end}} -{{if .Config.CpuShares}} -lxc.cgroup.cpu.shares = {{.Config.CpuShares}} +{{if .Resources.CpuShares}} +lxc.cgroup.cpu.shares = {{.Resources.CpuShares}} +{{end}} {{end}} -{{if (getHostConfig .).LxcConf}} -{{range $pair := (getHostConfig .).LxcConf}} -{{$pair.Key}} = {{$pair.Value}} +{{if .Config}} +{{range $value := .Config}} +{{$value}} {{end}} {{end}} ` @@ -116,29 +119,19 @@ func escapeFstabSpaces(field string) string { return strings.Replace(field, " ", "\\040", -1) } -func getMemorySwap(config *Config) int64 { +func getMemorySwap(v *execdriver.Resources) int64 { // By default, MemorySwap is set to twice the size of RAM. 
// If you want to omit MemorySwap, set it to `-1'. - if config.MemorySwap < 0 { + if v.MemorySwap < 0 { return 0 } - return config.Memory * 2 -} - -func getHostConfig(container *Container) *HostConfig { - return container.hostConfig -} - -func getCapabilities(container *Container) *Capabilities { - return container.runtime.capabilities + return v.Memory * 2 } func init() { var err error funcMap := template.FuncMap{ "getMemorySwap": getMemorySwap, - "getHostConfig": getHostConfig, - "getCapabilities": getCapabilities, "escapeFstabSpaces": escapeFstabSpaces, } LxcTemplateCompiled, err = template.New("lxc").Funcs(funcMap).Parse(LxcTemplate) diff --git a/lxc_template_unit_test.go b/execdriver/lxc/lxc_template_unit_test.go similarity index 60% rename from lxc_template_unit_test.go rename to execdriver/lxc/lxc_template_unit_test.go index f71f1dd6f5..99d6e636f5 100644 --- a/lxc_template_unit_test.go +++ b/execdriver/lxc/lxc_template_unit_test.go @@ -1,11 +1,13 @@ -package docker +package lxc import ( "bufio" "fmt" + "github.com/dotcloud/docker/execdriver" "io/ioutil" "math/rand" "os" + "path" "strings" "testing" "time" @@ -17,32 +19,39 @@ func TestLXCConfig(t *testing.T) { t.Fatal(err) } defer os.RemoveAll(root) + + os.MkdirAll(path.Join(root, "containers", "1"), 0777) + // Memory is allocated randomly for testing rand.Seed(time.Now().UTC().UnixNano()) - memMin := 33554432 - memMax := 536870912 - mem := memMin + rand.Intn(memMax-memMin) - // CPU shares as well - cpuMin := 100 - cpuMax := 10000 - cpu := cpuMin + rand.Intn(cpuMax-cpuMin) - container := &Container{ - root: root, - Config: &Config{ - Memory: int64(mem), - CpuShares: int64(cpu), - NetworkDisabled: true, - }, - hostConfig: &HostConfig{ - Privileged: false, - }, - } - if err := container.generateLXCConfig(); err != nil { + var ( + memMin = 33554432 + memMax = 536870912 + mem = memMin + rand.Intn(memMax-memMin) + cpuMin = 100 + cpuMax = 10000 + cpu = cpuMin + rand.Intn(cpuMax-cpuMin) + ) + + driver, err := 
NewDriver(root, false) + if err != nil { t.Fatal(err) } - grepFile(t, container.lxcConfigPath(), + command := &execdriver.Command{ + ID: "1", + Resources: &execdriver.Resources{ + Memory: int64(mem), + CpuShares: int64(cpu), + }, + } + p, err := driver.generateLXCConfig(command) + if err != nil { + t.Fatal(err) + } + grepFile(t, p, fmt.Sprintf("lxc.cgroup.memory.limit_in_bytes = %d", mem)) - grepFile(t, container.lxcConfigPath(), + + grepFile(t, p, fmt.Sprintf("lxc.cgroup.memory.memsw.limit_in_bytes = %d", mem*2)) } @@ -52,31 +61,29 @@ func TestCustomLxcConfig(t *testing.T) { t.Fatal(err) } defer os.RemoveAll(root) - container := &Container{ - root: root, - Config: &Config{ - Hostname: "foobar", - NetworkDisabled: true, - }, - hostConfig: &HostConfig{ - Privileged: false, - LxcConf: []KeyValuePair{ - { - Key: "lxc.utsname", - Value: "docker", - }, - { - Key: "lxc.cgroup.cpuset.cpus", - Value: "0,1", - }, - }, - }, - } - if err := container.generateLXCConfig(); err != nil { + + os.MkdirAll(path.Join(root, "containers", "1"), 0777) + + driver, err := NewDriver(root, false) + if err != nil { t.Fatal(err) } - grepFile(t, container.lxcConfigPath(), "lxc.utsname = docker") - grepFile(t, container.lxcConfigPath(), "lxc.cgroup.cpuset.cpus = 0,1") + command := &execdriver.Command{ + ID: "1", + Privileged: false, + Config: []string{ + "lxc.utsname = docker", + "lxc.cgroup.cpuset.cpus = 0,1", + }, + } + + p, err := driver.generateLXCConfig(command) + if err != nil { + t.Fatal(err) + } + + grepFile(t, p, "lxc.utsname = docker") + grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1") } func grepFile(t *testing.T, path string, pattern string) { diff --git a/graph.go b/graph.go index 176626d60a..42da42c8af 100644 --- a/graph.go +++ b/graph.go @@ -97,6 +97,7 @@ func (graph *Graph) Get(name string) (*Image, error) { if err != nil { return nil, fmt.Errorf("Driver %s failed to get image rootfs %s: %s", graph.driver, img.ID, err) } + defer graph.driver.Put(img.ID) var size int64 if 
img.Parent == "" { @@ -193,6 +194,7 @@ func (graph *Graph) Register(jsonData []byte, layerData archive.Archive, img *Im if err != nil { return fmt.Errorf("Driver %s failed to get image rootfs %s: %s", graph.driver, img.ID, err) } + defer graph.driver.Put(img.ID) img.graph = graph if err := StoreImage(img, jsonData, layerData, tmp, rootfs); err != nil { return err diff --git a/graphdriver/aufs/aufs.go b/graphdriver/aufs/aufs.go index 8875cac6d9..d1cf87d1a0 100644 --- a/graphdriver/aufs/aufs.go +++ b/graphdriver/aufs/aufs.go @@ -25,12 +25,13 @@ import ( "fmt" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/graphdriver" - mountpk "github.com/dotcloud/docker/mount" + mountpk "github.com/dotcloud/docker/pkg/mount" "github.com/dotcloud/docker/utils" "os" "os/exec" "path" "strings" + "sync" ) func init() { @@ -38,7 +39,9 @@ func init() { } type Driver struct { - root string + root string + sync.Mutex // Protects concurrent modification to active + active map[string]int } // New returns a new AUFS driver. 
@@ -54,12 +57,17 @@ func Init(root string) (graphdriver.Driver, error) { "layers", } + a := &Driver{ + root: root, + active: make(map[string]int), + } + // Create the root aufs driver dir and return // if it already exists // If not populate the dir structure if err := os.MkdirAll(root, 0755); err != nil { if os.IsExist(err) { - return &Driver{root}, nil + return a, nil } return nil, err } @@ -69,7 +77,7 @@ func Init(root string) (graphdriver.Driver, error) { return nil, err } } - return &Driver{root}, nil + return a, nil } // Return a nil error if the kernel supports aufs @@ -167,6 +175,14 @@ func (a *Driver) createDirsFor(id string) error { // Unmount and remove the dir information func (a *Driver) Remove(id string) error { + // Protect the a.active from concurrent access + a.Lock() + defer a.Unlock() + + if a.active[id] != 0 { + utils.Errorf("Warning: removing active id %s\n", id) + } + // Make sure the dir is umounted first if err := a.unmount(id); err != nil { return err @@ -176,20 +192,17 @@ func (a *Driver) Remove(id string) error { "diff", } - // Remove the dirs atomically + // Atomically remove each directory in turn by first moving it out of the + // way (so that docker doesn't find it anymore) before doing removal of + // the whole tree. 
for _, p := range tmpDirs { - // We need to use a temp dir in the same dir as the driver so Rename - // does not fall back to the slow copy if /tmp and the driver dir - // are on different devices - tmp := path.Join(a.rootPath(), "tmp", p, id) - if err := os.MkdirAll(tmp, 0755); err != nil { - return err - } + realPath := path.Join(a.rootPath(), p, id) - if err := os.Rename(realPath, tmp); err != nil && !os.IsNotExist(err) { + tmpPath := path.Join(a.rootPath(), p, fmt.Sprintf("%s-removing", id)) + if err := os.Rename(realPath, tmpPath); err != nil && !os.IsNotExist(err) { return err } - defer os.RemoveAll(tmp) + defer os.RemoveAll(tmpPath) } // Remove the layers file for the id @@ -210,22 +223,50 @@ func (a *Driver) Get(id string) (string, error) { ids = []string{} } + // Protect the a.active from concurrent access + a.Lock() + defer a.Unlock() + + count := a.active[id] + // If a dir does not have a parent ( no layers )do not try to mount // just return the diff path to the data out := path.Join(a.rootPath(), "diff", id) if len(ids) > 0 { out = path.Join(a.rootPath(), "mnt", id) - if err := a.mount(id); err != nil { - return "", err + + if count == 0 { + if err := a.mount(id); err != nil { + return "", err + } } } + + a.active[id] = count + 1 + return out, nil } +func (a *Driver) Put(id string) { + // Protect the a.active from concurrent access + a.Lock() + defer a.Unlock() + + if count := a.active[id]; count > 1 { + a.active[id] = count - 1 + } else { + ids, _ := getParentIds(a.rootPath(), id) + // We only mounted if there are any parents + if ids != nil && len(ids) > 0 { + a.unmount(id) + } + delete(a.active, id) + } +} + // Returns an archive of the contents for the id func (a *Driver) Diff(id string) (archive.Archive, error) { return archive.TarFilter(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ - Recursive: true, Compression: archive.Uncompressed, }) } diff --git a/graphdriver/aufs/mount_linux.go b/graphdriver/aufs/mount_linux.go index 
c86f1bbd63..6082d9f240 100644 --- a/graphdriver/aufs/mount_linux.go +++ b/graphdriver/aufs/mount_linux.go @@ -1,3 +1,5 @@ +// +build amd64 + package aufs import "syscall" diff --git a/graphdriver/aufs/mount_darwin.go b/graphdriver/aufs/mount_unsupported.go similarity index 89% rename from graphdriver/aufs/mount_darwin.go rename to graphdriver/aufs/mount_unsupported.go index 62c84fc7c9..2735624112 100644 --- a/graphdriver/aufs/mount_darwin.go +++ b/graphdriver/aufs/mount_unsupported.go @@ -1,3 +1,5 @@ +// +build !linux !amd64 + package aufs import "errors" diff --git a/graphdriver/btrfs/btrfs.go b/graphdriver/btrfs/btrfs.go new file mode 100644 index 0000000000..592e058458 --- /dev/null +++ b/graphdriver/btrfs/btrfs.go @@ -0,0 +1,213 @@ +// +build linux,amd64 + +package btrfs + +/* +#include +#include +#include +*/ +import "C" + +import ( + "fmt" + "github.com/dotcloud/docker/graphdriver" + "os" + "path" + "syscall" + "unsafe" +) + +func init() { + graphdriver.Register("btrfs", Init) +} + +func Init(home string) (graphdriver.Driver, error) { + rootdir := path.Dir(home) + + var buf syscall.Statfs_t + if err := syscall.Statfs(rootdir, &buf); err != nil { + return nil, err + } + + if buf.Type != 0x9123683E { + return nil, fmt.Errorf("%s is not a btrfs filesystem", rootdir) + } + + return &Driver{ + home: home, + }, nil +} + +type Driver struct { + home string +} + +func (d *Driver) String() string { + return "btrfs" +} + +func (d *Driver) Status() [][2]string { + return nil +} + +func (d *Driver) Cleanup() error { + return nil +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("Can't open dir") + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +func subvolCreate(path, name 
string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_vol_args + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) + } + return nil +} + +func subvolSnapshot(src, dest, name string) error { + srcDir, err := openDir(src) + if err != nil { + return err + } + defer closeDir(srcDir) + + destDir, err := openDir(dest) + if err != nil { + return err + } + defer closeDir(destDir) + + var args C.struct_btrfs_ioctl_vol_args_v2 + args.fd = C.__s64(getDirFd(srcDir)) + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) + } + return nil +} + +func subvolDelete(path, name string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_vol_args + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to destroy btrfs snapshot: %v", errno.Error()) + } + return nil +} + +func (d *Driver) subvolumesDir() string { + return path.Join(d.home, "subvolumes") +} + +func (d *Driver) subvolumesDirId(id string) string { + return path.Join(d.subvolumesDir(), id) +} + +func (d *Driver) Create(id string, parent string) error { + subvolumes := path.Join(d.home, "subvolumes") + if err := os.MkdirAll(subvolumes, 0700); err != nil { + return err + } + if parent == "" { + if err := 
subvolCreate(subvolumes, id); err != nil { + return err + } + } else { + parentDir, err := d.Get(parent) + if err != nil { + return err + } + if err := subvolSnapshot(parentDir, subvolumes, id); err != nil { + return err + } + } + return nil +} + +func (d *Driver) Remove(id string) error { + dir := d.subvolumesDirId(id) + if _, err := os.Stat(dir); err != nil { + return err + } + if err := subvolDelete(d.subvolumesDir(), id); err != nil { + return err + } + return os.RemoveAll(dir) +} + +func (d *Driver) Get(id string) (string, error) { + dir := d.subvolumesDirId(id) + st, err := os.Stat(dir) + if err != nil { + return "", err + } + + if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + + return dir, nil +} + +func (d *Driver) Put(id string) { + // Get() creates no runtime resources (like e.g. mounts) + // so this doesn't need to do anything. +} + +func (d *Driver) Exists(id string) bool { + dir := d.subvolumesDirId(id) + _, err := os.Stat(dir) + return err == nil +} diff --git a/graphdriver/btrfs/dummy_unsupported.go b/graphdriver/btrfs/dummy_unsupported.go new file mode 100644 index 0000000000..6c44615763 --- /dev/null +++ b/graphdriver/btrfs/dummy_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux !amd64 + +package btrfs diff --git a/graphdriver/devmapper/attach_loopback.go b/graphdriver/devmapper/attach_loopback.go index 456b5645f4..23339076e8 100644 --- a/graphdriver/devmapper/attach_loopback.go +++ b/graphdriver/devmapper/attach_loopback.go @@ -1,4 +1,4 @@ -// +build linux +// +build linux,amd64 package devmapper diff --git a/graphdriver/devmapper/deviceset.go b/graphdriver/devmapper/deviceset.go index 7308c0e922..8432d92a4e 100644 --- a/graphdriver/devmapper/deviceset.go +++ b/graphdriver/devmapper/deviceset.go @@ -1,4 +1,4 @@ -// +build linux +// +build linux,amd64 package devmapper @@ -568,6 +568,15 @@ func (devices *DeviceSet) removeDevice(hash string) error { return fmt.Errorf("hash %s doesn't exists", hash) } + // This is a 
workaround for the kernel not discarding block so + // on the thin pool when we remove a thinp device, so we do it + // manually + if err := devices.activateDeviceIfNeeded(hash); err == nil { + if err := BlockDeviceDiscard(info.DevName()); err != nil { + utils.Debugf("Error discarding block on device: %s (ignoring)\n", err) + } + } + devinfo, _ := getInfo(info.Name()) if devinfo != nil && devinfo.Exists != 0 { if err := removeDevice(info.Name()); err != nil { diff --git a/graphdriver/devmapper/devmapper.go b/graphdriver/devmapper/devmapper.go index dfbdf385d7..7f83a09df9 100644 --- a/graphdriver/devmapper/devmapper.go +++ b/graphdriver/devmapper/devmapper.go @@ -1,4 +1,4 @@ -// +build linux +// +build linux,amd64 package devmapper @@ -7,6 +7,7 @@ import ( "fmt" "github.com/dotcloud/docker/utils" "runtime" + "syscall" ) type DevmapperLogger interface { @@ -288,6 +289,29 @@ func GetBlockDeviceSize(file *osFile) (uint64, error) { return uint64(size), nil } +func BlockDeviceDiscard(path string) error { + file, err := osOpenFile(path, osORdWr, 0) + if err != nil { + return err + } + defer file.Close() + + size, err := GetBlockDeviceSize(file) + if err != nil { + return err + } + + if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil { + return err + } + + // Without this sometimes the remove of the device that happens after + // discard fails with EBUSY. 
+ syscall.Sync() + + return nil +} + // This is the programmatic example of "dmsetup create" func createPool(poolName string, dataFile, metadataFile *osFile) error { task, err := createTask(DeviceCreate, poolName) diff --git a/graphdriver/devmapper/devmapper_log.go b/graphdriver/devmapper/devmapper_log.go index 8d54ad4e3a..18dde7cca5 100644 --- a/graphdriver/devmapper/devmapper_log.go +++ b/graphdriver/devmapper/devmapper_log.go @@ -1,4 +1,4 @@ -// +build linux +// +build linux,amd64 package devmapper diff --git a/graphdriver/devmapper/devmapper_test.go b/graphdriver/devmapper/devmapper_test.go index a43e32e059..3ffa163ceb 100644 --- a/graphdriver/devmapper/devmapper_test.go +++ b/graphdriver/devmapper/devmapper_test.go @@ -1,4 +1,4 @@ -// +build linux +// +build linux,amd64 package devmapper diff --git a/graphdriver/devmapper/devmapper_wrapper.go b/graphdriver/devmapper/devmapper_wrapper.go index 80d430e2bf..bf558affc8 100644 --- a/graphdriver/devmapper/devmapper_wrapper.go +++ b/graphdriver/devmapper/devmapper_wrapper.go @@ -1,4 +1,4 @@ -// +build linux +// +build linux,amd64 package devmapper @@ -66,6 +66,7 @@ type ( // IOCTL consts const ( BlkGetSize64 = C.BLKGETSIZE64 + BlkDiscard = C.BLKDISCARD LoopSetFd = C.LOOP_SET_FD LoopCtlGetFree = C.LOOP_CTL_GET_FREE diff --git a/graphdriver/devmapper/driver.go b/graphdriver/devmapper/driver.go index 10ac172562..664899cfbf 100644 --- a/graphdriver/devmapper/driver.go +++ b/graphdriver/devmapper/driver.go @@ -1,12 +1,14 @@ -// +build linux +// +build linux,amd64 package devmapper import ( "fmt" "github.com/dotcloud/docker/graphdriver" + "github.com/dotcloud/docker/utils" "io/ioutil" "path" + "sync" ) func init() { @@ -20,7 +22,9 @@ func init() { type Driver struct { *DeviceSet - home string + home string + sync.Mutex // Protects concurrent modification to active + active map[string]int } var Init = func(home string) (graphdriver.Driver, error) { @@ -31,6 +35,7 @@ var Init = func(home string) (graphdriver.Driver, error) { 
d := &Driver{ DeviceSet: deviceSet, home: home, + active: make(map[string]int), } return d, nil } @@ -82,6 +87,14 @@ func (d *Driver) Create(id, parent string) error { } func (d *Driver) Remove(id string) error { + // Protect the d.active from concurrent access + d.Lock() + defer d.Unlock() + + if d.active[id] != 0 { + utils.Errorf("Warning: removing active id %s\n", id) + } + mp := path.Join(d.home, "mnt", id) if err := d.unmount(id, mp); err != nil { return err @@ -90,13 +103,38 @@ func (d *Driver) Remove(id string) error { } func (d *Driver) Get(id string) (string, error) { + // Protect the d.active from concurrent access + d.Lock() + defer d.Unlock() + + count := d.active[id] + mp := path.Join(d.home, "mnt", id) - if err := d.mount(id, mp); err != nil { - return "", err + if count == 0 { + if err := d.mount(id, mp); err != nil { + return "", err + } } + + d.active[id] = count + 1 + return path.Join(mp, "rootfs"), nil } +func (d *Driver) Put(id string) { + // Protect the d.active from concurrent access + d.Lock() + defer d.Unlock() + + if count := d.active[id]; count > 1 { + d.active[id] = count - 1 + } else { + mp := path.Join(d.home, "mnt", id) + d.unmount(id, mp) + delete(d.active, id) + } +} + func (d *Driver) mount(id, mountPoint string) error { // Create the target directories if they don't exist if err := osMkdirAll(mountPoint, 0755); err != nil && !osIsExist(err) { diff --git a/graphdriver/devmapper/driver_test.go b/graphdriver/devmapper/driver_test.go index b6d997bc2f..785845ce6e 100644 --- a/graphdriver/devmapper/driver_test.go +++ b/graphdriver/devmapper/driver_test.go @@ -1,4 +1,4 @@ -// +build linux +// +build linux,amd64 package devmapper @@ -641,6 +641,10 @@ func TestDriverRemove(t *testing.T) { "DmTaskSetMessage", "DmTaskCreate", "DmTaskGetInfo", + "DmTaskSetCookie", + "DmTaskSetTarget", + "DmTaskSetAddNode", + "DmUdevWait", "Mounted", "sysUnmount", ) diff --git a/graphdriver/devmapper/ioctl.go b/graphdriver/devmapper/ioctl.go index 
448d2d5a50..30bafff943 100644 --- a/graphdriver/devmapper/ioctl.go +++ b/graphdriver/devmapper/ioctl.go @@ -1,4 +1,4 @@ -// +build linux +// +build linux,amd64 package devmapper @@ -58,3 +58,14 @@ func ioctlBlkGetSize64(fd uintptr) (int64, error) { } return size, nil } + +func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { + var r [2]uint64 + r[0] = offset + r[1] = length + + if _, _, err := sysSyscall(sysSysIoctl, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { + return err + } + return nil +} diff --git a/graphdriver/devmapper/mount.go b/graphdriver/devmapper/mount.go index d0050484bf..4f19109bf8 100644 --- a/graphdriver/devmapper/mount.go +++ b/graphdriver/devmapper/mount.go @@ -1,4 +1,4 @@ -// +build linux +// +build linux,amd64 package devmapper diff --git a/graphdriver/devmapper/sys.go b/graphdriver/devmapper/sys.go index 540c468988..5a9ab4d74b 100644 --- a/graphdriver/devmapper/sys.go +++ b/graphdriver/devmapper/sys.go @@ -1,4 +1,4 @@ -// +build linux +// +build linux,amd64 package devmapper diff --git a/graphdriver/driver.go b/graphdriver/driver.go index 1d5995dffc..c0ed00b0ad 100644 --- a/graphdriver/driver.go +++ b/graphdriver/driver.go @@ -17,6 +17,7 @@ type Driver interface { Remove(id string) error Get(id string) (dir string, err error) + Put(id string) Exists(id string) bool Status() [][2]string @@ -40,6 +41,8 @@ var ( "aufs", "devicemapper", "vfs", + // experimental, has to be enabled manually for now + "btrfs", } ) diff --git a/graphdriver/vfs/driver.go b/graphdriver/vfs/driver.go index 12230f463a..21da63878a 100644 --- a/graphdriver/vfs/driver.go +++ b/graphdriver/vfs/driver.go @@ -84,6 +84,11 @@ func (d *Driver) Get(id string) (string, error) { return dir, nil } +func (d *Driver) Put(id string) { + // The vfs driver has no runtime resources (e.g. 
mounts) + // to clean up, so we don't need anything here +} + func (d *Driver) Exists(id string) bool { _, err := os.Stat(d.dir(id)) return err == nil diff --git a/hack/MAINTAINERS.md b/hack/MAINTAINERS.md index b109c32660..8944fbee1a 100644 --- a/hack/MAINTAINERS.md +++ b/hack/MAINTAINERS.md @@ -5,7 +5,7 @@ Dear maintainer. Thank you for investing the time and energy to help make Docker as useful as possible. Maintaining a project is difficult, sometimes unrewarding work. Sure, you will get to contribute cool features to the project. But most of your time -will be spent reviewing, cleaning up, documenting, andswering questions, justifying +will be spent reviewing, cleaning up, documenting, answering questions, justifying design decisions - while everyone has all the fun! But remember - the quality of the maintainers work is what distinguishes the good projects from the great. So please be proud of your work, even the unglamourous parts, and encourage a culture @@ -54,13 +54,13 @@ But how do we identify the relevant maintainer for a given pull request? Docker follows the timeless, highly efficient and totally unfair system known as [Benevolent dictator for life](http://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life), with yours truly, Solomon Hykes, in the role of BDFL. -This means that all decisions are made by default by me. Since making every decision myself would be highly unscalable, in practice decisions are spread across multiple maintainers. +This means that all decisions are made by default by me. Since making every decision myself would be highly un-scalable, in practice decisions are spread across multiple maintainers. The relevant maintainer for a pull request is assigned in 3 steps: * Step 1: Determine the subdirectory affected by the pull request. This might be src/registry, docs/source/api, or any other part of the repo. -* Step 2: Find the MAINTAINERS file which affects this directory. 
If the directory itself does not have a MAINTAINERS file, work your way up the the repo hierarchy until you find one. +* Step 2: Find the MAINTAINERS file which affects this directory. If the directory itself does not have a MAINTAINERS file, work your way up the repo hierarchy until you find one. * Step 3: The first maintainer listed is the primary maintainer. The pull request is assigned to him. He may assign it to other listed maintainers, at his discretion. diff --git a/hack/PACKAGERS.md b/hack/PACKAGERS.md index 1dd039c3e3..b50306430b 100644 --- a/hack/PACKAGERS.md +++ b/hack/PACKAGERS.md @@ -39,6 +39,7 @@ To build docker, you will need the following system dependencies * Go version 1.2 or later * SQLite version 3.7.9 or later * libdevmapper version 1.02.68-cvs (2012-01-26) or later from lvm2 version 2.02.89 or later +* btrfs-progs version 3.8 or later (including commit e5cb128 from 2013-01-07) for the necessary btrfs headers * A clean checkout of the source must be added to a valid Go [workspace](http://golang.org/doc/code.html#Workspaces) under the path *src/github.com/dotcloud/docker*. @@ -56,15 +57,40 @@ NOTE: if you''re not able to package the exact version (to the exact commit) of please get in touch so we can remediate! Who knows what discrepancies can be caused by even the slightest deviation. We promise to do our best to make everybody happy. -## Disabling CGO +## Stripping Binaries -Make sure to disable CGO on your system, and then recompile the standard library on the build -machine: +Please, please, please do not strip any compiled binaries. This is really important. -```bash -export CGO_ENABLED=0 -cd /tmp && echo 'package main' > t.go && go test -a -i -v -``` +See the following quotes from Dave Cheney, which explain this position better +from the upstream Golang perspective. + +### [go issue #5855, comment #3](https://code.google.com/p/go/issues/detail?id=5855#c3) + +> Super super important: Do not strip go binaries or archives. 
It isn't tested, +> often breaks, and doesn't work. + +### [launchpad golang issue #1200255, comment #8](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/8) + +> To quote myself: "Please do not strip Go binaries, it is not supported, not +> tested, is often broken, and doesn't do what you want" +> +> To unpack that a bit +> +> * not supported, as in, we don't support it, and recommend against it when +> asked +> * not tested, we don't test stripped binaries as part of the build CI process +> * is often broken, stripping a go binary will produce anywhere from no, to +> subtle, to outright execution failure, see above + +### [launchpad golang issue #1200255, comment #13](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/13) + +> To clarify my previous statements. +> +> * I do not disagree with the debian policy, it is there for a good reason +> * Having said that, it stripping Go binaries doesn't work, and nobody is +> looking at making it work, so there is that. +> +> Thanks for patching the build formula. ## Building Docker @@ -120,7 +146,6 @@ The test suite will also download a small test container, so you will need inter To run properly, docker needs the following software to be installed at runtime: -* GNU Tar version 1.26 or later * iproute2 version 3.5 or later (build after 2012-05-21), and specifically the "ip" utility * iptables version 1.4 or later * The LXC utility scripts (http://lxc.sourceforge.net) version 0.8 or later diff --git a/hack/RELEASE-CHECKLIST.md b/hack/RELEASE-CHECKLIST.md index 41aca4775d..a7ae45f2ff 100644 --- a/hack/RELEASE-CHECKLIST.md +++ b/hack/RELEASE-CHECKLIST.md @@ -66,6 +66,14 @@ EXAMPLES: * Improve detection of kernel version ``` +If you need a list of contributors between the last major release and the +current bump branch, use something like: +```bash +git log --format='%aN <%aE>' v0.7.0...bump_v0.8.0 | sort -uf +``` +Obviously, you'll need to adjust version numbers as necessary. 
If you just need +a count, add a simple `| wc -l`. + ### 3. Change the contents of the VERSION file ```bash diff --git a/hack/fmt-check.hook b/hack/fmt-check.hook deleted file mode 100644 index cd18a18bcb..0000000000 --- a/hack/fmt-check.hook +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/sh - -# This pre-commit hook will abort if a committed file doesn't pass gofmt. -# By Even Shaw -# http://github.com/edsrzf/gofmt-git-hook - -test_fmt() { - hash gofmt 2>&- || { echo >&2 "gofmt not in PATH."; exit 1; } - IFS=' -' - for file in `git diff --cached --name-only --diff-filter=ACM | grep '\.go$'` - do - output=`git cat-file -p :$file | gofmt -l 2>&1` - if test $? -ne 0 - then - output=`echo "$output" | sed "s,,$file,"` - syntaxerrors="${list}${output}\n" - elif test -n "$output" - then - list="${list}${file}\n" - fi - done - exitcode=0 - if test -n "$syntaxerrors" - then - echo >&2 "gofmt found syntax errors:" - printf "$syntaxerrors" - exitcode=1 - fi - if test -n "$list" - then - echo >&2 "gofmt needs to format these files (run gofmt -w and git add):" - printf "$list" - exitcode=1 - fi - exit $exitcode -} - -case "$1" in - --about ) - echo "Check Go code formatting" - ;; - * ) - test_fmt - ;; -esac diff --git a/hack/infrastructure/docker-ci/buildbot/setup.sh b/hack/infrastructure/docker-ci/buildbot/setup.sh index c7e89c44b2..c5d9cb988e 100755 --- a/hack/infrastructure/docker-ci/buildbot/setup.sh +++ b/hack/infrastructure/docker-ci/buildbot/setup.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Setup of buildbot configuration. 
Package installation is being done by # Vagrantfile diff --git a/hack/infrastructure/docker-ci/docker-coverage/coverage-docker.sh b/hack/infrastructure/docker-ci/docker-coverage/coverage-docker.sh index f03243cf8f..c29ede5b81 100755 --- a/hack/infrastructure/docker-ci/docker-coverage/coverage-docker.sh +++ b/hack/infrastructure/docker-ci/docker-coverage/coverage-docker.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -x # Generate a random string of $1 characters diff --git a/hack/infrastructure/docker-ci/docker-test/test_docker.sh b/hack/infrastructure/docker-ci/docker-test/test_docker.sh index cf8fdb90bb..14816706ed 100755 --- a/hack/infrastructure/docker-ci/docker-test/test_docker.sh +++ b/hack/infrastructure/docker-ci/docker-test/test_docker.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -x COMMIT=${1-HEAD} diff --git a/hack/infrastructure/docker-ci/nightlyrelease/dockerbuild.sh b/hack/infrastructure/docker-ci/nightlyrelease/dockerbuild.sh index 80caaec25e..d5e58da7e1 100644 --- a/hack/infrastructure/docker-ci/nightlyrelease/dockerbuild.sh +++ b/hack/infrastructure/docker-ci/nightlyrelease/dockerbuild.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Variables AWS_ACCESS_KEY, AWS_SECRET_KEY and PG_PASSPHRASE are decoded # from /root/release_credentials.json diff --git a/hack/infrastructure/docker-ci/registry-coverage/registry_coverage.sh b/hack/infrastructure/docker-ci/registry-coverage/registry_coverage.sh index e16cea8e3c..c67b17eba0 100755 --- a/hack/infrastructure/docker-ci/registry-coverage/registry_coverage.sh +++ b/hack/infrastructure/docker-ci/registry-coverage/registry_coverage.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -x diff --git a/hack/install.sh b/hack/install.sh index 02d812f388..65e34f9659 100755 --- a/hack/install.sh +++ b/hack/install.sh @@ -37,8 +37,10 @@ if command_exists docker || command_exists lxc-docker; then ( set -x; sleep 20 ) fi +user="$(id -un 2>/dev/null || true)" + sh_c='sh -c' -if [ "$(whoami 
2>/dev/null || true)" != 'root' ]; then +if [ "$user" != 'root' ]; then if command_exists sudo; then sh_c='sudo sh -c' elif command_exists su; then @@ -124,6 +126,16 @@ case "$lsb_dist" in $sh_c 'docker run busybox echo "Docker has been successfully installed!"' ) || true fi + your_user=your-user + [ "$user" != 'root' ] && your_user="$user" + echo + echo 'If you would like to use Docker as a non-root user, you should now consider' + echo 'adding your user to the "docker" group with something like:' + echo + echo ' sudo usermod -aG docker' $your_user + echo + echo 'Remember that you will have to log out and back in for this to take effect!' + echo exit 0 ;; diff --git a/hack/make.sh b/hack/make.sh index fe0f9c175f..ef13c1a283 100755 --- a/hack/make.sh +++ b/hack/make.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e # This script builds various binary artifacts from a checkout of the docker @@ -25,12 +25,18 @@ set -o pipefail # We're a nice, sexy, little shell script, and people might try to run us; # but really, they shouldn't. We want to be in a container! -RESOLVCONF=$(readlink --canonicalize /etc/resolv.conf) -grep -q "$RESOLVCONF" /proc/mounts || { - echo >&2 "# WARNING! I don't seem to be running in a docker container." - echo >&2 "# The result of this command might be an incorrect build, and will not be officially supported." - echo >&2 "# Try this: 'make all'" -} +if [ "$(pwd)" != '/go/src/github.com/dotcloud/docker' ] || [ -z "$DOCKER_CROSSPLATFORMS" ]; then + { + echo "# WARNING! I don't seem to be running in the Docker container." + echo "# The result of this command might be an incorrect build, and will not be" + echo "# officially supported." 
+ echo "#" + echo "# Try this instead: make all" + echo "#" + } >&2 +fi + +echo # List of bundles to create when no argument is passed DEFAULT_BUNDLES=( diff --git a/hack/make/ubuntu b/hack/make/ubuntu index f15608e920..1d309d2b5c 100644 --- a/hack/make/ubuntu +++ b/hack/make/ubuntu @@ -24,6 +24,10 @@ PACKAGE_LICENSE="Apache-2.0" bundle_ubuntu() { DIR=$DEST/build + # Include our udev rules + mkdir -p $DIR/etc/udev/rules.d + cp contrib/udev/80-docker.rules $DIR/etc/udev/rules.d/ + # Include our init scripts mkdir -p $DIR/etc cp -R contrib/init/upstart $DIR/etc/init @@ -112,6 +116,7 @@ EOF --depends aufs-tools \ --depends iptables \ --deb-recommends ca-certificates \ + --deb-recommends xz-utils \ --description "$PACKAGE_DESCRIPTION" \ --maintainer "$PACKAGE_MAINTAINER" \ --conflicts lxc-docker-virtual-package \ @@ -121,13 +126,13 @@ EOF --replaces lxc-docker-virtual-package \ --url "$PACKAGE_URL" \ --license "$PACKAGE_LICENSE" \ + --config-files /etc/udev/rules.d/80-docker.rules \ --config-files /etc/init/docker.conf \ --config-files /etc/init.d/docker \ --config-files /etc/default/docker \ - --deb-compression xz \ + --deb-compression gz \ -t deb . - mkdir empty - fpm -s dir -C empty \ + fpm -s empty \ --name lxc-docker --version $PKGVERSION \ --architecture "$PACKAGE_ARCHITECTURE" \ --depends lxc-docker-$VERSION \ @@ -135,8 +140,8 @@ EOF --maintainer "$PACKAGE_MAINTAINER" \ --url "$PACKAGE_URL" \ --license "$PACKAGE_LICENSE" \ - --deb-compression xz \ - -t deb . 
+ --deb-compression gz \ + -t deb ) } diff --git a/hack/release.sh b/hack/release.sh index 8256faa2dc..50913dd395 100755 --- a/hack/release.sh +++ b/hack/release.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e # This script looks for bundles built by make.sh, and releases them on a @@ -151,7 +151,8 @@ release_build() { S3ARCH=i386 ;; arm) - # GOARCH is fine + S3ARCH=armel + # someday, we might potentially support mutliple GOARM values, in which case we might get armhf here too ;; *) echo >&2 "error: can't convert $S3ARCH to an appropriate value for 'uname -m'" @@ -295,7 +296,7 @@ EOF # Upload the index script release_index() { - sed "s,https://get.docker.io/,$(s3_url)/," hack/install.sh | write_to_s3 s3://$BUCKET/index + sed "s,url='https://get.docker.io/',url='$(s3_url)/'," hack/install.sh | write_to_s3 s3://$BUCKET/index } release_test() { diff --git a/hack/stats.sh b/hack/stats.sh index 2053e583a2..985a77f22d 100755 --- a/hack/stats.sh +++ b/hack/stats.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash ## Run this script from the root of the docker repository ## to query project stats useful to the maintainers. diff --git a/hack/travis/dco.py b/hack/travis/dco.py index d80d528f9a..f873940815 100755 --- a/hack/travis/dco.py +++ b/hack/travis/dco.py @@ -5,7 +5,7 @@ import yaml from env import commit_range -commit_format = '-%n hash: "%h"%n author: %aN <%aE>%n message: |%n%w(0,2,2)%B' +commit_format = '-%n hash: "%h"%n author: %aN <%aE>%n message: |%n%w(0,2,2).%B' gitlog = subprocess.check_output([ 'git', 'log', '--reverse', @@ -24,6 +24,11 @@ p = re.compile(r'^{0} ([^<]+) <([^<>@]+@[^<>]+)> \(github: (\S+)\)$'.format(re.e failed_commits = 0 for commit in commits: + commit['message'] = commit['message'][1:] + # trim off our '.' 
that exists just to prevent fun YAML parsing issues + # see https://github.com/dotcloud/docker/pull/3836#issuecomment-33723094 + # and https://travis-ci.org/dotcloud/docker/builds/17926783 + commit['stat'] = subprocess.check_output([ 'git', 'log', '--format=format:', '--max-count=1', '--name-status', commit['hash'], '--', diff --git a/hack/travis/env.py b/hack/travis/env.py index 86d90f1567..9830b8df34 100644 --- a/hack/travis/env.py +++ b/hack/travis/env.py @@ -6,7 +6,7 @@ if 'TRAVIS' not in os.environ: exit(127) if os.environ['TRAVIS_PULL_REQUEST'] != 'false': - commit_range = [os.environ['TRAVIS_BRANCH'], 'FETCH_HEAD'] + commit_range = ['upstream/' + os.environ['TRAVIS_BRANCH'], 'FETCH_HEAD'] else: try: subprocess.check_call([ diff --git a/hack/travis/gofmt.py b/hack/travis/gofmt.py index 1bb062e2f6..dc724bc90e 100755 --- a/hack/travis/gofmt.py +++ b/hack/travis/gofmt.py @@ -11,6 +11,9 @@ files = subprocess.check_output([ exit_status = 0 for filename in files.split('\n'): + if filename.startswith('vendor/'): + continue # we can't be changing our upstream vendors for gofmt, so don't even check them + if filename.endswith('.go'): try: out = subprocess.check_output(['gofmt', '-s', '-l', filename]) diff --git a/hack/vendor.sh b/hack/vendor.sh index f76b30bba1..d3e7ea9f43 100755 --- a/hack/vendor.sh +++ b/hack/vendor.sh @@ -1,54 +1,52 @@ -#!/bin/bash +#!/usr/bin/env bash +set -e + +cd "$(dirname "$BASH_SOURCE")/.." # Downloads dependencies into vendor/ directory -if [[ ! -d vendor ]]; then - mkdir vendor -fi -vendor_dir=${PWD}/vendor +mkdir -p vendor +cd vendor -rm_pkg_dir () { - PKG=$1 - REV=$2 - ( - set -e - cd $vendor_dir - if [[ -d src/$PKG ]]; then - echo "src/$PKG already exists. Removing." 
- rm -fr src/$PKG - fi - ) +clone() { + vcs=$1 + pkg=$2 + rev=$3 + + pkg_url=https://$pkg + target_dir=src/$pkg + + echo -n "$pkg @ $rev: " + + if [ -d $target_dir ]; then + echo -n 'rm old, ' + rm -fr $target_dir + fi + + echo -n 'clone, ' + case $vcs in + git) + git clone --quiet --no-checkout $pkg_url $target_dir + ( cd $target_dir && git reset --quiet --hard $rev ) + ;; + hg) + hg clone --quiet --updaterev $rev $pkg_url $target_dir + ;; + esac + + echo -n 'rm VCS, ' + ( cd $target_dir && rm -rf .{git,hg} ) + + echo done } -git_clone () { - PKG=$1 - REV=$2 - ( - set -e - rm_pkg_dir $PKG $REV - cd $vendor_dir && git clone http://$PKG src/$PKG - cd src/$PKG && git checkout -f $REV && rm -fr .git - ) -} +clone git github.com/kr/pty 3b1f6487b -hg_clone () { - PKG=$1 - REV=$2 - ( - set -e - rm_pkg_dir $PKG $REV - cd $vendor_dir && hg clone http://$PKG src/$PKG - cd src/$PKG && hg checkout -r $REV && rm -fr .hg - ) -} +clone git github.com/gorilla/context 708054d61e5 -git_clone github.com/kr/pty 3b1f6487b +clone git github.com/gorilla/mux 9b36453141c -git_clone github.com/gorilla/context/ 708054d61e5 +clone git github.com/syndtr/gocapability 3454319be2 -git_clone github.com/gorilla/mux/ 9b36453141c +clone hg code.google.com/p/go.net 84a4013f96e0 -git_clone github.com/syndtr/gocapability 3454319be2 - -hg_clone code.google.com/p/go.net 84a4013f96e0 - -hg_clone code.google.com/p/gosqlite 74691fb6f837 +clone hg code.google.com/p/gosqlite 74691fb6f837 diff --git a/image.go b/image.go index f062910ef8..dbd2173597 100644 --- a/image.go +++ b/image.go @@ -104,6 +104,7 @@ func StoreImage(img *Image, jsonData []byte, layerData archive.Archive, root, la if err != nil { return err } + defer driver.Put(img.Parent) changes, err := archive.ChangesDirs(layer, parent) if err != nil { return err @@ -147,7 +148,7 @@ func jsonPath(root string) string { } // TarLayer returns a tar archive of the image's filesystem layer. 
-func (img *Image) TarLayer() (archive.Archive, error) { +func (img *Image) TarLayer() (arch archive.Archive, err error) { if img.graph == nil { return nil, fmt.Errorf("Can't load storage driver for unregistered image %s", img.ID) } @@ -160,19 +161,35 @@ func (img *Image) TarLayer() (archive.Archive, error) { if err != nil { return nil, err } + + defer func() { + if err != nil { + driver.Put(img.ID) + } + }() + if img.Parent == "" { - return archive.Tar(imgFs, archive.Uncompressed) - } else { - parentFs, err := driver.Get(img.Parent) + archive, err := archive.Tar(imgFs, archive.Uncompressed) if err != nil { return nil, err } - changes, err := archive.ChangesDirs(imgFs, parentFs) - if err != nil { - return nil, err - } - return archive.ExportChanges(imgFs, changes) + return EofReader(archive, func() { driver.Put(img.ID) }), nil } + + parentFs, err := driver.Get(img.Parent) + if err != nil { + return nil, err + } + defer driver.Put(img.Parent) + changes, err := archive.ChangesDirs(imgFs, parentFs) + if err != nil { + return nil, err + } + archive, err := archive.ExportChanges(imgFs, changes) + if err != nil { + return nil, err + } + return EofReader(archive, func() { driver.Put(img.ID) }), nil } func ValidateID(id string) error { @@ -186,12 +203,20 @@ func ValidateID(id string) error { } func GenerateID() string { - id := make([]byte, 32) - _, err := io.ReadFull(rand.Reader, id) - if err != nil { - panic(err) // This shouldn't happen + for { + id := make([]byte, 32) + if _, err := io.ReadFull(rand.Reader, id); err != nil { + panic(err) // This shouldn't happen + } + value := hex.EncodeToString(id) + // if we try to parse the truncated for as an int and we don't have + // an error then the value is all numberic and causes issues when + // used as a hostname. 
ref #3869 + if _, err := strconv.Atoi(utils.TruncateID(value)); err == nil { + continue + } + return value } - return hex.EncodeToString(id) } // Image includes convenience proxy functions to its graph diff --git a/integration/api_test.go b/integration/api_test.go index ff42afac5a..82de56a8ba 100644 --- a/integration/api_test.go +++ b/integration/api_test.go @@ -7,6 +7,7 @@ import ( "encoding/json" "fmt" "github.com/dotcloud/docker" + "github.com/dotcloud/docker/api" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/utils" "io" @@ -21,7 +22,6 @@ import ( func TestGetVersion(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) var err error r := httptest.NewRecorder() @@ -31,7 +31,7 @@ func TestGetVersion(t *testing.T) { t.Fatal(err) } // FIXME getting the version should require an actual running Server - if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -58,20 +58,22 @@ func TestGetVersion(t *testing.T) { func TestGetInfo(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) - initialImages, err := srv.Images(false, "") + job := eng.Job("images") + initialImages, err := job.Stdout.AddListTable() if err != nil { t.Fatal(err) } - + if err := job.Run(); err != nil { + t.Fatal(err) + } req, err := http.NewRequest("GET", "/info", nil) if err != nil { t.Fatal(err) } r := httptest.NewRecorder() - if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -85,8 +87,8 @@ func TestGetInfo(t *testing.T) { t.Fatal(err) } out.Close() - if images := i.GetInt("Images"); images != len(initialImages) { - t.Errorf("Expected images: %d, %d found", len(initialImages), images) + 
if images := i.GetInt("Images"); images != initialImages.Len() { + t.Errorf("Expected images: %d, %d found", initialImages.Len(), images) } expected := "application/json" if result := r.HeaderMap.Get("Content-Type"); result != expected { @@ -119,7 +121,7 @@ func TestGetEvents(t *testing.T) { r := httptest.NewRecorder() setTimeout(t, "", 500*time.Millisecond, func() { - if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -143,14 +145,15 @@ func TestGetEvents(t *testing.T) { func TestGetImagesJSON(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) - // all=0 - - initialImages, err := srv.Images(false, "") + job := eng.Job("images") + initialImages, err := job.Stdout.AddListTable() if err != nil { t.Fatal(err) } + if err := job.Run(); err != nil { + t.Fatal(err) + } req, err := http.NewRequest("GET", "/images/json?all=0", nil) if err != nil { @@ -159,23 +162,23 @@ func TestGetImagesJSON(t *testing.T) { r := httptest.NewRecorder() - if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) - images := []docker.APIImages{} - if err := json.Unmarshal(r.Body.Bytes(), &images); err != nil { + images := engine.NewTable("Created", 0) + if _, err := images.ReadListFrom(r.Body.Bytes()); err != nil { t.Fatal(err) } - if len(images) != len(initialImages) { - t.Errorf("Expected %d image, %d found", len(initialImages), len(images)) + if images.Len() != initialImages.Len() { + t.Errorf("Expected %d image, %d found", initialImages.Len(), images.Len()) } found := false - for _, img := range images { - if strings.Contains(img.RepoTags[0], unitTestImageName) { + for _, img := range images.Data { + if strings.Contains(img.GetList("RepoTags")[0], 
unitTestImageName) { found = true break } @@ -188,32 +191,29 @@ func TestGetImagesJSON(t *testing.T) { // all=1 - initialImages, err = srv.Images(true, "") - if err != nil { - t.Fatal(err) - } + initialImages = getAllImages(eng, t) req2, err := http.NewRequest("GET", "/images/json?all=true", nil) if err != nil { t.Fatal(err) } - if err := docker.ServeRequest(srv, docker.APIVERSION, r2, req2); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r2, req2); err != nil { t.Fatal(err) } assertHttpNotError(r2, t) - images2 := []docker.APIImages{} - if err := json.Unmarshal(r2.Body.Bytes(), &images2); err != nil { + images2 := engine.NewTable("Id", 0) + if _, err := images2.ReadListFrom(r2.Body.Bytes()); err != nil { t.Fatal(err) } - if len(images2) != len(initialImages) { - t.Errorf("Expected %d image, %d found", len(initialImages), len(images2)) + if images2.Len() != initialImages.Len() { + t.Errorf("Expected %d image, %d found", initialImages.Len(), images2.Len()) } found = false - for _, img := range images2 { - if img.ID == unitTestImageID { + for _, img := range images2.Data { + if img.Get("Id") == unitTestImageID { found = true break } @@ -230,41 +230,24 @@ func TestGetImagesJSON(t *testing.T) { t.Fatal(err) } - if err := docker.ServeRequest(srv, docker.APIVERSION, r3, req3); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r3, req3); err != nil { t.Fatal(err) } assertHttpNotError(r3, t) - images3 := []docker.APIImages{} - if err := json.Unmarshal(r3.Body.Bytes(), &images3); err != nil { + images3 := engine.NewTable("Id", 0) + if _, err := images3.ReadListFrom(r3.Body.Bytes()); err != nil { t.Fatal(err) } - if len(images3) != 0 { - t.Errorf("Expected 0 image, %d found", len(images3)) - } - - r4 := httptest.NewRecorder() - - // all=foobar - req4, err := http.NewRequest("GET", "/images/json?all=foobar", nil) - if err != nil { - t.Fatal(err) - } - - if err := docker.ServeRequest(srv, docker.APIVERSION, r4, req4); err != nil { - t.Fatal(err) - 
} - // Don't assert against HTTP error since we expect an error - if r4.Code != http.StatusBadRequest { - t.Fatalf("%d Bad Request expected, received %d\n", http.StatusBadRequest, r4.Code) + if images3.Len() != 0 { + t.Errorf("Expected 0 image, %d found", images3.Len()) } } func TestGetImagesHistory(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) r := httptest.NewRecorder() @@ -272,24 +255,23 @@ func TestGetImagesHistory(t *testing.T) { if err != nil { t.Fatal(err) } - if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) - history := []docker.APIHistory{} - if err := json.Unmarshal(r.Body.Bytes(), &history); err != nil { + outs := engine.NewTable("Created", 0) + if _, err := outs.ReadListFrom(r.Body.Bytes()); err != nil { t.Fatal(err) } - if len(history) != 1 { - t.Errorf("Expected 1 line, %d found", len(history)) + if len(outs.Data) != 1 { + t.Errorf("Expected 1 line, %d found", len(outs.Data)) } } func TestGetImagesByName(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) req, err := http.NewRequest("GET", "/images/"+unitTestImageName+"/json", nil) if err != nil { @@ -297,7 +279,7 @@ func TestGetImagesByName(t *testing.T) { } r := httptest.NewRecorder() - if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -314,9 +296,17 @@ func TestGetImagesByName(t *testing.T) { func TestGetContainersJSON(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) - beginLen := len(srv.Containers(true, false, -1, "", "")) + job := eng.Job("containers") + job.SetenvBool("all", true) + outs, err := job.Stdout.AddTable() + 
if err != nil { + t.Fatal(err) + } + if err := job.Run(); err != nil { + t.Fatal(err) + } + beginLen := len(outs.Data) containerID := createTestContainer(eng, &docker.Config{ Image: unitTestImageID, @@ -333,26 +323,25 @@ func TestGetContainersJSON(t *testing.T) { } r := httptest.NewRecorder() - if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) - containers := []docker.APIContainers{} - if err := json.Unmarshal(r.Body.Bytes(), &containers); err != nil { + containers := engine.NewTable("", 0) + if _, err := containers.ReadListFrom(r.Body.Bytes()); err != nil { t.Fatal(err) } - if len(containers) != beginLen+1 { - t.Fatalf("Expected %d container, %d found (started with: %d)", beginLen+1, len(containers), beginLen) + if len(containers.Data) != beginLen+1 { + t.Fatalf("Expected %d container, %d found (started with: %d)", beginLen+1, len(containers.Data), beginLen) } - if containers[0].ID != containerID { - t.Fatalf("Container ID mismatch. Expected: %s, received: %s\n", containerID, containers[0].ID) + if id := containers.Data[0].Get("Id"); id != containerID { + t.Fatalf("Container ID mismatch. 
Expected: %s, received: %s\n", containerID, id) } } func TestGetContainersExport(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) // Create a container and remove a file containerID := createTestContainer(eng, @@ -370,7 +359,7 @@ func TestGetContainersExport(t *testing.T) { if err != nil { t.Fatal(err) } - if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -388,7 +377,7 @@ func TestGetContainersExport(t *testing.T) { } t.Fatal(err) } - if h.Name == "./test" { + if h.Name == "test" { found = true break } @@ -401,7 +390,6 @@ func TestGetContainersExport(t *testing.T) { func TestGetContainersChanges(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) // Create a container and remove a file containerID := createTestContainer(eng, @@ -418,19 +406,19 @@ func TestGetContainersChanges(t *testing.T) { if err != nil { t.Fatal(err) } - if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) - changes := []docker.Change{} - if err := json.Unmarshal(r.Body.Bytes(), &changes); err != nil { + outs := engine.NewTable("", 0) + if _, err := outs.ReadListFrom(r.Body.Bytes()); err != nil { t.Fatal(err) } // Check the changelog success := false - for _, elem := range changes { - if elem.Path == "/etc/passwd" && elem.Kind == 2 { + for _, elem := range outs.Data { + if elem.Get("Path") == "/etc/passwd" && elem.GetInt("Kind") == 2 { success = true } } @@ -442,7 +430,6 @@ func TestGetContainersChanges(t *testing.T) { func TestGetContainersTop(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) containerID := 
createTestContainer(eng, &docker.Config{ @@ -486,37 +473,39 @@ func TestGetContainersTop(t *testing.T) { if err != nil { t.Fatal(err) } - if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) - procs := docker.APITop{} - if err := json.Unmarshal(r.Body.Bytes(), &procs); err != nil { + var procs engine.Env + if err := procs.Decode(r.Body); err != nil { t.Fatal(err) } - if len(procs.Titles) != 11 { - t.Fatalf("Expected 11 titles, found %d.", len(procs.Titles)) + if len(procs.GetList("Titles")) != 11 { + t.Fatalf("Expected 11 titles, found %d.", len(procs.GetList("Titles"))) } - if procs.Titles[0] != "USER" || procs.Titles[10] != "COMMAND" { - t.Fatalf("Expected Titles[0] to be USER and Titles[10] to be COMMAND, found %s and %s.", procs.Titles[0], procs.Titles[10]) + if procs.GetList("Titles")[0] != "USER" || procs.GetList("Titles")[10] != "COMMAND" { + t.Fatalf("Expected Titles[0] to be USER and Titles[10] to be COMMAND, found %s and %s.", procs.GetList("Titles")[0], procs.GetList("Titles")[10]) } - - if len(procs.Processes) != 2 { - t.Fatalf("Expected 2 processes, found %d.", len(procs.Processes)) + processes := [][]string{} + if err := procs.GetJson("Processes", &processes); err != nil { + t.Fatal(err) } - if procs.Processes[0][10] != "/bin/sh -c cat" { - t.Fatalf("Expected `/bin/sh -c cat`, found %s.", procs.Processes[0][10]) + if len(processes) != 2 { + t.Fatalf("Expected 2 processes, found %d.", len(processes)) } - if procs.Processes[1][10] != "/bin/sh -c cat" { - t.Fatalf("Expected `/bin/sh -c cat`, found %s.", procs.Processes[1][10]) + if processes[0][10] != "/bin/sh -c cat" { + t.Fatalf("Expected `/bin/sh -c cat`, found %s.", processes[0][10]) + } + if processes[1][10] != "/bin/sh -c cat" { + t.Fatalf("Expected `/bin/sh -c cat`, found %s.", processes[1][10]) } } func TestGetContainersByName(t *testing.T) { eng := 
NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) // Create a container and remove a file containerID := createTestContainer(eng, @@ -532,7 +521,7 @@ func TestGetContainersByName(t *testing.T) { if err != nil { t.Fatal(err) } - if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -567,7 +556,7 @@ func TestPostCommit(t *testing.T) { } r := httptest.NewRecorder() - if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -575,11 +564,11 @@ func TestPostCommit(t *testing.T) { t.Fatalf("%d Created expected, received %d\n", http.StatusCreated, r.Code) } - apiID := &docker.APIID{} - if err := json.Unmarshal(r.Body.Bytes(), apiID); err != nil { + var env engine.Env + if err := env.Decode(r.Body); err != nil { t.Fatal(err) } - if _, err := srv.ImageInspect(apiID.ID); err != nil { + if _, err := srv.ImageInspect(env.Get("Id")); err != nil { t.Fatalf("The image has not been committed") } } @@ -587,7 +576,6 @@ func TestPostCommit(t *testing.T) { func TestPostContainersCreate(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) configJSON, err := json.Marshal(&docker.Config{ Image: unitTestImageID, @@ -604,7 +592,7 @@ func TestPostContainersCreate(t *testing.T) { } r := httptest.NewRecorder() - if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -612,11 +600,11 @@ func TestPostContainersCreate(t *testing.T) { t.Fatalf("%d Created expected, received %d\n", http.StatusCreated, r.Code) } - apiRun := &docker.APIRun{} - if err := json.Unmarshal(r.Body.Bytes(), apiRun); err 
!= nil { + var apiRun engine.Env + if err := apiRun.Decode(r.Body); err != nil { t.Fatal(err) } - containerID := apiRun.ID + containerID := apiRun.Get("Id") containerAssertExists(eng, containerID, t) containerRun(eng, containerID, t) @@ -629,7 +617,6 @@ func TestPostContainersCreate(t *testing.T) { func TestPostContainersKill(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) containerID := createTestContainer(eng, &docker.Config{ @@ -654,7 +641,7 @@ func TestPostContainersKill(t *testing.T) { if err != nil { t.Fatal(err) } - if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -669,7 +656,6 @@ func TestPostContainersKill(t *testing.T) { func TestPostContainersRestart(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) containerID := createTestContainer(eng, &docker.Config{ @@ -694,7 +680,7 @@ func TestPostContainersRestart(t *testing.T) { t.Fatal(err) } r := httptest.NewRecorder() - if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -715,7 +701,6 @@ func TestPostContainersRestart(t *testing.T) { func TestPostContainersStart(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) containerID := createTestContainer( eng, @@ -737,7 +722,7 @@ func TestPostContainersStart(t *testing.T) { req.Header.Set("Content-Type", "application/json") r := httptest.NewRecorder() - if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -754,7 +739,7 @@ func TestPostContainersStart(t 
*testing.T) { } r = httptest.NewRecorder() - if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } // Starting an already started container should return an error @@ -769,7 +754,6 @@ func TestPostContainersStart(t *testing.T) { func TestRunErrorBindMountRootSource(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) containerID := createTestContainer( eng, @@ -793,7 +777,7 @@ func TestRunErrorBindMountRootSource(t *testing.T) { req.Header.Set("Content-Type", "application/json") r := httptest.NewRecorder() - if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } if r.Code != http.StatusInternalServerError { @@ -805,7 +789,6 @@ func TestRunErrorBindMountRootSource(t *testing.T) { func TestPostContainersStop(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) containerID := createTestContainer(eng, &docker.Config{ @@ -831,7 +814,7 @@ func TestPostContainersStop(t *testing.T) { t.Fatal(err) } r := httptest.NewRecorder() - if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -846,7 +829,6 @@ func TestPostContainersStop(t *testing.T) { func TestPostContainersWait(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) containerID := createTestContainer(eng, &docker.Config{ @@ -864,16 +846,16 @@ func TestPostContainersWait(t *testing.T) { if err != nil { t.Fatal(err) } - if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } 
assertHttpNotError(r, t) - apiWait := &docker.APIWait{} - if err := json.Unmarshal(r.Body.Bytes(), apiWait); err != nil { + var apiWait engine.Env + if err := apiWait.Decode(r.Body); err != nil { t.Fatal(err) } - if apiWait.StatusCode != 0 { - t.Fatalf("Non zero exit code for sleep: %d\n", apiWait.StatusCode) + if apiWait.GetInt("StatusCode") != 0 { + t.Fatalf("Non zero exit code for sleep: %d\n", apiWait.GetInt("StatusCode")) } }) @@ -885,7 +867,6 @@ func TestPostContainersWait(t *testing.T) { func TestPostContainersAttach(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) containerID := createTestContainer(eng, &docker.Config{ @@ -923,7 +904,7 @@ func TestPostContainersAttach(t *testing.T) { t.Fatal(err) } - if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r.ResponseRecorder, t) @@ -964,7 +945,6 @@ func TestPostContainersAttach(t *testing.T) { func TestPostContainersAttachStderr(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) containerID := createTestContainer(eng, &docker.Config{ @@ -1002,7 +982,7 @@ func TestPostContainersAttachStderr(t *testing.T) { t.Fatal(err) } - if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r.ResponseRecorder, t) @@ -1046,7 +1026,6 @@ func TestPostContainersAttachStderr(t *testing.T) { func TestDeleteContainers(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) containerID := createTestContainer(eng, &docker.Config{ @@ -1060,7 +1039,7 @@ func TestDeleteContainers(t *testing.T) { t.Fatal(err) } r := httptest.NewRecorder() - if err := docker.ServeRequest(srv, docker.APIVERSION, r, 
req); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -1073,13 +1052,13 @@ func TestDeleteContainers(t *testing.T) { func TestOptionsRoute(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) + r := httptest.NewRecorder() req, err := http.NewRequest("OPTIONS", "/", nil) if err != nil { t.Fatal(err) } - if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -1091,14 +1070,14 @@ func TestOptionsRoute(t *testing.T) { func TestGetEnabledCors(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) + r := httptest.NewRecorder() req, err := http.NewRequest("GET", "/version", nil) if err != nil { t.Fatal(err) } - if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -1124,23 +1103,17 @@ func TestGetEnabledCors(t *testing.T) { func TestDeleteImages(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) - initialImages, err := srv.Images(false, "") - if err != nil { - t.Fatal(err) - } + initialImages := getImages(eng, t, true, "") if err := eng.Job("tag", unitTestImageName, "test", "test").Run(); err != nil { t.Fatal(err) } - images, err := srv.Images(false, "") - if err != nil { - t.Fatal(err) - } - if len(images[0].RepoTags) != len(initialImages[0].RepoTags)+1 { - t.Errorf("Expected %d images, %d found", len(initialImages)+1, len(images)) + images := getImages(eng, t, true, "") + + if len(images.Data[0].GetList("RepoTags")) != len(initialImages.Data[0].GetList("RepoTags"))+1 { + t.Errorf("Expected %d images, %d found", 
len(initialImages.Data[0].GetList("RepoTags"))+1, len(images.Data[0].GetList("RepoTags"))) } req, err := http.NewRequest("DELETE", "/images/"+unitTestImageID, nil) @@ -1149,7 +1122,7 @@ func TestDeleteImages(t *testing.T) { } r := httptest.NewRecorder() - if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } if r.Code != http.StatusConflict { @@ -1162,7 +1135,7 @@ func TestDeleteImages(t *testing.T) { } r2 := httptest.NewRecorder() - if err := docker.ServeRequest(srv, docker.APIVERSION, r2, req2); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r2, req2); err != nil { t.Fatal(err) } assertHttpNotError(r2, t) @@ -1170,27 +1143,23 @@ func TestDeleteImages(t *testing.T) { t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) } - var outs []docker.APIRmi - if err := json.Unmarshal(r2.Body.Bytes(), &outs); err != nil { + outs := engine.NewTable("Created", 0) + if _, err := outs.ReadListFrom(r2.Body.Bytes()); err != nil { t.Fatal(err) } - if len(outs) != 1 { - t.Fatalf("Expected %d event (untagged), got %d", 1, len(outs)) - } - images, err = srv.Images(false, "") - if err != nil { - t.Fatal(err) + if len(outs.Data) != 1 { + t.Fatalf("Expected %d event (untagged), got %d", 1, len(outs.Data)) } + images = getImages(eng, t, false, "") - if len(images[0].RepoTags) != len(initialImages[0].RepoTags) { - t.Errorf("Expected %d image, %d found", len(initialImages), len(images)) + if images.Len() != initialImages.Len() { + t.Errorf("Expected %d image, %d found", initialImages.Len(), images.Len()) } } func TestPostContainersCopy(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) // Create a container and remove a file containerID := createTestContainer(eng, @@ -1203,19 +1172,22 @@ func TestPostContainersCopy(t *testing.T) { containerRun(eng, containerID, t) r := httptest.NewRecorder() 
- copyData := docker.APICopy{HostPath: ".", Resource: "/test.txt"} - jsonData, err := json.Marshal(copyData) - if err != nil { + var copyData engine.Env + copyData.Set("Resource", "/test.txt") + copyData.Set("HostPath", ".") + + jsonData := bytes.NewBuffer(nil) + if err := copyData.Encode(jsonData); err != nil { t.Fatal(err) } - req, err := http.NewRequest("POST", "/containers/"+containerID+"/copy", bytes.NewReader(jsonData)) + req, err := http.NewRequest("POST", "/containers/"+containerID+"/copy", jsonData) if err != nil { t.Fatal(err) } req.Header.Add("Content-Type", "application/json") - if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) diff --git a/integration/auth_test.go b/integration/auth_test.go index 07559c01cf..c5bdabace2 100644 --- a/integration/auth_test.go +++ b/integration/auth_test.go @@ -3,6 +3,7 @@ package docker import ( "crypto/rand" "encoding/hex" + "fmt" "github.com/dotcloud/docker/auth" "os" "strings" @@ -17,7 +18,12 @@ import ( func TestLogin(t *testing.T) { os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com") defer os.Setenv("DOCKER_INDEX_URL", "") - authConfig := &auth.AuthConfig{Username: "unittester", Password: "surlautrerivejetattendrai", Email: "noise+unittester@dotcloud.com"} + authConfig := &auth.AuthConfig{ + Username: "unittester", + Password: "surlautrerivejetattendrai", + Email: "noise+unittester@docker.com", + ServerAddress: "https://indexstaging-docker.dotcloud.com/v1/", + } status, err := auth.Login(authConfig, nil) if err != nil { t.Fatal(err) @@ -28,8 +34,6 @@ func TestLogin(t *testing.T) { } func TestCreateAccount(t *testing.T) { - os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com") - defer os.Setenv("DOCKER_INDEX_URL", "") tokenBuffer := make([]byte, 16) _, err := rand.Read(tokenBuffer) if err != nil { @@ -37,13 +41,20 @@ func TestCreateAccount(t 
*testing.T) { } token := hex.EncodeToString(tokenBuffer)[:12] username := "ut" + token - authConfig := &auth.AuthConfig{Username: username, Password: "test42", Email: "docker-ut+" + token + "@example.com"} + authConfig := &auth.AuthConfig{ + Username: username, + Password: "test42", + Email: fmt.Sprintf("docker-ut+%s@example.com", token), + ServerAddress: "https://indexstaging-docker.dotcloud.com/v1/", + } status, err := auth.Login(authConfig, nil) if err != nil { t.Fatal(err) } - expectedStatus := "Account created. Please use the confirmation link we sent" + - " to your e-mail to activate it." + expectedStatus := fmt.Sprintf( + "Account created. Please see the documentation of the registry %s for instructions how to activate it.", + authConfig.ServerAddress, + ) if status != expectedStatus { t.Fatalf("Expected status: \"%s\", found \"%s\" instead.", expectedStatus, status) } diff --git a/integration/buildfile_test.go b/integration/buildfile_test.go index 5dd403274e..6a7da70558 100644 --- a/integration/buildfile_test.go +++ b/integration/buildfile_test.go @@ -223,6 +223,31 @@ run [ "$(cat /bar/withfile)" = "test2" ] }, nil, }, + + // JSON! + { + ` +FROM {IMAGE} +RUN ["/bin/echo","hello","world"] +CMD ["/bin/true"] +ENTRYPOINT ["/bin/echo","your command -->"] +`, + nil, + nil, + }, + { + ` +FROM {IMAGE} +ADD test /test +RUN ["chmod","+x","/test"] +RUN ["/test"] +RUN [ "$(cat /testfile)" = 'test!' ] +`, + [][2]string{ + {"test", "#!/bin/sh\necho 'test!' 
> /testfile"}, + }, + nil, + }, } // FIXME: test building with 2 successive overlapping ADD commands @@ -296,7 +321,7 @@ func buildImage(context testContextTemplate, t *testing.T, eng *engine.Engine, u } dockerfile := constructDockerfile(context.dockerfile, ip, port) - buildfile := docker.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, useCache, false, ioutil.Discard, utils.NewStreamFormatter(false), nil) + buildfile := docker.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, useCache, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) id, err := buildfile.Build(mkTestContext(dockerfile, context.files, t)) if err != nil { return nil, err @@ -700,7 +725,7 @@ func TestForbiddenContextPath(t *testing.T) { } dockerfile := constructDockerfile(context.dockerfile, ip, port) - buildfile := docker.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false), nil) + buildfile := docker.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) _, err = buildfile.Build(mkTestContext(dockerfile, context.files, t)) if err == nil { @@ -746,7 +771,7 @@ func TestBuildADDFileNotFound(t *testing.T) { } dockerfile := constructDockerfile(context.dockerfile, ip, port) - buildfile := docker.NewBuildFile(mkServerFromEngine(eng, t), ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false), nil) + buildfile := docker.NewBuildFile(mkServerFromEngine(eng, t), ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) _, err = buildfile.Build(mkTestContext(dockerfile, context.files, t)) if err == nil { @@ -822,3 +847,19 @@ func TestBuildFailsDockerfileEmpty(t *testing.T) { t.Fatal("Expected: %v, got: %v", docker.ErrDockerfileEmpty, err) } } + +func TestBuildOnBuildTrigger(t *testing.T) { + _, err := buildImage(testContextTemplate{` + from 
{IMAGE} + onbuild run echo here is the trigger + onbuild run touch foobar + `, + nil, nil, + }, + t, nil, true, + ) + if err != nil { + t.Fatal(err) + } + // FIXME: test that the 'foobar' file was created in the final build. +} diff --git a/integration/commands_test.go b/integration/commands_test.go index 9a2168d966..a0fc4b9523 100644 --- a/integration/commands_test.go +++ b/integration/commands_test.go @@ -12,7 +12,9 @@ import ( "os" "path" "regexp" + "strconv" "strings" + "syscall" "testing" "time" ) @@ -90,18 +92,25 @@ func setTimeout(t *testing.T, msg string, d time.Duration, f func()) { } } +func expectPipe(expected string, r io.Reader) error { + o, err := bufio.NewReader(r).ReadString('\n') + if err != nil { + return err + } + if strings.Trim(o, " \r\n") != expected { + return fmt.Errorf("Unexpected output. Expected [%s], received [%s]", expected, o) + } + return nil +} + func assertPipe(input, output string, r io.Reader, w io.Writer, count int) error { for i := 0; i < count; i++ { if _, err := w.Write([]byte(input)); err != nil { return err } - o, err := bufio.NewReader(r).ReadString('\n') - if err != nil { + if err := expectPipe(output, r); err != nil { return err } - if strings.Trim(o, " \r\n") != output { - return fmt.Errorf("Unexpected output. 
Expected [%s], received [%s]", output, o) - } } return nil } @@ -1021,13 +1030,76 @@ func TestContainerOrphaning(t *testing.T) { buildSomething(template2, imageName) // remove the second image by name - resp, err := srv.ImageDelete(imageName, true) + resp, err := srv.DeleteImage(imageName, true) // see if we deleted the first image (and orphaned the container) - for _, i := range resp { - if img1 == i.Deleted { + for _, i := range resp.Data { + if img1 == i.Get("Deleted") { t.Fatal("Orphaned image with container") } } } + +func TestCmdKill(t *testing.T) { + stdin, stdinPipe := io.Pipe() + stdout, stdoutPipe := io.Pipe() + + cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli2 := docker.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) + + ch := make(chan struct{}) + go func() { + defer close(ch) + cli.CmdRun("-i", "-t", unitTestImageID, "sh", "-c", "trap 'echo SIGUSR1' USR1; trap 'echo SIGUSR2' USR2; echo Ready; while true; do read; done") + }() + + container := waitContainerStart(t, 10*time.Second) + + setTimeout(t, "Read Ready timed out", 3*time.Second, func() { + if err := expectPipe("Ready", stdout); err != nil { + t.Fatal(err) + } + }) + + setTimeout(t, "SIGUSR1 timed out", 2*time.Second, func() { + for i := 0; i < 10; i++ { + if err := cli2.CmdKill("-s", strconv.Itoa(int(syscall.SIGUSR1)), container.ID); err != nil { + t.Fatal(err) + } + if err := expectPipe("SIGUSR1", stdout); err != nil { + t.Fatal(err) + } + } + }) + + setTimeout(t, "SIGUSR2 timed out", 2*time.Second, func() { + for i := 0; i < 10; i++ { + if err := cli2.CmdKill("--signal=USR2", container.ID); err != nil { + t.Fatal(err) + } + if err := expectPipe("SIGUSR2", stdout); err != nil { + t.Fatal(err) + } + } + }) + + time.Sleep(500 * time.Millisecond) + if !container.State.IsRunning() { + t.Fatal("The container should be still running") + } + + setTimeout(t, "Waiting for container 
timedout", 5*time.Second, func() { + if err := cli2.CmdKill(container.ID); err != nil { + t.Fatal(err) + } + + <-ch + if err := cli2.CmdWait(container.ID); err != nil { + t.Fatal(err) + } + }) + + closeWrap(stdin, stdinPipe, stdout, stdoutPipe) +} diff --git a/integration/runtime_test.go b/integration/runtime_test.go index cdd4818934..da95967a30 100644 --- a/integration/runtime_test.go +++ b/integration/runtime_test.go @@ -51,14 +51,17 @@ func cleanup(eng *engine.Engine, t *testing.T) error { container.Kill() runtime.Destroy(container) } - srv := mkServerFromEngine(eng, t) - images, err := srv.Images(true, "") + job := eng.Job("images") + images, err := job.Stdout.AddTable() if err != nil { - return err + t.Fatal(err) } - for _, image := range images { - if image.ID != unitTestImageID { - srv.ImageDelete(image.ID, false) + if err := job.Run(); err != nil { + t.Fatal(err) + } + for _, image := range images.Data { + if image.Get("Id") != unitTestImageID { + eng.Job("image_delete", image.Get("Id")).Run() } } return nil @@ -122,19 +125,22 @@ func setupBaseImage() { if err != nil { log.Fatalf("Can't initialize engine at %s: %s", unitTestStoreBase, err) } - job := eng.Job("initapi") + job := eng.Job("initserver") job.Setenv("Root", unitTestStoreBase) job.SetenvBool("Autorestart", false) job.Setenv("BridgeIface", unitTestNetworkBridge) if err := job.Run(); err != nil { log.Fatalf("Unable to create a runtime for tests: %s", err) } - srv := mkServerFromEngine(eng, log.New(os.Stderr, "", 0)) + job = eng.Job("inspect", unitTestImageName, "image") + img, _ := job.Stdout.AddEnv() // If the unit test is not found, try to download it. 
- if img, err := srv.ImageInspect(unitTestImageName); err != nil || img.ID != unitTestImageID { + if err := job.Run(); err != nil || img.Get("id") != unitTestImageID { // Retrieve the Image - if err := srv.ImagePull(unitTestImageName, "", os.Stdout, utils.NewStreamFormatter(false), nil, nil, true); err != nil { + job = eng.Job("pull", unitTestImageName) + job.Stdout.Add(utils.NopWriteCloser(os.Stdout)) + if err := job.Run(); err != nil { log.Fatalf("Unable to pull the test image: %s", err) } } @@ -158,7 +164,7 @@ func spawnGlobalDaemon() { Host: testDaemonAddr, } job := eng.Job("serveapi", listenURL.String()) - job.SetenvBool("Logging", os.Getenv("DEBUG") != "") + job.SetenvBool("Logging", true) if err := job.Run(); err != nil { log.Fatalf("Unable to spawn the test daemon: %s", err) } @@ -567,7 +573,7 @@ func TestRestore(t *testing.T) { if err != nil { t.Fatal(err) } - job := eng.Job("initapi") + job := eng.Job("initserver") job.Setenv("Root", eng.Root()) job.SetenvBool("Autorestart", false) if err := job.Run(); err != nil { @@ -599,7 +605,7 @@ func TestRestore(t *testing.T) { } func TestReloadContainerLinks(t *testing.T) { - // FIXME: here we don't use NewTestEngine because it calls initapi with Autorestart=false, + // FIXME: here we don't use NewTestEngine because it calls initserver with Autorestart=false, // and we want to set it to true. 
root, err := newTestDirectory(unitTestStoreBase) if err != nil { @@ -609,7 +615,7 @@ func TestReloadContainerLinks(t *testing.T) { if err != nil { t.Fatal(err) } - job := eng.Job("initapi") + job := eng.Job("initserver") job.Setenv("Root", eng.Root()) job.SetenvBool("Autorestart", true) if err := job.Run(); err != nil { @@ -659,7 +665,7 @@ func TestReloadContainerLinks(t *testing.T) { if err != nil { t.Fatal(err) } - job = eng.Job("initapi") + job = eng.Job("initserver") job.Setenv("Root", eng.Root()) job.SetenvBool("Autorestart", false) if err := job.Run(); err != nil { diff --git a/integration/server_test.go b/integration/server_test.go index 2650311c36..45d4930ad7 100644 --- a/integration/server_test.go +++ b/integration/server_test.go @@ -2,10 +2,10 @@ package docker import ( "github.com/dotcloud/docker" - "github.com/dotcloud/docker/utils" - "io/ioutil" + "github.com/dotcloud/docker/engine" "strings" "testing" + "time" ) func TestImageTagImageDelete(t *testing.T) { @@ -14,10 +14,7 @@ func TestImageTagImageDelete(t *testing.T) { srv := mkServerFromEngine(eng, t) - initialImages, err := srv.Images(false, "") - if err != nil { - t.Fatal(err) - } + initialImages := getAllImages(eng, t) if err := eng.Job("tag", unitTestImageName, "utest", "tag1").Run(); err != nil { t.Fatal(err) } @@ -30,58 +27,48 @@ func TestImageTagImageDelete(t *testing.T) { t.Fatal(err) } - images, err := srv.Images(false, "") - if err != nil { + images := getAllImages(eng, t) + + nExpected := len(initialImages.Data[0].GetList("RepoTags")) + 3 + nActual := len(images.Data[0].GetList("RepoTags")) + if nExpected != nActual { + t.Errorf("Expected %d images, %d found", nExpected, nActual) + } + + if _, err := srv.DeleteImage("utest/docker:tag2", true); err != nil { t.Fatal(err) } - if len(images[0].RepoTags) != len(initialImages[0].RepoTags)+3 { - t.Errorf("Expected %d images, %d found", len(initialImages)+3, len(images)) + images = getAllImages(eng, t) + + nExpected = 
len(initialImages.Data[0].GetList("RepoTags")) + 2 + nActual = len(images.Data[0].GetList("RepoTags")) + if nExpected != nActual { + t.Errorf("Expected %d images, %d found", nExpected, nActual) } - if _, err := srv.ImageDelete("utest/docker:tag2", true); err != nil { + if _, err := srv.DeleteImage("utest:5000/docker:tag3", true); err != nil { t.Fatal(err) } - images, err = srv.Images(false, "") - if err != nil { + images = getAllImages(eng, t) + + nExpected = len(initialImages.Data[0].GetList("RepoTags")) + 1 + nActual = len(images.Data[0].GetList("RepoTags")) + + if _, err := srv.DeleteImage("utest:tag1", true); err != nil { t.Fatal(err) } - if len(images[0].RepoTags) != len(initialImages[0].RepoTags)+2 { - t.Errorf("Expected %d images, %d found", len(initialImages)+2, len(images)) - } + images = getAllImages(eng, t) - if _, err := srv.ImageDelete("utest:5000/docker:tag3", true); err != nil { - t.Fatal(err) - } - - images, err = srv.Images(false, "") - if err != nil { - t.Fatal(err) - } - - if len(images[0].RepoTags) != len(initialImages[0].RepoTags)+1 { - t.Errorf("Expected %d images, %d found", len(initialImages)+1, len(images)) - } - - if _, err := srv.ImageDelete("utest:tag1", true); err != nil { - t.Fatal(err) - } - - images, err = srv.Images(false, "") - if err != nil { - t.Fatal(err) - } - - if len(images) != len(initialImages) { - t.Errorf("Expected %d image, %d found", len(initialImages), len(images)) + if images.Len() != initialImages.Len() { + t.Errorf("Expected %d image, %d found", initialImages.Len(), images.Len()) } } func TestCreateRm(t *testing.T) { eng := NewTestEngine(t) - srv := mkServerFromEngine(eng, t) defer mkRuntimeFromEngine(eng, t).Nuke() config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil) @@ -91,23 +78,68 @@ func TestCreateRm(t *testing.T) { id := createTestContainer(eng, config, t) - if c := srv.Containers(true, false, -1, "", ""); len(c) != 1 { - t.Errorf("Expected 1 container, %v found", len(c)) + job := 
eng.Job("containers") + job.SetenvBool("all", true) + outs, err := job.Stdout.AddListTable() + if err != nil { + t.Fatal(err) } - - if err = srv.ContainerDestroy(id, true, false); err != nil { + if err := job.Run(); err != nil { t.Fatal(err) } - if c := srv.Containers(true, false, -1, "", ""); len(c) != 0 { - t.Errorf("Expected 0 container, %v found", len(c)) + if len(outs.Data) != 1 { + t.Errorf("Expected 1 container, %v found", len(outs.Data)) + } + + job = eng.Job("container_delete", id) + job.SetenvBool("removeVolume", true) + if err := job.Run(); err != nil { + t.Fatal(err) + } + + job = eng.Job("containers") + job.SetenvBool("all", true) + outs, err = job.Stdout.AddListTable() + if err != nil { + t.Fatal(err) + } + if err := job.Run(); err != nil { + t.Fatal(err) + } + + if len(outs.Data) != 0 { + t.Errorf("Expected 0 container, %v found", len(outs.Data)) } } +func TestCreateNumberHostname(t *testing.T) { + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + + config, _, _, err := docker.ParseRun([]string{"-h", "web.0", unitTestImageID, "echo test"}, nil) + if err != nil { + t.Fatal(err) + } + + createTestContainer(eng, config, t) +} + +func TestCreateNumberUsername(t *testing.T) { + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + + config, _, _, err := docker.ParseRun([]string{"-u", "1002", unitTestImageID, "echo test"}, nil) + if err != nil { + t.Fatal(err) + } + + createTestContainer(eng, config, t) +} + func TestCreateRmVolumes(t *testing.T) { eng := NewTestEngine(t) - srv := mkServerFromEngine(eng, t) defer mkRuntimeFromEngine(eng, t).Nuke() config, hostConfig, _, err := docker.ParseRun([]string{"-v", "/srv", unitTestImageID, "echo", "test"}, nil) @@ -117,11 +149,21 @@ func TestCreateRmVolumes(t *testing.T) { id := createTestContainer(eng, config, t) - if c := srv.Containers(true, false, -1, "", ""); len(c) != 1 { - t.Errorf("Expected 1 container, %v found", len(c)) + job := eng.Job("containers") + 
job.SetenvBool("all", true) + outs, err := job.Stdout.AddListTable() + if err != nil { + t.Fatal(err) + } + if err := job.Run(); err != nil { + t.Fatal(err) } - job := eng.Job("start", id) + if len(outs.Data) != 1 { + t.Errorf("Expected 1 container, %v found", len(outs.Data)) + } + + job = eng.Job("start", id) if err := job.ImportEnv(hostConfig); err != nil { t.Fatal(err) } @@ -135,12 +177,24 @@ func TestCreateRmVolumes(t *testing.T) { t.Fatal(err) } - if err = srv.ContainerDestroy(id, true, false); err != nil { + job = eng.Job("container_delete", id) + job.SetenvBool("removeVolume", true) + if err := job.Run(); err != nil { t.Fatal(err) } - if c := srv.Containers(true, false, -1, "", ""); len(c) != 0 { - t.Errorf("Expected 0 container, %v found", len(c)) + job = eng.Job("containers") + job.SetenvBool("all", true) + outs, err = job.Stdout.AddListTable() + if err != nil { + t.Fatal(err) + } + if err := job.Run(); err != nil { + t.Fatal(err) + } + + if len(outs.Data) != 0 { + t.Errorf("Expected 0 container, %v found", len(outs.Data)) } } @@ -164,6 +218,85 @@ func TestCommit(t *testing.T) { } } +func TestRestartKillWait(t *testing.T) { + eng := NewTestEngine(t) + srv := mkServerFromEngine(eng, t) + runtime := mkRuntimeFromEngine(eng, t) + defer runtime.Nuke() + + config, hostConfig, _, err := docker.ParseRun([]string{"-i", unitTestImageID, "/bin/cat"}, nil) + if err != nil { + t.Fatal(err) + } + + id := createTestContainer(eng, config, t) + + job := eng.Job("containers") + job.SetenvBool("all", true) + outs, err := job.Stdout.AddListTable() + if err != nil { + t.Fatal(err) + } + if err := job.Run(); err != nil { + t.Fatal(err) + } + + if len(outs.Data) != 1 { + t.Errorf("Expected 1 container, %v found", len(outs.Data)) + } + + job = eng.Job("start", id) + if err := job.ImportEnv(hostConfig); err != nil { + t.Fatal(err) + } + if err := job.Run(); err != nil { + t.Fatal(err) + } + job = eng.Job("kill", id) + if err := job.Run(); err != nil { + t.Fatal(err) + } + + eng, 
err = engine.New(eng.Root()) + if err != nil { + t.Fatal(err) + } + + job = eng.Job("initserver") + job.Setenv("Root", eng.Root()) + job.SetenvBool("AutoRestart", false) + // TestGetEnabledCors and TestOptionsRoute require EnableCors=true + job.SetenvBool("EnableCors", true) + if err := job.Run(); err != nil { + t.Fatal(err) + } + + srv = mkServerFromEngine(eng, t) + + job = srv.Eng.Job("containers") + job.SetenvBool("all", true) + outs, err = job.Stdout.AddListTable() + if err != nil { + t.Fatal(err) + } + if err := job.Run(); err != nil { + t.Fatal(err) + } + + if len(outs.Data) != 1 { + t.Errorf("Expected 1 container, %v found", len(outs.Data)) + } + + setTimeout(t, "Waiting on stopped container timedout", 5*time.Second, func() { + job = srv.Eng.Job("wait", outs.Data[0].Get("Id")) + var statusStr string + job.Stdout.AddString(&statusStr) + if err := job.Run(); err != nil { + t.Fatal(err) + } + }) +} + func TestCreateStartRestartStopStartKillRm(t *testing.T) { eng := NewTestEngine(t) srv := mkServerFromEngine(eng, t) @@ -176,11 +309,21 @@ func TestCreateStartRestartStopStartKillRm(t *testing.T) { id := createTestContainer(eng, config, t) - if c := srv.Containers(true, false, -1, "", ""); len(c) != 1 { - t.Errorf("Expected 1 container, %v found", len(c)) + job := srv.Eng.Job("containers") + job.SetenvBool("all", true) + outs, err := job.Stdout.AddListTable() + if err != nil { + t.Fatal(err) + } + if err := job.Run(); err != nil { + t.Fatal(err) } - job := eng.Job("start", id) + if len(outs.Data) != 1 { + t.Errorf("Expected 1 container, %v found", len(outs.Data)) + } + + job = eng.Job("start", id) if err := job.ImportEnv(hostConfig); err != nil { t.Fatal(err) } @@ -188,7 +331,9 @@ func TestCreateStartRestartStopStartKillRm(t *testing.T) { t.Fatal(err) } - if err := srv.ContainerRestart(id, 15); err != nil { + job = eng.Job("restart", id) + job.SetenvInt("t", 15) + if err := job.Run(); err != nil { t.Fatal(err) } @@ -211,12 +356,24 @@ func 
TestCreateStartRestartStopStartKillRm(t *testing.T) { } // FIXME: this failed once with a race condition ("Unable to remove filesystem for xxx: directory not empty") - if err := srv.ContainerDestroy(id, true, false); err != nil { + job = eng.Job("container_delete", id) + job.SetenvBool("removeVolume", true) + if err := job.Run(); err != nil { t.Fatal(err) } - if c := srv.Containers(true, false, -1, "", ""); len(c) != 0 { - t.Errorf("Expected 0 container, %v found", len(c)) + job = srv.Eng.Job("containers") + job.SetenvBool("all", true) + outs, err = job.Stdout.AddListTable() + if err != nil { + t.Fatal(err) + } + if err := job.Run(); err != nil { + t.Fatal(err) + } + + if len(outs.Data) != 0 { + t.Errorf("Expected 0 container, %v found", len(outs.Data)) } } @@ -242,10 +399,7 @@ func TestRmi(t *testing.T) { srv := mkServerFromEngine(eng, t) defer mkRuntimeFromEngine(eng, t).Nuke() - initialImages, err := srv.Images(false, "") - if err != nil { - t.Fatal(err) - } + initialImages := getAllImages(eng, t) config, hostConfig, _, err := docker.ParseRun([]string{unitTestImageID, "echo", "test"}, nil) if err != nil { @@ -300,34 +454,28 @@ func TestRmi(t *testing.T) { t.Fatal(err) } - images, err := srv.Images(false, "") + images := getAllImages(eng, t) + + if images.Len()-initialImages.Len() != 2 { + t.Fatalf("Expected 2 new images, found %d.", images.Len()-initialImages.Len()) + } + + _, err = srv.DeleteImage(imageID, true) if err != nil { t.Fatal(err) } - if len(images)-len(initialImages) != 2 { - t.Fatalf("Expected 2 new images, found %d.", len(images)-len(initialImages)) + images = getAllImages(eng, t) + + if images.Len()-initialImages.Len() != 1 { + t.Fatalf("Expected 1 new image, found %d.", images.Len()-initialImages.Len()) } - _, err = srv.ImageDelete(imageID, true) - if err != nil { - t.Fatal(err) - } - - images, err = srv.Images(false, "") - if err != nil { - t.Fatal(err) - } - - if len(images)-len(initialImages) != 1 { - t.Fatalf("Expected 1 new image, found 
%d.", len(images)-len(initialImages)) - } - - for _, image := range images { - if strings.Contains(unitTestImageID, image.ID) { + for _, image := range images.Data { + if strings.Contains(unitTestImageID, image.Get("Id")) { continue } - if image.RepoTags[0] == ":" { + if image.GetList("RepoTags")[0] == ":" { t.Fatalf("Expected tagged image, got untagged one.") } } @@ -337,8 +485,6 @@ func TestImagesFilter(t *testing.T) { eng := NewTestEngine(t) defer nuke(mkRuntimeFromEngine(eng, t)) - srv := mkServerFromEngine(eng, t) - if err := eng.Job("tag", unitTestImageName, "utest", "tag1").Run(); err != nil { t.Fatal(err) } @@ -351,39 +497,27 @@ func TestImagesFilter(t *testing.T) { t.Fatal(err) } - images, err := srv.Images(false, "utest*/*") - if err != nil { - t.Fatal(err) - } + images := getImages(eng, t, false, "utest*/*") - if len(images[0].RepoTags) != 2 { + if len(images.Data[0].GetList("RepoTags")) != 2 { t.Fatal("incorrect number of matches returned") } - images, err = srv.Images(false, "utest") - if err != nil { - t.Fatal(err) - } + images = getImages(eng, t, false, "utest") - if len(images[0].RepoTags) != 1 { + if len(images.Data[0].GetList("RepoTags")) != 1 { t.Fatal("incorrect number of matches returned") } - images, err = srv.Images(false, "utest*") - if err != nil { - t.Fatal(err) - } + images = getImages(eng, t, false, "utest*") - if len(images[0].RepoTags) != 1 { + if len(images.Data[0].GetList("RepoTags")) != 1 { t.Fatal("incorrect number of matches returned") } - images, err = srv.Images(false, "*5000*/*") - if err != nil { - t.Fatal(err) - } + images = getImages(eng, t, false, "*5000*/*") - if len(images[0].RepoTags) != 1 { + if len(images.Data[0].GetList("RepoTags")) != 1 { t.Fatal("incorrect number of matches returned") } } @@ -392,24 +526,137 @@ func TestImageInsert(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() srv := mkServerFromEngine(eng, t) - sf := utils.NewStreamFormatter(true) // bad image name fails - if err 
:= srv.ImageInsert("foo", "https://www.docker.io/static/img/docker-top-logo.png", "/foo", ioutil.Discard, sf); err == nil { + if err := srv.Eng.Job("insert", "foo", "https://www.docker.io/static/img/docker-top-logo.png", "/foo").Run(); err == nil { t.Fatal("expected an error and got none") } // bad url fails - if err := srv.ImageInsert(unitTestImageID, "http://bad_host_name_that_will_totally_fail.com/", "/foo", ioutil.Discard, sf); err == nil { + if err := srv.Eng.Job("insert", unitTestImageID, "http://bad_host_name_that_will_totally_fail.com/", "/foo").Run(); err == nil { t.Fatal("expected an error and got none") } // success returns nil - if err := srv.ImageInsert(unitTestImageID, "https://www.docker.io/static/img/docker-top-logo.png", "/foo", ioutil.Discard, sf); err != nil { + if err := srv.Eng.Job("insert", unitTestImageID, "https://www.docker.io/static/img/docker-top-logo.png", "/foo").Run(); err != nil { t.Fatalf("expected no error, but got %v", err) } } +func TestListContainers(t *testing.T) { + eng := NewTestEngine(t) + srv := mkServerFromEngine(eng, t) + defer mkRuntimeFromEngine(eng, t).Nuke() + + config := docker.Config{ + Image: unitTestImageID, + Cmd: []string{"/bin/sh", "-c", "cat"}, + OpenStdin: true, + } + + firstID := createTestContainer(eng, &config, t) + secondID := createTestContainer(eng, &config, t) + thirdID := createTestContainer(eng, &config, t) + fourthID := createTestContainer(eng, &config, t) + defer func() { + containerKill(eng, firstID, t) + containerKill(eng, secondID, t) + containerKill(eng, fourthID, t) + containerWait(eng, firstID, t) + containerWait(eng, secondID, t) + containerWait(eng, fourthID, t) + }() + + startContainer(eng, firstID, t) + startContainer(eng, secondID, t) + startContainer(eng, fourthID, t) + + // all + if !assertContainerList(srv, true, -1, "", "", []string{fourthID, thirdID, secondID, firstID}) { + t.Error("Container list is not in the correct order") + } + + // running + if !assertContainerList(srv, false, 
-1, "", "", []string{fourthID, secondID, firstID}) { + t.Error("Container list is not in the correct order") + } + + // from here 'all' flag is ignored + + // limit + expected := []string{fourthID, thirdID} + if !assertContainerList(srv, true, 2, "", "", expected) || + !assertContainerList(srv, false, 2, "", "", expected) { + t.Error("Container list is not in the correct order") + } + + // since + expected = []string{fourthID, thirdID, secondID} + if !assertContainerList(srv, true, -1, firstID, "", expected) || + !assertContainerList(srv, false, -1, firstID, "", expected) { + t.Error("Container list is not in the correct order") + } + + // before + expected = []string{secondID, firstID} + if !assertContainerList(srv, true, -1, "", thirdID, expected) || + !assertContainerList(srv, false, -1, "", thirdID, expected) { + t.Error("Container list is not in the correct order") + } + + // since & before + expected = []string{thirdID, secondID} + if !assertContainerList(srv, true, -1, firstID, fourthID, expected) || + !assertContainerList(srv, false, -1, firstID, fourthID, expected) { + t.Error("Container list is not in the correct order") + } + + // since & limit + expected = []string{fourthID, thirdID} + if !assertContainerList(srv, true, 2, firstID, "", expected) || + !assertContainerList(srv, false, 2, firstID, "", expected) { + t.Error("Container list is not in the correct order") + } + + // before & limit + expected = []string{thirdID} + if !assertContainerList(srv, true, 1, "", fourthID, expected) || + !assertContainerList(srv, false, 1, "", fourthID, expected) { + t.Error("Container list is not in the correct order") + } + + // since & before & limit + expected = []string{thirdID} + if !assertContainerList(srv, true, 1, firstID, fourthID, expected) || + !assertContainerList(srv, false, 1, firstID, fourthID, expected) { + t.Error("Container list is not in the correct order") + } +} + +func assertContainerList(srv *docker.Server, all bool, limit int, since, before 
string, expected []string) bool { + job := srv.Eng.Job("containers") + job.SetenvBool("all", all) + job.SetenvInt("limit", limit) + job.Setenv("since", since) + job.Setenv("before", before) + outs, err := job.Stdout.AddListTable() + if err != nil { + return false + } + if err := job.Run(); err != nil { + return false + } + if len(outs.Data) != len(expected) { + return false + } + for i := 0; i < len(outs.Data); i++ { + if outs.Data[i].Get("Id") != expected[i] { + return false + } + } + return true +} + // Regression test for being able to untag an image with an existing // container func TestDeleteTagWithExistingContainers(t *testing.T) { @@ -434,25 +681,31 @@ func TestDeleteTagWithExistingContainers(t *testing.T) { t.Fatal("No id returned") } - containers := srv.Containers(true, false, -1, "", "") + job := srv.Eng.Job("containers") + job.SetenvBool("all", true) + outs, err := job.Stdout.AddListTable() + if err != nil { + t.Fatal(err) + } + if err := job.Run(); err != nil { + t.Fatal(err) + } - if len(containers) != 1 { - t.Fatalf("Expected 1 container got %d", len(containers)) + if len(outs.Data) != 1 { + t.Fatalf("Expected 1 container got %d", len(outs.Data)) } // Try to remove the tag - imgs, err := srv.ImageDelete("utest:tag1", true) + imgs, err := srv.DeleteImage("utest:tag1", true) if err != nil { t.Fatal(err) } - if len(imgs) != 1 { - t.Fatalf("Should only have deleted one untag %d", len(imgs)) + if len(imgs.Data) != 1 { + t.Fatalf("Should only have deleted one untag %d", len(imgs.Data)) } - untag := imgs[0] - - if untag.Untagged != unitTestImageID { - t.Fatalf("Expected %s got %s", unitTestImageID, untag.Untagged) + if untag := imgs.Data[0].Get("Untagged"); untag != unitTestImageID { + t.Fatalf("Expected %s got %s", unitTestImageID, untag) } } diff --git a/integration/sorter_test.go b/integration/sorter_test.go index 77848c7ddf..3ce1225ca4 100644 --- a/integration/sorter_test.go +++ b/integration/sorter_test.go @@ -1,9 +1,7 @@ package docker import ( - 
"github.com/dotcloud/docker" - "github.com/dotcloud/docker/utils" - "io/ioutil" + "github.com/dotcloud/docker/engine" "testing" "time" ) @@ -11,53 +9,48 @@ import ( func TestServerListOrderedImagesByCreationDate(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) - if err := generateImage("", srv); err != nil { + if err := generateImage("", eng); err != nil { t.Fatal(err) } - images, err := srv.Images(true, "") - if err != nil { - t.Fatal(err) - } + images := getImages(eng, t, true, "") - if images[0].Created < images[1].Created { - t.Error("Expected []APIImges to be ordered by most recent creation date.") + if images.Data[0].GetInt("Created") < images.Data[1].GetInt("Created") { + t.Error("Expected images to be ordered by most recent creation date.") } } func TestServerListOrderedImagesByCreationDateAndTag(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) - err := generateImage("bar", srv) + err := generateImage("bar", eng) if err != nil { t.Fatal(err) } time.Sleep(time.Second) - err = generateImage("zed", srv) + err = generateImage("zed", eng) if err != nil { t.Fatal(err) } - images, err := srv.Images(true, "") - if err != nil { - t.Fatal(err) - } + images := getImages(eng, t, true, "") - if images[0].RepoTags[0] != "repo:zed" && images[0].RepoTags[0] != "repo:bar" { - t.Errorf("Expected []APIImges to be ordered by most recent creation date. 
%s", images) + if repoTags := images.Data[0].GetList("RepoTags"); repoTags[0] != "repo:zed" && repoTags[0] != "repo:bar" { + t.Errorf("Expected Images to be ordered by most recent creation date.") } } -func generateImage(name string, srv *docker.Server) error { +func generateImage(name string, eng *engine.Engine) error { archive, err := fakeTar() if err != nil { return err } - return srv.ImageImport("-", "repo", name, archive, ioutil.Discard, utils.NewStreamFormatter(true)) + job := eng.Job("import", "-", "repo", name) + job.Stdin.Add(archive) + job.SetenvBool("json", true) + return job.Run() } diff --git a/integration/utils_test.go b/integration/utils_test.go index 85ba13d698..450cb7527f 100644 --- a/integration/utils_test.go +++ b/integration/utils_test.go @@ -4,9 +4,6 @@ import ( "archive/tar" "bytes" "fmt" - "github.com/dotcloud/docker" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/utils" "io" "io/ioutil" "net/http" @@ -16,6 +13,10 @@ import ( "strings" "testing" "time" + + "github.com/dotcloud/docker" + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/utils" ) // This file contains utility functions for docker's unit test suite. 
@@ -32,13 +33,18 @@ func mkRuntime(f utils.Fataler) *docker.Runtime { config := &docker.DaemonConfig{ Root: root, AutoRestart: false, - Mtu: docker.DefaultNetworkMtu, + Mtu: docker.GetDefaultNetworkMtu(), } - r, err := docker.NewRuntimeFromDirectory(config) + + eng, err := engine.New(root) + if err != nil { + f.Fatal(err) + } + + r, err := docker.NewRuntimeFromDirectory(config, eng) if err != nil { f.Fatal(err) } - r.UpdateCapabilities(true) return r } @@ -72,10 +78,11 @@ func containerRun(eng *engine.Engine, id string, t utils.Fataler) { func containerFileExists(eng *engine.Engine, id, dir string, t utils.Fataler) bool { c := getContainer(eng, id, t) - if err := c.EnsureMounted(); err != nil { + if err := c.Mount(); err != nil { t.Fatal(err) } - if _, err := os.Stat(path.Join(c.RootfsPath(), dir)); err != nil { + defer c.Unmount() + if _, err := os.Stat(path.Join(c.BasefsPath(), dir)); err != nil { if os.IsNotExist(err) { return false } @@ -186,11 +193,9 @@ func NewTestEngine(t utils.Fataler) *engine.Engine { if err != nil { t.Fatal(err) } - eng.Stdout = ioutil.Discard - eng.Stderr = ioutil.Discard // Load default plugins // (This is manually copied and modified from main() until we have a more generic plugin system) - job := eng.Job("initapi") + job := eng.Job("initserver") job.Setenv("Root", root) job.SetenvBool("AutoRestart", false) // TestGetEnabledCors and TestOptionsRoute require EnableCors=true @@ -329,3 +334,22 @@ func fakeTar() (io.Reader, error) { tw.Close() return buf, nil } + +func getAllImages(eng *engine.Engine, t *testing.T) *engine.Table { + return getImages(eng, t, true, "") +} + +func getImages(eng *engine.Engine, t *testing.T, all bool, filter string) *engine.Table { + job := eng.Job("images") + job.SetenvBool("all", all) + job.Setenv("filter", filter) + images, err := job.Stdout.AddListTable() + if err != nil { + t.Fatal(err) + } + if err := job.Run(); err != nil { + t.Fatal(err) + } + return images + +} diff --git a/links.go b/links.go index 
55834b92d2..aa1c08374b 100644 --- a/links.go +++ b/links.go @@ -2,7 +2,7 @@ package docker import ( "fmt" - "github.com/dotcloud/docker/pkg/iptables" + "github.com/dotcloud/docker/engine" "path" "strings" ) @@ -11,13 +11,13 @@ type Link struct { ParentIP string ChildIP string Name string - BridgeInterface string ChildEnvironment []string Ports []Port IsEnabled bool + eng *engine.Engine } -func NewLink(parent, child *Container, name, bridgeInterface string) (*Link, error) { +func NewLink(parent, child *Container, name string, eng *engine.Engine) (*Link, error) { if parent.ID == child.ID { return nil, fmt.Errorf("Cannot link to self: %s == %s", parent.ID, child.ID) } @@ -33,12 +33,12 @@ func NewLink(parent, child *Container, name, bridgeInterface string) (*Link, err } l := &Link{ - BridgeInterface: bridgeInterface, Name: name, ChildIP: child.NetworkSettings.IPAddress, ParentIP: parent.NetworkSettings.IPAddress, ChildEnvironment: child.Config.Env, Ports: ports, + eng: eng, } return l, nil @@ -119,30 +119,21 @@ func (l *Link) Disable() { } func (l *Link) toggle(action string, ignoreErrors bool) error { - for _, p := range l.Ports { - if output, err := iptables.Raw(action, "FORWARD", - "-i", l.BridgeInterface, "-o", l.BridgeInterface, - "-p", p.Proto(), - "-s", l.ParentIP, - "--dport", p.Port(), - "-d", l.ChildIP, - "-j", "ACCEPT"); !ignoreErrors && err != nil { - return err - } else if len(output) != 0 { - return fmt.Errorf("Error toggle iptables forward: %s", output) - } + job := l.eng.Job("link", action) - if output, err := iptables.Raw(action, "FORWARD", - "-i", l.BridgeInterface, "-o", l.BridgeInterface, - "-p", p.Proto(), - "-s", l.ChildIP, - "--sport", p.Port(), - "-d", l.ParentIP, - "-j", "ACCEPT"); !ignoreErrors && err != nil { - return err - } else if len(output) != 0 { - return fmt.Errorf("Error toggle iptables forward: %s", output) - } + job.Setenv("ParentIP", l.ParentIP) + job.Setenv("ChildIP", l.ChildIP) + job.SetenvBool("IgnoreErrors", ignoreErrors) + + 
out := make([]string, len(l.Ports)) + for i, p := range l.Ports { + out[i] = fmt.Sprintf("%s/%s", p.Port(), p.Proto()) + } + job.SetenvList("Ports", out) + + if err := job.Run(); err != nil { + // TODO: get ouput from job + return err } return nil } diff --git a/links_test.go b/links_test.go index be8d48e45c..8a266a9a3d 100644 --- a/links_test.go +++ b/links_test.go @@ -30,7 +30,7 @@ func TestLinkNew(t *testing.T) { to := newMockLinkContainer(toID, "172.0.17.3") - link, err := NewLink(to, from, "/db/docker", "172.0.17.1") + link, err := NewLink(to, from, "/db/docker", nil) if err != nil { t.Fatal(err) } @@ -50,9 +50,6 @@ func TestLinkNew(t *testing.T) { if link.ChildIP != "172.0.17.2" { t.Fail() } - if link.BridgeInterface != "172.0.17.1" { - t.Fail() - } for _, p := range link.Ports { if p != Port("6379/tcp") { t.Fail() @@ -75,7 +72,7 @@ func TestLinkEnv(t *testing.T) { to := newMockLinkContainer(toID, "172.0.17.3") - link, err := NewLink(to, from, "/db/docker", "172.0.17.1") + link, err := NewLink(to, from, "/db/docker", nil) if err != nil { t.Fatal(err) } diff --git a/network.go b/network.go deleted file mode 100644 index 22ea8ba757..0000000000 --- a/network.go +++ /dev/null @@ -1,820 +0,0 @@ -package docker - -import ( - "encoding/binary" - "errors" - "fmt" - "github.com/dotcloud/docker/pkg/iptables" - "github.com/dotcloud/docker/pkg/netlink" - "github.com/dotcloud/docker/proxy" - "github.com/dotcloud/docker/utils" - "log" - "net" - "strconv" - "sync" - "syscall" - "unsafe" -) - -const ( - DefaultNetworkBridge = "docker0" - DisableNetworkBridge = "none" - DefaultNetworkMtu = 1500 - portRangeStart = 49153 - portRangeEnd = 65535 - siocBRADDBR = 0x89a0 -) - -// Calculates the first and last IP addresses in an IPNet -func networkRange(network *net.IPNet) (net.IP, net.IP) { - netIP := network.IP.To4() - firstIP := netIP.Mask(network.Mask) - lastIP := net.IPv4(0, 0, 0, 0).To4() - for i := 0; i < len(lastIP); i++ { - lastIP[i] = netIP[i] | ^network.Mask[i] - } - 
return firstIP, lastIP -} - -// Detects overlap between one IPNet and another -func networkOverlaps(netX *net.IPNet, netY *net.IPNet) bool { - firstIP, _ := networkRange(netX) - if netY.Contains(firstIP) { - return true - } - firstIP, _ = networkRange(netY) - if netX.Contains(firstIP) { - return true - } - return false -} - -// Converts a 4 bytes IP into a 32 bit integer -func ipToInt(ip net.IP) int32 { - return int32(binary.BigEndian.Uint32(ip.To4())) -} - -// Converts 32 bit integer into a 4 bytes IP address -func intToIP(n int32) net.IP { - b := make([]byte, 4) - binary.BigEndian.PutUint32(b, uint32(n)) - return net.IP(b) -} - -// Given a netmask, calculates the number of available hosts -func networkSize(mask net.IPMask) int32 { - m := net.IPv4Mask(0, 0, 0, 0) - for i := 0; i < net.IPv4len; i++ { - m[i] = ^mask[i] - } - - return int32(binary.BigEndian.Uint32(m)) + 1 -} - -func checkRouteOverlaps(networks []*net.IPNet, dockerNetwork *net.IPNet) error { - for _, network := range networks { - if networkOverlaps(dockerNetwork, network) { - return fmt.Errorf("Network %s is already routed: '%s'", dockerNetwork, network) - } - } - return nil -} - -func checkNameserverOverlaps(nameservers []string, dockerNetwork *net.IPNet) error { - if len(nameservers) > 0 { - for _, ns := range nameservers { - _, nsNetwork, err := net.ParseCIDR(ns) - if err != nil { - return err - } - if networkOverlaps(dockerNetwork, nsNetwork) { - return fmt.Errorf("%s overlaps nameserver %s", dockerNetwork, nsNetwork) - } - } - } - return nil -} - -// CreateBridgeIface creates a network bridge interface on the host system with the name `ifaceName`, -// and attempts to configure it with an address which doesn't conflict with any other interface on the host. -// If it can't find an address which doesn't conflict, it will return an error. 
-func CreateBridgeIface(config *DaemonConfig) error { - addrs := []string{ - // Here we don't follow the convention of using the 1st IP of the range for the gateway. - // This is to use the same gateway IPs as the /24 ranges, which predate the /16 ranges. - // In theory this shouldn't matter - in practice there's bound to be a few scripts relying - // on the internal addressing or other stupid things like that. - // The shouldn't, but hey, let's not break them unless we really have to. - "172.17.42.1/16", // Don't use 172.16.0.0/16, it conflicts with EC2 DNS 172.16.0.23 - "10.0.42.1/16", // Don't even try using the entire /8, that's too intrusive - "10.1.42.1/16", - "10.42.42.1/16", - "172.16.42.1/24", - "172.16.43.1/24", - "172.16.44.1/24", - "10.0.42.1/24", - "10.0.43.1/24", - "192.168.42.1/24", - "192.168.43.1/24", - "192.168.44.1/24", - } - - nameservers := []string{} - resolvConf, _ := utils.GetResolvConf() - // we don't check for an error here, because we don't really care - // if we can't read /etc/resolv.conf. So instead we skip the append - // if resolvConf is nil. It either doesn't exist, or we can't read it - // for some reason. - if resolvConf != nil { - nameservers = append(nameservers, utils.GetNameserversAsCIDR(resolvConf)...) - } - - var ifaceAddr string - if len(config.BridgeIp) != 0 { - _, _, err := net.ParseCIDR(config.BridgeIp) - if err != nil { - return err - } - ifaceAddr = config.BridgeIp - } else { - for _, addr := range addrs { - _, dockerNetwork, err := net.ParseCIDR(addr) - if err != nil { - return err - } - routes, err := netlink.NetworkGetRoutes() - if err != nil { - return err - } - if err := checkRouteOverlaps(routes, dockerNetwork); err == nil { - if err := checkNameserverOverlaps(nameservers, dockerNetwork); err == nil { - ifaceAddr = addr - break - } - } else { - utils.Debugf("%s: %s", addr, err) - } - } - } - if ifaceAddr == "" { - return fmt.Errorf("Could not find a free IP address range for interface '%s'. 
Please configure its address manually and run 'docker -b %s'", config.BridgeIface, config.BridgeIface) - } - utils.Debugf("Creating bridge %s with network %s", config.BridgeIface, ifaceAddr) - - if err := createBridgeIface(config.BridgeIface); err != nil { - return err - } - iface, err := net.InterfaceByName(config.BridgeIface) - if err != nil { - return err - } - ipAddr, ipNet, err := net.ParseCIDR(ifaceAddr) - if err != nil { - return err - } - if netlink.NetworkLinkAddIp(iface, ipAddr, ipNet); err != nil { - return fmt.Errorf("Unable to add private network: %s", err) - } - if err := netlink.NetworkLinkUp(iface); err != nil { - return fmt.Errorf("Unable to start network bridge: %s", err) - } - - return nil -} - -// Create the actual bridge device. This is more backward-compatible than -// netlink.NetworkLinkAdd and works on RHEL 6. -func createBridgeIface(name string) error { - s, err := syscall.Socket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_IP) - if err != nil { - utils.Debugf("Bridge socket creation failed IPv6 probably not enabled: %v", err) - s, err = syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_IP) - if err != nil { - return fmt.Errorf("Error creating bridge creation socket: %s", err) - } - } - defer syscall.Close(s) - - nameBytePtr, err := syscall.BytePtrFromString(name) - if err != nil { - return fmt.Errorf("Error converting bridge name %s to byte array: %s", name, err) - } - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), siocBRADDBR, uintptr(unsafe.Pointer(nameBytePtr))); err != 0 { - return fmt.Errorf("Error creating bridge: %s", err) - } - return nil -} - -// Return the IPv4 address of a network interface -func getIfaceAddr(name string) (net.Addr, error) { - iface, err := net.InterfaceByName(name) - if err != nil { - return nil, err - } - addrs, err := iface.Addrs() - if err != nil { - return nil, err - } - var addrs4 []net.Addr - for _, addr := range addrs { - ip := (addr.(*net.IPNet)).IP - if 
ip4 := ip.To4(); len(ip4) == net.IPv4len { - addrs4 = append(addrs4, addr) - } - } - switch { - case len(addrs4) == 0: - return nil, fmt.Errorf("Interface %v has no IP addresses", name) - case len(addrs4) > 1: - fmt.Printf("Interface %v has more than 1 IPv4 address. Defaulting to using %v\n", - name, (addrs4[0].(*net.IPNet)).IP) - } - return addrs4[0], nil -} - -// Port mapper takes care of mapping external ports to containers by setting -// up iptables rules. -// It keeps track of all mappings and is able to unmap at will -type PortMapper struct { - tcpMapping map[string]*net.TCPAddr - tcpProxies map[string]proxy.Proxy - udpMapping map[string]*net.UDPAddr - udpProxies map[string]proxy.Proxy - - iptables *iptables.Chain - defaultIp net.IP - proxyFactoryFunc func(net.Addr, net.Addr) (proxy.Proxy, error) -} - -func (mapper *PortMapper) Map(ip net.IP, port int, backendAddr net.Addr) error { - - if _, isTCP := backendAddr.(*net.TCPAddr); isTCP { - mapKey := (&net.TCPAddr{Port: port, IP: ip}).String() - if _, exists := mapper.tcpProxies[mapKey]; exists { - return fmt.Errorf("TCP Port %s is already in use", mapKey) - } - backendPort := backendAddr.(*net.TCPAddr).Port - backendIP := backendAddr.(*net.TCPAddr).IP - if mapper.iptables != nil { - if err := mapper.iptables.Forward(iptables.Add, ip, port, "tcp", backendIP.String(), backendPort); err != nil { - return err - } - } - mapper.tcpMapping[mapKey] = backendAddr.(*net.TCPAddr) - proxy, err := mapper.proxyFactoryFunc(&net.TCPAddr{IP: ip, Port: port}, backendAddr) - if err != nil { - mapper.Unmap(ip, port, "tcp") - return err - } - mapper.tcpProxies[mapKey] = proxy - go proxy.Run() - } else { - mapKey := (&net.UDPAddr{Port: port, IP: ip}).String() - if _, exists := mapper.udpProxies[mapKey]; exists { - return fmt.Errorf("UDP: Port %s is already in use", mapKey) - } - backendPort := backendAddr.(*net.UDPAddr).Port - backendIP := backendAddr.(*net.UDPAddr).IP - if mapper.iptables != nil { - if err := 
mapper.iptables.Forward(iptables.Add, ip, port, "udp", backendIP.String(), backendPort); err != nil { - return err - } - } - mapper.udpMapping[mapKey] = backendAddr.(*net.UDPAddr) - proxy, err := mapper.proxyFactoryFunc(&net.UDPAddr{IP: ip, Port: port}, backendAddr) - if err != nil { - mapper.Unmap(ip, port, "udp") - return err - } - mapper.udpProxies[mapKey] = proxy - go proxy.Run() - } - return nil -} - -func (mapper *PortMapper) Unmap(ip net.IP, port int, proto string) error { - if proto == "tcp" { - mapKey := (&net.TCPAddr{Port: port, IP: ip}).String() - backendAddr, ok := mapper.tcpMapping[mapKey] - if !ok { - return fmt.Errorf("Port tcp/%s is not mapped", mapKey) - } - if proxy, exists := mapper.tcpProxies[mapKey]; exists { - proxy.Close() - delete(mapper.tcpProxies, mapKey) - } - if mapper.iptables != nil { - if err := mapper.iptables.Forward(iptables.Delete, ip, port, proto, backendAddr.IP.String(), backendAddr.Port); err != nil { - return err - } - } - delete(mapper.tcpMapping, mapKey) - } else { - mapKey := (&net.UDPAddr{Port: port, IP: ip}).String() - backendAddr, ok := mapper.udpMapping[mapKey] - if !ok { - return fmt.Errorf("Port udp/%s is not mapped", mapKey) - } - if proxy, exists := mapper.udpProxies[mapKey]; exists { - proxy.Close() - delete(mapper.udpProxies, mapKey) - } - if mapper.iptables != nil { - if err := mapper.iptables.Forward(iptables.Delete, ip, port, proto, backendAddr.IP.String(), backendAddr.Port); err != nil { - return err - } - } - delete(mapper.udpMapping, mapKey) - } - return nil -} - -func newPortMapper(config *DaemonConfig) (*PortMapper, error) { - // We can always try removing the iptables - if err := iptables.RemoveExistingChain("DOCKER"); err != nil { - return nil, err - } - var chain *iptables.Chain - if config.EnableIptables { - var err error - chain, err = iptables.NewChain("DOCKER", config.BridgeIface) - if err != nil { - return nil, fmt.Errorf("Failed to create DOCKER chain: %s", err) - } - } - - mapper := &PortMapper{ 
- tcpMapping: make(map[string]*net.TCPAddr), - tcpProxies: make(map[string]proxy.Proxy), - udpMapping: make(map[string]*net.UDPAddr), - udpProxies: make(map[string]proxy.Proxy), - iptables: chain, - defaultIp: config.DefaultIp, - proxyFactoryFunc: proxy.NewProxy, - } - return mapper, nil -} - -// Port allocator: Automatically allocate and release networking ports -type PortAllocator struct { - sync.Mutex - inUse map[string]struct{} - fountain chan int - quit chan bool -} - -func (alloc *PortAllocator) runFountain() { - for { - for port := portRangeStart; port < portRangeEnd; port++ { - select { - case alloc.fountain <- port: - case quit := <-alloc.quit: - if quit { - return - } - } - } - } -} - -// FIXME: Release can no longer fail, change its prototype to reflect that. -func (alloc *PortAllocator) Release(addr net.IP, port int) error { - mapKey := (&net.TCPAddr{Port: port, IP: addr}).String() - utils.Debugf("Releasing %d", port) - alloc.Lock() - delete(alloc.inUse, mapKey) - alloc.Unlock() - return nil -} - -func (alloc *PortAllocator) Acquire(addr net.IP, port int) (int, error) { - mapKey := (&net.TCPAddr{Port: port, IP: addr}).String() - utils.Debugf("Acquiring %s", mapKey) - if port == 0 { - // Allocate a port from the fountain - for port := range alloc.fountain { - if _, err := alloc.Acquire(addr, port); err == nil { - return port, nil - } - } - return -1, fmt.Errorf("Port generator ended unexpectedly") - } - alloc.Lock() - defer alloc.Unlock() - if _, inUse := alloc.inUse[mapKey]; inUse { - return -1, fmt.Errorf("Port already in use: %d", port) - } - alloc.inUse[mapKey] = struct{}{} - return port, nil -} - -func (alloc *PortAllocator) Close() error { - alloc.quit <- true - close(alloc.quit) - close(alloc.fountain) - return nil -} - -func newPortAllocator() (*PortAllocator, error) { - allocator := &PortAllocator{ - inUse: make(map[string]struct{}), - fountain: make(chan int), - quit: make(chan bool), - } - go allocator.runFountain() - return allocator, nil -} 
- -// IP allocator: Automatically allocate and release networking ports -type IPAllocator struct { - network *net.IPNet - queueAlloc chan allocatedIP - queueReleased chan net.IP - inUse map[int32]struct{} - quit chan bool -} - -type allocatedIP struct { - ip net.IP - err error -} - -func (alloc *IPAllocator) run() { - firstIP, _ := networkRange(alloc.network) - ipNum := ipToInt(firstIP) - ownIP := ipToInt(alloc.network.IP) - size := networkSize(alloc.network.Mask) - - pos := int32(1) - max := size - 2 // -1 for the broadcast address, -1 for the gateway address - for { - var ( - newNum int32 - inUse bool - ) - - // Find first unused IP, give up after one whole round - for attempt := int32(0); attempt < max; attempt++ { - newNum = ipNum + pos - - pos = pos%max + 1 - - // The network's IP is never okay to use - if newNum == ownIP { - continue - } - - if _, inUse = alloc.inUse[newNum]; !inUse { - // We found an unused IP - break - } - } - - ip := allocatedIP{ip: intToIP(newNum)} - if inUse { - ip.err = errors.New("No unallocated IP available") - } - - select { - case quit := <-alloc.quit: - if quit { - return - } - case alloc.queueAlloc <- ip: - alloc.inUse[newNum] = struct{}{} - case released := <-alloc.queueReleased: - r := ipToInt(released) - delete(alloc.inUse, r) - - if inUse { - // If we couldn't allocate a new IP, the released one - // will be the only free one now, so instantly use it - // next time - pos = r - ipNum - } else { - // Use same IP as last time - if pos == 1 { - pos = max - } else { - pos-- - } - } - } - } -} - -func (alloc *IPAllocator) Acquire() (net.IP, error) { - ip := <-alloc.queueAlloc - return ip.ip, ip.err -} - -func (alloc *IPAllocator) Release(ip net.IP) { - alloc.queueReleased <- ip -} - -func (alloc *IPAllocator) Close() error { - alloc.quit <- true - close(alloc.quit) - close(alloc.queueAlloc) - close(alloc.queueReleased) - return nil -} - -func newIPAllocator(network *net.IPNet) *IPAllocator { - alloc := &IPAllocator{ - network: 
network, - queueAlloc: make(chan allocatedIP), - queueReleased: make(chan net.IP), - inUse: make(map[int32]struct{}), - quit: make(chan bool), - } - - go alloc.run() - - return alloc -} - -// Network interface represents the networking stack of a container -type NetworkInterface struct { - IPNet net.IPNet - Gateway net.IP - - manager *NetworkManager - extPorts []*Nat - disabled bool -} - -// Allocate an external port and map it to the interface -func (iface *NetworkInterface) AllocatePort(port Port, binding PortBinding) (*Nat, error) { - - if iface.disabled { - return nil, fmt.Errorf("Trying to allocate port for interface %v, which is disabled", iface) // FIXME - } - - ip := iface.manager.portMapper.defaultIp - - if binding.HostIp != "" { - ip = net.ParseIP(binding.HostIp) - } else { - binding.HostIp = ip.String() - } - - nat := &Nat{ - Port: port, - Binding: binding, - } - - containerPort, err := parsePort(port.Port()) - if err != nil { - return nil, err - } - - hostPort, _ := parsePort(nat.Binding.HostPort) - - if nat.Port.Proto() == "tcp" { - extPort, err := iface.manager.tcpPortAllocator.Acquire(ip, hostPort) - if err != nil { - return nil, err - } - - backend := &net.TCPAddr{IP: iface.IPNet.IP, Port: containerPort} - if err := iface.manager.portMapper.Map(ip, extPort, backend); err != nil { - iface.manager.tcpPortAllocator.Release(ip, extPort) - return nil, err - } - nat.Binding.HostPort = strconv.Itoa(extPort) - } else { - extPort, err := iface.manager.udpPortAllocator.Acquire(ip, hostPort) - if err != nil { - return nil, err - } - backend := &net.UDPAddr{IP: iface.IPNet.IP, Port: containerPort} - if err := iface.manager.portMapper.Map(ip, extPort, backend); err != nil { - iface.manager.udpPortAllocator.Release(ip, extPort) - return nil, err - } - nat.Binding.HostPort = strconv.Itoa(extPort) - } - iface.extPorts = append(iface.extPorts, nat) - - return nat, nil -} - -type Nat struct { - Port Port - Binding PortBinding -} - -func (n *Nat) String() string { - 
return fmt.Sprintf("%s:%s:%s/%s", n.Binding.HostIp, n.Binding.HostPort, n.Port.Port(), n.Port.Proto()) -} - -// Release: Network cleanup - release all resources -func (iface *NetworkInterface) Release() { - if iface.disabled { - return - } - - for _, nat := range iface.extPorts { - hostPort, err := parsePort(nat.Binding.HostPort) - if err != nil { - log.Printf("Unable to get host port: %s", err) - continue - } - ip := net.ParseIP(nat.Binding.HostIp) - utils.Debugf("Unmaping %s/%s:%s", nat.Port.Proto, ip.String(), nat.Binding.HostPort) - if err := iface.manager.portMapper.Unmap(ip, hostPort, nat.Port.Proto()); err != nil { - log.Printf("Unable to unmap port %s: %s", nat, err) - } - - if nat.Port.Proto() == "tcp" { - if err := iface.manager.tcpPortAllocator.Release(ip, hostPort); err != nil { - log.Printf("Unable to release port %s", nat) - } - } else if nat.Port.Proto() == "udp" { - if err := iface.manager.udpPortAllocator.Release(ip, hostPort); err != nil { - log.Printf("Unable to release port %s: %s", nat, err) - } - } - } - - iface.manager.ipAllocator.Release(iface.IPNet.IP) -} - -// Network Manager manages a set of network interfaces -// Only *one* manager per host machine should be used -type NetworkManager struct { - bridgeIface string - bridgeNetwork *net.IPNet - - ipAllocator *IPAllocator - tcpPortAllocator *PortAllocator - udpPortAllocator *PortAllocator - portMapper *PortMapper - - disabled bool -} - -// Allocate a network interface -func (manager *NetworkManager) Allocate() (*NetworkInterface, error) { - - if manager.disabled { - return &NetworkInterface{disabled: true}, nil - } - - var ip net.IP - var err error - - ip, err = manager.ipAllocator.Acquire() - if err != nil { - return nil, err - } - // avoid duplicate IP - ipNum := ipToInt(ip) - firstIP := manager.ipAllocator.network.IP.To4().Mask(manager.ipAllocator.network.Mask) - firstIPNum := ipToInt(firstIP) + 1 - - if firstIPNum == ipNum { - ip, err = manager.ipAllocator.Acquire() - if err != nil { - 
return nil, err - } - } - - iface := &NetworkInterface{ - IPNet: net.IPNet{IP: ip, Mask: manager.bridgeNetwork.Mask}, - Gateway: manager.bridgeNetwork.IP, - manager: manager, - } - return iface, nil -} - -func (manager *NetworkManager) Close() error { - if manager.disabled { - return nil - } - err1 := manager.tcpPortAllocator.Close() - err2 := manager.udpPortAllocator.Close() - err3 := manager.ipAllocator.Close() - if err1 != nil { - return err1 - } - if err2 != nil { - return err2 - } - return err3 -} - -func newNetworkManager(config *DaemonConfig) (*NetworkManager, error) { - if config.BridgeIface == DisableNetworkBridge { - manager := &NetworkManager{ - disabled: true, - } - return manager, nil - } - - addr, err := getIfaceAddr(config.BridgeIface) - if err != nil { - // If the iface is not found, try to create it - if err := CreateBridgeIface(config); err != nil { - return nil, err - } - addr, err = getIfaceAddr(config.BridgeIface) - if err != nil { - return nil, err - } - } - network := addr.(*net.IPNet) - - // Configure iptables for link support - if config.EnableIptables { - - // Enable NAT - natArgs := []string{"POSTROUTING", "-t", "nat", "-s", addr.String(), "!", "-d", addr.String(), "-j", "MASQUERADE"} - - if !iptables.Exists(natArgs...) { - if output, err := iptables.Raw(append([]string{"-A"}, natArgs...)...); err != nil { - return nil, fmt.Errorf("Unable to enable network bridge NAT: %s", err) - } else if len(output) != 0 { - return nil, fmt.Errorf("Error iptables postrouting: %s", output) - } - } - - // Accept incoming packets for existing connections - existingArgs := []string{"FORWARD", "-o", config.BridgeIface, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"} - - if !iptables.Exists(existingArgs...) 
{ - if output, err := iptables.Raw(append([]string{"-I"}, existingArgs...)...); err != nil { - return nil, fmt.Errorf("Unable to allow incoming packets: %s", err) - } else if len(output) != 0 { - return nil, fmt.Errorf("Error iptables allow incoming: %s", output) - } - } - - // Accept all non-intercontainer outgoing packets - outgoingArgs := []string{"FORWARD", "-i", config.BridgeIface, "!", "-o", config.BridgeIface, "-j", "ACCEPT"} - - if !iptables.Exists(outgoingArgs...) { - if output, err := iptables.Raw(append([]string{"-I"}, outgoingArgs...)...); err != nil { - return nil, fmt.Errorf("Unable to allow outgoing packets: %s", err) - } else if len(output) != 0 { - return nil, fmt.Errorf("Error iptables allow outgoing: %s", output) - } - } - - args := []string{"FORWARD", "-i", config.BridgeIface, "-o", config.BridgeIface, "-j"} - acceptArgs := append(args, "ACCEPT") - dropArgs := append(args, "DROP") - - if !config.InterContainerCommunication { - iptables.Raw(append([]string{"-D"}, acceptArgs...)...) - if !iptables.Exists(dropArgs...) { - utils.Debugf("Disable inter-container communication") - if output, err := iptables.Raw(append([]string{"-I"}, dropArgs...)...); err != nil { - return nil, fmt.Errorf("Unable to prevent intercontainer communication: %s", err) - } else if len(output) != 0 { - return nil, fmt.Errorf("Error disabling intercontainer communication: %s", output) - } - } - } else { - iptables.Raw(append([]string{"-D"}, dropArgs...)...) - if !iptables.Exists(acceptArgs...) 
{ - utils.Debugf("Enable inter-container communication") - if output, err := iptables.Raw(append([]string{"-I"}, acceptArgs...)...); err != nil { - return nil, fmt.Errorf("Unable to allow intercontainer communication: %s", err) - } else if len(output) != 0 { - return nil, fmt.Errorf("Error enabling intercontainer communication: %s", output) - } - } - } - } - - ipAllocator := newIPAllocator(network) - - tcpPortAllocator, err := newPortAllocator() - if err != nil { - return nil, err - } - - udpPortAllocator, err := newPortAllocator() - if err != nil { - return nil, err - } - - portMapper, err := newPortMapper(config) - if err != nil { - return nil, err - } - - manager := &NetworkManager{ - bridgeIface: config.BridgeIface, - bridgeNetwork: network, - ipAllocator: ipAllocator, - tcpPortAllocator: tcpPortAllocator, - udpPortAllocator: udpPortAllocator, - portMapper: portMapper, - } - - return manager, nil -} diff --git a/network_test.go b/network_test.go deleted file mode 100644 index 69fcba01a2..0000000000 --- a/network_test.go +++ /dev/null @@ -1,387 +0,0 @@ -package docker - -import ( - "github.com/dotcloud/docker/pkg/iptables" - "github.com/dotcloud/docker/proxy" - "net" - "testing" -) - -func TestPortAllocation(t *testing.T) { - ip := net.ParseIP("192.168.0.1") - ip2 := net.ParseIP("192.168.0.2") - allocator, err := newPortAllocator() - if err != nil { - t.Fatal(err) - } - if port, err := allocator.Acquire(ip, 80); err != nil { - t.Fatal(err) - } else if port != 80 { - t.Fatalf("Acquire(80) should return 80, not %d", port) - } - port, err := allocator.Acquire(ip, 0) - if err != nil { - t.Fatal(err) - } - if port <= 0 { - t.Fatalf("Acquire(0) should return a non-zero port") - } - if _, err := allocator.Acquire(ip, port); err == nil { - t.Fatalf("Acquiring a port already in use should return an error") - } - if newPort, err := allocator.Acquire(ip, 0); err != nil { - t.Fatal(err) - } else if newPort == port { - t.Fatalf("Acquire(0) allocated the same port twice: %d", 
port) - } - if _, err := allocator.Acquire(ip, 80); err == nil { - t.Fatalf("Acquiring a port already in use should return an error") - } - if _, err := allocator.Acquire(ip2, 80); err != nil { - t.Fatalf("It should be possible to allocate the same port on a different interface") - } - if _, err := allocator.Acquire(ip2, 80); err == nil { - t.Fatalf("Acquiring a port already in use should return an error") - } - if err := allocator.Release(ip, 80); err != nil { - t.Fatal(err) - } - if _, err := allocator.Acquire(ip, 80); err != nil { - t.Fatal(err) - } -} - -func TestNetworkRange(t *testing.T) { - // Simple class C test - _, network, _ := net.ParseCIDR("192.168.0.1/24") - first, last := networkRange(network) - if !first.Equal(net.ParseIP("192.168.0.0")) { - t.Error(first.String()) - } - if !last.Equal(net.ParseIP("192.168.0.255")) { - t.Error(last.String()) - } - if size := networkSize(network.Mask); size != 256 { - t.Error(size) - } - - // Class A test - _, network, _ = net.ParseCIDR("10.0.0.1/8") - first, last = networkRange(network) - if !first.Equal(net.ParseIP("10.0.0.0")) { - t.Error(first.String()) - } - if !last.Equal(net.ParseIP("10.255.255.255")) { - t.Error(last.String()) - } - if size := networkSize(network.Mask); size != 16777216 { - t.Error(size) - } - - // Class A, random IP address - _, network, _ = net.ParseCIDR("10.1.2.3/8") - first, last = networkRange(network) - if !first.Equal(net.ParseIP("10.0.0.0")) { - t.Error(first.String()) - } - if !last.Equal(net.ParseIP("10.255.255.255")) { - t.Error(last.String()) - } - - // 32bit mask - _, network, _ = net.ParseCIDR("10.1.2.3/32") - first, last = networkRange(network) - if !first.Equal(net.ParseIP("10.1.2.3")) { - t.Error(first.String()) - } - if !last.Equal(net.ParseIP("10.1.2.3")) { - t.Error(last.String()) - } - if size := networkSize(network.Mask); size != 1 { - t.Error(size) - } - - // 31bit mask - _, network, _ = net.ParseCIDR("10.1.2.3/31") - first, last = networkRange(network) - if 
!first.Equal(net.ParseIP("10.1.2.2")) { - t.Error(first.String()) - } - if !last.Equal(net.ParseIP("10.1.2.3")) { - t.Error(last.String()) - } - if size := networkSize(network.Mask); size != 2 { - t.Error(size) - } - - // 26bit mask - _, network, _ = net.ParseCIDR("10.1.2.3/26") - first, last = networkRange(network) - if !first.Equal(net.ParseIP("10.1.2.0")) { - t.Error(first.String()) - } - if !last.Equal(net.ParseIP("10.1.2.63")) { - t.Error(last.String()) - } - if size := networkSize(network.Mask); size != 64 { - t.Error(size) - } -} - -func TestConversion(t *testing.T) { - ip := net.ParseIP("127.0.0.1") - i := ipToInt(ip) - if i == 0 { - t.Fatal("converted to zero") - } - conv := intToIP(i) - if !ip.Equal(conv) { - t.Error(conv.String()) - } -} - -func TestIPAllocator(t *testing.T) { - expectedIPs := []net.IP{ - 0: net.IPv4(127, 0, 0, 2), - 1: net.IPv4(127, 0, 0, 3), - 2: net.IPv4(127, 0, 0, 4), - 3: net.IPv4(127, 0, 0, 5), - 4: net.IPv4(127, 0, 0, 6), - } - - gwIP, n, _ := net.ParseCIDR("127.0.0.1/29") - alloc := newIPAllocator(&net.IPNet{IP: gwIP, Mask: n.Mask}) - // Pool after initialisation (f = free, u = used) - // 2(f) - 3(f) - 4(f) - 5(f) - 6(f) - // ↑ - - // Check that we get 5 IPs, from 127.0.0.2–127.0.0.6, in that - // order. 
- for i := 0; i < 5; i++ { - ip, err := alloc.Acquire() - if err != nil { - t.Fatal(err) - } - - assertIPEquals(t, expectedIPs[i], ip) - } - // Before loop begin - // 2(f) - 3(f) - 4(f) - 5(f) - 6(f) - // ↑ - - // After i = 0 - // 2(u) - 3(f) - 4(f) - 5(f) - 6(f) - // ↑ - - // After i = 1 - // 2(u) - 3(u) - 4(f) - 5(f) - 6(f) - // ↑ - - // After i = 2 - // 2(u) - 3(u) - 4(u) - 5(f) - 6(f) - // ↑ - - // After i = 3 - // 2(u) - 3(u) - 4(u) - 5(u) - 6(f) - // ↑ - - // After i = 4 - // 2(u) - 3(u) - 4(u) - 5(u) - 6(u) - // ↑ - - // Check that there are no more IPs - _, err := alloc.Acquire() - if err == nil { - t.Fatal("There shouldn't be any IP addresses at this point") - } - - // Release some IPs in non-sequential order - alloc.Release(expectedIPs[3]) - // 2(u) - 3(u) - 4(u) - 5(f) - 6(u) - // ↑ - - alloc.Release(expectedIPs[2]) - // 2(u) - 3(u) - 4(f) - 5(f) - 6(u) - // ↑ - - alloc.Release(expectedIPs[4]) - // 2(u) - 3(u) - 4(f) - 5(f) - 6(f) - // ↑ - - // Make sure that IPs are reused in sequential order, starting - // with the first released IP - newIPs := make([]net.IP, 3) - for i := 0; i < 3; i++ { - ip, err := alloc.Acquire() - if err != nil { - t.Fatal(err) - } - - newIPs[i] = ip - } - // Before loop begin - // 2(u) - 3(u) - 4(f) - 5(f) - 6(f) - // ↑ - - // After i = 0 - // 2(u) - 3(u) - 4(f) - 5(u) - 6(f) - // ↑ - - // After i = 1 - // 2(u) - 3(u) - 4(f) - 5(u) - 6(u) - // ↑ - - // After i = 2 - // 2(u) - 3(u) - 4(u) - 5(u) - 6(u) - // ↑ - - assertIPEquals(t, expectedIPs[3], newIPs[0]) - assertIPEquals(t, expectedIPs[4], newIPs[1]) - assertIPEquals(t, expectedIPs[2], newIPs[2]) - - _, err = alloc.Acquire() - if err == nil { - t.Fatal("There shouldn't be any IP addresses at this point") - } -} - -func assertIPEquals(t *testing.T, ip1, ip2 net.IP) { - if !ip1.Equal(ip2) { - t.Fatalf("Expected IP %s, got %s", ip1, ip2) - } -} - -func AssertOverlap(CIDRx string, CIDRy string, t *testing.T) { - _, netX, _ := net.ParseCIDR(CIDRx) - _, netY, _ := 
net.ParseCIDR(CIDRy) - if !networkOverlaps(netX, netY) { - t.Errorf("%v and %v should overlap", netX, netY) - } -} - -func AssertNoOverlap(CIDRx string, CIDRy string, t *testing.T) { - _, netX, _ := net.ParseCIDR(CIDRx) - _, netY, _ := net.ParseCIDR(CIDRy) - if networkOverlaps(netX, netY) { - t.Errorf("%v and %v should not overlap", netX, netY) - } -} - -func TestNetworkOverlaps(t *testing.T) { - //netY starts at same IP and ends within netX - AssertOverlap("172.16.0.1/24", "172.16.0.1/25", t) - //netY starts within netX and ends at same IP - AssertOverlap("172.16.0.1/24", "172.16.0.128/25", t) - //netY starts and ends within netX - AssertOverlap("172.16.0.1/24", "172.16.0.64/25", t) - //netY starts at same IP and ends outside of netX - AssertOverlap("172.16.0.1/24", "172.16.0.1/23", t) - //netY starts before and ends at same IP of netX - AssertOverlap("172.16.1.1/24", "172.16.0.1/23", t) - //netY starts before and ends outside of netX - AssertOverlap("172.16.1.1/24", "172.16.0.1/23", t) - //netY starts and ends before netX - AssertNoOverlap("172.16.1.1/25", "172.16.0.1/24", t) - //netX starts and ends before netY - AssertNoOverlap("172.16.1.1/25", "172.16.2.1/24", t) -} - -func TestCheckRouteOverlaps(t *testing.T) { - routesData := []string{"10.0.2.0/32", "10.0.3.0/24", "10.0.42.0/24", "172.16.42.0/24", "192.168.142.0/24"} - - routes := []*net.IPNet{} - for _, addr := range routesData { - _, netX, _ := net.ParseCIDR(addr) - routes = append(routes, netX) - } - - _, netX, _ := net.ParseCIDR("172.16.0.1/24") - if err := checkRouteOverlaps(routes, netX); err != nil { - t.Fatal(err) - } - - _, netX, _ = net.ParseCIDR("10.0.2.0/24") - if err := checkRouteOverlaps(routes, netX); err == nil { - t.Fatalf("10.0.2.0/24 and 10.0.2.0 should overlap but it doesn't") - } -} - -func TestCheckNameserverOverlaps(t *testing.T) { - nameservers := []string{"10.0.2.3/32", "192.168.102.1/32"} - - _, netX, _ := net.ParseCIDR("10.0.2.3/32") - - if err := 
checkNameserverOverlaps(nameservers, netX); err == nil { - t.Fatalf("%s should overlap 10.0.2.3/32 but doesn't", netX) - } - - _, netX, _ = net.ParseCIDR("192.168.102.2/32") - - if err := checkNameserverOverlaps(nameservers, netX); err != nil { - t.Fatalf("%s should not overlap %v but it does", netX, nameservers) - } -} - -type StubProxy struct { - frontendAddr *net.Addr - backendAddr *net.Addr -} - -func (proxy *StubProxy) Run() {} -func (proxy *StubProxy) Close() {} -func (proxy *StubProxy) FrontendAddr() net.Addr { return *proxy.frontendAddr } -func (proxy *StubProxy) BackendAddr() net.Addr { return *proxy.backendAddr } - -func NewStubProxy(frontendAddr, backendAddr net.Addr) (proxy.Proxy, error) { - return &StubProxy{ - frontendAddr: &frontendAddr, - backendAddr: &backendAddr, - }, nil -} - -func TestPortMapper(t *testing.T) { - // FIXME: is this iptables chain still used anywhere? - var chain *iptables.Chain - mapper := &PortMapper{ - tcpMapping: make(map[string]*net.TCPAddr), - tcpProxies: make(map[string]proxy.Proxy), - udpMapping: make(map[string]*net.UDPAddr), - udpProxies: make(map[string]proxy.Proxy), - iptables: chain, - defaultIp: net.IP("0.0.0.0"), - proxyFactoryFunc: NewStubProxy, - } - - dstIp1 := net.ParseIP("192.168.0.1") - dstIp2 := net.ParseIP("192.168.0.2") - srcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")} - srcAddr2 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.2")} - - if err := mapper.Map(dstIp1, 80, srcAddr1); err != nil { - t.Fatalf("Failed to allocate port: %s", err) - } - - if mapper.Map(dstIp1, 80, srcAddr1) == nil { - t.Fatalf("Port is in use - mapping should have failed") - } - - if mapper.Map(dstIp1, 80, srcAddr2) == nil { - t.Fatalf("Port is in use - mapping should have failed") - } - - if err := mapper.Map(dstIp2, 80, srcAddr2); err != nil { - t.Fatalf("Failed to allocate port: %s", err) - } - - if mapper.Unmap(dstIp1, 80, "tcp") != nil { - t.Fatalf("Failed to release port") - } - - if 
mapper.Unmap(dstIp2, 80, "tcp") != nil { - t.Fatalf("Failed to release port") - } - - if mapper.Unmap(dstIp2, 80, "tcp") == nil { - t.Fatalf("Port already released, but no error reported") - } -} diff --git a/networkdriver/ipallocator/allocator.go b/networkdriver/ipallocator/allocator.go new file mode 100644 index 0000000000..1c5a7b4cc2 --- /dev/null +++ b/networkdriver/ipallocator/allocator.go @@ -0,0 +1,159 @@ +package ipallocator + +import ( + "encoding/binary" + "errors" + "github.com/dotcloud/docker/networkdriver" + "github.com/dotcloud/docker/pkg/collections" + "net" + "sync" +) + +type networkSet map[string]*collections.OrderedIntSet + +var ( + ErrNoAvailableIPs = errors.New("no available ip addresses on network") + ErrIPAlreadyAllocated = errors.New("ip already allocated") +) + +var ( + lock = sync.Mutex{} + allocatedIPs = networkSet{} + availableIPS = networkSet{} +) + +// RequestIP requests an available ip from the given network. It +// will return the next available ip if the ip provided is nil. If the +// ip provided is not nil it will validate that the provided ip is available +// for use or return an error +func RequestIP(address *net.IPNet, ip *net.IP) (*net.IP, error) { + lock.Lock() + defer lock.Unlock() + + checkAddress(address) + + if ip == nil { + next, err := getNextIp(address) + if err != nil { + return nil, err + } + return next, nil + } + + if err := registerIP(address, ip); err != nil { + return nil, err + } + return ip, nil +} + +// ReleaseIP adds the provided ip back into the pool of +// available ips to be returned for use. +func ReleaseIP(address *net.IPNet, ip *net.IP) error { + lock.Lock() + defer lock.Unlock() + + checkAddress(address) + + var ( + existing = allocatedIPs[address.String()] + available = availableIPS[address.String()] + pos = getPosition(address, ip) + ) + + existing.Remove(int(pos)) + available.Push(int(pos)) + + return nil +} + +// convert the ip into the position in the subnet. 
Only +// position are saved in the set +func getPosition(address *net.IPNet, ip *net.IP) int32 { + var ( + first, _ = networkdriver.NetworkRange(address) + base = ipToInt(&first) + i = ipToInt(ip) + ) + return i - base +} + +// return an available ip if one is currently available. If not, +// return the next available ip for the nextwork +func getNextIp(address *net.IPNet) (*net.IP, error) { + var ( + ownIP = ipToInt(&address.IP) + available = availableIPS[address.String()] + allocated = allocatedIPs[address.String()] + first, _ = networkdriver.NetworkRange(address) + base = ipToInt(&first) + size = int(networkdriver.NetworkSize(address.Mask)) + max = int32(size - 2) // size -1 for the broadcast address, -1 for the gateway address + pos = int32(available.Pop()) + ) + + // We pop and push the position not the ip + if pos != 0 { + ip := intToIP(int32(base + pos)) + allocated.Push(int(pos)) + + return ip, nil + } + + var ( + firstNetIP = address.IP.To4().Mask(address.Mask) + firstAsInt = ipToInt(&firstNetIP) + 1 + ) + + pos = int32(allocated.PullBack()) + for i := int32(0); i < max; i++ { + pos = pos%max + 1 + next := int32(base + pos) + + if next == ownIP || next == firstAsInt { + continue + } + + if !allocated.Exists(int(pos)) { + ip := intToIP(next) + allocated.Push(int(pos)) + return ip, nil + } + } + return nil, ErrNoAvailableIPs +} + +func registerIP(address *net.IPNet, ip *net.IP) error { + var ( + existing = allocatedIPs[address.String()] + available = availableIPS[address.String()] + pos = getPosition(address, ip) + ) + + if existing.Exists(int(pos)) { + return ErrIPAlreadyAllocated + } + available.Remove(int(pos)) + + return nil +} + +// Converts a 4 bytes IP into a 32 bit integer +func ipToInt(ip *net.IP) int32 { + return int32(binary.BigEndian.Uint32(ip.To4())) +} + +// Converts 32 bit integer into a 4 bytes IP address +func intToIP(n int32) *net.IP { + b := make([]byte, 4) + binary.BigEndian.PutUint32(b, uint32(n)) + ip := net.IP(b) + return &ip +} + 
+func checkAddress(address *net.IPNet) { + key := address.String() + if _, exists := allocatedIPs[key]; !exists { + allocatedIPs[key] = collections.NewOrderedIntSet() + availableIPS[key] = collections.NewOrderedIntSet() + } +} diff --git a/networkdriver/ipallocator/allocator_test.go b/networkdriver/ipallocator/allocator_test.go new file mode 100644 index 0000000000..5e9fcfc983 --- /dev/null +++ b/networkdriver/ipallocator/allocator_test.go @@ -0,0 +1,241 @@ +package ipallocator + +import ( + "fmt" + "net" + "testing" +) + +func reset() { + allocatedIPs = networkSet{} + availableIPS = networkSet{} +} + +func TestRequestNewIps(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{192, 168, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + + for i := 2; i < 10; i++ { + ip, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + if expected := fmt.Sprintf("192.168.0.%d", i); ip.String() != expected { + t.Fatalf("Expected ip %s got %s", expected, ip.String()) + } + } +} + +func TestReleaseIp(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{192, 168, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + + ip, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + if err := ReleaseIP(network, ip); err != nil { + t.Fatal(err) + } +} + +func TestGetReleasedIp(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{192, 168, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + + ip, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + value := ip.String() + if err := ReleaseIP(network, ip); err != nil { + t.Fatal(err) + } + + ip, err = RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + if ip.String() != value { + t.Fatalf("Expected to receive same ip %s got %s", value, ip.String()) + } +} + +func TestRequesetSpecificIp(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{192, 168, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + + ip := 
net.ParseIP("192.168.1.5") + + if _, err := RequestIP(network, &ip); err != nil { + t.Fatal(err) + } +} + +func TestConversion(t *testing.T) { + ip := net.ParseIP("127.0.0.1") + i := ipToInt(&ip) + if i == 0 { + t.Fatal("converted to zero") + } + conv := intToIP(i) + if !ip.Equal(*conv) { + t.Error(conv.String()) + } +} + +func TestIPAllocator(t *testing.T) { + expectedIPs := []net.IP{ + 0: net.IPv4(127, 0, 0, 2), + 1: net.IPv4(127, 0, 0, 3), + 2: net.IPv4(127, 0, 0, 4), + 3: net.IPv4(127, 0, 0, 5), + 4: net.IPv4(127, 0, 0, 6), + } + + gwIP, n, _ := net.ParseCIDR("127.0.0.1/29") + network := &net.IPNet{IP: gwIP, Mask: n.Mask} + // Pool after initialisation (f = free, u = used) + // 2(f) - 3(f) - 4(f) - 5(f) - 6(f) + // ↑ + + // Check that we get 5 IPs, from 127.0.0.2–127.0.0.6, in that + // order. + for i := 0; i < 5; i++ { + ip, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + assertIPEquals(t, &expectedIPs[i], ip) + } + // Before loop begin + // 2(f) - 3(f) - 4(f) - 5(f) - 6(f) + // ↑ + + // After i = 0 + // 2(u) - 3(f) - 4(f) - 5(f) - 6(f) + // ↑ + + // After i = 1 + // 2(u) - 3(u) - 4(f) - 5(f) - 6(f) + // ↑ + + // After i = 2 + // 2(u) - 3(u) - 4(u) - 5(f) - 6(f) + // ↑ + + // After i = 3 + // 2(u) - 3(u) - 4(u) - 5(u) - 6(f) + // ↑ + + // After i = 4 + // 2(u) - 3(u) - 4(u) - 5(u) - 6(u) + // ↑ + + // Check that there are no more IPs + ip, err := RequestIP(network, nil) + if err == nil { + t.Fatalf("There shouldn't be any IP addresses at this point, got %s\n", ip) + } + + // Release some IPs in non-sequential order + if err := ReleaseIP(network, &expectedIPs[3]); err != nil { + t.Fatal(err) + } + // 2(u) - 3(u) - 4(u) - 5(f) - 6(u) + // ↑ + + if err := ReleaseIP(network, &expectedIPs[2]); err != nil { + t.Fatal(err) + } + // 2(u) - 3(u) - 4(f) - 5(f) - 6(u) + // ↑ + + if err := ReleaseIP(network, &expectedIPs[4]); err != nil { + t.Fatal(err) + } + // 2(u) - 3(u) - 4(f) - 5(f) - 6(f) + // ↑ + + // Make sure that IPs are reused in 
sequential order, starting + // with the first released IP + newIPs := make([]*net.IP, 3) + for i := 0; i < 3; i++ { + ip, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + newIPs[i] = ip + } + // Before loop begin + // 2(u) - 3(u) - 4(f) - 5(f) - 6(f) + // ↑ + + // After i = 0 + // 2(u) - 3(u) - 4(f) - 5(u) - 6(f) + // ↑ + + // After i = 1 + // 2(u) - 3(u) - 4(f) - 5(u) - 6(u) + // ↑ + + // After i = 2 + // 2(u) - 3(u) - 4(u) - 5(u) - 6(u) + // ↑ + + // Reordered these because the new set will always return the + // lowest ips first and not in the order that they were released + assertIPEquals(t, &expectedIPs[2], newIPs[0]) + assertIPEquals(t, &expectedIPs[3], newIPs[1]) + assertIPEquals(t, &expectedIPs[4], newIPs[2]) + + _, err = RequestIP(network, nil) + if err == nil { + t.Fatal("There shouldn't be any IP addresses at this point") + } +} + +func TestAllocateFirstIP(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{192, 168, 0, 0}, + Mask: []byte{255, 255, 255, 0}, + } + + firstIP := network.IP.To4().Mask(network.Mask) + first := ipToInt(&firstIP) + 1 + + ip, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + allocated := ipToInt(ip) + + if allocated == first { + t.Fatalf("allocated ip should not equal first ip: %d == %d", first, allocated) + } +} + +func assertIPEquals(t *testing.T, ip1, ip2 *net.IP) { + if !ip1.Equal(*ip2) { + t.Fatalf("Expected IP %s, got %s", ip1, ip2) + } +} diff --git a/networkdriver/lxc/driver.go b/networkdriver/lxc/driver.go new file mode 100644 index 0000000000..c767fd2208 --- /dev/null +++ b/networkdriver/lxc/driver.go @@ -0,0 +1,471 @@ +package lxc + +import ( + "fmt" + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/networkdriver" + "github.com/dotcloud/docker/networkdriver/ipallocator" + "github.com/dotcloud/docker/networkdriver/portallocator" + "github.com/dotcloud/docker/networkdriver/portmapper" + "github.com/dotcloud/docker/pkg/iptables" + 
"github.com/dotcloud/docker/pkg/netlink" + "github.com/dotcloud/docker/utils" + "io/ioutil" + "log" + "net" + "strings" + "syscall" + "unsafe" +) + +const ( + DefaultNetworkBridge = "docker0" + siocBRADDBR = 0x89a0 +) + +// Network interface represents the networking stack of a container +type networkInterface struct { + IP net.IP + PortMappings []net.Addr // there are mappings to the host interfaces +} + +var ( + addrs = []string{ + // Here we don't follow the convention of using the 1st IP of the range for the gateway. + // This is to use the same gateway IPs as the /24 ranges, which predate the /16 ranges. + // In theory this shouldn't matter - in practice there's bound to be a few scripts relying + // on the internal addressing or other stupid things like that. + // The shouldn't, but hey, let's not break them unless we really have to. + "172.17.42.1/16", // Don't use 172.16.0.0/16, it conflicts with EC2 DNS 172.16.0.23 + "10.0.42.1/16", // Don't even try using the entire /8, that's too intrusive + "10.1.42.1/16", + "10.42.42.1/16", + "172.16.42.1/24", + "172.16.43.1/24", + "172.16.44.1/24", + "10.0.42.1/24", + "10.0.43.1/24", + "192.168.42.1/24", + "192.168.43.1/24", + "192.168.44.1/24", + } + + bridgeIface string + bridgeNetwork *net.IPNet + + defaultBindingIP = net.ParseIP("0.0.0.0") + currentInterfaces = make(map[string]*networkInterface) +) + +func init() { + if err := engine.Register("init_networkdriver", InitDriver); err != nil { + panic(err) + } +} + +func InitDriver(job *engine.Job) engine.Status { + var ( + network *net.IPNet + enableIPTables = job.GetenvBool("EnableIptables") + icc = job.GetenvBool("InterContainerCommunication") + ipForward = job.GetenvBool("EnableIpForward") + bridgeIP = job.Getenv("BridgeIP") + ) + + if defaultIP := job.Getenv("DefaultBindingIP"); defaultIP != "" { + defaultBindingIP = net.ParseIP(defaultIP) + } + + bridgeIface = job.Getenv("BridgeIface") + if bridgeIface == "" { + bridgeIface = DefaultNetworkBridge + } + + addr, 
err := networkdriver.GetIfaceAddr(bridgeIface) + if err != nil { + // If the iface is not found, try to create it + job.Logf("creating new bridge for %s", bridgeIface) + if err := createBridge(bridgeIP); err != nil { + job.Error(err) + return engine.StatusErr + } + + job.Logf("getting iface addr") + addr, err = networkdriver.GetIfaceAddr(bridgeIface) + if err != nil { + job.Error(err) + return engine.StatusErr + } + network = addr.(*net.IPNet) + } else { + network = addr.(*net.IPNet) + } + + // Configure iptables for link support + if enableIPTables { + if err := setupIPTables(addr, icc); err != nil { + job.Error(err) + return engine.StatusErr + } + } + + if ipForward { + // Enable IPv4 forwarding + if err := ioutil.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte{'1', '\n'}, 0644); err != nil { + job.Logf("WARNING: unable to enable IPv4 forwarding: %s\n", err) + } + } + + // We can always try removing the iptables + if err := iptables.RemoveExistingChain("DOCKER"); err != nil { + job.Error(err) + return engine.StatusErr + } + + if enableIPTables { + chain, err := iptables.NewChain("DOCKER", bridgeIface) + if err != nil { + job.Error(err) + return engine.StatusErr + } + portmapper.SetIptablesChain(chain) + } + + bridgeNetwork = network + + // https://github.com/dotcloud/docker/issues/2768 + job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", bridgeNetwork.IP) + + for name, f := range map[string]engine.Handler{ + "allocate_interface": Allocate, + "release_interface": Release, + "allocate_port": AllocatePort, + "link": LinkContainers, + } { + if err := job.Eng.Register(name, f); err != nil { + job.Error(err) + return engine.StatusErr + } + } + return engine.StatusOK +} + +func setupIPTables(addr net.Addr, icc bool) error { + // Enable NAT + natArgs := []string{"POSTROUTING", "-t", "nat", "-s", addr.String(), "!", "-d", addr.String(), "-j", "MASQUERADE"} + + if !iptables.Exists(natArgs...) 
{ + if output, err := iptables.Raw(append([]string{"-I"}, natArgs...)...); err != nil { + return fmt.Errorf("Unable to enable network bridge NAT: %s", err) + } else if len(output) != 0 { + return fmt.Errorf("Error iptables postrouting: %s", output) + } + } + + var ( + args = []string{"FORWARD", "-i", bridgeIface, "-o", bridgeIface, "-j"} + acceptArgs = append(args, "ACCEPT") + dropArgs = append(args, "DROP") + ) + + if !icc { + iptables.Raw(append([]string{"-D"}, acceptArgs...)...) + + if !iptables.Exists(dropArgs...) { + + utils.Debugf("Disable inter-container communication") + if output, err := iptables.Raw(append([]string{"-I"}, dropArgs...)...); err != nil { + return fmt.Errorf("Unable to prevent intercontainer communication: %s", err) + } else if len(output) != 0 { + return fmt.Errorf("Error disabling intercontainer communication: %s", output) + } + } + } else { + iptables.Raw(append([]string{"-D"}, dropArgs...)...) + + if !iptables.Exists(acceptArgs...) { + utils.Debugf("Enable inter-container communication") + if output, err := iptables.Raw(append([]string{"-I"}, acceptArgs...)...); err != nil { + return fmt.Errorf("Unable to allow intercontainer communication: %s", err) + } else if len(output) != 0 { + return fmt.Errorf("Error enabling intercontainer communication: %s", output) + } + } + } + + // Accept all non-intercontainer outgoing packets + outgoingArgs := []string{"FORWARD", "-i", bridgeIface, "!", "-o", bridgeIface, "-j", "ACCEPT"} + if !iptables.Exists(outgoingArgs...) { + if output, err := iptables.Raw(append([]string{"-I"}, outgoingArgs...)...); err != nil { + return fmt.Errorf("Unable to allow outgoing packets: %s", err) + } else if len(output) != 0 { + return fmt.Errorf("Error iptables allow outgoing: %s", output) + } + } + + // Accept incoming packets for existing connections + existingArgs := []string{"FORWARD", "-o", bridgeIface, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"} + + if !iptables.Exists(existingArgs...) 
{ + if output, err := iptables.Raw(append([]string{"-I"}, existingArgs...)...); err != nil { + return fmt.Errorf("Unable to allow incoming packets: %s", err) + } else if len(output) != 0 { + return fmt.Errorf("Error iptables allow incoming: %s", output) + } + } + return nil +} + +// CreateBridgeIface creates a network bridge interface on the host system with the name `ifaceName`, +// and attempts to configure it with an address which doesn't conflict with any other interface on the host. +// If it can't find an address which doesn't conflict, it will return an error. +func createBridge(bridgeIP string) error { + nameservers := []string{} + resolvConf, _ := utils.GetResolvConf() + // we don't check for an error here, because we don't really care + // if we can't read /etc/resolv.conf. So instead we skip the append + // if resolvConf is nil. It either doesn't exist, or we can't read it + // for some reason. + if resolvConf != nil { + nameservers = append(nameservers, utils.GetNameserversAsCIDR(resolvConf)...) + } + + var ifaceAddr string + if len(bridgeIP) != 0 { + _, _, err := net.ParseCIDR(bridgeIP) + if err != nil { + return err + } + ifaceAddr = bridgeIP + } else { + for _, addr := range addrs { + _, dockerNetwork, err := net.ParseCIDR(addr) + if err != nil { + return err + } + if err := networkdriver.CheckNameserverOverlaps(nameservers, dockerNetwork); err == nil { + if err := networkdriver.CheckRouteOverlaps(dockerNetwork); err == nil { + ifaceAddr = addr + break + } else { + utils.Debugf("%s %s", addr, err) + } + } + } + } + + if ifaceAddr == "" { + return fmt.Errorf("Could not find a free IP address range for interface '%s'. 
Please configure its address manually and run 'docker -b %s'", bridgeIface, bridgeIface) + } + utils.Debugf("Creating bridge %s with network %s", bridgeIface, ifaceAddr) + + if err := createBridgeIface(bridgeIface); err != nil { + return err + } + + iface, err := net.InterfaceByName(bridgeIface) + if err != nil { + return err + } + + ipAddr, ipNet, err := net.ParseCIDR(ifaceAddr) + if err != nil { + return err + } + + if netlink.NetworkLinkAddIp(iface, ipAddr, ipNet); err != nil { + return fmt.Errorf("Unable to add private network: %s", err) + } + if err := netlink.NetworkLinkUp(iface); err != nil { + return fmt.Errorf("Unable to start network bridge: %s", err) + } + return nil +} + +// Create the actual bridge device. This is more backward-compatible than +// netlink.NetworkLinkAdd and works on RHEL 6. +func createBridgeIface(name string) error { + s, err := syscall.Socket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_IP) + if err != nil { + utils.Debugf("Bridge socket creation failed IPv6 probably not enabled: %v", err) + s, err = syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_IP) + if err != nil { + return fmt.Errorf("Error creating bridge creation socket: %s", err) + } + } + defer syscall.Close(s) + + nameBytePtr, err := syscall.BytePtrFromString(name) + if err != nil { + return fmt.Errorf("Error converting bridge name %s to byte array: %s", name, err) + } + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), siocBRADDBR, uintptr(unsafe.Pointer(nameBytePtr))); err != 0 { + return fmt.Errorf("Error creating bridge: %s", err) + } + return nil +} + +// Allocate a network interface +func Allocate(job *engine.Job) engine.Status { + var ( + ip *net.IP + err error + id = job.Args[0] + requestedIP = net.ParseIP(job.Getenv("RequestedIP")) + ) + + if requestedIP != nil { + ip, err = ipallocator.RequestIP(bridgeNetwork, &requestedIP) + } else { + ip, err = ipallocator.RequestIP(bridgeNetwork, nil) + } + if err != nil { + 
job.Error(err) + return engine.StatusErr + } + + out := engine.Env{} + out.Set("IP", ip.String()) + out.Set("Mask", bridgeNetwork.Mask.String()) + out.Set("Gateway", bridgeNetwork.IP.String()) + out.Set("Bridge", bridgeIface) + + size, _ := bridgeNetwork.Mask.Size() + out.SetInt("IPPrefixLen", size) + + currentInterfaces[id] = &networkInterface{ + IP: *ip, + } + + out.WriteTo(job.Stdout) + + return engine.StatusOK +} + +// release an interface for a select ip +func Release(job *engine.Job) engine.Status { + var ( + id = job.Args[0] + containerInterface = currentInterfaces[id] + ip net.IP + port int + proto string + ) + + for _, nat := range containerInterface.PortMappings { + if err := portmapper.Unmap(nat); err != nil { + log.Printf("Unable to unmap port %s: %s", nat, err) + } + + // this is host mappings + switch a := nat.(type) { + case *net.TCPAddr: + proto = "tcp" + ip = a.IP + port = a.Port + case *net.UDPAddr: + proto = "udp" + ip = a.IP + port = a.Port + } + + if err := portallocator.ReleasePort(ip, proto, port); err != nil { + log.Printf("Unable to release port %s", nat) + } + } + + if err := ipallocator.ReleaseIP(bridgeNetwork, &containerInterface.IP); err != nil { + log.Printf("Unable to release ip %s\n", err) + } + return engine.StatusOK +} + +// Allocate an external port and map it to the interface +func AllocatePort(job *engine.Job) engine.Status { + var ( + err error + + ip = defaultBindingIP + id = job.Args[0] + hostIP = job.Getenv("HostIP") + hostPort = job.GetenvInt("HostPort") + containerPort = job.GetenvInt("ContainerPort") + proto = job.Getenv("Proto") + network = currentInterfaces[id] + ) + + if hostIP != "" { + ip = net.ParseIP(hostIP) + } + + // host ip, proto, and host port + hostPort, err = portallocator.RequestPort(ip, proto, hostPort) + if err != nil { + job.Error(err) + return engine.StatusErr + } + + var ( + container net.Addr + host net.Addr + ) + + if proto == "tcp" { + host = &net.TCPAddr{IP: ip, Port: hostPort} + container = 
&net.TCPAddr{IP: network.IP, Port: containerPort} + } else { + host = &net.UDPAddr{IP: ip, Port: hostPort} + container = &net.UDPAddr{IP: network.IP, Port: containerPort} + } + + if err := portmapper.Map(container, ip, hostPort); err != nil { + portallocator.ReleasePort(ip, proto, hostPort) + + job.Error(err) + return engine.StatusErr + } + network.PortMappings = append(network.PortMappings, host) + + out := engine.Env{} + out.Set("HostIP", ip.String()) + out.SetInt("HostPort", hostPort) + + if _, err := out.WriteTo(job.Stdout); err != nil { + job.Error(err) + return engine.StatusErr + } + return engine.StatusOK +} + +func LinkContainers(job *engine.Job) engine.Status { + var ( + action = job.Args[0] + childIP = job.Getenv("ChildIP") + parentIP = job.Getenv("ParentIP") + ignoreErrors = job.GetenvBool("IgnoreErrors") + ports = job.GetenvList("Ports") + ) + split := func(p string) (string, string) { + parts := strings.Split(p, "/") + return parts[0], parts[1] + } + + for _, p := range ports { + port, proto := split(p) + if output, err := iptables.Raw(action, "FORWARD", + "-i", bridgeIface, "-o", bridgeIface, + "-p", proto, + "-s", parentIP, + "--dport", port, + "-d", childIP, + "-j", "ACCEPT"); !ignoreErrors && err != nil { + job.Error(err) + return engine.StatusErr + } else if len(output) != 0 { + job.Errorf("Error toggle iptables forward: %s", output) + return engine.StatusErr + } + } + return engine.StatusOK +} diff --git a/networkdriver/network.go b/networkdriver/network.go new file mode 100644 index 0000000000..8dda789d2f --- /dev/null +++ b/networkdriver/network.go @@ -0,0 +1,10 @@ +package networkdriver + +import ( + "errors" +) + +var ( + ErrNetworkOverlapsWithNameservers = errors.New("requested network overlaps with nameserver") + ErrNetworkOverlaps = errors.New("requested network overlaps with existing network") +) diff --git a/networkdriver/network_test.go b/networkdriver/network_test.go new file mode 100644 index 0000000000..c15f8b1cf5 --- /dev/null +++ 
b/networkdriver/network_test.go @@ -0,0 +1,190 @@ +package networkdriver + +import ( + "github.com/dotcloud/docker/pkg/netlink" + "net" + "testing" +) + +func TestNonOverlapingNameservers(t *testing.T) { + network := &net.IPNet{ + IP: []byte{192, 168, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + nameservers := []string{ + "127.0.0.1/32", + } + + if err := CheckNameserverOverlaps(nameservers, network); err != nil { + t.Fatal(err) + } +} + +func TestOverlapingNameservers(t *testing.T) { + network := &net.IPNet{ + IP: []byte{192, 168, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + nameservers := []string{ + "192.168.0.1/32", + } + + if err := CheckNameserverOverlaps(nameservers, network); err == nil { + t.Fatalf("Expected error %s got %s", ErrNetworkOverlapsWithNameservers, err) + } +} + +func TestCheckRouteOverlaps(t *testing.T) { + orig := networkGetRoutesFct + defer func() { + networkGetRoutesFct = orig + }() + networkGetRoutesFct = func() ([]netlink.Route, error) { + routesData := []string{"10.0.2.0/32", "10.0.3.0/24", "10.0.42.0/24", "172.16.42.0/24", "192.168.142.0/24"} + + routes := []netlink.Route{} + for _, addr := range routesData { + _, netX, _ := net.ParseCIDR(addr) + routes = append(routes, netlink.Route{IPNet: netX}) + } + return routes, nil + } + + _, netX, _ := net.ParseCIDR("172.16.0.1/24") + if err := CheckRouteOverlaps(netX); err != nil { + t.Fatal(err) + } + + _, netX, _ = net.ParseCIDR("10.0.2.0/24") + if err := CheckRouteOverlaps(netX); err == nil { + t.Fatalf("10.0.2.0/24 and 10.0.2.0 should overlap but it doesn't") + } +} + +func TestCheckNameserverOverlaps(t *testing.T) { + nameservers := []string{"10.0.2.3/32", "192.168.102.1/32"} + + _, netX, _ := net.ParseCIDR("10.0.2.3/32") + + if err := CheckNameserverOverlaps(nameservers, netX); err == nil { + t.Fatalf("%s should overlap 10.0.2.3/32 but doesn't", netX) + } + + _, netX, _ = net.ParseCIDR("192.168.102.2/32") + + if err := CheckNameserverOverlaps(nameservers, netX); err != nil { + 
t.Fatalf("%s should not overlap %v but it does", netX, nameservers) + } +} + +func AssertOverlap(CIDRx string, CIDRy string, t *testing.T) { + _, netX, _ := net.ParseCIDR(CIDRx) + _, netY, _ := net.ParseCIDR(CIDRy) + if !NetworkOverlaps(netX, netY) { + t.Errorf("%v and %v should overlap", netX, netY) + } +} + +func AssertNoOverlap(CIDRx string, CIDRy string, t *testing.T) { + _, netX, _ := net.ParseCIDR(CIDRx) + _, netY, _ := net.ParseCIDR(CIDRy) + if NetworkOverlaps(netX, netY) { + t.Errorf("%v and %v should not overlap", netX, netY) + } +} + +func TestNetworkOverlaps(t *testing.T) { + //netY starts at same IP and ends within netX + AssertOverlap("172.16.0.1/24", "172.16.0.1/25", t) + //netY starts within netX and ends at same IP + AssertOverlap("172.16.0.1/24", "172.16.0.128/25", t) + //netY starts and ends within netX + AssertOverlap("172.16.0.1/24", "172.16.0.64/25", t) + //netY starts at same IP and ends outside of netX + AssertOverlap("172.16.0.1/24", "172.16.0.1/23", t) + //netY starts before and ends at same IP of netX + AssertOverlap("172.16.1.1/24", "172.16.0.1/23", t) + //netY starts before and ends outside of netX + AssertOverlap("172.16.1.1/24", "172.16.0.1/23", t) + //netY starts and ends before netX + AssertNoOverlap("172.16.1.1/25", "172.16.0.1/24", t) + //netX starts and ends before netY + AssertNoOverlap("172.16.1.1/25", "172.16.2.1/24", t) +} + +func TestNetworkRange(t *testing.T) { + // Simple class C test + _, network, _ := net.ParseCIDR("192.168.0.1/24") + first, last := NetworkRange(network) + if !first.Equal(net.ParseIP("192.168.0.0")) { + t.Error(first.String()) + } + if !last.Equal(net.ParseIP("192.168.0.255")) { + t.Error(last.String()) + } + if size := NetworkSize(network.Mask); size != 256 { + t.Error(size) + } + + // Class A test + _, network, _ = net.ParseCIDR("10.0.0.1/8") + first, last = NetworkRange(network) + if !first.Equal(net.ParseIP("10.0.0.0")) { + t.Error(first.String()) + } + if !last.Equal(net.ParseIP("10.255.255.255")) { 
+ t.Error(last.String()) + } + if size := NetworkSize(network.Mask); size != 16777216 { + t.Error(size) + } + + // Class A, random IP address + _, network, _ = net.ParseCIDR("10.1.2.3/8") + first, last = NetworkRange(network) + if !first.Equal(net.ParseIP("10.0.0.0")) { + t.Error(first.String()) + } + if !last.Equal(net.ParseIP("10.255.255.255")) { + t.Error(last.String()) + } + + // 32bit mask + _, network, _ = net.ParseCIDR("10.1.2.3/32") + first, last = NetworkRange(network) + if !first.Equal(net.ParseIP("10.1.2.3")) { + t.Error(first.String()) + } + if !last.Equal(net.ParseIP("10.1.2.3")) { + t.Error(last.String()) + } + if size := NetworkSize(network.Mask); size != 1 { + t.Error(size) + } + + // 31bit mask + _, network, _ = net.ParseCIDR("10.1.2.3/31") + first, last = NetworkRange(network) + if !first.Equal(net.ParseIP("10.1.2.2")) { + t.Error(first.String()) + } + if !last.Equal(net.ParseIP("10.1.2.3")) { + t.Error(last.String()) + } + if size := NetworkSize(network.Mask); size != 2 { + t.Error(size) + } + + // 26bit mask + _, network, _ = net.ParseCIDR("10.1.2.3/26") + first, last = NetworkRange(network) + if !first.Equal(net.ParseIP("10.1.2.0")) { + t.Error(first.String()) + } + if !last.Equal(net.ParseIP("10.1.2.63")) { + t.Error(last.String()) + } + if size := NetworkSize(network.Mask); size != 64 { + t.Error(size) + } +} diff --git a/networkdriver/portallocator/portallocator.go b/networkdriver/portallocator/portallocator.go new file mode 100644 index 0000000000..71cac82703 --- /dev/null +++ b/networkdriver/portallocator/portallocator.go @@ -0,0 +1,165 @@ +package portallocator + +import ( + "errors" + "github.com/dotcloud/docker/pkg/collections" + "net" + "sync" +) + +const ( + BeginPortRange = 49153 + EndPortRange = 65535 +) + +type ( + portMappings map[string]*collections.OrderedIntSet + ipMapping map[string]portMappings +) + +var ( + ErrPortAlreadyAllocated = errors.New("port has already been allocated") + ErrPortExceedsRange = errors.New("port 
exceeds upper range") + ErrUnknownProtocol = errors.New("unknown protocol") +) + +var ( + currentDynamicPort = map[string]int{ + "tcp": BeginPortRange - 1, + "udp": BeginPortRange - 1, + } + defaultIP = net.ParseIP("0.0.0.0") + defaultAllocatedPorts = portMappings{} + otherAllocatedPorts = ipMapping{} + lock = sync.Mutex{} +) + +func init() { + defaultAllocatedPorts["tcp"] = collections.NewOrderedIntSet() + defaultAllocatedPorts["udp"] = collections.NewOrderedIntSet() +} + +// RequestPort returns an available port if the port is 0 +// If the provided port is not 0 then it will be checked if +// it is available for allocation +func RequestPort(ip net.IP, proto string, port int) (int, error) { + lock.Lock() + defer lock.Unlock() + + if err := validateProtocol(proto); err != nil { + return 0, err + } + + // If the user requested a specific port to be allocated + if port > 0 { + if err := registerSetPort(ip, proto, port); err != nil { + return 0, err + } + return port, nil + } + return registerDynamicPort(ip, proto) +} + +// ReleasePort will return the provided port back into the +// pool for reuse +func ReleasePort(ip net.IP, proto string, port int) error { + lock.Lock() + defer lock.Unlock() + + if err := validateProtocol(proto); err != nil { + return err + } + + allocated := defaultAllocatedPorts[proto] + allocated.Remove(port) + + if !equalsDefault(ip) { + registerIP(ip) + + // Remove the port for the specific ip address + allocated = otherAllocatedPorts[ip.String()][proto] + allocated.Remove(port) + } + return nil +} + +func ReleaseAll() error { + lock.Lock() + defer lock.Unlock() + + currentDynamicPort["tcp"] = BeginPortRange - 1 + currentDynamicPort["udp"] = BeginPortRange - 1 + + defaultAllocatedPorts = portMappings{} + defaultAllocatedPorts["tcp"] = collections.NewOrderedIntSet() + defaultAllocatedPorts["udp"] = collections.NewOrderedIntSet() + + otherAllocatedPorts = ipMapping{} + + return nil +} + +func registerDynamicPort(ip net.IP, proto string) (int, 
error) { + allocated := defaultAllocatedPorts[proto] + + port := nextPort(proto) + if port > EndPortRange { + return 0, ErrPortExceedsRange + } + + if !equalsDefault(ip) { + registerIP(ip) + + ipAllocated := otherAllocatedPorts[ip.String()][proto] + ipAllocated.Push(port) + } else { + allocated.Push(port) + } + return port, nil +} + +func registerSetPort(ip net.IP, proto string, port int) error { + allocated := defaultAllocatedPorts[proto] + if allocated.Exists(port) { + return ErrPortAlreadyAllocated + } + + if !equalsDefault(ip) { + registerIP(ip) + + ipAllocated := otherAllocatedPorts[ip.String()][proto] + if ipAllocated.Exists(port) { + return ErrPortAlreadyAllocated + } + ipAllocated.Push(port) + } else { + allocated.Push(port) + } + return nil +} + +func equalsDefault(ip net.IP) bool { + return ip == nil || ip.Equal(defaultIP) +} + +func nextPort(proto string) int { + c := currentDynamicPort[proto] + 1 + currentDynamicPort[proto] = c + return c +} + +func registerIP(ip net.IP) { + if _, exists := otherAllocatedPorts[ip.String()]; !exists { + otherAllocatedPorts[ip.String()] = portMappings{ + "tcp": collections.NewOrderedIntSet(), + "udp": collections.NewOrderedIntSet(), + } + } +} + +func validateProtocol(proto string) error { + if _, exists := defaultAllocatedPorts[proto]; !exists { + return ErrUnknownProtocol + } + return nil +} diff --git a/networkdriver/portallocator/portallocator_test.go b/networkdriver/portallocator/portallocator_test.go new file mode 100644 index 0000000000..603bd03bd7 --- /dev/null +++ b/networkdriver/portallocator/portallocator_test.go @@ -0,0 +1,184 @@ +package portallocator + +import ( + "net" + "testing" +) + +func reset() { + ReleaseAll() +} + +func TestRequestNewPort(t *testing.T) { + defer reset() + + port, err := RequestPort(defaultIP, "tcp", 0) + if err != nil { + t.Fatal(err) + } + + if expected := BeginPortRange; port != expected { + t.Fatalf("Expected port %d got %d", expected, port) + } +} + +func 
TestRequestSpecificPort(t *testing.T) { + defer reset() + + port, err := RequestPort(defaultIP, "tcp", 5000) + if err != nil { + t.Fatal(err) + } + if port != 5000 { + t.Fatalf("Expected port 5000 got %d", port) + } +} + +func TestReleasePort(t *testing.T) { + defer reset() + + port, err := RequestPort(defaultIP, "tcp", 5000) + if err != nil { + t.Fatal(err) + } + if port != 5000 { + t.Fatalf("Expected port 5000 got %d", port) + } + + if err := ReleasePort(defaultIP, "tcp", 5000); err != nil { + t.Fatal(err) + } +} + +func TestReuseReleasedPort(t *testing.T) { + defer reset() + + port, err := RequestPort(defaultIP, "tcp", 5000) + if err != nil { + t.Fatal(err) + } + if port != 5000 { + t.Fatalf("Expected port 5000 got %d", port) + } + + if err := ReleasePort(defaultIP, "tcp", 5000); err != nil { + t.Fatal(err) + } + + port, err = RequestPort(defaultIP, "tcp", 5000) + if err != nil { + t.Fatal(err) + } +} + +func TestReleaseUnreadledPort(t *testing.T) { + defer reset() + + port, err := RequestPort(defaultIP, "tcp", 5000) + if err != nil { + t.Fatal(err) + } + if port != 5000 { + t.Fatalf("Expected port 5000 got %d", port) + } + + port, err = RequestPort(defaultIP, "tcp", 5000) + if err != ErrPortAlreadyAllocated { + t.Fatalf("Expected error %s got %s", ErrPortAlreadyAllocated, err) + } +} + +func TestUnknowProtocol(t *testing.T) { + defer reset() + + if _, err := RequestPort(defaultIP, "tcpp", 0); err != ErrUnknownProtocol { + t.Fatalf("Expected error %s got %s", ErrUnknownProtocol, err) + } +} + +func TestAllocateAllPorts(t *testing.T) { + defer reset() + + for i := 0; i <= EndPortRange-BeginPortRange; i++ { + port, err := RequestPort(defaultIP, "tcp", 0) + if err != nil { + t.Fatal(err) + } + + if expected := BeginPortRange + i; port != expected { + t.Fatalf("Expected port %d got %d", expected, port) + } + } + + if _, err := RequestPort(defaultIP, "tcp", 0); err != ErrPortExceedsRange { + t.Fatalf("Expected error %s got %s", ErrPortExceedsRange, err) + } + + _, 
err := RequestPort(defaultIP, "udp", 0) + if err != nil { + t.Fatal(err) + } +} + +func BenchmarkAllocatePorts(b *testing.B) { + defer reset() + + for i := 0; i < b.N; i++ { + for i := 0; i <= EndPortRange-BeginPortRange; i++ { + port, err := RequestPort(defaultIP, "tcp", 0) + if err != nil { + b.Fatal(err) + } + + if expected := BeginPortRange + i; port != expected { + b.Fatalf("Expected port %d got %d", expected, port) + } + } + reset() + } +} + +func TestPortAllocation(t *testing.T) { + defer reset() + + ip := net.ParseIP("192.168.0.1") + ip2 := net.ParseIP("192.168.0.2") + if port, err := RequestPort(ip, "tcp", 80); err != nil { + t.Fatal(err) + } else if port != 80 { + t.Fatalf("Acquire(80) should return 80, not %d", port) + } + port, err := RequestPort(ip, "tcp", 0) + if err != nil { + t.Fatal(err) + } + if port <= 0 { + t.Fatalf("Acquire(0) should return a non-zero port") + } + + if _, err := RequestPort(ip, "tcp", port); err == nil { + t.Fatalf("Acquiring a port already in use should return an error") + } + + if newPort, err := RequestPort(ip, "tcp", 0); err != nil { + t.Fatal(err) + } else if newPort == port { + t.Fatalf("Acquire(0) allocated the same port twice: %d", port) + } + + if _, err := RequestPort(ip, "tcp", 80); err == nil { + t.Fatalf("Acquiring a port already in use should return an error") + } + if _, err := RequestPort(ip2, "tcp", 80); err != nil { + t.Fatalf("It should be possible to allocate the same port on a different interface") + } + if _, err := RequestPort(ip2, "tcp", 80); err == nil { + t.Fatalf("Acquiring a port already in use should return an error") + } + if err := ReleasePort(ip, "tcp", 80); err != nil { + t.Fatal(err) + } + if _, err := RequestPort(ip, "tcp", 80); err != nil { + t.Fatal(err) + } +} diff --git a/networkdriver/portmapper/mapper.go b/networkdriver/portmapper/mapper.go new file mode 100644 index 0000000000..f052c48143 --- /dev/null +++ b/networkdriver/portmapper/mapper.go @@ -0,0 +1,131 @@ +package portmapper + 
+import ( + "errors" + "fmt" + "github.com/dotcloud/docker/pkg/iptables" + "github.com/dotcloud/docker/proxy" + "net" + "sync" +) + +type mapping struct { + proto string + userlandProxy proxy.Proxy + host net.Addr + container net.Addr +} + +var ( + chain *iptables.Chain + lock sync.Mutex + + // udp:ip:port + currentMappings = make(map[string]*mapping) + newProxy = proxy.NewProxy +) + +var ( + ErrUnknownBackendAddressType = errors.New("unknown container address type not supported") + ErrPortMappedForIP = errors.New("port is already mapped to ip") + ErrPortNotMapped = errors.New("port is not mapped") +) + +func SetIptablesChain(c *iptables.Chain) { + chain = c +} + +func Map(container net.Addr, hostIP net.IP, hostPort int) error { + lock.Lock() + defer lock.Unlock() + + var m *mapping + switch container.(type) { + case *net.TCPAddr: + m = &mapping{ + proto: "tcp", + host: &net.TCPAddr{IP: hostIP, Port: hostPort}, + container: container, + } + case *net.UDPAddr: + m = &mapping{ + proto: "udp", + host: &net.UDPAddr{IP: hostIP, Port: hostPort}, + container: container, + } + default: + return ErrUnknownBackendAddressType + } + + key := getKey(m.host) + if _, exists := currentMappings[key]; exists { + return ErrPortMappedForIP + } + + containerIP, containerPort := getIPAndPort(m.container) + if err := forward(iptables.Add, m.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil { + return err + } + + p, err := newProxy(m.host, m.container) + if err != nil { + // need to undo the iptables rules before we return + forward(iptables.Delete, m.proto, hostIP, hostPort, containerIP.String(), containerPort) + return err + } + + m.userlandProxy = p + currentMappings[key] = m + + go p.Run() + + return nil +} + +func Unmap(host net.Addr) error { + lock.Lock() + defer lock.Unlock() + + key := getKey(host) + data, exists := currentMappings[key] + if !exists { + return ErrPortNotMapped + } + + data.userlandProxy.Close() + delete(currentMappings, key) + 
containerIP, containerPort := getIPAndPort(data.container) + hostIP, hostPort := getIPAndPort(data.host) + if err := forward(iptables.Delete, data.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil { + return err + } + return nil +} + +func getKey(a net.Addr) string { + switch t := a.(type) { + case *net.TCPAddr: + return fmt.Sprintf("%s:%d/%s", t.IP.String(), t.Port, "tcp") + case *net.UDPAddr: + return fmt.Sprintf("%s:%d/%s", t.IP.String(), t.Port, "udp") + } + return "" +} + +func getIPAndPort(a net.Addr) (net.IP, int) { + switch t := a.(type) { + case *net.TCPAddr: + return t.IP, t.Port + case *net.UDPAddr: + return t.IP, t.Port + } + return nil, 0 +} + +func forward(action iptables.Action, proto string, sourceIP net.IP, sourcePort int, containerIP string, containerPort int) error { + if chain == nil { + return nil + } + return chain.Forward(action, sourceIP, sourcePort, proto, containerIP, containerPort) +} diff --git a/networkdriver/portmapper/mapper_test.go b/networkdriver/portmapper/mapper_test.go new file mode 100644 index 0000000000..05718063e3 --- /dev/null +++ b/networkdriver/portmapper/mapper_test.go @@ -0,0 +1,107 @@ +package portmapper + +import ( + "github.com/dotcloud/docker/pkg/iptables" + "github.com/dotcloud/docker/proxy" + "net" + "testing" +) + +func init() { + // override this func to mock out the proxy server + newProxy = proxy.NewStubProxy +} + +func reset() { + chain = nil + currentMappings = make(map[string]*mapping) +} + +func TestSetIptablesChain(t *testing.T) { + defer reset() + + c := &iptables.Chain{ + Name: "TEST", + Bridge: "192.168.1.1", + } + + if chain != nil { + t.Fatal("chain should be nil at init") + } + + SetIptablesChain(c) + if chain == nil { + t.Fatal("chain should not be nil after set") + } +} + +func TestMapPorts(t *testing.T) { + dstIp1 := net.ParseIP("192.168.0.1") + dstIp2 := net.ParseIP("192.168.0.2") + dstAddr1 := &net.TCPAddr{IP: dstIp1, Port: 80} + dstAddr2 := &net.TCPAddr{IP: dstIp2, Port: 
80} + + srcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")} + srcAddr2 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.2")} + + if err := Map(srcAddr1, dstIp1, 80); err != nil { + t.Fatalf("Failed to allocate port: %s", err) + } + + if Map(srcAddr1, dstIp1, 80) == nil { + t.Fatalf("Port is in use - mapping should have failed") + } + + if Map(srcAddr2, dstIp1, 80) == nil { + t.Fatalf("Port is in use - mapping should have failed") + } + + if err := Map(srcAddr2, dstIp2, 80); err != nil { + t.Fatalf("Failed to allocate port: %s", err) + } + + if Unmap(dstAddr1) != nil { + t.Fatalf("Failed to release port") + } + + if Unmap(dstAddr2) != nil { + t.Fatalf("Failed to release port") + } + + if Unmap(dstAddr2) == nil { + t.Fatalf("Port already released, but no error reported") + } +} + +func TestGetUDPKey(t *testing.T) { + addr := &net.UDPAddr{IP: net.ParseIP("192.168.1.5"), Port: 53} + + key := getKey(addr) + + if expected := "192.168.1.5:53/udp"; key != expected { + t.Fatalf("expected key %s got %s", expected, key) + } +} + +func TestGetTCPKey(t *testing.T) { + addr := &net.TCPAddr{IP: net.ParseIP("192.168.1.5"), Port: 80} + + key := getKey(addr) + + if expected := "192.168.1.5:80/tcp"; key != expected { + t.Fatalf("expected key %s got %s", expected, key) + } +} + +func TestGetUDPIPAndPort(t *testing.T) { + addr := &net.UDPAddr{IP: net.ParseIP("192.168.1.5"), Port: 53} + + ip, port := getIPAndPort(addr) + if expected := "192.168.1.5"; ip.String() != expected { + t.Fatalf("expected ip %s got %s", expected, ip) + } + + if ep := 53; port != ep { + t.Fatalf("expected port %d got %d", ep, port) + } +} diff --git a/networkdriver/utils.go b/networkdriver/utils.go new file mode 100644 index 0000000000..0a4ef70c95 --- /dev/null +++ b/networkdriver/utils.go @@ -0,0 +1,118 @@ +package networkdriver + +import ( + "encoding/binary" + "errors" + "fmt" + "net" + + "github.com/dotcloud/docker/pkg/netlink" +) + +var ( + networkGetRoutesFct = netlink.NetworkGetRoutes 
+ ErrNoDefaultRoute = errors.New("no default route") +) + +func CheckNameserverOverlaps(nameservers []string, toCheck *net.IPNet) error { + if len(nameservers) > 0 { + for _, ns := range nameservers { + _, nsNetwork, err := net.ParseCIDR(ns) + if err != nil { + return err + } + if NetworkOverlaps(toCheck, nsNetwork) { + return ErrNetworkOverlapsWithNameservers + } + } + } + return nil +} + +func CheckRouteOverlaps(toCheck *net.IPNet) error { + networks, err := networkGetRoutesFct() + if err != nil { + return err + } + + for _, network := range networks { + if network.IPNet != nil && NetworkOverlaps(toCheck, network.IPNet) { + return ErrNetworkOverlaps + } + } + return nil +} + +// Detects overlap between one IPNet and another +func NetworkOverlaps(netX *net.IPNet, netY *net.IPNet) bool { + if firstIP, _ := NetworkRange(netX); netY.Contains(firstIP) { + return true + } + if firstIP, _ := NetworkRange(netY); netX.Contains(firstIP) { + return true + } + return false +} + +// Calculates the first and last IP addresses in an IPNet +func NetworkRange(network *net.IPNet) (net.IP, net.IP) { + var ( + netIP = network.IP.To4() + firstIP = netIP.Mask(network.Mask) + lastIP = net.IPv4(0, 0, 0, 0).To4() + ) + + for i := 0; i < len(lastIP); i++ { + lastIP[i] = netIP[i] | ^network.Mask[i] + } + return firstIP, lastIP +} + +// Given a netmask, calculates the number of available hosts +func NetworkSize(mask net.IPMask) int32 { + m := net.IPv4Mask(0, 0, 0, 0) + for i := 0; i < net.IPv4len; i++ { + m[i] = ^mask[i] + } + return int32(binary.BigEndian.Uint32(m)) + 1 +} + +// Return the IPv4 address of a network interface +func GetIfaceAddr(name string) (net.Addr, error) { + iface, err := net.InterfaceByName(name) + if err != nil { + return nil, err + } + addrs, err := iface.Addrs() + if err != nil { + return nil, err + } + var addrs4 []net.Addr + for _, addr := range addrs { + ip := (addr.(*net.IPNet)).IP + if ip4 := ip.To4(); len(ip4) == net.IPv4len { + addrs4 = append(addrs4, addr) + 
} + } + switch { + case len(addrs4) == 0: + return nil, fmt.Errorf("Interface %v has no IP addresses", name) + case len(addrs4) > 1: + fmt.Printf("Interface %v has more than 1 IPv4 address. Defaulting to using %v\n", + name, (addrs4[0].(*net.IPNet)).IP) + } + return addrs4[0], nil +} + +func GetDefaultRouteIface() (*net.Interface, error) { + rs, err := networkGetRoutesFct() + if err != nil { + return nil, fmt.Errorf("unable to get routes: %v", err) + } + for _, r := range rs { + if r.Default { + return r.Iface, nil + } + } + return nil, ErrNoDefaultRoute +} diff --git a/opts.go b/opts.go index 3119f9dd10..b1d71c491d 100644 --- a/opts.go +++ b/opts.go @@ -2,6 +2,7 @@ package docker import ( "fmt" + "github.com/dotcloud/docker/api" "github.com/dotcloud/docker/utils" "os" "path/filepath" @@ -129,7 +130,7 @@ func ValidateEnv(val string) (string, error) { } func ValidateHost(val string) (string, error) { - host, err := utils.ParseHost(DEFAULTHTTPHOST, DEFAULTHTTPPORT, DEFAULTUNIXSOCKET, val) + host, err := utils.ParseHost(api.DEFAULTHTTPHOST, api.DEFAULTHTTPPORT, api.DEFAULTUNIXSOCKET, val) if err != nil { return val, err } diff --git a/cgroups/MAINTAINERS b/pkg/cgroups/MAINTAINERS similarity index 100% rename from cgroups/MAINTAINERS rename to pkg/cgroups/MAINTAINERS diff --git a/pkg/cgroups/cgroups.go b/pkg/cgroups/cgroups.go new file mode 100644 index 0000000000..91ac3842ac --- /dev/null +++ b/pkg/cgroups/cgroups.go @@ -0,0 +1,57 @@ +package cgroups + +import ( + "bufio" + "fmt" + "github.com/dotcloud/docker/pkg/mount" + "io" + "os" + "strings" +) + +// https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt +func FindCgroupMountpoint(subsystem string) (string, error) { + mounts, err := mount.GetMounts() + if err != nil { + return "", err + } + + for _, mount := range mounts { + if mount.Fstype == "cgroup" { + for _, opt := range strings.Split(mount.VfsOpts, ",") { + if opt == subsystem { + return mount.Mountpoint, nil + } + } + } + } + + return "", 
fmt.Errorf("cgroup mountpoint not found for %s", subsystem) +} + +// Returns the relative path to the cgroup docker is running in. +func GetThisCgroupDir(subsystem string) (string, error) { + f, err := os.Open("/proc/self/cgroup") + if err != nil { + return "", err + } + defer f.Close() + + return parseCgroupFile(subsystem, f) +} + +func parseCgroupFile(subsystem string, r io.Reader) (string, error) { + s := bufio.NewScanner(r) + + for s.Scan() { + if err := s.Err(); err != nil { + return "", err + } + text := s.Text() + parts := strings.Split(text, ":") + if parts[1] == subsystem { + return parts[2], nil + } + } + return "", fmt.Errorf("cgroup '%s' not found in /proc/self/cgroup", subsystem) +} diff --git a/cgroups/cgroups_test.go b/pkg/cgroups/cgroups_test.go similarity index 100% rename from cgroups/cgroups_test.go rename to pkg/cgroups/cgroups_test.go diff --git a/pkg/collections/orderedintset.go b/pkg/collections/orderedintset.go new file mode 100644 index 0000000000..23abab04d3 --- /dev/null +++ b/pkg/collections/orderedintset.go @@ -0,0 +1,96 @@ +package collections + +import ( + "sync" +) + +// OrderedIntSet is a thread-safe sorted set and a stack. +type OrderedIntSet struct { + sync.RWMutex + set []int +} + +// NewOrderedSet returns an initialized OrderedSet +func NewOrderedIntSet() *OrderedIntSet { + return &OrderedIntSet{} +} + +// Push takes a string and adds it to the set. If the elem aready exists, it has no effect. +func (s *OrderedIntSet) Push(elem int) { + s.RLock() + for _, e := range s.set { + if e == elem { + s.RUnlock() + return + } + } + s.RUnlock() + + s.Lock() + + // Make sure the list is always sorted + for i, e := range s.set { + if elem < e { + s.set = append(s.set[:i], append([]int{elem}, s.set[i:]...)...) + s.Unlock() + return + } + } + // If we reach here, then elem is the biggest elem of the list. 
+ s.set = append(s.set, elem) + s.Unlock() +} + +// Pop is an alias to PopFront() +func (s *OrderedIntSet) Pop() int { + return s.PopFront() +} + +// Pop returns the first elemen from the list and removes it. +// If the list is empty, it returns 0 +func (s *OrderedIntSet) PopFront() int { + s.RLock() + + for i, e := range s.set { + ret := e + s.RUnlock() + s.Lock() + s.set = append(s.set[:i], s.set[i+1:]...) + s.Unlock() + return ret + } + s.RUnlock() + + return 0 +} + +// PullBack retrieve the last element of the list. +// The element is not removed. +// If the list is empty, an empty element is returned. +func (s *OrderedIntSet) PullBack() int { + if len(s.set) == 0 { + return 0 + } + return s.set[len(s.set)-1] +} + +// Exists checks if the given element present in the list. +func (s *OrderedIntSet) Exists(elem int) bool { + for _, e := range s.set { + if e == elem { + return true + } + } + return false +} + +// Remove removes an element from the list. +// If the element is not found, it has no effect. +func (s *OrderedIntSet) Remove(elem int) { + for i, e := range s.set { + if e == elem { + s.set = append(s.set[:i], s.set[i+1:]...) 
+ return + } + } +} diff --git a/pkg/graphdb/conn_linux.go b/pkg/graphdb/conn_linux.go index 2bd51940ce..7a1ab8c92f 100644 --- a/pkg/graphdb/conn_linux.go +++ b/pkg/graphdb/conn_linux.go @@ -1,3 +1,5 @@ +// +build amd64 + package graphdb import ( diff --git a/pkg/graphdb/conn_darwin.go b/pkg/graphdb/conn_unsupported.go similarity index 79% rename from pkg/graphdb/conn_darwin.go rename to pkg/graphdb/conn_unsupported.go index 6e75fd8edb..c2d602569f 100644 --- a/pkg/graphdb/conn_darwin.go +++ b/pkg/graphdb/conn_unsupported.go @@ -1,3 +1,5 @@ +// +build !linux !amd64 + package graphdb func NewSqliteConn(root string) (*Database, error) { diff --git a/pkg/iptables/iptables.go b/pkg/iptables/iptables.go index 0438bcbd88..4cdd67ef7c 100644 --- a/pkg/iptables/iptables.go +++ b/pkg/iptables/iptables.go @@ -73,6 +73,23 @@ func (c *Chain) Forward(action Action, ip net.IP, port int, proto, dest_addr str } else if len(output) != 0 { return fmt.Errorf("Error iptables forward: %s", output) } + + fAction := action + if fAction == Add { + fAction = "-I" + } + if output, err := Raw(string(fAction), "FORWARD", + "!", "-i", c.Bridge, + "-o", c.Bridge, + "-p", proto, + "-d", dest_addr, + "--dport", strconv.Itoa(dest_port), + "-j", "ACCEPT"); err != nil { + return err + } else if len(output) != 0 { + return fmt.Errorf("Error iptables forward: %s", output) + } + return nil } diff --git a/pkg/mflag/LICENSE b/pkg/mflag/LICENSE new file mode 100644 index 0000000000..ebcfbcc779 --- /dev/null +++ b/pkg/mflag/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2014 The Docker & Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pkg/mflag/README.md b/pkg/mflag/README.md new file mode 100644 index 0000000000..da00efa336 --- /dev/null +++ b/pkg/mflag/README.md @@ -0,0 +1,40 @@ +Package mflag (aka multiple-flag) implements command-line flag parsing. 
+It's an **hacky** fork of the [official golang package](http://golang.org/pkg/flag/) + +It adds: + +* both short and long flag version +`./example -s red` `./example --string blue` + +* multiple names for the same option +``` +$>./example -h +Usage of example: + -s, --string="": a simple string +``` + +___ +It is very flexible on purpose, so you can do things like: +``` +$>./example -h +Usage of example: + -s, -string, --string="": a simple string +``` + +Or: +``` +$>./example -h +Usage of example: + -oldflag, --newflag="": a simple string +``` + +You can also hide some flags from the usage, so if we want only `--newflag`: +``` +$>./example -h +Usage of example: + --newflag="": a simple string +$>./example -oldflag str +str +``` + +See [example.go](example/example.go) for more details. diff --git a/pkg/mflag/example/example.go b/pkg/mflag/example/example.go new file mode 100644 index 0000000000..fa26c97e1b --- /dev/null +++ b/pkg/mflag/example/example.go @@ -0,0 +1,30 @@ +package main + +import ( + "fmt" + flag "github.com/dotcloud/docker/pkg/mflag" +) + +var ( + i int + str string + b, b2, h bool +) + +func init() { + flag.BoolVar(&b, []string{"b"}, false, "a simple bool") + flag.BoolVar(&b2, []string{"-bool"}, false, "a simple bool") + flag.IntVar(&i, []string{"#integer", "-integer"}, -1, "a simple integer") + flag.StringVar(&str, []string{"s", "#hidden", "-string"}, "", "a simple string") //-s -hidden and --string will work, but -hidden won't be in the usage + flag.BoolVar(&h, []string{"h", "#help", "-help"}, false, "display the help") + flag.Parse() +} +func main() { + if h { + flag.PrintDefaults() + } + fmt.Printf("s/#hidden/-string: %s\n", str) + fmt.Printf("b: %b\n", b) + fmt.Printf("-bool: %b\n", b2) + fmt.Printf("s/#hidden/-string(via lookup): %s\n", flag.Lookup("s").Value.String()) +} diff --git a/pkg/mflag/flag.go b/pkg/mflag/flag.go new file mode 100644 index 0000000000..f721e04557 --- /dev/null +++ b/pkg/mflag/flag.go @@ -0,0 +1,868 @@ +// Copyright 
2014 The Docker & Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* + Package flag implements command-line flag parsing. + + Usage: + + Define flags using flag.String(), Bool(), Int(), etc. + + This declares an integer flag, -f or --flagname, stored in the pointer ip, with type *int. + import "flag" + var ip = flag.Int([]string{"f", "-flagname"}, 1234, "help message for flagname") + If you like, you can bind the flag to a variable using the Var() functions. + var flagvar int + func init() { + // -flaghidden will work, but will be hidden from the usage + flag.IntVar(&flagvar, []string{"f", "#flaghidden", "-flagname"}, 1234, "help message for flagname") + } + Or you can create custom flags that satisfy the Value interface (with + pointer receivers) and couple them to flag parsing by + flag.Var(&flagVal, []string{"name"}, "help message for flagname") + For such flags, the default value is just the initial value of the variable. + + After all flags are defined, call + flag.Parse() + to parse the command line into the defined flags. + + Flags may then be used directly. If you're using the flags themselves, + they are all pointers; if you bind to variables, they're values. + fmt.Println("ip has value ", *ip) + fmt.Println("flagvar has value ", flagvar) + + After parsing, the arguments after the flag are available as the + slice flag.Args() or individually as flag.Arg(i). + The arguments are indexed from 0 through flag.NArg()-1. + + Command line flag syntax: + -flag + -flag=x + -flag x // non-boolean flags only + One or two minus signs may be used; they are equivalent. + The last form is not permitted for boolean flags because the + meaning of the command + cmd -x * + will change if there is a file called 0, false, etc. You must + use the -flag=false form to turn off a boolean flag. 
+ + Flag parsing stops just before the first non-flag argument + ("-" is a non-flag argument) or after the terminator "--". + + Integer flags accept 1234, 0664, 0x1234 and may be negative. + Boolean flags may be 1, 0, t, f, true, false, TRUE, FALSE, True, False. + Duration flags accept any input valid for time.ParseDuration. + + The default set of command-line flags is controlled by + top-level functions. The FlagSet type allows one to define + independent sets of flags, such as to implement subcommands + in a command-line interface. The methods of FlagSet are + analogous to the top-level functions for the command-line + flag set. +*/ +package mflag + +import ( + "errors" + "fmt" + "io" + "os" + "sort" + "strconv" + "strings" + "time" +) + +// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined. +var ErrHelp = errors.New("flag: help requested") + +// -- bool Value +type boolValue bool + +func newBoolValue(val bool, p *bool) *boolValue { + *p = val + return (*boolValue)(p) +} + +func (b *boolValue) Set(s string) error { + v, err := strconv.ParseBool(s) + *b = boolValue(v) + return err +} + +func (b *boolValue) Get() interface{} { return bool(*b) } + +func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) } + +func (b *boolValue) IsBoolFlag() bool { return true } + +// optional interface to indicate boolean flags that can be +// supplied without "=value" text +type boolFlag interface { + Value + IsBoolFlag() bool +} + +// -- int Value +type intValue int + +func newIntValue(val int, p *int) *intValue { + *p = val + return (*intValue)(p) +} + +func (i *intValue) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = intValue(v) + return err +} + +func (i *intValue) Get() interface{} { return int(*i) } + +func (i *intValue) String() string { return fmt.Sprintf("%v", *i) } + +// -- int64 Value +type int64Value int64 + +func newInt64Value(val int64, p *int64) *int64Value { + *p = val + return (*int64Value)(p) +} 
+ +func (i *int64Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = int64Value(v) + return err +} + +func (i *int64Value) Get() interface{} { return int64(*i) } + +func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) } + +// -- uint Value +type uintValue uint + +func newUintValue(val uint, p *uint) *uintValue { + *p = val + return (*uintValue)(p) +} + +func (i *uintValue) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uintValue(v) + return err +} + +func (i *uintValue) Get() interface{} { return uint(*i) } + +func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) } + +// -- uint64 Value +type uint64Value uint64 + +func newUint64Value(val uint64, p *uint64) *uint64Value { + *p = val + return (*uint64Value)(p) +} + +func (i *uint64Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uint64Value(v) + return err +} + +func (i *uint64Value) Get() interface{} { return uint64(*i) } + +func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) } + +// -- string Value +type stringValue string + +func newStringValue(val string, p *string) *stringValue { + *p = val + return (*stringValue)(p) +} + +func (s *stringValue) Set(val string) error { + *s = stringValue(val) + return nil +} + +func (s *stringValue) Get() interface{} { return string(*s) } + +func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) } + +// -- float64 Value +type float64Value float64 + +func newFloat64Value(val float64, p *float64) *float64Value { + *p = val + return (*float64Value)(p) +} + +func (f *float64Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 64) + *f = float64Value(v) + return err +} + +func (f *float64Value) Get() interface{} { return float64(*f) } + +func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) } + +// -- time.Duration Value +type durationValue time.Duration + +func newDurationValue(val time.Duration, p *time.Duration) *durationValue 
{ + *p = val + return (*durationValue)(p) +} + +func (d *durationValue) Set(s string) error { + v, err := time.ParseDuration(s) + *d = durationValue(v) + return err +} + +func (d *durationValue) Get() interface{} { return time.Duration(*d) } + +func (d *durationValue) String() string { return (*time.Duration)(d).String() } + +// Value is the interface to the dynamic value stored in a flag. +// (The default value is represented as a string.) +// +// If a Value has an IsBoolFlag() bool method returning true, +// the command-line parser makes -name equivalent to -name=true +// rather than using the next command-line argument. +type Value interface { + String() string + Set(string) error +} + +// Getter is an interface that allows the contents of a Value to be retrieved. +// It wraps the Value interface, rather than being part of it, because it +// appeared after Go 1 and its compatibility rules. All Value types provided +// by this package satisfy the Getter interface. +type Getter interface { + Value + Get() interface{} +} + +// ErrorHandling defines how to handle flag parsing errors. +type ErrorHandling int + +const ( + ContinueOnError ErrorHandling = iota + ExitOnError + PanicOnError +) + +// A FlagSet represents a set of defined flags. The zero value of a FlagSet +// has no name and has ContinueOnError error handling. +type FlagSet struct { + // Usage is the function called when an error occurs while parsing flags. + // The field is a function (not a method) that may be changed to point to + // a custom error handler. + Usage func() + + name string + parsed bool + actual map[string]*Flag + formal map[string]*Flag + args []string // arguments after flags + errorHandling ErrorHandling + output io.Writer // nil means stderr; use out() accessor +} + +// A Flag represents the state of a flag. 
+type Flag struct { + Names []string // name as it appears on command line + Usage string // help message + Value Value // value as set + DefValue string // default value (as text); for usage message +} + +// sortFlags returns the flags as a slice in lexicographical sorted order. +func sortFlags(flags map[string]*Flag) []*Flag { + var list sort.StringSlice + for _, f := range flags { + if len(f.Names) == 1 { + list = append(list, f.Names[0]) + continue + } + + found := false + fName := strings.TrimPrefix(strings.TrimPrefix(f.Names[0], "#"), "-") + for _, name := range list { + if name == fName { + found = true + break + } + } + if !found { + list = append(list, fName) + } + } + list.Sort() + result := make([]*Flag, len(list)) + for i, name := range list { + result[i] = flags[name] + } + return result +} + +func (f *FlagSet) out() io.Writer { + if f.output == nil { + return os.Stderr + } + return f.output +} + +// SetOutput sets the destination for usage and error messages. +// If output is nil, os.Stderr is used. +func (f *FlagSet) SetOutput(output io.Writer) { + f.output = output +} + +// VisitAll visits the flags in lexicographical order, calling fn for each. +// It visits all flags, even those not set. +func (f *FlagSet) VisitAll(fn func(*Flag)) { + for _, flag := range sortFlags(f.formal) { + fn(flag) + } +} + +// VisitAll visits the command-line flags in lexicographical order, calling +// fn for each. It visits all flags, even those not set. +func VisitAll(fn func(*Flag)) { + CommandLine.VisitAll(fn) +} + +// Visit visits the flags in lexicographical order, calling fn for each. +// It visits only those flags that have been set. +func (f *FlagSet) Visit(fn func(*Flag)) { + for _, flag := range sortFlags(f.actual) { + fn(flag) + } +} + +// Visit visits the command-line flags in lexicographical order, calling fn +// for each. It visits only those flags that have been set. 
+func Visit(fn func(*Flag)) { + CommandLine.Visit(fn) +} + +// Lookup returns the Flag structure of the named flag, returning nil if none exists. +func (f *FlagSet) Lookup(name string) *Flag { + return f.formal[name] +} + +// Lookup returns the Flag structure of the named command-line flag, +// returning nil if none exists. +func Lookup(name string) *Flag { + return CommandLine.formal[name] +} + +// Set sets the value of the named flag. +func (f *FlagSet) Set(name, value string) error { + flag, ok := f.formal[name] + if !ok { + return fmt.Errorf("no such flag -%v", name) + } + err := flag.Value.Set(value) + if err != nil { + return err + } + if f.actual == nil { + f.actual = make(map[string]*Flag) + } + f.actual[name] = flag + return nil +} + +// Set sets the value of the named command-line flag. +func Set(name, value string) error { + return CommandLine.Set(name, value) +} + +// PrintDefaults prints, to standard error unless configured +// otherwise, the default values of all defined flags in the set. +func (f *FlagSet) PrintDefaults() { + f.VisitAll(func(flag *Flag) { + format := " -%s=%s: %s\n" + if _, ok := flag.Value.(*stringValue); ok { + // put quotes on the value + format = " -%s=%q: %s\n" + } + names := []string{} + for _, name := range flag.Names { + if name[0] != '#' { + names = append(names, name) + } + } + fmt.Fprintf(f.out(), format, strings.Join(names, ", -"), flag.DefValue, flag.Usage) + }) +} + +// PrintDefaults prints to standard error the default values of all defined command-line flags. +func PrintDefaults() { + CommandLine.PrintDefaults() +} + +// defaultUsage is the default function to print a usage message. +func defaultUsage(f *FlagSet) { + if f.name == "" { + fmt.Fprintf(f.out(), "Usage:\n") + } else { + fmt.Fprintf(f.out(), "Usage of %s:\n", f.name) + } + f.PrintDefaults() +} + +// NOTE: Usage is not just defaultUsage(CommandLine) +// because it serves (via godoc flag Usage) as the example +// for how to write your own usage function. 
+ +// Usage prints to standard error a usage message documenting all defined command-line flags. +// The function is a variable that may be changed to point to a custom function. +var Usage = func() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + PrintDefaults() +} + +// NFlag returns the number of flags that have been set. +func (f *FlagSet) NFlag() int { return len(f.actual) } + +// NFlag returns the number of command-line flags that have been set. +func NFlag() int { return len(CommandLine.actual) } + +// Arg returns the i'th argument. Arg(0) is the first remaining argument +// after flags have been processed. +func (f *FlagSet) Arg(i int) string { + if i < 0 || i >= len(f.args) { + return "" + } + return f.args[i] +} + +// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument +// after flags have been processed. +func Arg(i int) string { + return CommandLine.Arg(i) +} + +// NArg is the number of arguments remaining after flags have been processed. +func (f *FlagSet) NArg() int { return len(f.args) } + +// NArg is the number of arguments remaining after flags have been processed. +func NArg() int { return len(CommandLine.args) } + +// Args returns the non-flag arguments. +func (f *FlagSet) Args() []string { return f.args } + +// Args returns the non-flag command-line arguments. +func Args() []string { return CommandLine.args } + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. +func (f *FlagSet) BoolVar(p *bool, names []string, value bool, usage string) { + f.Var(newBoolValue(value, p), names, usage) +} + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. 
+func BoolVar(p *bool, names []string, value bool, usage string) { + CommandLine.Var(newBoolValue(value, p), names, usage) +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func (f *FlagSet) Bool(names []string, value bool, usage string) *bool { + p := new(bool) + f.BoolVar(p, names, value, usage) + return p +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func Bool(names []string, value bool, usage string) *bool { + return CommandLine.Bool(names, value, usage) +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func (f *FlagSet) IntVar(p *int, names []string, value int, usage string) { + f.Var(newIntValue(value, p), names, usage) +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func IntVar(p *int, names []string, value int, usage string) { + CommandLine.Var(newIntValue(value, p), names, usage) +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func (f *FlagSet) Int(names []string, value int, usage string) *int { + p := new(int) + f.IntVar(p, names, value, usage) + return p +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. 
+func Int(names []string, value int, usage string) *int { + return CommandLine.Int(names, value, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func (f *FlagSet) Int64Var(p *int64, names []string, value int64, usage string) { + f.Var(newInt64Value(value, p), names, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func Int64Var(p *int64, names []string, value int64, usage string) { + CommandLine.Var(newInt64Value(value, p), names, usage) +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func (f *FlagSet) Int64(names []string, value int64, usage string) *int64 { + p := new(int64) + f.Int64Var(p, names, value, usage) + return p +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func Int64(names []string, value int64, usage string) *int64 { + return CommandLine.Int64(names, value, usage) +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func (f *FlagSet) UintVar(p *uint, names []string, value uint, usage string) { + f.Var(newUintValue(value, p), names, usage) +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. 
+func UintVar(p *uint, names []string, value uint, usage string) { + CommandLine.Var(newUintValue(value, p), names, usage) +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func (f *FlagSet) Uint(names []string, value uint, usage string) *uint { + p := new(uint) + f.UintVar(p, names, value, usage) + return p +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func Uint(names []string, value uint, usage string) *uint { + return CommandLine.Uint(names, value, usage) +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func (f *FlagSet) Uint64Var(p *uint64, names []string, value uint64, usage string) { + f.Var(newUint64Value(value, p), names, usage) +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func Uint64Var(p *uint64, names []string, value uint64, usage string) { + CommandLine.Var(newUint64Value(value, p), names, usage) +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. +func (f *FlagSet) Uint64(names []string, value uint64, usage string) *uint64 { + p := new(uint64) + f.Uint64Var(p, names, value, usage) + return p +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. 
+func Uint64(names []string, value uint64, usage string) *uint64 { + return CommandLine.Uint64(names, value, usage) +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func (f *FlagSet) StringVar(p *string, names []string, value string, usage string) { + f.Var(newStringValue(value, p), names, usage) +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func StringVar(p *string, names []string, value string, usage string) { + CommandLine.Var(newStringValue(value, p), names, usage) +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func (f *FlagSet) String(names []string, value string, usage string) *string { + p := new(string) + f.StringVar(p, names, value, usage) + return p +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func String(names []string, value string, usage string) *string { + return CommandLine.String(names, value, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func (f *FlagSet) Float64Var(p *float64, names []string, value float64, usage string) { + f.Var(newFloat64Value(value, p), names, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. 
+func Float64Var(p *float64, names []string, value float64, usage string) { + CommandLine.Var(newFloat64Value(value, p), names, usage) +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func (f *FlagSet) Float64(names []string, value float64, usage string) *float64 { + p := new(float64) + f.Float64Var(p, names, value, usage) + return p +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func Float64(names []string, value float64, usage string) *float64 { + return CommandLine.Float64(names, value, usage) +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +func (f *FlagSet) DurationVar(p *time.Duration, names []string, value time.Duration, usage string) { + f.Var(newDurationValue(value, p), names, usage) +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +func DurationVar(p *time.Duration, names []string, value time.Duration, usage string) { + CommandLine.Var(newDurationValue(value, p), names, usage) +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +func (f *FlagSet) Duration(names []string, value time.Duration, usage string) *time.Duration { + p := new(time.Duration) + f.DurationVar(p, names, value, usage) + return p +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. 
+// The return value is the address of a time.Duration variable that stores the value of the flag. +func Duration(names []string, value time.Duration, usage string) *time.Duration { + return CommandLine.Duration(names, value, usage) +} + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. +func (f *FlagSet) Var(value Value, names []string, usage string) { + // Remember the default value as a string; it won't change. + flag := &Flag{names, usage, value, value.String()} + for _, name := range names { + name = strings.TrimPrefix(name, "#") + _, alreadythere := f.formal[name] + if alreadythere { + var msg string + if f.name == "" { + msg = fmt.Sprintf("flag redefined: %s", name) + } else { + msg = fmt.Sprintf("%s flag redefined: %s", f.name, name) + } + fmt.Fprintln(f.out(), msg) + panic(msg) // Happens only if flags are declared with identical names + } + if f.formal == nil { + f.formal = make(map[string]*Flag) + } + f.formal[name] = flag + } +} + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. 
+func Var(value Value, names []string, usage string) { + CommandLine.Var(value, names, usage) +} + +// failf prints to standard error a formatted error and usage message and +// returns the error. +func (f *FlagSet) failf(format string, a ...interface{}) error { + err := fmt.Errorf(format, a...) + fmt.Fprintln(f.out(), err) + f.usage() + return err +} + +// usage calls the Usage method for the flag set, or the usage function if +// the flag set is CommandLine. +func (f *FlagSet) usage() { + if f == CommandLine { + Usage() + } else if f.Usage == nil { + defaultUsage(f) + } else { + f.Usage() + } +} + +// parseOne parses one flag. It reports whether a flag was seen. +func (f *FlagSet) parseOne() (bool, error) { + if len(f.args) == 0 { + return false, nil + } + s := f.args[0] + if len(s) == 0 || s[0] != '-' || len(s) == 1 { + return false, nil + } + if s[1] == '-' && len(s) == 2 { // "--" terminates the flags + f.args = f.args[1:] + return false, nil + } + name := s[1:] + if len(name) == 0 || name[0] == '=' { + return false, f.failf("bad flag syntax: %s", s) + } + + // it's a flag. does it have an argument? + f.args = f.args[1:] + has_value := false + value := "" + for i := 1; i < len(name); i++ { // equals cannot be first + if name[i] == '=' { + value = name[i+1:] + has_value = true + name = name[0:i] + break + } + } + m := f.formal + flag, alreadythere := m[name] // BUG + if !alreadythere { + if name == "-help" || name == "help" || name == "h" { // special case for nice help message. + f.usage() + return false, ErrHelp + } + return false, f.failf("flag provided but not defined: -%s", name) + } + if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg + if has_value { + if err := fv.Set(value); err != nil { + return false, f.failf("invalid boolean value %q for -%s: %v", value, name, err) + } + } else { + fv.Set("true") + } + } else { + // It must have a value, which might be the next argument. 
+ if !has_value && len(f.args) > 0 { + // value is the next arg + has_value = true + value, f.args = f.args[0], f.args[1:] + } + if !has_value { + return false, f.failf("flag needs an argument: -%s", name) + } + if err := flag.Value.Set(value); err != nil { + return false, f.failf("invalid value %q for flag -%s: %v", value, name, err) + } + } + if f.actual == nil { + f.actual = make(map[string]*Flag) + } + f.actual[name] = flag + return true, nil +} + +// Parse parses flag definitions from the argument list, which should not +// include the command name. Must be called after all flags in the FlagSet +// are defined and before flags are accessed by the program. +// The return value will be ErrHelp if -help was set but not defined. +func (f *FlagSet) Parse(arguments []string) error { + f.parsed = true + f.args = arguments + for { + seen, err := f.parseOne() + if seen { + continue + } + if err == nil { + break + } + switch f.errorHandling { + case ContinueOnError: + return err + case ExitOnError: + os.Exit(2) + case PanicOnError: + panic(err) + } + } + return nil +} + +// Parsed reports whether f.Parse has been called. +func (f *FlagSet) Parsed() bool { + return f.parsed +} + +// Parse parses the command-line flags from os.Args[1:]. Must be called +// after all flags are defined and before flags are accessed by the program. +func Parse() { + // Ignore errors; CommandLine is set for ExitOnError. + CommandLine.Parse(os.Args[1:]) +} + +// Parsed returns true if the command-line flags have been parsed. +func Parsed() bool { + return CommandLine.Parsed() +} + +// CommandLine is the default set of command-line flags, parsed from os.Args. +// The top-level functions such as BoolVar, Arg, and on are wrappers for the +// methods of CommandLine. +var CommandLine = NewFlagSet(os.Args[0], ExitOnError) + +// NewFlagSet returns a new, empty flag set with the specified name and +// error handling property. 
+func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { + f := &FlagSet{ + name: name, + errorHandling: errorHandling, + } + return f +} + +// Init sets the name and error handling property for a flag set. +// By default, the zero FlagSet uses an empty name and the +// ContinueOnError error handling policy. +func (f *FlagSet) Init(name string, errorHandling ErrorHandling) { + f.name = name + f.errorHandling = errorHandling +} diff --git a/pkg/mflag/flag_test.go b/pkg/mflag/flag_test.go new file mode 100644 index 0000000000..b9e8a0ef3e --- /dev/null +++ b/pkg/mflag/flag_test.go @@ -0,0 +1,400 @@ +// Copyright 2014 The Docker & Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mflag_test + +import ( + "bytes" + "fmt" + . "github.com/dotcloud/docker/pkg/mflag" + "os" + "sort" + "strings" + "testing" + "time" +) + +// ResetForTesting clears all flag state and sets the usage function as directed. +// After calling ResetForTesting, parse errors in flag handling will not +// exit the program. 
+func ResetForTesting(usage func()) { + CommandLine = NewFlagSet(os.Args[0], ContinueOnError) + Usage = usage +} +func boolString(s string) string { + if s == "0" { + return "false" + } + return "true" +} + +func TestEverything(t *testing.T) { + ResetForTesting(nil) + Bool([]string{"test_bool"}, false, "bool value") + Int([]string{"test_int"}, 0, "int value") + Int64([]string{"test_int64"}, 0, "int64 value") + Uint([]string{"test_uint"}, 0, "uint value") + Uint64([]string{"test_uint64"}, 0, "uint64 value") + String([]string{"test_string"}, "0", "string value") + Float64([]string{"test_float64"}, 0, "float64 value") + Duration([]string{"test_duration"}, 0, "time.Duration value") + + m := make(map[string]*Flag) + desired := "0" + visitor := func(f *Flag) { + for _, name := range f.Names { + if len(name) > 5 && name[0:5] == "test_" { + m[name] = f + ok := false + switch { + case f.Value.String() == desired: + ok = true + case name == "test_bool" && f.Value.String() == boolString(desired): + ok = true + case name == "test_duration" && f.Value.String() == desired+"s": + ok = true + } + if !ok { + t.Error("Visit: bad value", f.Value.String(), "for", name) + } + } + } + } + VisitAll(visitor) + if len(m) != 8 { + t.Error("VisitAll misses some flags") + for k, v := range m { + t.Log(k, *v) + } + } + m = make(map[string]*Flag) + Visit(visitor) + if len(m) != 0 { + t.Errorf("Visit sees unset flags") + for k, v := range m { + t.Log(k, *v) + } + } + // Now set all flags + Set("test_bool", "true") + Set("test_int", "1") + Set("test_int64", "1") + Set("test_uint", "1") + Set("test_uint64", "1") + Set("test_string", "1") + Set("test_float64", "1") + Set("test_duration", "1s") + desired = "1" + Visit(visitor) + if len(m) != 8 { + t.Error("Visit fails after set") + for k, v := range m { + t.Log(k, *v) + } + } + // Now test they're visited in sort order. 
+ var flagNames []string + Visit(func(f *Flag) { + for _, name := range f.Names { + flagNames = append(flagNames, name) + } + }) + if !sort.StringsAreSorted(flagNames) { + t.Errorf("flag names not sorted: %v", flagNames) + } +} + +func TestGet(t *testing.T) { + ResetForTesting(nil) + Bool([]string{"test_bool"}, true, "bool value") + Int([]string{"test_int"}, 1, "int value") + Int64([]string{"test_int64"}, 2, "int64 value") + Uint([]string{"test_uint"}, 3, "uint value") + Uint64([]string{"test_uint64"}, 4, "uint64 value") + String([]string{"test_string"}, "5", "string value") + Float64([]string{"test_float64"}, 6, "float64 value") + Duration([]string{"test_duration"}, 7, "time.Duration value") + + visitor := func(f *Flag) { + for _, name := range f.Names { + if len(name) > 5 && name[0:5] == "test_" { + g, ok := f.Value.(Getter) + if !ok { + t.Errorf("Visit: value does not satisfy Getter: %T", f.Value) + return + } + switch name { + case "test_bool": + ok = g.Get() == true + case "test_int": + ok = g.Get() == int(1) + case "test_int64": + ok = g.Get() == int64(2) + case "test_uint": + ok = g.Get() == uint(3) + case "test_uint64": + ok = g.Get() == uint64(4) + case "test_string": + ok = g.Get() == "5" + case "test_float64": + ok = g.Get() == float64(6) + case "test_duration": + ok = g.Get() == time.Duration(7) + } + if !ok { + t.Errorf("Visit: bad value %T(%v) for %s", g.Get(), g.Get(), name) + } + } + } + } + VisitAll(visitor) +} + +func TestUsage(t *testing.T) { + called := false + ResetForTesting(func() { called = true }) + if CommandLine.Parse([]string{"-x"}) == nil { + t.Error("parse did not fail for unknown flag") + } + if !called { + t.Error("did not call Usage for unknown flag") + } +} + +func testParse(f *FlagSet, t *testing.T) { + if f.Parsed() { + t.Error("f.Parse() = true before Parse") + } + boolFlag := f.Bool([]string{"bool"}, false, "bool value") + bool2Flag := f.Bool([]string{"bool2"}, false, "bool2 value") + intFlag := f.Int([]string{"-int"}, 0, "int 
value") + int64Flag := f.Int64([]string{"-int64"}, 0, "int64 value") + uintFlag := f.Uint([]string{"uint"}, 0, "uint value") + uint64Flag := f.Uint64([]string{"-uint64"}, 0, "uint64 value") + stringFlag := f.String([]string{"string"}, "0", "string value") + float64Flag := f.Float64([]string{"float64"}, 0, "float64 value") + durationFlag := f.Duration([]string{"duration"}, 5*time.Second, "time.Duration value") + extra := "one-extra-argument" + args := []string{ + "-bool", + "-bool2=true", + "--int", "22", + "--int64", "0x23", + "-uint", "24", + "--uint64", "25", + "-string", "hello", + "-float64", "2718e28", + "-duration", "2m", + extra, + } + if err := f.Parse(args); err != nil { + t.Fatal(err) + } + if !f.Parsed() { + t.Error("f.Parse() = false after Parse") + } + if *boolFlag != true { + t.Error("bool flag should be true, is ", *boolFlag) + } + if *bool2Flag != true { + t.Error("bool2 flag should be true, is ", *bool2Flag) + } + if *intFlag != 22 { + t.Error("int flag should be 22, is ", *intFlag) + } + if *int64Flag != 0x23 { + t.Error("int64 flag should be 0x23, is ", *int64Flag) + } + if *uintFlag != 24 { + t.Error("uint flag should be 24, is ", *uintFlag) + } + if *uint64Flag != 25 { + t.Error("uint64 flag should be 25, is ", *uint64Flag) + } + if *stringFlag != "hello" { + t.Error("string flag should be `hello`, is ", *stringFlag) + } + if *float64Flag != 2718e28 { + t.Error("float64 flag should be 2718e28, is ", *float64Flag) + } + if *durationFlag != 2*time.Minute { + t.Error("duration flag should be 2m, is ", *durationFlag) + } + if len(f.Args()) != 1 { + t.Error("expected one argument, got", len(f.Args())) + } else if f.Args()[0] != extra { + t.Errorf("expected argument %q got %q", extra, f.Args()[0]) + } +} + +func testPanic(f *FlagSet, t *testing.T) { + f.Int([]string{"-int"}, 0, "int value") + if f.Parsed() { + t.Error("f.Parse() = true before Parse") + } + args := []string{ + "-int", "21", + } + f.Parse(args) +} + +func TestParsePanic(t *testing.T) { 
+ ResetForTesting(func() {}) + testPanic(CommandLine, t) +} + +func TestParse(t *testing.T) { + ResetForTesting(func() { t.Error("bad parse") }) + testParse(CommandLine, t) +} + +func TestFlagSetParse(t *testing.T) { + testParse(NewFlagSet("test", ContinueOnError), t) +} + +// Declare a user-defined flag type. +type flagVar []string + +func (f *flagVar) String() string { + return fmt.Sprint([]string(*f)) +} + +func (f *flagVar) Set(value string) error { + *f = append(*f, value) + return nil +} + +func TestUserDefined(t *testing.T) { + var flags FlagSet + flags.Init("test", ContinueOnError) + var v flagVar + flags.Var(&v, []string{"v"}, "usage") + if err := flags.Parse([]string{"-v", "1", "-v", "2", "-v=3"}); err != nil { + t.Error(err) + } + if len(v) != 3 { + t.Fatal("expected 3 args; got ", len(v)) + } + expect := "[1 2 3]" + if v.String() != expect { + t.Errorf("expected value %q got %q", expect, v.String()) + } +} + +// Declare a user-defined boolean flag type. +type boolFlagVar struct { + count int +} + +func (b *boolFlagVar) String() string { + return fmt.Sprintf("%d", b.count) +} + +func (b *boolFlagVar) Set(value string) error { + if value == "true" { + b.count++ + } + return nil +} + +func (b *boolFlagVar) IsBoolFlag() bool { + return b.count < 4 +} + +func TestUserDefinedBool(t *testing.T) { + var flags FlagSet + flags.Init("test", ContinueOnError) + var b boolFlagVar + var err error + flags.Var(&b, []string{"b"}, "usage") + if err = flags.Parse([]string{"-b", "-b", "-b", "-b=true", "-b=false", "-b", "barg", "-b"}); err != nil { + if b.count < 4 { + t.Error(err) + } + } + + if b.count != 4 { + t.Errorf("want: %d; got: %d", 4, b.count) + } + + if err == nil { + t.Error("expected error; got none") + } +} + +func TestSetOutput(t *testing.T) { + var flags FlagSet + var buf bytes.Buffer + flags.SetOutput(&buf) + flags.Init("test", ContinueOnError) + flags.Parse([]string{"-unknown"}) + if out := buf.String(); !strings.Contains(out, "-unknown") { + 
t.Logf("expected output mentioning unknown; got %q", out) + } +} + +// This tests that one can reset the flags. This still works but not well, and is +// superseded by FlagSet. +func TestChangingArgs(t *testing.T) { + ResetForTesting(func() { t.Fatal("bad parse") }) + oldArgs := os.Args + defer func() { os.Args = oldArgs }() + os.Args = []string{"cmd", "-before", "subcmd", "-after", "args"} + before := Bool([]string{"before"}, false, "") + if err := CommandLine.Parse(os.Args[1:]); err != nil { + t.Fatal(err) + } + cmd := Arg(0) + os.Args = Args() + after := Bool([]string{"after"}, false, "") + Parse() + args := Args() + + if !*before || cmd != "subcmd" || !*after || len(args) != 1 || args[0] != "args" { + t.Fatalf("expected true subcmd true [args] got %v %v %v %v", *before, cmd, *after, args) + } +} + +// Test that -help invokes the usage message and returns ErrHelp. +func TestHelp(t *testing.T) { + var helpCalled = false + fs := NewFlagSet("help test", ContinueOnError) + fs.Usage = func() { helpCalled = true } + var flag bool + fs.BoolVar(&flag, []string{"flag"}, false, "regular flag") + // Regular flag invocation should work + err := fs.Parse([]string{"-flag=true"}) + if err != nil { + t.Fatal("expected no error; got ", err) + } + if !flag { + t.Error("flag was not set by -flag") + } + if helpCalled { + t.Error("help called for regular flag") + helpCalled = false // reset for next test + } + // Help flag should work as expected. + err = fs.Parse([]string{"-help"}) + if err == nil { + t.Fatal("error expected") + } + if err != ErrHelp { + t.Fatal("expected ErrHelp; got ", err) + } + if !helpCalled { + t.Fatal("help was not called") + } + // If we define a help flag, that should override. 
+ var help bool + fs.BoolVar(&help, []string{"help"}, false, "help flag") + helpCalled = false + err = fs.Parse([]string{"-help"}) + if err != nil { + t.Fatal("expected no error for defined -help; got ", err) + } + if helpCalled { + t.Fatal("help was called; should not have been for defined help flag") + } +} diff --git a/mount/MAINTAINERS b/pkg/mount/MAINTAINERS similarity index 100% rename from mount/MAINTAINERS rename to pkg/mount/MAINTAINERS diff --git a/mount/flags_linux.go b/pkg/mount/flags_linux.go similarity index 96% rename from mount/flags_linux.go rename to pkg/mount/flags_linux.go index 6f4c7acffa..b7124b1dfa 100644 --- a/mount/flags_linux.go +++ b/pkg/mount/flags_linux.go @@ -1,3 +1,5 @@ +// +build amd64 + package mount import ( @@ -38,6 +40,7 @@ func parseOptions(options string) (int, string) { "nodiratime": {false, syscall.MS_NODIRATIME}, "bind": {false, syscall.MS_BIND}, "rbind": {false, syscall.MS_BIND | syscall.MS_REC}, + "private": {false, syscall.MS_PRIVATE}, "relatime": {false, syscall.MS_RELATIME}, "norelatime": {true, syscall.MS_RELATIME}, "strictatime": {false, syscall.MS_STRICTATIME}, diff --git a/mount/flags_darwin.go b/pkg/mount/flags_unsupported.go similarity index 78% rename from mount/flags_darwin.go rename to pkg/mount/flags_unsupported.go index e89d5e703a..c894efe5b1 100644 --- a/mount/flags_darwin.go +++ b/pkg/mount/flags_unsupported.go @@ -1,3 +1,5 @@ +// +build !linux !amd64 + package mount func parseOptions(options string) (int, string) { diff --git a/mount/mount.go b/pkg/mount/mount.go similarity index 71% rename from mount/mount.go rename to pkg/mount/mount.go index b087293a9d..3860b975bd 100644 --- a/mount/mount.go +++ b/pkg/mount/mount.go @@ -25,27 +25,37 @@ func Mounted(mountpoint string) (bool, error) { return false, nil } -// Mount the specified options at the target path +// Mount the specified options at the target path only if +// the target is not mounted // Options must be specified as fstab style func Mount(device, 
target, mType, options string) error { if mounted, err := Mounted(target); err != nil || mounted { return err } + return ForceMount(device, target, mType, options) +} +// Mount the specified options at the target path +// regardless if the target is mounted or not +// Options must be specified as fstab style +func ForceMount(device, target, mType, options string) error { flag, data := parseOptions(options) if err := mount(device, target, mType, uintptr(flag), data); err != nil { return err } return nil - } // Unmount the target only if it is mounted -func Unmount(target string) (err error) { +func Unmount(target string) error { if mounted, err := Mounted(target); err != nil || !mounted { return err } + return ForceUnmount(target) +} +// Unmount the target regardless if it is mounted or not +func ForceUnmount(target string) (err error) { // Simple retry logic for unmount for i := 0; i < 10; i++ { if err = unmount(target, 0); err == nil { diff --git a/mount/mount_test.go b/pkg/mount/mount_test.go similarity index 100% rename from mount/mount_test.go rename to pkg/mount/mount_test.go diff --git a/mount/mounter_linux.go b/pkg/mount/mounter_linux.go similarity index 96% rename from mount/mounter_linux.go rename to pkg/mount/mounter_linux.go index dd4280c777..70b7798de5 100644 --- a/mount/mounter_linux.go +++ b/pkg/mount/mounter_linux.go @@ -1,3 +1,5 @@ +// +build amd64 + package mount import ( diff --git a/mount/mounter_darwin.go b/pkg/mount/mounter_unsupported.go similarity index 88% rename from mount/mounter_darwin.go rename to pkg/mount/mounter_unsupported.go index 7615f94f9e..ee27b35f89 100644 --- a/mount/mounter_darwin.go +++ b/pkg/mount/mounter_unsupported.go @@ -1,3 +1,5 @@ +// +build !linux !amd64 + package mount func mount(device, target, mType string, flag uintptr, data string) error { diff --git a/mount/mountinfo.go b/pkg/mount/mountinfo.go similarity index 100% rename from mount/mountinfo.go rename to pkg/mount/mountinfo.go diff --git 
a/mount/mountinfo_test.go b/pkg/mount/mountinfo_test.go similarity index 100% rename from mount/mountinfo_test.go rename to pkg/mount/mountinfo_test.go diff --git a/pkg/netlink/netlink.go b/pkg/netlink/netlink.go new file mode 100644 index 0000000000..5098b4b816 --- /dev/null +++ b/pkg/netlink/netlink.go @@ -0,0 +1,15 @@ +// Package netlink provides access to low level Netlink sockets and messages. +// +// Actual implementations are in: +// netlink_linux.go +// netlink_unsupported.go +package netlink + +import "net" + +// A Route is a subnet associated with the interface to reach it. +type Route struct { + *net.IPNet + Iface *net.Interface + Default bool +} diff --git a/pkg/netlink/netlink_linux.go b/pkg/netlink/netlink_linux.go index 9a937d1218..0ea5b4dbac 100644 --- a/pkg/netlink/netlink_linux.go +++ b/pkg/netlink/netlink_linux.go @@ -1,3 +1,5 @@ +// +build amd64 + package netlink import ( @@ -473,7 +475,7 @@ func NetworkLinkAdd(name string, linkType string) error { // Returns an array of IPNet for all the currently routed subnets on ipv4 // This is similar to the first column of "ip route" output -func NetworkGetRoutes() ([]*net.IPNet, error) { +func NetworkGetRoutes() ([]Route, error) { native := nativeEndian() s, err := getNetlinkSocket() @@ -496,7 +498,7 @@ func NetworkGetRoutes() ([]*net.IPNet, error) { return nil, err } - res := make([]*net.IPNet, 0) + res := make([]Route, 0) done: for { @@ -525,8 +527,7 @@ done: continue } - var iface *net.Interface = nil - var ipNet *net.IPNet = nil + var r Route msg := (*RtMsg)(unsafe.Pointer(&m.Data[0:syscall.SizeofRtMsg][0])) @@ -546,8 +547,8 @@ done: } if msg.Dst_len == 0 { - // Ignore default routes - continue + // Default routes + r.Default = true } attrs, err := syscall.ParseNetlinkRouteAttr(&m) @@ -558,18 +559,17 @@ done: switch attr.Attr.Type { case syscall.RTA_DST: ip := attr.Value - ipNet = &net.IPNet{ + r.IPNet = &net.IPNet{ IP: ip, Mask: net.CIDRMask(int(msg.Dst_len), 8*len(ip)), } case syscall.RTA_OIF: index := 
int(native.Uint32(attr.Value[0:4])) - iface, _ = net.InterfaceByIndex(index) - _ = iface + r.Iface, _ = net.InterfaceByIndex(index) } } - if ipNet != nil { - res = append(res, ipNet) + if r.Default || r.IPNet != nil { + res = append(res, r) } } } diff --git a/pkg/netlink/netlink_darwin.go b/pkg/netlink/netlink_unsupported.go similarity index 89% rename from pkg/netlink/netlink_darwin.go rename to pkg/netlink/netlink_unsupported.go index dcc60b6764..cd796b373f 100644 --- a/pkg/netlink/netlink_darwin.go +++ b/pkg/netlink/netlink_unsupported.go @@ -1,3 +1,5 @@ +// +build !linux !amd64 + package netlink import ( @@ -5,7 +7,7 @@ import ( "net" ) -func NetworkGetRoutes() ([]*net.IPNet, error) { +func NetworkGetRoutes() ([]Route, error) { return nil, fmt.Errorf("Not implemented") } diff --git a/pkg/sysinfo/MAINTAINERS b/pkg/sysinfo/MAINTAINERS new file mode 100644 index 0000000000..dcc038e7f3 --- /dev/null +++ b/pkg/sysinfo/MAINTAINERS @@ -0,0 +1,2 @@ +Michael Crosby (@crosbymichael) +Guillaume J. 
Charmes (@creack) diff --git a/pkg/sysinfo/sysinfo.go b/pkg/sysinfo/sysinfo.go new file mode 100644 index 0000000000..27af37bb89 --- /dev/null +++ b/pkg/sysinfo/sysinfo.go @@ -0,0 +1,46 @@ +package sysinfo + +import ( + "github.com/dotcloud/docker/pkg/cgroups" + "io/ioutil" + "log" + "os" + "path" +) + +type SysInfo struct { + MemoryLimit bool + SwapLimit bool + IPv4ForwardingDisabled bool + AppArmor bool +} + +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + if cgroupMemoryMountpoint, err := cgroups.FindCgroupMountpoint("memory"); err != nil { + if !quiet { + log.Printf("WARNING: %s\n", err) + } + } else { + _, err1 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.limit_in_bytes")) + _, err2 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.soft_limit_in_bytes")) + sysInfo.MemoryLimit = err1 == nil && err2 == nil + if !sysInfo.MemoryLimit && !quiet { + log.Printf("WARNING: Your kernel does not support cgroup memory limit.") + } + + _, err = ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.memsw.limit_in_bytes")) + sysInfo.SwapLimit = err == nil + if !sysInfo.SwapLimit && !quiet { + log.Printf("WARNING: Your kernel does not support cgroup swap limit.") + } + } + + // Check if AppArmor seems to be enabled on this system. + if _, err := os.Stat("/sys/kernel/security/apparmor"); os.IsNotExist(err) { + sysInfo.AppArmor = false + } else { + sysInfo.AppArmor = true + } + return sysInfo +} diff --git a/pkg/systemd/activation/files.go b/pkg/systemd/activation/files.go new file mode 100644 index 0000000000..0281146310 --- /dev/null +++ b/pkg/systemd/activation/files.go @@ -0,0 +1,55 @@ +/* +Copyright 2013 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Package activation implements primitives for systemd socket activation. +package activation + +import ( + "os" + "strconv" + "syscall" +) + +// based on: https://gist.github.com/alberts/4640792 +const ( + listenFdsStart = 3 +) + +func Files(unsetEnv bool) []*os.File { + if unsetEnv { + // there is no way to unset env in golang os package for now + // https://code.google.com/p/go/issues/detail?id=6423 + defer os.Setenv("LISTEN_PID", "") + defer os.Setenv("LISTEN_FDS", "") + } + + pid, err := strconv.Atoi(os.Getenv("LISTEN_PID")) + if err != nil || pid != os.Getpid() { + return nil + } + + nfds, err := strconv.Atoi(os.Getenv("LISTEN_FDS")) + if err != nil || nfds == 0 { + return nil + } + + var files []*os.File + for fd := listenFdsStart; fd < listenFdsStart+nfds; fd++ { + syscall.CloseOnExec(fd) + files = append(files, os.NewFile(uintptr(fd), "LISTEN_FD_"+strconv.Itoa(fd))) + } + + return files +} diff --git a/pkg/systemd/activation/listeners.go b/pkg/systemd/activation/listeners.go new file mode 100644 index 0000000000..3296a08361 --- /dev/null +++ b/pkg/systemd/activation/listeners.go @@ -0,0 +1,37 @@ +/* +Copyright 2014 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +package activation + +import ( + "fmt" + "net" +) + +// Listeners returns net.Listeners for all socket activated fds passed to this process. +func Listeners(unsetEnv bool) ([]net.Listener, error) { + files := Files(unsetEnv) + listeners := make([]net.Listener, len(files)) + + for i, f := range files { + var err error + listeners[i], err = net.FileListener(f) + if err != nil { + return nil, fmt.Errorf("Error setting up FileListener for fd %d: %s", f.Fd(), err.Error()) + } + } + + return listeners, nil +} diff --git a/pkg/systemd/listendfd.go b/pkg/systemd/listendfd.go new file mode 100644 index 0000000000..f6044328c2 --- /dev/null +++ b/pkg/systemd/listendfd.go @@ -0,0 +1,40 @@ +package systemd + +import ( + "errors" + "net" + "strconv" + + "github.com/dotcloud/docker/pkg/systemd/activation" +) + +// ListenFD returns the specified socket activated files as a slice of +// net.Listeners or all of the activated files if "*" is given. 
+func ListenFD(addr string) ([]net.Listener, error) { + // socket activation + listeners, err := activation.Listeners(false) + if err != nil { + return nil, err + } + + if listeners == nil || len(listeners) == 0 { + return nil, errors.New("No sockets found") + } + + // default to all fds just like unix:// and tcp:// + if addr == "" { + addr = "*" + } + + fdNum, _ := strconv.Atoi(addr) + fdOffset := fdNum - 3 + if (addr != "*") && (len(listeners) < int(fdOffset)+1) { + return nil, errors.New("Too few socket activated files passed in") + } + + if addr == "*" { + return listeners, nil + } + + return []net.Listener{listeners[fdOffset]}, nil +} diff --git a/proxy/stub_proxy.go b/proxy/stub_proxy.go new file mode 100644 index 0000000000..7684427058 --- /dev/null +++ b/proxy/stub_proxy.go @@ -0,0 +1,22 @@ +package proxy + +import ( + "net" +) + +type StubProxy struct { + frontendAddr net.Addr + backendAddr net.Addr +} + +func (p *StubProxy) Run() {} +func (p *StubProxy) Close() {} +func (p *StubProxy) FrontendAddr() net.Addr { return p.frontendAddr } +func (p *StubProxy) BackendAddr() net.Addr { return p.backendAddr } + +func NewStubProxy(frontendAddr, backendAddr net.Addr) (Proxy, error) { + return &StubProxy{ + frontendAddr: frontendAddr, + backendAddr: backendAddr, + }, nil +} diff --git a/reflink_copy_linux.go b/reflink_copy_linux.go index 83c7f75413..74a0cb98f7 100644 --- a/reflink_copy_linux.go +++ b/reflink_copy_linux.go @@ -1,3 +1,5 @@ +// +build amd64 + package docker // FIXME: This could be easily rewritten in pure Go diff --git a/reflink_copy_darwin.go b/reflink_copy_unsupported.go similarity index 91% rename from reflink_copy_darwin.go rename to reflink_copy_unsupported.go index 4f0ea8c4fd..271ed0178f 100644 --- a/reflink_copy_darwin.go +++ b/reflink_copy_unsupported.go @@ -1,3 +1,5 @@ +// +build !linux !amd64 + package docker import ( diff --git a/registry/registry.go b/registry/registry.go index a038fdfb66..df94302305 100644 --- a/registry/registry.go +++ 
b/registry/registry.go @@ -22,7 +22,7 @@ import ( var ( ErrAlreadyExists = errors.New("Image already exists") ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")") - ErrLoginRequired = errors.New("Authentication is required.") + errLoginRequired = errors.New("Authentication is required.") ) func pingRegistryEndpoint(endpoint string) (bool, error) { @@ -59,7 +59,7 @@ func pingRegistryEndpoint(endpoint string) (bool, error) { // versions of the registry if standalone == "" { return true, nil - // Accepted values are "true" (case-insensitive) and "1". + // Accepted values are "true" (case-insensitive) and "1". } else if strings.EqualFold(standalone, "true") || standalone == "1" { return true, nil } @@ -186,7 +186,7 @@ func (r *Registry) GetRemoteHistory(imgID, registry string, token []string) ([]s defer res.Body.Close() if res.StatusCode != 200 { if res.StatusCode == 401 { - return nil, ErrLoginRequired + return nil, errLoginRequired } return nil, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) } @@ -205,15 +205,18 @@ func (r *Registry) GetRemoteHistory(imgID, registry string, token []string) ([]s } // Check if an image exists in the Registry +// TODO: This method should return the errors instead of masking them and returning false func (r *Registry) LookupRemoteImage(imgID, registry string, token []string) bool { req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/json", nil) if err != nil { + utils.Errorf("Error in LookupRemoteImage %s", err) return false } setTokenAuth(req, token) res, err := doWithCookies(r.client, req) if err != nil { + utils.Errorf("Error in LookupRemoteImage %s", err) return false } res.Body.Close() @@ -329,7 +332,7 @@ func (r *Registry) GetRepositoryData(remote string) (*RepositoryData, error) { } defer res.Body.Close() if res.StatusCode == 401 { - return nil, ErrLoginRequired + return nil, 
errLoginRequired } // TODO: Right now we're ignoring checksums in the response body. // In the future, we need to use them to check image validity. @@ -614,6 +617,10 @@ func (r *Registry) SearchRepositories(term string) (*SearchResults, error) { if err != nil { return nil, err } + if r.authConfig != nil && len(r.authConfig.Username) > 0 { + req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password) + } + req.Header.Set("X-Docker-Token", "true") res, err := r.client.Do(req) if err != nil { return nil, err diff --git a/runtime.go b/runtime.go index 731e3a8784..7e4ae79b40 100644 --- a/runtime.go +++ b/runtime.go @@ -4,18 +4,23 @@ import ( "container/list" "fmt" "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/cgroups" + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/execdriver" + "github.com/dotcloud/docker/execdriver/chroot" + "github.com/dotcloud/docker/execdriver/lxc" "github.com/dotcloud/docker/graphdriver" "github.com/dotcloud/docker/graphdriver/aufs" + _ "github.com/dotcloud/docker/graphdriver/btrfs" _ "github.com/dotcloud/docker/graphdriver/devmapper" _ "github.com/dotcloud/docker/graphdriver/vfs" + _ "github.com/dotcloud/docker/networkdriver/lxc" + "github.com/dotcloud/docker/networkdriver/portallocator" "github.com/dotcloud/docker/pkg/graphdb" + "github.com/dotcloud/docker/pkg/sysinfo" "github.com/dotcloud/docker/utils" "io" "io/ioutil" - "log" "os" - "os/exec" "path" "regexp" "sort" @@ -35,27 +40,21 @@ var ( validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`) ) -type Capabilities struct { - MemoryLimit bool - SwapLimit bool - IPv4ForwardingDisabled bool - AppArmor bool -} - type Runtime struct { repository string sysInitPath string containers *list.List - networkManager *NetworkManager graph *Graph repositories *TagStore idIndex *utils.TruncIndex - capabilities *Capabilities + sysInfo *sysinfo.SysInfo volumes *Graph srv *Server + eng *engine.Engine config *DaemonConfig containerGraph 
*graphdb.Database driver graphdriver.Driver + execDriver execdriver.Driver } // List returns an array of all containers registered in the runtime. @@ -135,11 +134,12 @@ func (runtime *Runtime) Register(container *Container) error { } // Get the root filesystem from the driver - rootfs, err := runtime.driver.Get(container.ID) + basefs, err := runtime.driver.Get(container.ID) if err != nil { return fmt.Errorf("Error getting container filesystem %s from driver %s: %s", container.ID, runtime.driver, err) } - container.rootfs = rootfs + defer runtime.driver.Put(container.ID) + container.basefs = basefs container.runtime = runtime @@ -160,11 +160,9 @@ func (runtime *Runtime) Register(container *Container) error { // if so, then we need to restart monitor and init a new lock // If the container is supposed to be running, make sure of it if container.State.IsRunning() { - output, err := exec.Command("lxc-info", "-n", container.ID).CombinedOutput() - if err != nil { - return err - } - if !strings.Contains(string(output), "RUNNING") { + info := runtime.execDriver.Info(container.ID) + + if !info.IsRunning() { utils.Debugf("Container %s was supposed to be running but is not.", container.ID) if runtime.config.AutoRestart { utils.Debugf("Restarting") @@ -188,8 +186,14 @@ func (runtime *Runtime) Register(container *Container) error { } container.waitLock = make(chan struct{}) - go container.monitor() + go container.monitor(nil) } + } else { + // When the container is not running, we still initialize the waitLock + // chan and close it. Receiving on nil chan blocks whereas receiving on a + // closed chan does not. In this case we do not want to block. + container.waitLock = make(chan struct{}) + close(container.waitLock) } return nil } @@ -331,43 +335,6 @@ func (runtime *Runtime) restore() error { return nil } -// FIXME: comment please! 
-func (runtime *Runtime) UpdateCapabilities(quiet bool) { - if cgroupMemoryMountpoint, err := cgroups.FindCgroupMountpoint("memory"); err != nil { - if !quiet { - log.Printf("WARNING: %s\n", err) - } - } else { - _, err1 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.limit_in_bytes")) - _, err2 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.soft_limit_in_bytes")) - runtime.capabilities.MemoryLimit = err1 == nil && err2 == nil - if !runtime.capabilities.MemoryLimit && !quiet { - log.Printf("WARNING: Your kernel does not support cgroup memory limit.") - } - - _, err = ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.memsw.limit_in_bytes")) - runtime.capabilities.SwapLimit = err == nil - if !runtime.capabilities.SwapLimit && !quiet { - log.Printf("WARNING: Your kernel does not support cgroup swap limit.") - } - } - - content, err3 := ioutil.ReadFile("/proc/sys/net/ipv4/ip_forward") - runtime.capabilities.IPv4ForwardingDisabled = err3 != nil || len(content) == 0 || content[0] != '1' - if runtime.capabilities.IPv4ForwardingDisabled && !quiet { - log.Printf("WARNING: IPv4 forwarding is disabled.") - } - - // Check if AppArmor seems to be enabled on this system. - if _, err := os.Stat("/sys/kernel/security/apparmor"); os.IsNotExist(err) { - utils.Debugf("/sys/kernel/security/apparmor not found; assuming AppArmor is not enabled.") - runtime.capabilities.AppArmor = false - } else { - utils.Debugf("/sys/kernel/security/apparmor found; assuming AppArmor is enabled.") - runtime.capabilities.AppArmor = true - } -} - // Create creates a new container from the given configuration with a given name. 
func (runtime *Runtime) Create(config *Config, name string) (*Container, []string, error) { // Lookup image @@ -504,6 +471,8 @@ func (runtime *Runtime) Create(config *Config, name string) (*Container, []strin if err != nil { return nil, nil, err } + defer runtime.driver.Put(initID) + if err := setupInitLayer(initPath); err != nil { return nil, nil, err } @@ -561,9 +530,10 @@ func (runtime *Runtime) Create(config *Config, name string) (*Container, []strin func (runtime *Runtime) Commit(container *Container, repository, tag, comment, author string, config *Config) (*Image, error) { // FIXME: freeze the container before copying it to avoid data corruption? // FIXME: this shouldn't be in commands. - if err := container.EnsureMounted(); err != nil { + if err := container.Mount(); err != nil { return nil, err } + defer container.Unmount() rwTar, err := container.ExportRw() if err != nil { @@ -641,16 +611,15 @@ func (runtime *Runtime) RegisterLink(parent, child *Container, alias string) err } // FIXME: harmonize with NewGraph() -func NewRuntime(config *DaemonConfig) (*Runtime, error) { - runtime, err := NewRuntimeFromDirectory(config) +func NewRuntime(config *DaemonConfig, eng *engine.Engine) (*Runtime, error) { + runtime, err := NewRuntimeFromDirectory(config, eng) if err != nil { return nil, err } - runtime.UpdateCapabilities(false) return runtime, nil } -func NewRuntimeFromDirectory(config *DaemonConfig) (*Runtime, error) { +func NewRuntimeFromDirectory(config *DaemonConfig, eng *engine.Engine) (*Runtime, error) { // Set the default driver graphdriver.DefaultDriver = config.GraphDriver @@ -675,10 +644,6 @@ func NewRuntimeFromDirectory(config *DaemonConfig) (*Runtime, error) { } } - utils.Debugf("Escaping AppArmor confinement") - if err := linkLxcStart(config.Root); err != nil { - return nil, err - } utils.Debugf("Creating images graph") g, err := NewGraph(path.Join(config.Root, "graph"), driver) if err != nil { @@ -701,12 +666,20 @@ func NewRuntimeFromDirectory(config 
*DaemonConfig) (*Runtime, error) { if err != nil { return nil, fmt.Errorf("Couldn't create Tag store: %s", err) } - if config.BridgeIface == "" { - config.BridgeIface = DefaultNetworkBridge - } - netManager, err := newNetworkManager(config) - if err != nil { - return nil, err + + if !config.DisableNetwork { + job := eng.Job("init_networkdriver") + + job.SetenvBool("EnableIptables", config.EnableIptables) + job.SetenvBool("InterContainerCommunication", config.InterContainerCommunication) + job.SetenvBool("EnableIpForward", config.EnableIpForward) + job.Setenv("BridgeIface", config.BridgeIface) + job.Setenv("BridgeIP", config.BridgeIP) + job.Setenv("DefaultBindingIP", config.DefaultIp.String()) + + if err := job.Run(); err != nil { + return nil, err + } } graphdbPath := path.Join(config.Root, "linkgraph.db") @@ -735,19 +708,40 @@ func NewRuntimeFromDirectory(config *DaemonConfig) (*Runtime, error) { sysInitPath = localCopy } + sysInfo := sysinfo.New(false) + + /* + temporarilly disabled. + */ + if false { + var ed execdriver.Driver + if driver := os.Getenv("EXEC_DRIVER"); driver == "lxc" { + ed, err = lxc.NewDriver(config.Root, sysInfo.AppArmor) + } else { + ed, err = chroot.NewDriver() + } + if ed != nil { + } + } + ed, err := lxc.NewDriver(config.Root, sysInfo.AppArmor) + if err != nil { + return nil, err + } + runtime := &Runtime{ repository: runtimeRepo, containers: list.New(), - networkManager: netManager, graph: g, repositories: repositories, idIndex: utils.NewTruncIndex(), - capabilities: &Capabilities{}, + sysInfo: sysInfo, volumes: volumes, config: config, containerGraph: graph, driver: driver, sysInitPath: sysInitPath, + execDriver: ed, + eng: eng, } if err := runtime.restore(); err != nil { @@ -758,8 +752,8 @@ func NewRuntimeFromDirectory(config *DaemonConfig) (*Runtime, error) { func (runtime *Runtime) Close() error { errorsStrings := []string{} - if err := runtime.networkManager.Close(); err != nil { - utils.Errorf("runtime.networkManager.Close(): %s", 
err.Error()) + if err := portallocator.ReleaseAll(); err != nil { + utils.Errorf("portallocator.ReleaseAll(): %s", err) errorsStrings = append(errorsStrings, err.Error()) } if err := runtime.driver.Cleanup(); err != nil { @@ -781,18 +775,17 @@ func (runtime *Runtime) Mount(container *Container) error { if err != nil { return fmt.Errorf("Error getting container %s from driver %s: %s", container.ID, runtime.driver, err) } - if container.rootfs == "" { - container.rootfs = dir - } else if container.rootfs != dir { + if container.basefs == "" { + container.basefs = dir + } else if container.basefs != dir { return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", - runtime.driver, container.ID, container.rootfs, dir) + runtime.driver, container.ID, container.basefs, dir) } return nil } func (runtime *Runtime) Unmount(container *Container) error { - // FIXME: Unmount is deprecated because drivers are responsible for mounting - // and unmounting when necessary. Use driver.Remove() instead. 
+ runtime.driver.Put(container.ID) return nil } @@ -804,10 +797,12 @@ func (runtime *Runtime) Changes(container *Container) ([]archive.Change, error) if err != nil { return nil, fmt.Errorf("Error getting container rootfs %s from driver %s: %s", container.ID, container.runtime.driver, err) } + defer runtime.driver.Put(container.ID) initDir, err := runtime.driver.Get(container.ID + "-init") if err != nil { return nil, fmt.Errorf("Error getting container init rootfs %s from driver %s: %s", container.ID, container.runtime.driver, err) } + defer runtime.driver.Put(container.ID + "-init") return archive.ChangesDirs(cDir, initDir) } @@ -826,7 +821,23 @@ func (runtime *Runtime) Diff(container *Container) (archive.Archive, error) { return nil, fmt.Errorf("Error getting container rootfs %s from driver %s: %s", container.ID, container.runtime.driver, err) } - return archive.ExportChanges(cDir, changes) + archive, err := archive.ExportChanges(cDir, changes) + if err != nil { + return nil, err + } + return EofReader(archive, func() { runtime.driver.Put(container.ID) }), nil +} + +func (runtime *Runtime) Run(c *Container, startCallback execdriver.StartCallback) (int, error) { + return runtime.execDriver.Run(c.command, startCallback) +} + +func (runtime *Runtime) Kill(c *Container, sig int) error { + return runtime.execDriver.Kill(c.command, sig) +} + +func (runtime *Runtime) RestoreCommand(c *Container) error { + return runtime.execDriver.Restore(c.command) } // Nuke kills all containers then removes all content @@ -848,23 +859,6 @@ func (runtime *Runtime) Nuke() error { return os.RemoveAll(runtime.config.Root) } -func linkLxcStart(root string) error { - sourcePath, err := exec.LookPath("lxc-start") - if err != nil { - return err - } - targetPath := path.Join(root, "lxc-start-unconfined") - - if _, err := os.Lstat(targetPath); err != nil && !os.IsNotExist(err) { - return err - } else if err == nil { - if err := os.Remove(targetPath); err != nil { - return err - } - } - return 
os.Symlink(sourcePath, targetPath) -} - // FIXME: this is a convenience function for integration tests // which need direct access to runtime.graph. // Once the tests switch to using engine and jobs, this method diff --git a/server.go b/server.go index 3ad5122d25..2942eaeb5b 100644 --- a/server.go +++ b/server.go @@ -6,7 +6,6 @@ import ( "fmt" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/auth" - "github.com/dotcloud/docker/cgroups" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/pkg/graphdb" "github.com/dotcloud/docker/registry" @@ -34,19 +33,17 @@ func (srv *Server) Close() error { } func init() { - engine.Register("initapi", jobInitApi) + engine.Register("initserver", jobInitServer) } // jobInitApi runs the remote api server `srv` as a daemon, // Only one api server can run at the same time - this is enforced by a pidfile. // The signals SIGINT, SIGQUIT and SIGTERM are intercepted for cleanup. -func jobInitApi(job *engine.Job) engine.Status { +func jobInitServer(job *engine.Job) engine.Status { job.Logf("Creating server") - // FIXME: ImportEnv deprecates ConfigFromJob - srv, err := NewServer(job.Eng, ConfigFromJob(job)) + srv, err := NewServer(job.Eng, DaemonConfigFromJob(job)) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } if srv.runtime.config.Pidfile != "" { job.Logf("Creating pidfile") @@ -67,85 +64,43 @@ func jobInitApi(job *engine.Job) engine.Status { }() job.Eng.Hack_SetGlobalVar("httpapi.server", srv) job.Eng.Hack_SetGlobalVar("httpapi.runtime", srv.runtime) - // https://github.com/dotcloud/docker/issues/2768 - if srv.runtime.networkManager.bridgeNetwork != nil { - job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", srv.runtime.networkManager.bridgeNetwork.IP) - } - if err := job.Eng.Register("export", srv.ContainerExport); err != nil { - job.Error(err) - return engine.StatusErr - } - if err := job.Eng.Register("create", srv.ContainerCreate); err != nil { - job.Error(err) - return 
engine.StatusErr - } - if err := job.Eng.Register("stop", srv.ContainerStop); err != nil { - job.Error(err) - return engine.StatusErr - } - if err := job.Eng.Register("start", srv.ContainerStart); err != nil { - job.Error(err) - return engine.StatusErr - } - if err := job.Eng.Register("kill", srv.ContainerKill); err != nil { - job.Error(err) - return engine.StatusErr - } - if err := job.Eng.Register("serveapi", srv.ListenAndServe); err != nil { - job.Error(err) - return engine.StatusErr - } - if err := job.Eng.Register("wait", srv.ContainerWait); err != nil { - job.Error(err) - return engine.StatusErr - } - if err := job.Eng.Register("tag", srv.ImageTag); err != nil { - job.Error(err) - return engine.StatusErr - } - if err := job.Eng.Register("resize", srv.ContainerResize); err != nil { - job.Error(err) - return engine.StatusErr - } - if err := job.Eng.Register("commit", srv.ContainerCommit); err != nil { - job.Error(err) - return engine.StatusErr - } - if err := job.Eng.Register("info", srv.DockerInfo); err != nil { - job.Error(err) - return engine.StatusErr - } - return engine.StatusOK -} -func (srv *Server) ListenAndServe(job *engine.Job) engine.Status { - protoAddrs := job.Args - chErrors := make(chan error, len(protoAddrs)) - for _, protoAddr := range protoAddrs { - protoAddrParts := strings.SplitN(protoAddr, "://", 2) - switch protoAddrParts[0] { - case "unix": - if err := syscall.Unlink(protoAddrParts[1]); err != nil && !os.IsNotExist(err) { - log.Fatal(err) - } - case "tcp": - if !strings.HasPrefix(protoAddrParts[1], "127.0.0.1") { - log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") - } - default: - job.Errorf("Invalid protocol format.") - return engine.StatusErr - } - go func() { - // FIXME: merge Server.ListenAndServe with ListenAndServe - chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], srv, job.GetenvBool("Logging")) - }() - } - for i := 0; i < len(protoAddrs); i += 1 { - err := 
<-chErrors - if err != nil { - job.Error(err) - return engine.StatusErr + for name, handler := range map[string]engine.Handler{ + "export": srv.ContainerExport, + "create": srv.ContainerCreate, + "stop": srv.ContainerStop, + "restart": srv.ContainerRestart, + "start": srv.ContainerStart, + "kill": srv.ContainerKill, + "wait": srv.ContainerWait, + "tag": srv.ImageTag, + "resize": srv.ContainerResize, + "commit": srv.ContainerCommit, + "info": srv.DockerInfo, + "container_delete": srv.ContainerDestroy, + "image_export": srv.ImageExport, + "images": srv.Images, + "history": srv.ImageHistory, + "viz": srv.ImagesViz, + "container_copy": srv.ContainerCopy, + "insert": srv.ImageInsert, + "attach": srv.ContainerAttach, + "search": srv.ImagesSearch, + "changes": srv.ContainerChanges, + "top": srv.ContainerTop, + "load": srv.ImageLoad, + "build": srv.Build, + "pull": srv.ImagePull, + "import": srv.ImageImport, + "image_delete": srv.ImageDelete, + "inspect": srv.JobInspect, + "events": srv.Events, + "push": srv.ImagePush, + "containers": srv.Containers, + "auth": srv.Auth, + } { + if err := job.Eng.Register(name, handler); err != nil { + return job.Error(err) } } return engine.StatusOK @@ -174,68 +129,165 @@ func (v *simpleVersionInfo) Version() string { // for the container to exit. // If a signal is given, then just send it to the container and return. 
func (srv *Server) ContainerKill(job *engine.Job) engine.Status { + signalMap := map[string]syscall.Signal{ + "HUP": syscall.SIGHUP, + "INT": syscall.SIGINT, + "QUIT": syscall.SIGQUIT, + "ILL": syscall.SIGILL, + "TRAP": syscall.SIGTRAP, + "ABRT": syscall.SIGABRT, + "BUS": syscall.SIGBUS, + "FPE": syscall.SIGFPE, + "KILL": syscall.SIGKILL, + "USR1": syscall.SIGUSR1, + "SEGV": syscall.SIGSEGV, + "USR2": syscall.SIGUSR2, + "PIPE": syscall.SIGPIPE, + "ALRM": syscall.SIGALRM, + "TERM": syscall.SIGTERM, + //"STKFLT": syscall.SIGSTKFLT, + "CHLD": syscall.SIGCHLD, + "CONT": syscall.SIGCONT, + "STOP": syscall.SIGSTOP, + "TSTP": syscall.SIGTSTP, + "TTIN": syscall.SIGTTIN, + "TTOU": syscall.SIGTTOU, + "URG": syscall.SIGURG, + "XCPU": syscall.SIGXCPU, + "XFSZ": syscall.SIGXFSZ, + "VTALRM": syscall.SIGVTALRM, + "PROF": syscall.SIGPROF, + "WINCH": syscall.SIGWINCH, + "IO": syscall.SIGIO, + //"PWR": syscall.SIGPWR, + "SYS": syscall.SIGSYS, + } + if n := len(job.Args); n < 1 || n > 2 { - job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name) - return engine.StatusErr + return job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name) } name := job.Args[0] var sig uint64 if len(job.Args) == 2 && job.Args[1] != "" { - var err error - // The largest legal signal is 31, so let's parse on 5 bits - sig, err = strconv.ParseUint(job.Args[1], 10, 5) - if err != nil { - job.Errorf("Invalid signal: %s", job.Args[1]) - return engine.StatusErr + sig = uint64(signalMap[job.Args[1]]) + if sig == 0 { + var err error + // The largest legal signal is 31, so let's parse on 5 bits + sig, err = strconv.ParseUint(job.Args[1], 10, 5) + if err != nil { + return job.Errorf("Invalid signal: %s", job.Args[1]) + } } } if container := srv.runtime.Get(name); container != nil { - // If no signal is passed, perform regular Kill (SIGKILL + wait()) - if sig == 0 { + // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) + if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL { if err := 
container.Kill(); err != nil { - job.Errorf("Cannot kill container %s: %s", name, err) - return engine.StatusErr + return job.Errorf("Cannot kill container %s: %s", name, err) } srv.LogEvent("kill", container.ID, srv.runtime.repositories.ImageName(container.Image)) } else { // Otherwise, just send the requested signal if err := container.kill(int(sig)); err != nil { - job.Errorf("Cannot kill container %s: %s", name, err) - return engine.StatusErr + return job.Errorf("Cannot kill container %s: %s", name, err) } // FIXME: Add event for signals } } else { - job.Errorf("No such container: %s", name) - return engine.StatusErr + return job.Errorf("No such container: %s", name) + } + return engine.StatusOK +} + +func (srv *Server) Auth(job *engine.Job) engine.Status { + authConfig := &auth.AuthConfig{} + job.GetenvJson("authConfig", authConfig) + status, err := auth.Login(authConfig, srv.HTTPRequestFactory(nil)) + if err != nil { + return job.Error(err) + } + job.Printf("%s\n", status) + return engine.StatusOK +} + +func (srv *Server) Events(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s FROM", job.Name) + } + + var ( + from = job.Args[0] + since = job.GetenvInt64("since") + ) + sendEvent := func(event *utils.JSONMessage) error { + b, err := json.Marshal(event) + if err != nil { + return fmt.Errorf("JSON error") + } + _, err = job.Stdout.Write(b) + if err != nil { + // On error, evict the listener + utils.Errorf("%s", err) + srv.Lock() + delete(srv.listeners, from) + srv.Unlock() + return err + } + return nil + } + + listener := make(chan utils.JSONMessage) + srv.Lock() + srv.listeners[from] = listener + srv.Unlock() + job.Stdout.Write(nil) // flush + if since != 0 { + // If since, send previous events that happened after the timestamp + for _, event := range srv.GetEvents() { + if event.Time >= since { + err := sendEvent(&event) + if err != nil && err.Error() == "JSON error" { + continue + } + if err != nil { + job.Error(err) + 
return engine.StatusErr + } + } + } + } + for event := range listener { + err := sendEvent(&event) + if err != nil && err.Error() == "JSON error" { + continue + } + if err != nil { + return job.Error(err) + } } return engine.StatusOK } func (srv *Server) ContainerExport(job *engine.Job) engine.Status { if len(job.Args) != 1 { - job.Errorf("Usage: %s container_id", job.Name) - return engine.StatusErr + return job.Errorf("Usage: %s container_id", job.Name) } name := job.Args[0] if container := srv.runtime.Get(name); container != nil { data, err := container.Export() if err != nil { - job.Errorf("%s: %s", name, err) - return engine.StatusErr + return job.Errorf("%s: %s", name, err) } // Stream the entire contents of the container (basically a volatile snapshot) if _, err := io.Copy(job.Stdout, data); err != nil { - job.Errorf("%s: %s", name, err) - return engine.StatusErr + return job.Errorf("%s: %s", name, err) } // FIXME: factor job-specific LogEvent to engine.Job.Run() srv.LogEvent("export", container.ID, srv.runtime.repositories.ImageName(container.Image)) return engine.StatusOK } - job.Errorf("No such container: %s", name) - return engine.StatusErr + return job.Errorf("No such container: %s", name) } // ImageExport exports all images with the given tag. All versions @@ -243,11 +295,15 @@ func (srv *Server) ContainerExport(job *engine.Job) engine.Status { // uncompressed tar ball. // name is the set of tags to export. // out is the writer where the images are written to. 
-func (srv *Server) ImageExport(name string, out io.Writer) error { +func (srv *Server) ImageExport(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s CONTAINER\n", job.Name) + } + name := job.Args[0] // get image json tempdir, err := ioutil.TempDir("", "docker-export-") if err != nil { - return err + return job.Error(err) } defer os.RemoveAll(tempdir) @@ -255,17 +311,17 @@ func (srv *Server) ImageExport(name string, out io.Writer) error { rootRepo, err := srv.runtime.repositories.Get(name) if err != nil { - return err + return job.Error(err) } if rootRepo != nil { for _, id := range rootRepo { image, err := srv.ImageInspect(id) if err != nil { - return err + return job.Error(err) } if err := srv.exportImage(image, tempdir); err != nil { - return err + return job.Error(err) } } @@ -275,27 +331,27 @@ func (srv *Server) ImageExport(name string, out io.Writer) error { rootRepoJson, _ := json.Marshal(rootRepoMap) if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJson, os.ModeAppend); err != nil { - return err + return job.Error(err) } } else { image, err := srv.ImageInspect(name) if err != nil { - return err + return job.Error(err) } if err := srv.exportImage(image, tempdir); err != nil { - return err + return job.Error(err) } } fs, err := archive.Tar(tempdir, archive.Uncompressed) if err != nil { - return err + return job.Error(err) } - if _, err := io.Copy(out, fs); err != nil { - return err + if _, err := io.Copy(job.Stdout, fs); err != nil { + return job.Error(err) } - return nil + return engine.StatusOK } func (srv *Server) exportImage(image *Image, tempdir string) error { @@ -353,12 +409,90 @@ func (srv *Server) exportImage(image *Image, tempdir string) error { return nil } +func (srv *Server) Build(job *engine.Job) engine.Status { + if len(job.Args) != 0 { + return job.Errorf("Usage: %s\n", job.Name) + } + var ( + remoteURL = job.Getenv("remote") + repoName = job.Getenv("t") + suppressOutput = 
job.GetenvBool("q") + noCache = job.GetenvBool("nocache") + rm = job.GetenvBool("rm") + authConfig = &auth.AuthConfig{} + configFile = &auth.ConfigFile{} + tag string + context io.Reader + ) + job.GetenvJson("authConfig", authConfig) + job.GetenvJson("configFile", configFile) + repoName, tag = utils.ParseRepositoryTag(repoName) + + if remoteURL == "" { + context = job.Stdin + } else if utils.IsGIT(remoteURL) { + if !strings.HasPrefix(remoteURL, "git://") { + remoteURL = "https://" + remoteURL + } + root, err := ioutil.TempDir("", "docker-build-git") + if err != nil { + return job.Error(err) + } + defer os.RemoveAll(root) + + if output, err := exec.Command("git", "clone", remoteURL, root).CombinedOutput(); err != nil { + return job.Errorf("Error trying to use git: %s (%s)", err, output) + } + + c, err := archive.Tar(root, archive.Uncompressed) + if err != nil { + return job.Error(err) + } + context = c + } else if utils.IsURL(remoteURL) { + f, err := utils.Download(remoteURL) + if err != nil { + return job.Error(err) + } + defer f.Body.Close() + dockerFile, err := ioutil.ReadAll(f.Body) + if err != nil { + return job.Error(err) + } + c, err := MkBuildContext(string(dockerFile), nil) + if err != nil { + return job.Error(err) + } + context = c + } + + sf := utils.NewStreamFormatter(job.GetenvBool("json")) + b := NewBuildFile(srv, + &StdoutFormater{ + Writer: job.Stdout, + StreamFormatter: sf, + }, + &StderrFormater{ + Writer: job.Stdout, + StreamFormatter: sf, + }, + !suppressOutput, !noCache, rm, job.Stdout, sf, authConfig, configFile) + id, err := b.Build(context) + if err != nil { + return job.Error(err) + } + if repoName != "" { + srv.runtime.repositories.Set(repoName, tag, id, false) + } + return engine.StatusOK +} + // Loads a set of images into the repository. This is the complementary of ImageExport. // The input stream is an uncompressed tar ball containing images and metadata. 
-func (srv *Server) ImageLoad(in io.Reader) error { +func (srv *Server) ImageLoad(job *engine.Job) engine.Status { tmpImageDir, err := ioutil.TempDir("", "docker-import-") if err != nil { - return err + return job.Error(err) } defer os.RemoveAll(tmpImageDir) @@ -369,33 +503,33 @@ func (srv *Server) ImageLoad(in io.Reader) error { tarFile, err := os.Create(repoTarFile) if err != nil { - return err + return job.Error(err) } - if _, err := io.Copy(tarFile, in); err != nil { - return err + if _, err := io.Copy(tarFile, job.Stdin); err != nil { + return job.Error(err) } tarFile.Close() repoFile, err := os.Open(repoTarFile) if err != nil { - return err + return job.Error(err) } if err := os.Mkdir(repoDir, os.ModeDir); err != nil { - return err + return job.Error(err) } if err := archive.Untar(repoFile, repoDir, nil); err != nil { - return err + return job.Error(err) } dirs, err := ioutil.ReadDir(repoDir) if err != nil { - return err + return job.Error(err) } for _, d := range dirs { if d.IsDir() { if err := srv.recursiveLoad(d.Name(), tmpImageDir); err != nil { - return err + return job.Error(err) } } } @@ -404,21 +538,21 @@ func (srv *Server) ImageLoad(in io.Reader) error { if err == nil { repositories := map[string]Repository{} if err := json.Unmarshal(repositoriesJson, &repositories); err != nil { - return err + return job.Error(err) } for imageName, tagMap := range repositories { for tag, address := range tagMap { if err := srv.runtime.repositories.Set(imageName, tag, address, true); err != nil { - return err + return job.Error(err) } } } } else if !os.IsNotExist(err) { - return err + return job.Error(err) } - return nil + return engine.StatusOK } func (srv *Server) recursiveLoad(address, tmpImageDir string) error { @@ -457,59 +591,93 @@ func (srv *Server) recursiveLoad(address, tmpImageDir string) error { return nil } -func (srv *Server) ImagesSearch(term string) ([]registry.SearchResult, error) { - r, err := registry.NewRegistry(nil, srv.HTTPRequestFactory(nil), 
auth.IndexServerAddress()) +func (srv *Server) ImagesSearch(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 { + return job.Errorf("Usage: %s TERM", job.Name) + } + var ( + term = job.Args[0] + metaHeaders = map[string][]string{} + authConfig = &auth.AuthConfig{} + ) + job.GetenvJson("authConfig", authConfig) + job.GetenvJson("metaHeaders", metaHeaders) + + r, err := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), auth.IndexServerAddress()) if err != nil { - return nil, err + return job.Error(err) } results, err := r.SearchRepositories(term) if err != nil { - return nil, err + return job.Error(err) } - return results.Results, nil + outs := engine.NewTable("star_count", 0) + for _, result := range results.Results { + out := &engine.Env{} + out.Import(result) + outs.Add(out) + } + outs.ReverseSort() + if _, err := outs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK } -func (srv *Server) ImageInsert(name, url, path string, out io.Writer, sf *utils.StreamFormatter) error { - out = utils.NewWriteFlusher(out) +func (srv *Server) ImageInsert(job *engine.Job) engine.Status { + if len(job.Args) != 3 { + return job.Errorf("Usage: %s IMAGE URL PATH\n", job.Name) + } + + var ( + name = job.Args[0] + url = job.Args[1] + path = job.Args[2] + ) + + sf := utils.NewStreamFormatter(job.GetenvBool("json")) + + out := utils.NewWriteFlusher(job.Stdout) img, err := srv.runtime.repositories.LookupImage(name) if err != nil { - return err + return job.Error(err) } file, err := utils.Download(url) if err != nil { - return err + return job.Error(err) } defer file.Body.Close() - config, _, _, err := ParseRun([]string{img.ID, "echo", "insert", url, path}, srv.runtime.capabilities) + config, _, _, err := ParseRun([]string{img.ID, "echo", "insert", url, path}, srv.runtime.sysInfo) if err != nil { - return err + return job.Error(err) } c, _, err := srv.runtime.Create(config, "") if err != nil { - return err + return 
job.Error(err) } - if err := c.Inject(utils.ProgressReader(file.Body, int(file.ContentLength), out, sf, false, "", "Downloading"), path); err != nil { - return err + if err := c.Inject(utils.ProgressReader(file.Body, int(file.ContentLength), out, sf, false, utils.TruncateID(img.ID), "Downloading"), path); err != nil { + return job.Error(err) } // FIXME: Handle custom repo, tag comment, author img, err = srv.runtime.Commit(c, "", "", img.Comment, img.Author, nil) if err != nil { - return err + out.Write(sf.FormatError(err)) + return engine.StatusErr } - out.Write(sf.FormatStatus(img.ID, "")) - return nil + out.Write(sf.FormatStatus("", img.ID)) + return engine.StatusOK } -func (srv *Server) ImagesViz(out io.Writer) error { +func (srv *Server) ImagesViz(job *engine.Job) engine.Status { images, _ := srv.runtime.graph.Map() if images == nil { - return nil + return engine.StatusOK } - out.Write([]byte("digraph docker {\n")) + job.Stdout.Write([]byte("digraph docker {\n")) var ( parentImage *Image @@ -518,12 +686,12 @@ func (srv *Server) ImagesViz(out io.Writer) error { for _, image := range images { parentImage, err = image.GetParent() if err != nil { - return fmt.Errorf("Error while getting parent image: %v", err) + return job.Errorf("Error while getting parent image: %v", err) } if parentImage != nil { - out.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n")) + job.Stdout.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n")) } else { - out.Write([]byte(" base -> \"" + image.ID + "\" [style=invis]\n")) + job.Stdout.Write([]byte(" base -> \"" + image.ID + "\" [style=invis]\n")) } } @@ -536,29 +704,29 @@ func (srv *Server) ImagesViz(out io.Writer) error { } for id, repos := range reporefs { - out.Write([]byte(" \"" + id + "\" [label=\"" + id + "\\n" + strings.Join(repos, "\\n") + "\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n")) + job.Stdout.Write([]byte(" \"" + id + "\" [label=\"" + id + "\\n" + 
strings.Join(repos, "\\n") + "\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n")) } - out.Write([]byte(" base [style=invisible]\n}\n")) - return nil + job.Stdout.Write([]byte(" base [style=invisible]\n}\n")) + return engine.StatusOK } -func (srv *Server) Images(all bool, filter string) ([]APIImages, error) { +func (srv *Server) Images(job *engine.Job) engine.Status { var ( allImages map[string]*Image err error ) - if all { + if job.GetenvBool("all") { allImages, err = srv.runtime.graph.Map() } else { allImages, err = srv.runtime.graph.Heads() } if err != nil { - return nil, err + return job.Error(err) } - lookup := make(map[string]APIImages) + lookup := make(map[string]*engine.Env) for name, repository := range srv.runtime.repositories.Repositories { - if filter != "" { - if match, _ := path.Match(filter, name); !match { + if job.Getenv("filter") != "" { + if match, _ := path.Match(job.Getenv("filter"), name); !match { continue } } @@ -570,48 +738,46 @@ func (srv *Server) Images(all bool, filter string) ([]APIImages, error) { } if out, exists := lookup[id]; exists { - out.RepoTags = append(out.RepoTags, fmt.Sprintf("%s:%s", name, tag)) - - lookup[id] = out + out.SetList("RepoTags", append(out.GetList("RepoTags"), fmt.Sprintf("%s:%s", name, tag))) } else { - var out APIImages - + out := &engine.Env{} delete(allImages, id) - - out.ParentId = image.Parent - out.RepoTags = []string{fmt.Sprintf("%s:%s", name, tag)} - out.ID = image.ID - out.Created = image.Created.Unix() - out.Size = image.Size - out.VirtualSize = image.getParentsSize(0) + image.Size - + out.Set("ParentId", image.Parent) + out.SetList("RepoTags", []string{fmt.Sprintf("%s:%s", name, tag)}) + out.Set("Id", image.ID) + out.SetInt64("Created", image.Created.Unix()) + out.SetInt64("Size", image.Size) + out.SetInt64("VirtualSize", image.getParentsSize(0)+image.Size) lookup[id] = out } } } - outs := make([]APIImages, 0, len(lookup)) + outs := engine.NewTable("Created", len(lookup)) for _, 
value := range lookup { - outs = append(outs, value) + outs.Add(value) } // Display images which aren't part of a repository/tag - if filter == "" { + if job.Getenv("filter") == "" { for _, image := range allImages { - var out APIImages - out.ID = image.ID - out.ParentId = image.Parent - out.RepoTags = []string{":"} - out.Created = image.Created.Unix() - out.Size = image.Size - out.VirtualSize = image.getParentsSize(0) + image.Size - outs = append(outs, out) + out := &engine.Env{} + out.Set("ParentId", image.Parent) + out.SetList("RepoTags", []string{":"}) + out.Set("Id", image.ID) + out.SetInt64("Created", image.Created.Unix()) + out.SetInt64("Size", image.Size) + out.SetInt64("VirtualSize", image.getParentsSize(0)+image.Size) + outs.Add(out) } } - sortImagesByCreationAndTag(outs) - return outs, nil + outs.ReverseSort() + if _, err := outs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK } func (srv *Server) DockerInfo(job *engine.Job) engine.Status { @@ -622,13 +788,6 @@ func (srv *Server) DockerInfo(job *engine.Job) engine.Status { } else { imgcount = len(images) } - lxcVersion := "" - if output, err := exec.Command("lxc-version").CombinedOutput(); err == nil { - outputStr := string(output) - if len(strings.SplitN(outputStr, ":", 2)) == 2 { - lxcVersion = strings.TrimSpace(strings.SplitN(string(output), ":", 2)[1]) - } - } kernelVersion := "" if kv, err := utils.GetKernelVersion(); err == nil { kernelVersion = kv.String() @@ -646,29 +805,32 @@ func (srv *Server) DockerInfo(job *engine.Job) engine.Status { v.SetInt("Images", imgcount) v.Set("Driver", srv.runtime.driver.String()) v.SetJson("DriverStatus", srv.runtime.driver.Status()) - v.SetBool("MemoryLimit", srv.runtime.capabilities.MemoryLimit) - v.SetBool("SwapLimit", srv.runtime.capabilities.SwapLimit) - v.SetBool("IPv4Forwarding", !srv.runtime.capabilities.IPv4ForwardingDisabled) + v.SetBool("MemoryLimit", srv.runtime.sysInfo.MemoryLimit) + v.SetBool("SwapLimit", 
srv.runtime.sysInfo.SwapLimit) + v.SetBool("IPv4Forwarding", !srv.runtime.sysInfo.IPv4ForwardingDisabled) v.SetBool("Debug", os.Getenv("DEBUG") != "") v.SetInt("NFd", utils.GetTotalUsedFds()) v.SetInt("NGoroutines", runtime.NumGoroutine()) - v.Set("LXCVersion", lxcVersion) + v.Set("ExecutionDriver", srv.runtime.execDriver.Name()) v.SetInt("NEventsListener", len(srv.events)) v.Set("KernelVersion", kernelVersion) v.Set("IndexServerAddress", auth.IndexServerAddress()) v.Set("InitSha1", utils.INITSHA1) v.Set("InitPath", initPath) if _, err := v.WriteTo(job.Stdout); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } return engine.StatusOK } -func (srv *Server) ImageHistory(name string) ([]APIHistory, error) { +func (srv *Server) ImageHistory(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 { + return job.Errorf("Usage: %s IMAGE", job.Name) + } + name := job.Args[0] image, err := srv.runtime.repositories.LookupImage(name) if err != nil { - return nil, err + return job.Error(err) } lookupMap := make(map[string][]string) @@ -682,43 +844,54 @@ func (srv *Server) ImageHistory(name string) ([]APIHistory, error) { } } - outs := []APIHistory{} //produce [] when empty instead of 'null' + outs := engine.NewTable("Created", 0) err = image.WalkHistory(func(img *Image) error { - var out APIHistory - out.ID = img.ID - out.Created = img.Created.Unix() - out.CreatedBy = strings.Join(img.ContainerConfig.Cmd, " ") - out.Tags = lookupMap[img.ID] - out.Size = img.Size - outs = append(outs, out) + out := &engine.Env{} + out.Set("Id", img.ID) + out.SetInt64("Created", img.Created.Unix()) + out.Set("CreatedBy", strings.Join(img.ContainerConfig.Cmd, " ")) + out.SetList("Tags", lookupMap[img.ID]) + out.SetInt64("Size", img.Size) + outs.Add(out) return nil }) - return outs, nil - + outs.ReverseSort() + if _, err := outs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK } -func (srv *Server) ContainerTop(name, 
psArgs string) (*APITop, error) { +func (srv *Server) ContainerTop(job *engine.Job) engine.Status { + if len(job.Args) != 1 && len(job.Args) != 2 { + return job.Errorf("Not enough arguments. Usage: %s CONTAINER [PS_ARGS]\n", job.Name) + } + var ( + name = job.Args[0] + psArgs = "-ef" + ) + + if len(job.Args) == 2 && job.Args[1] != "" { + psArgs = job.Args[1] + } + if container := srv.runtime.Get(name); container != nil { if !container.State.IsRunning() { - return nil, fmt.Errorf("Container %s is not running", name) + return job.Errorf("Container %s is not running", name) } - pids, err := cgroups.GetPidsForContainer(container.ID) + pids, err := srv.runtime.execDriver.GetPidsForContainer(container.ID) if err != nil { - return nil, err - } - if len(psArgs) == 0 { - psArgs = "-ef" + return job.Error(err) } output, err := exec.Command("ps", psArgs).Output() if err != nil { - return nil, fmt.Errorf("Error running ps: %s", err) + return job.Errorf("Error running ps: %s", err) } lines := strings.Split(string(output), "\n") header := strings.Fields(lines[0]) - procs := APITop{ - Titles: header, - } + out := &engine.Env{} + out.SetList("Titles", header) pidIndex := -1 for i, name := range header { @@ -727,9 +900,10 @@ func (srv *Server) ContainerTop(name, psArgs string) (*APITop, error) { } } if pidIndex == -1 { - return nil, errors.New("Couldn't find PID field in ps output") + return job.Errorf("Couldn't find PID field in ps output") } + processes := [][]string{} for _, line := range lines[1:] { if len(line) == 0 { continue @@ -737,37 +911,65 @@ func (srv *Server) ContainerTop(name, psArgs string) (*APITop, error) { fields := strings.Fields(line) p, err := strconv.Atoi(fields[pidIndex]) if err != nil { - return nil, fmt.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) + return job.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) } for _, pid := range pids { if pid == p { // Make sure number of fields equals number of header titles // merging "overhanging" 
fields - processes := fields[:len(procs.Titles)-1] - processes = append(processes, strings.Join(fields[len(procs.Titles)-1:], " ")) - - procs.Processes = append(procs.Processes, processes) + process := fields[:len(header)-1] + process = append(process, strings.Join(fields[len(header)-1:], " ")) + processes = append(processes, process) } } } - return &procs, nil + out.SetJson("Processes", processes) + out.WriteTo(job.Stdout) + return engine.StatusOK } - return nil, fmt.Errorf("No such container: %s", name) + return job.Errorf("No such container: %s", name) } -func (srv *Server) ContainerChanges(name string) ([]archive.Change, error) { +func (srv *Server) ContainerChanges(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 { + return job.Errorf("Usage: %s CONTAINER", job.Name) + } + name := job.Args[0] if container := srv.runtime.Get(name); container != nil { - return container.Changes() + outs := engine.NewTable("", 0) + changes, err := container.Changes() + if err != nil { + return job.Error(err) + } + for _, change := range changes { + out := &engine.Env{} + if err := out.Import(change); err != nil { + return job.Error(err) + } + outs.Add(out) + } + if _, err := outs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + } else { + return job.Errorf("No such container: %s", name) } - return nil, fmt.Errorf("No such container: %s", name) + return engine.StatusOK } -func (srv *Server) Containers(all, size bool, n int, since, before string) []APIContainers { - var foundBefore bool - var displayed int - out := []APIContainers{} +func (srv *Server) Containers(job *engine.Job) engine.Status { + var ( + foundBefore bool + displayed int + all = job.GetenvBool("all") + since = job.Getenv("since") + before = job.Getenv("before") + n = job.GetenvInt("limit") + size = job.GetenvBool("size") + ) + outs := engine.NewTable("Created", 0) names := map[string][]string{} srv.runtime.containerGraph.Walk("/", func(p string, e *graphdb.Entity) error { @@ 
-776,7 +978,7 @@ func (srv *Server) Containers(all, size bool, n int, since, before string) []API }, -1) for _, container := range srv.runtime.List() { - if !container.State.IsRunning() && !all && n == -1 && since == "" && before == "" { + if !container.State.IsRunning() && !all && n <= 0 && since == "" && before == "" { continue } if before != "" && !foundBefore { @@ -785,56 +987,57 @@ func (srv *Server) Containers(all, size bool, n int, since, before string) []API } continue } - if displayed == n { + if n > 0 && displayed == n { break } if container.ID == since || utils.TruncateID(container.ID) == since { break } displayed++ - c := createAPIContainer(names[container.ID], container, size, srv.runtime) - out = append(out, c) + out := &engine.Env{} + out.Set("Id", container.ID) + out.SetList("Names", names[container.ID]) + out.Set("Image", srv.runtime.repositories.ImageName(container.Image)) + out.Set("Command", fmt.Sprintf("%s %s", container.Path, strings.Join(container.Args, " "))) + out.SetInt64("Created", container.Created.Unix()) + out.Set("Status", container.State.String()) + str, err := container.NetworkSettings.PortMappingAPI().ToListString() + if err != nil { + return job.Error(err) + } + out.Set("Ports", str) + if size { + sizeRw, sizeRootFs := container.GetSize() + out.SetInt64("SizeRw", sizeRw) + out.SetInt64("SizeRootFs", sizeRootFs) + } + outs.Add(out) } - return out + outs.ReverseSort() + if _, err := outs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK } -func createAPIContainer(names []string, container *Container, size bool, runtime *Runtime) APIContainers { - c := APIContainers{ - ID: container.ID, - } - c.Names = names - c.Image = runtime.repositories.ImageName(container.Image) - c.Command = fmt.Sprintf("%s %s", container.Path, strings.Join(container.Args, " ")) - c.Created = container.Created.Unix() - c.Status = container.State.String() - c.Ports = container.NetworkSettings.PortMappingAPI() - if size { 
- c.SizeRw, c.SizeRootFs = container.GetSize() - } - return c -} func (srv *Server) ContainerCommit(job *engine.Job) engine.Status { if len(job.Args) != 1 { - job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name) - return engine.StatusErr + return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name) } name := job.Args[0] container := srv.runtime.Get(name) if container == nil { - job.Errorf("No such container: %s", name) - return engine.StatusErr + return job.Errorf("No such container: %s", name) } var config Config if err := job.GetenvJson("config", &config); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } img, err := srv.runtime.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), &config) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } job.Printf("%s\n", img.ID) return engine.StatusOK @@ -842,16 +1045,14 @@ func (srv *Server) ContainerCommit(job *engine.Job) engine.Status { func (srv *Server) ImageTag(job *engine.Job) engine.Status { if len(job.Args) != 2 && len(job.Args) != 3 { - job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name) - return engine.StatusErr + return job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name) } var tag string if len(job.Args) == 3 { tag = job.Args[2] } if err := srv.runtime.repositories.Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } return engine.StatusOK } @@ -1086,30 +1287,45 @@ func (srv *Server) poolRemove(kind, key string) error { return nil } -func (srv *Server) ImagePull(localName string, tag string, out io.Writer, sf *utils.StreamFormatter, authConfig *auth.AuthConfig, metaHeaders map[string][]string, parallel bool) error { - out = utils.NewWriteFlusher(out) +func (srv *Server) ImagePull(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 && n != 2 { + return 
job.Errorf("Usage: %s IMAGE [TAG]", job.Name) + } + var ( + localName = job.Args[0] + tag string + sf = utils.NewStreamFormatter(job.GetenvBool("json")) + authConfig = &auth.AuthConfig{} + metaHeaders map[string][]string + ) + if len(job.Args) > 1 { + tag = job.Args[1] + } + + job.GetenvJson("authConfig", authConfig) + job.GetenvJson("metaHeaders", metaHeaders) c, err := srv.poolAdd("pull", localName+":"+tag) if err != nil { if c != nil { // Another pull of the same repository is already taking place; just wait for it to finish - out.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName)) + job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName)) <-c - return nil + return engine.StatusOK } - return err + return job.Error(err) } defer srv.poolRemove("pull", localName+":"+tag) // Resolve the Repository name from fqn to endpoint + name endpoint, remoteName, err := registry.ResolveRepositoryName(localName) if err != nil { - return err + return job.Error(err) } r, err := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), endpoint) if err != nil { - return err + return job.Error(err) } if endpoint == auth.IndexServerAddress() { @@ -1117,130 +1333,125 @@ func (srv *Server) ImagePull(localName string, tag string, out io.Writer, sf *ut localName = remoteName } - if err = srv.pullRepository(r, out, localName, remoteName, tag, sf, parallel); err != nil { - return err + if err = srv.pullRepository(r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel")); err != nil { + return job.Error(err) } - return nil + return engine.StatusOK } // Retrieve the all the images to be uploaded in the correct order -// Note: we can't use a map as it is not ordered -func (srv *Server) getImageList(localRepo map[string]string) ([][]*registry.ImgData, error) { - imgList := map[string]*registry.ImgData{} - depGraph := utils.NewDependencyGraph() +func (srv 
*Server) getImageList(localRepo map[string]string) ([]string, map[string][]string, error) { + var ( + imageList []string + imagesSeen map[string]bool = make(map[string]bool) + tagsByImage map[string][]string = make(map[string][]string) + ) for tag, id := range localRepo { - img, err := srv.runtime.graph.Get(id) - if err != nil { - return nil, err - } - depGraph.NewNode(img.ID) - img.WalkHistory(func(current *Image) error { - imgList[current.ID] = ®istry.ImgData{ - ID: current.ID, - Tag: tag, - } - parent, err := current.GetParent() + var imageListForThisTag []string + + tagsByImage[id] = append(tagsByImage[id], tag) + + for img, err := srv.runtime.graph.Get(id); img != nil; img, err = img.GetParent() { if err != nil { - return err + return nil, nil, err } - if parent == nil { - return nil + + if imagesSeen[img.ID] { + // This image is already on the list, we can ignore it and all its parents + break } - depGraph.NewNode(parent.ID) - depGraph.AddDependency(current.ID, parent.ID) - return nil - }) - } - traversalMap, err := depGraph.GenerateTraversalMap() - if err != nil { - return nil, err - } - - utils.Debugf("Traversal map: %v", traversalMap) - result := [][]*registry.ImgData{} - for _, round := range traversalMap { - dataRound := []*registry.ImgData{} - for _, imgID := range round { - dataRound = append(dataRound, imgList[imgID]) + imagesSeen[img.ID] = true + imageListForThisTag = append(imageListForThisTag, img.ID) } - result = append(result, dataRound) - } - return result, nil -} -func flatten(slc [][]*registry.ImgData) []*registry.ImgData { - result := []*registry.ImgData{} - for _, x := range slc { - result = append(result, x...) 
+ // reverse the image list for this tag (so the "most"-parent image is first) + for i, j := 0, len(imageListForThisTag)-1; i < j; i, j = i+1, j-1 { + imageListForThisTag[i], imageListForThisTag[j] = imageListForThisTag[j], imageListForThisTag[i] + } + + // append to main image list + imageList = append(imageList, imageListForThisTag...) } - return result + + utils.Debugf("Image list: %v", imageList) + utils.Debugf("Tags by image: %v", tagsByImage) + + return imageList, tagsByImage, nil } func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName, remoteName string, localRepo map[string]string, sf *utils.StreamFormatter) error { out = utils.NewWriteFlusher(out) - imgList, err := srv.getImageList(localRepo) + utils.Debugf("Local repo: %s", localRepo) + imgList, tagsByImage, err := srv.getImageList(localRepo) if err != nil { return err } - flattenedImgList := flatten(imgList) + out.Write(sf.FormatStatus("", "Sending image list")) var repoData *registry.RepositoryData - repoData, err = r.PushImageJSONIndex(remoteName, flattenedImgList, false, nil) + var imageIndex []*registry.ImgData + + for _, imgId := range imgList { + if tags, exists := tagsByImage[imgId]; exists { + // If an image has tags you must add an entry in the image index + // for each tag + for _, tag := range tags { + imageIndex = append(imageIndex, ®istry.ImgData{ + ID: imgId, + Tag: tag, + }) + } + } else { + // If the image does not have a tag it still needs to be sent to the + // registry with an empty tag so that it is accociated with the repository + imageIndex = append(imageIndex, ®istry.ImgData{ + ID: imgId, + Tag: "", + }) + + } + } + + utils.Debugf("Preparing to push %s with the following images and tags\n", localRepo) + for _, data := range imageIndex { + utils.Debugf("Pushing ID: %s with Tag: %s\n", data.ID, data.Tag) + } + + // Register all the images in a repository with the registry + // If an image is not in this list it will not be associated with the repository + 
repoData, err = r.PushImageJSONIndex(remoteName, imageIndex, false, nil) if err != nil { return err } for _, ep := range repoData.Endpoints { out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", localName, len(localRepo))) - // This section can not be parallelized (each round depends on the previous one) - for i, round := range imgList { - // FIXME: This section can be parallelized - for _, elem := range round { - var pushTags func() error - pushTags = func() error { - if i < (len(imgList) - 1) { - // Only tag the top layer in the repository - return nil - } - out.Write(sf.FormatStatus("", "Pushing tags for rev [%s] on {%s}", utils.TruncateID(elem.ID), ep+"repositories/"+remoteName+"/tags/"+elem.Tag)) - if err := r.PushRegistryTag(remoteName, elem.ID, elem.Tag, ep, repoData.Tokens); err != nil { - return err - } - return nil - } - if _, exists := repoData.ImgList[elem.ID]; exists { - if err := pushTags(); err != nil { - return err - } - out.Write(sf.FormatProgress(utils.TruncateID(elem.ID), "Image already pushed, skipping", nil)) - continue - } else if r.LookupRemoteImage(elem.ID, ep, repoData.Tokens) { - if err := pushTags(); err != nil { - return err - } - out.Write(sf.FormatProgress(utils.TruncateID(elem.ID), "Image already pushed, skipping", nil)) - continue - } - checksum, err := srv.pushImage(r, out, remoteName, elem.ID, ep, repoData.Tokens, sf) - if err != nil { + for _, imgId := range imgList { + if r.LookupRemoteImage(imgId, ep, repoData.Tokens) { + out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", utils.TruncateID(imgId))) + } else { + if _, err := srv.pushImage(r, out, remoteName, imgId, ep, repoData.Tokens, sf); err != nil { // FIXME: Continue on error? 
return err } - elem.Checksum = checksum + } - if err := pushTags(); err != nil { + for _, tag := range tagsByImage[imgId] { + out.Write(sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", utils.TruncateID(imgId), ep+"repositories/"+remoteName+"/tags/"+tag)) + + if err := r.PushRegistryTag(remoteName, imgId, tag, ep, repoData.Tokens); err != nil { return err } } } } - if _, err := r.PushImageJSONIndex(remoteName, flattenedImgList, true, repoData.Endpoints); err != nil { + if _, err := r.PushImageJSONIndex(remoteName, imageIndex, true, repoData.Endpoints); err != nil { return err } @@ -1290,83 +1501,106 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID, } // FIXME: Allow to interrupt current push when new push of same image is done. -func (srv *Server) ImagePush(localName string, out io.Writer, sf *utils.StreamFormatter, authConfig *auth.AuthConfig, metaHeaders map[string][]string) error { +func (srv *Server) ImagePush(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 { + return job.Errorf("Usage: %s IMAGE", job.Name) + } + var ( + localName = job.Args[0] + sf = utils.NewStreamFormatter(job.GetenvBool("json")) + authConfig = &auth.AuthConfig{} + metaHeaders map[string][]string + ) + + job.GetenvJson("authConfig", authConfig) + job.GetenvJson("metaHeaders", metaHeaders) if _, err := srv.poolAdd("push", localName); err != nil { - return err + return job.Error(err) } defer srv.poolRemove("push", localName) // Resolve the Repository name from fqn to endpoint + name endpoint, remoteName, err := registry.ResolveRepositoryName(localName) if err != nil { - return err + return job.Error(err) } - out = utils.NewWriteFlusher(out) img, err := srv.runtime.graph.Get(localName) r, err2 := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), endpoint) if err2 != nil { - return err2 + return job.Error(err2) } if err != nil { reposLen := len(srv.runtime.repositories.Repositories[localName]) - 
out.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen)) + job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen)) // If it fails, try to get the repository if localRepo, exists := srv.runtime.repositories.Repositories[localName]; exists { - if err := srv.pushRepository(r, out, localName, remoteName, localRepo, sf); err != nil { - return err + if err := srv.pushRepository(r, job.Stdout, localName, remoteName, localRepo, sf); err != nil { + return job.Error(err) } - return nil + return engine.StatusOK } - return err + return job.Error(err) } var token []string - out.Write(sf.FormatStatus("", "The push refers to an image: [%s]", localName)) - if _, err := srv.pushImage(r, out, remoteName, img.ID, endpoint, token, sf); err != nil { - return err + job.Stdout.Write(sf.FormatStatus("", "The push refers to an image: [%s]", localName)) + if _, err := srv.pushImage(r, job.Stdout, remoteName, img.ID, endpoint, token, sf); err != nil { + return job.Error(err) } - return nil + return engine.StatusOK } -func (srv *Server) ImageImport(src, repo, tag string, in io.Reader, out io.Writer, sf *utils.StreamFormatter) error { - var archive io.Reader - var resp *http.Response +func (srv *Server) ImageImport(job *engine.Job) engine.Status { + if n := len(job.Args); n != 2 && n != 3 { + return job.Errorf("Usage: %s SRC REPO [TAG]", job.Name) + } + var ( + src = job.Args[0] + repo = job.Args[1] + tag string + sf = utils.NewStreamFormatter(job.GetenvBool("json")) + archive io.Reader + resp *http.Response + ) + if len(job.Args) > 2 { + tag = job.Args[2] + } if src == "-" { - archive = in + archive = job.Stdin } else { u, err := url.Parse(src) if err != nil { - return err + return job.Error(err) } if u.Scheme == "" { u.Scheme = "http" u.Host = src u.Path = "" } - out.Write(sf.FormatStatus("", "Downloading from %s", u)) + job.Stdout.Write(sf.FormatStatus("", "Downloading from %s", u)) // Download 
with curl (pretty progress bar) // If curl is not available, fallback to http.Get() resp, err = utils.Download(u.String()) if err != nil { - return err + return job.Error(err) } - archive = utils.ProgressReader(resp.Body, int(resp.ContentLength), out, sf, true, "", "Importing") + archive = utils.ProgressReader(resp.Body, int(resp.ContentLength), job.Stdout, sf, true, "", "Importing") } img, err := srv.runtime.graph.Create(archive, nil, "Imported from "+src, "", nil) if err != nil { - return err + return job.Error(err) } // Optionally register the image at REPO/TAG if repo != "" { if err := srv.runtime.repositories.Set(repo, tag, img.ID, true); err != nil { - return err + return job.Error(err) } } - out.Write(sf.FormatStatus("", img.ID)) - return nil + job.Stdout.Write(sf.FormatStatus("", img.ID)) + return engine.StatusOK } func (srv *Server) ContainerCreate(job *engine.Job) engine.Status { @@ -1374,36 +1608,42 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status { if len(job.Args) == 1 { name = job.Args[0] } else if len(job.Args) > 1 { - job.Printf("Usage: %s", job.Name) - return engine.StatusErr - } - var config Config - if err := job.ExportEnv(&config); err != nil { - job.Error(err) - return engine.StatusErr + return job.Errorf("Usage: %s", job.Name) } + config := ContainerConfigFromJob(job) if config.Memory != 0 && config.Memory < 524288 { - job.Errorf("Minimum memory limit allowed is 512k") - return engine.StatusErr + return job.Errorf("Minimum memory limit allowed is 512k") } - if config.Memory > 0 && !srv.runtime.capabilities.MemoryLimit { + if config.Memory > 0 && !srv.runtime.sysInfo.MemoryLimit { + job.Errorf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n") config.Memory = 0 } - if config.Memory > 0 && !srv.runtime.capabilities.SwapLimit { + if config.Memory > 0 && !srv.runtime.sysInfo.SwapLimit { + job.Errorf("WARNING: Your kernel does not support swap limit capabilities. 
Limitation discarded.\n") config.MemorySwap = -1 } - container, buildWarnings, err := srv.runtime.Create(&config, name) + resolvConf, err := utils.GetResolvConf() + if err != nil { + return job.Error(err) + } + if !config.NetworkDisabled && len(config.Dns) == 0 && len(srv.runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) { + job.Errorf("WARNING: Docker detected local DNS server on resolv.conf. Using default external servers: %v\n", defaultDns) + config.Dns = defaultDns + } + + container, buildWarnings, err := srv.runtime.Create(config, name) if err != nil { if srv.runtime.graph.IsNotExist(err) { _, tag := utils.ParseRepositoryTag(config.Image) if tag == "" { tag = DEFAULTTAG } - job.Errorf("No such image: %s (tag: %s)", config.Image, tag) - return engine.StatusErr + return job.Errorf("No such image: %s (tag: %s)", config.Image, tag) } - job.Error(err) - return engine.StatusErr + return job.Error(err) + } + if !container.Config.NetworkDisabled && srv.runtime.sysInfo.IPv4ForwardingDisabled { + job.Errorf("WARNING: IPv4 forwarding is disabled.\n") } srv.LogEvent("create", container.ID, srv.runtime.repositories.ImageName(container.Image)) // FIXME: this is necessary because runtime.Create might return a nil container @@ -1413,41 +1653,58 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status { job.Printf("%s\n", container.ID) } for _, warning := range buildWarnings { - job.Errorf("%s\n", warning) + return job.Errorf("%s\n", warning) } return engine.StatusOK } -func (srv *Server) ContainerRestart(name string, t int) error { +func (srv *Server) ContainerRestart(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s CONTAINER\n", job.Name) + } + var ( + name = job.Args[0] + t = 10 + ) + if job.EnvExists("t") { + t = job.GetenvInt("t") + } if container := srv.runtime.Get(name); container != nil { - if err := container.Restart(t); err != nil { - return fmt.Errorf("Cannot restart container %s: %s", name, err) + if 
err := container.Restart(int(t)); err != nil { + return job.Errorf("Cannot restart container %s: %s\n", name, err) } srv.LogEvent("restart", container.ID, srv.runtime.repositories.ImageName(container.Image)) } else { - return fmt.Errorf("No such container: %s", name) + return job.Errorf("No such container: %s\n", name) } - return nil + return engine.StatusOK } -func (srv *Server) ContainerDestroy(name string, removeVolume, removeLink bool) error { +func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name) + } + name := job.Args[0] + removeVolume := job.GetenvBool("removeVolume") + removeLink := job.GetenvBool("removeLink") + container := srv.runtime.Get(name) if removeLink { if container == nil { - return fmt.Errorf("No such link: %s", name) + return job.Errorf("No such link: %s", name) } name, err := getFullName(name) if err != nil { - return err + job.Error(err) } parent, n := path.Split(name) if parent == "/" { - return fmt.Errorf("Conflict, cannot remove the default name of the container") + return job.Errorf("Conflict, cannot remove the default name of the container") } pe := srv.runtime.containerGraph.Get(parent) if pe == nil { - return fmt.Errorf("Cannot get parent %s for name %s", parent, name) + return job.Errorf("Cannot get parent %s for name %s", parent, name) } parentContainer := srv.runtime.Get(pe.ID()) @@ -1460,47 +1717,62 @@ func (srv *Server) ContainerDestroy(name string, removeVolume, removeLink bool) } if err := srv.runtime.containerGraph.Delete(name); err != nil { - return err + return job.Error(err) } - return nil + return engine.StatusOK } if container != nil { if container.State.IsRunning() { - return fmt.Errorf("Impossible to remove a running container, please stop it first") - } - volumes := make(map[string]struct{}) - - binds := make(map[string]struct{}) - - for _, bind := range container.hostConfig.Binds { - splitBind := 
strings.Split(bind, ":") - source := splitBind[0] - binds[source] = struct{}{} - } - - // Store all the deleted containers volumes - for _, volumeId := range container.Volumes { - - // Skip the volumes mounted from external - if _, exists := binds[volumeId]; exists { - continue - } - - volumeId = strings.TrimSuffix(volumeId, "/layer") - volumeId = filepath.Base(volumeId) - volumes[volumeId] = struct{}{} + return job.Errorf("Impossible to remove a running container, please stop it first") } if err := srv.runtime.Destroy(container); err != nil { - return fmt.Errorf("Cannot destroy container %s: %s", name, err) + return job.Errorf("Cannot destroy container %s: %s", name, err) } srv.LogEvent("destroy", container.ID, srv.runtime.repositories.ImageName(container.Image)) if removeVolume { + var ( + volumes = make(map[string]struct{}) + binds = make(map[string]struct{}) + usedVolumes = make(map[string]*Container) + ) + + // the volume id is always the base of the path + getVolumeId := func(p string) string { + return filepath.Base(strings.TrimSuffix(p, "/layer")) + } + + // populate bind map so that they can be skipped and not removed + for _, bind := range container.hostConfig.Binds { + source := strings.Split(bind, ":")[0] + // TODO: refactor all volume stuff, all of it + // this is very important that we eval the link + // or comparing the keys to container.Volumes will not work + p, err := filepath.EvalSymlinks(source) + if err != nil { + return job.Error(err) + } + source = p + binds[source] = struct{}{} + } + + // Store all the deleted containers volumes + for _, volumeId := range container.Volumes { + // Skip the volumes mounted from external + // bind mounts here will will be evaluated for a symlink + if _, exists := binds[volumeId]; exists { + continue + } + + volumeId = getVolumeId(volumeId) + volumes[volumeId] = struct{}{} + } + // Retrieve all volumes from all remaining containers - usedVolumes := make(map[string]*Container) for _, container := range 
srv.runtime.List() { for _, containerVolumeId := range container.Volumes { + containerVolumeId = getVolumeId(containerVolumeId) usedVolumes[containerVolumeId] = container } } @@ -1512,19 +1784,19 @@ func (srv *Server) ContainerDestroy(name string, removeVolume, removeLink bool) continue } if err := srv.runtime.volumes.Delete(volumeId); err != nil { - return err + return job.Errorf("Error calling volumes.Delete(%q): %v", volumeId, err) } } } } else { - return fmt.Errorf("No such container: %s", name) + return job.Errorf("No such container: %s", name) } - return nil + return engine.StatusOK } var ErrImageReferenced = errors.New("Image referenced by a repository") -func (srv *Server) deleteImageAndChildren(id string, imgs *[]APIRmi, byParents map[string][]*Image) error { +func (srv *Server) deleteImageAndChildren(id string, imgs *engine.Table, byParents map[string][]*Image) error { // If the image is referenced by a repo, do not delete if len(srv.runtime.repositories.ByID()[id]) != 0 { return ErrImageReferenced @@ -1556,14 +1828,16 @@ func (srv *Server) deleteImageAndChildren(id string, imgs *[]APIRmi, byParents m if err != nil { return err } - *imgs = append(*imgs, APIRmi{Deleted: id}) + out := &engine.Env{} + out.Set("Deleted", id) + imgs.Add(out) srv.LogEvent("delete", id, "") return nil } return nil } -func (srv *Server) deleteImageParents(img *Image, imgs *[]APIRmi) error { +func (srv *Server) deleteImageParents(img *Image, imgs *engine.Table) error { if img.Parent != "" { parent, err := srv.runtime.graph.Get(img.Parent) if err != nil { @@ -1582,12 +1856,42 @@ func (srv *Server) deleteImageParents(img *Image, imgs *[]APIRmi) error { return nil } -func (srv *Server) deleteImage(img *Image, repoName, tag string) ([]APIRmi, error) { +func (srv *Server) DeleteImage(name string, autoPrune bool) (*engine.Table, error) { var ( - imgs = []APIRmi{} - tags = []string{} + repoName, tag string + img, err = srv.runtime.repositories.LookupImage(name) + imgs = 
engine.NewTable("", 0) + tags = []string{} ) + if err != nil { + return nil, fmt.Errorf("No such image: %s", name) + } + + // FIXME: What does autoPrune mean ? + if !autoPrune { + if err := srv.runtime.graph.Delete(img.ID); err != nil { + return nil, fmt.Errorf("Cannot delete image %s: %s", name, err) + } + return nil, nil + } + + if !strings.Contains(img.ID, name) { + repoName, tag = utils.ParseRepositoryTag(name) + } + + // If we have a repo and the image is not referenced anywhere else + // then just perform an untag and do not validate. + // + // i.e. only validate if we are performing an actual delete and not + // an untag op + if repoName != "" && len(srv.runtime.repositories.ByID()[img.ID]) == 1 { + // Prevent deletion if image is used by a container + if err := srv.canDeleteImage(img.ID); err != nil { + return nil, err + } + } + //If delete by id, see if the id belong only to one repository if repoName == "" { for _, repoAndTag := range srv.runtime.repositories.ByID()[img.ID] { @@ -1614,17 +1918,19 @@ func (srv *Server) deleteImage(img *Image, repoName, tag string) ([]APIRmi, erro return nil, err } if tagDeleted { - imgs = append(imgs, APIRmi{Untagged: img.ID}) + out := &engine.Env{} + out.Set("Untagged", img.ID) + imgs.Add(out) srv.LogEvent("untag", img.ID, "") } } if len(srv.runtime.repositories.ByID()[img.ID]) == 0 { - if err := srv.deleteImageAndChildren(img.ID, &imgs, nil); err != nil { + if err := srv.deleteImageAndChildren(img.ID, imgs, nil); err != nil { if err != ErrImageReferenced { return imgs, err } - } else if err := srv.deleteImageParents(img, &imgs); err != nil { + } else if err := srv.deleteImageParents(img, imgs); err != nil { if err != ErrImageReferenced { return imgs, err } @@ -1633,39 +1939,22 @@ func (srv *Server) deleteImage(img *Image, repoName, tag string) ([]APIRmi, erro return imgs, nil } -func (srv *Server) ImageDelete(name string, autoPrune bool) ([]APIRmi, error) { - var ( - repository, tag string - img, err = 
srv.runtime.repositories.LookupImage(name) - ) +func (srv *Server) ImageDelete(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 { + return job.Errorf("Usage: %s IMAGE", job.Name) + } + + imgs, err := srv.DeleteImage(job.Args[0], job.GetenvBool("autoPrune")) if err != nil { - return nil, fmt.Errorf("No such image: %s", name) + return job.Error(err) } - - // FIXME: What does autoPrune mean ? - if !autoPrune { - if err := srv.runtime.graph.Delete(img.ID); err != nil { - return nil, fmt.Errorf("Cannot delete image %s: %s", name, err) - } - return nil, nil + if len(imgs.Data) == 0 { + return job.Errorf("Conflict, %s wasn't deleted", job.Args[0]) } - - if !strings.Contains(img.ID, name) { - repository, tag = utils.ParseRepositoryTag(name) + if _, err := imgs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) } - - // If we have a repo and the image is not referenced anywhere else - // then just perform an untag and do not validate. - // - // i.e. only validate if we are performing an actual delete and not - // an untag op - if repository != "" && len(srv.runtime.repositories.ByID()[img.ID]) == 1 { - // Prevent deletion if image is used by a container - if err := srv.canDeleteImage(img.ID); err != nil { - return nil, err - } - } - return srv.deleteImage(img, repository, tag) + return engine.StatusOK } func (srv *Server) canDeleteImage(imgID string) error { @@ -1753,24 +2042,18 @@ func (srv *Server) RegisterLinks(container *Container, hostConfig *HostConfig) e func (srv *Server) ContainerStart(job *engine.Job) engine.Status { if len(job.Args) < 1 { - job.Errorf("Usage: %s container_id", job.Name) - return engine.StatusErr + return job.Errorf("Usage: %s container_id", job.Name) } name := job.Args[0] runtime := srv.runtime container := runtime.Get(name) if container == nil { - job.Errorf("No such container: %s", name) - return engine.StatusErr + return job.Errorf("No such container: %s", name) } // If no environment was set, then no hostconfig was 
passed. if len(job.Environ()) > 0 { - var hostConfig HostConfig - if err := job.ExportEnv(&hostConfig); err != nil { - job.Error(err) - return engine.StatusErr - } + hostConfig := ContainerHostConfigFromJob(job) // Validate the HostConfig binds. Make sure that: // 1) the source of a bind mount isn't / // The bind mount "/:/foo" isn't allowed. @@ -1782,28 +2065,27 @@ func (srv *Server) ContainerStart(job *engine.Job) engine.Status { // refuse to bind mount "/" to the container if source == "/" { - job.Errorf("Invalid bind mount '%s' : source can't be '/'", bind) - return engine.StatusErr + return job.Errorf("Invalid bind mount '%s' : source can't be '/'", bind) } // ensure the source exists on the host _, err := os.Stat(source) if err != nil && os.IsNotExist(err) { - job.Errorf("Invalid bind mount '%s' : source doesn't exist", bind) - return engine.StatusErr + err = os.MkdirAll(source, 0755) + if err != nil { + return job.Errorf("Could not create local directory '%s' for bind mount: %s!", source, err.Error()) + } } } // Register any links from the host config before starting the container - if err := srv.RegisterLinks(container, &hostConfig); err != nil { - job.Error(err) - return engine.StatusErr + if err := srv.RegisterLinks(container, hostConfig); err != nil { + return job.Error(err) } - container.hostConfig = &hostConfig + container.hostConfig = hostConfig container.ToDisk() } if err := container.Start(); err != nil { - job.Errorf("Cannot start container %s: %s", name, err) - return engine.StatusErr + return job.Errorf("Cannot start container %s: %s", name, err) } srv.LogEvent("start", container.ID, runtime.repositories.ImageName(container.Image)) @@ -1812,31 +2094,29 @@ func (srv *Server) ContainerStart(job *engine.Job) engine.Status { func (srv *Server) ContainerStop(job *engine.Job) engine.Status { if len(job.Args) != 1 { - job.Errorf("Usage: %s CONTAINER\n", job.Name) - return engine.StatusErr + return job.Errorf("Usage: %s CONTAINER\n", job.Name) } - name 
:= job.Args[0] - t := job.GetenvInt("t") - if t == -1 { - t = 10 + var ( + name = job.Args[0] + t = 10 + ) + if job.EnvExists("t") { + t = job.GetenvInt("t") } if container := srv.runtime.Get(name); container != nil { if err := container.Stop(int(t)); err != nil { - job.Errorf("Cannot stop container %s: %s\n", name, err) - return engine.StatusErr + return job.Errorf("Cannot stop container %s: %s\n", name, err) } srv.LogEvent("stop", container.ID, srv.runtime.repositories.ImageName(container.Image)) } else { - job.Errorf("No such container: %s\n", name) - return engine.StatusErr + return job.Errorf("No such container: %s\n", name) } return engine.StatusOK } func (srv *Server) ContainerWait(job *engine.Job) engine.Status { if len(job.Args) != 1 { - job.Errorf("Usage: %s", job.Name) - return engine.StatusErr + return job.Errorf("Usage: %s", job.Name) } name := job.Args[0] if container := srv.runtime.Get(name); container != nil { @@ -1844,41 +2124,48 @@ func (srv *Server) ContainerWait(job *engine.Job) engine.Status { job.Printf("%d\n", status) return engine.StatusOK } - job.Errorf("%s: no such container: %s", job.Name, name) - return engine.StatusErr + return job.Errorf("%s: no such container: %s", job.Name, name) } func (srv *Server) ContainerResize(job *engine.Job) engine.Status { if len(job.Args) != 3 { - job.Errorf("Not enough arguments. Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name) - return engine.StatusErr + return job.Errorf("Not enough arguments. 
Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name) } name := job.Args[0] height, err := strconv.Atoi(job.Args[1]) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } width, err := strconv.Atoi(job.Args[2]) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } if container := srv.runtime.Get(name); container != nil { if err := container.Resize(height, width); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } return engine.StatusOK } - job.Errorf("No such container: %s", name) - return engine.StatusErr + return job.Errorf("No such container: %s", name) } -func (srv *Server) ContainerAttach(name string, logs, stream, stdin, stdout, stderr bool, inStream io.ReadCloser, outStream, errStream io.Writer) error { +func (srv *Server) ContainerAttach(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s CONTAINER\n", job.Name) + } + + var ( + name = job.Args[0] + logs = job.GetenvBool("logs") + stream = job.GetenvBool("stream") + stdin = job.GetenvBool("stdin") + stdout = job.GetenvBool("stdout") + stderr = job.GetenvBool("stderr") + ) + container := srv.runtime.Get(name) if container == nil { - return fmt.Errorf("No such container: %s", name) + return job.Errorf("No such container: %s", name) } //logs @@ -1886,12 +2173,12 @@ func (srv *Server) ContainerAttach(name string, logs, stream, stdin, stdout, std cLog, err := container.ReadLog("json") if err != nil && os.IsNotExist(err) { // Legacy logs - utils.Errorf("Old logs format") + utils.Debugf("Old logs format") if stdout { cLog, err := container.ReadLog("stdout") if err != nil { utils.Errorf("Error reading logs (stdout): %s", err) - } else if _, err := io.Copy(outStream, cLog); err != nil { + } else if _, err := io.Copy(job.Stdout, cLog); err != nil { utils.Errorf("Error streaming logs (stdout): %s", err) } } @@ -1899,7 +2186,7 @@ func (srv *Server) ContainerAttach(name string, logs, stream, stdin, 
stdout, std cLog, err := container.ReadLog("stderr") if err != nil { utils.Errorf("Error reading logs (stderr): %s", err) - } else if _, err := io.Copy(errStream, cLog); err != nil { + } else if _, err := io.Copy(job.Stderr, cLog); err != nil { utils.Errorf("Error streaming logs (stderr): %s", err) } } @@ -1917,10 +2204,10 @@ func (srv *Server) ContainerAttach(name string, logs, stream, stdin, stdout, std break } if l.Stream == "stdout" && stdout { - fmt.Fprintf(outStream, "%s", l.Log) + fmt.Fprintf(job.Stdout, "%s", l.Log) } if l.Stream == "stderr" && stderr { - fmt.Fprintf(errStream, "%s", l.Log) + fmt.Fprintf(job.Stderr, "%s", l.Log) } } } @@ -1929,7 +2216,7 @@ func (srv *Server) ContainerAttach(name string, logs, stream, stdin, stdout, std //stream if stream { if container.State.IsGhost() { - return fmt.Errorf("Impossible to attach to a ghost container") + return job.Errorf("Impossible to attach to a ghost container") } var ( @@ -1943,16 +2230,16 @@ func (srv *Server) ContainerAttach(name string, logs, stream, stdin, stdout, std go func() { defer w.Close() defer utils.Debugf("Closing buffered stdin pipe") - io.Copy(w, inStream) + io.Copy(w, job.Stdin) }() cStdin = r - cStdinCloser = inStream + cStdinCloser = job.Stdin } if stdout { - cStdout = outStream + cStdout = job.Stdout } if stderr { - cStderr = errStream + cStderr = job.Stderr } <-container.Attach(cStdin, cStdinCloser, cStdout, cStderr) @@ -1963,7 +2250,7 @@ func (srv *Server) ContainerAttach(name string, logs, stream, stdin, stdout, std container.Wait() } } - return nil + return engine.StatusOK } func (srv *Server) ContainerInspect(name string) (*Container, error) { @@ -1980,25 +2267,77 @@ func (srv *Server) ImageInspect(name string) (*Image, error) { return nil, fmt.Errorf("No such image: %s", name) } -func (srv *Server) ContainerCopy(name string, resource string, out io.Writer) error { +func (srv *Server) JobInspect(job *engine.Job) engine.Status { + // TODO: deprecate KIND/conflict + if n := 
len(job.Args); n != 2 { + return job.Errorf("Usage: %s CONTAINER|IMAGE KIND", job.Name) + } + var ( + name = job.Args[0] + kind = job.Args[1] + object interface{} + conflict = job.GetenvBool("conflict") //should the job detect conflict between containers and images + image, errImage = srv.ImageInspect(name) + container, errContainer = srv.ContainerInspect(name) + ) + + if conflict && image != nil && container != nil { + return job.Errorf("Conflict between containers and images") + } + + switch kind { + case "image": + if errImage != nil { + return job.Error(errImage) + } + object = image + case "container": + if errContainer != nil { + return job.Error(errContainer) + } + object = &struct { + *Container + HostConfig *HostConfig + }{container, container.hostConfig} + default: + return job.Errorf("Unknown kind: %s", kind) + } + + b, err := json.Marshal(object) + if err != nil { + return job.Error(err) + } + job.Stdout.Write(b) + return engine.StatusOK +} + +func (srv *Server) ContainerCopy(job *engine.Job) engine.Status { + if len(job.Args) != 2 { + return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name) + } + + var ( + name = job.Args[0] + resource = job.Args[1] + ) + if container := srv.runtime.Get(name); container != nil { data, err := container.Copy(resource) if err != nil { - return err + return job.Error(err) } - if _, err := io.Copy(out, data); err != nil { - return err + if _, err := io.Copy(job.Stdout, data); err != nil { + return job.Error(err) } - return nil + return engine.StatusOK } - return fmt.Errorf("No such container: %s", name) - + return job.Errorf("No such container: %s", name) } func NewServer(eng *engine.Engine, config *DaemonConfig) (*Server, error) { - runtime, err := NewRuntime(config) + runtime, err := NewRuntime(config, eng) if err != nil { return nil, err } diff --git a/sorter.go b/sorter.go index c9a86b45c0..9b3e1a9486 100644 --- a/sorter.go +++ b/sorter.go @@ -2,39 +2,6 @@ package docker import "sort" -type imageSorter struct { - 
images []APIImages - by func(i1, i2 *APIImages) bool // Closure used in the Less method. -} - -// Len is part of sort.Interface. -func (s *imageSorter) Len() int { - return len(s.images) -} - -// Swap is part of sort.Interface. -func (s *imageSorter) Swap(i, j int) { - s.images[i], s.images[j] = s.images[j], s.images[i] -} - -// Less is part of sort.Interface. It is implemented by calling the "by" closure in the sorter. -func (s *imageSorter) Less(i, j int) bool { - return s.by(&s.images[i], &s.images[j]) -} - -// Sort []ApiImages by most recent creation date and tag name. -func sortImagesByCreationAndTag(images []APIImages) { - creationAndTag := func(i1, i2 *APIImages) bool { - return i1.Created > i2.Created - } - - sorter := &imageSorter{ - images: images, - by: creationAndTag} - - sort.Sort(sorter) -} - type portSorter struct { ports []Port by func(i, j Port) bool diff --git a/sysinit/sysinit.go b/sysinit/sysinit.go index f906b7d2dd..dcf0eddf56 100644 --- a/sysinit/sysinit.go +++ b/sysinit/sysinit.go @@ -4,163 +4,19 @@ import ( "encoding/json" "flag" "fmt" - "github.com/dotcloud/docker/pkg/netlink" - "github.com/dotcloud/docker/utils" - "github.com/syndtr/gocapability/capability" + "github.com/dotcloud/docker/execdriver" + _ "github.com/dotcloud/docker/execdriver/chroot" + _ "github.com/dotcloud/docker/execdriver/lxc" "io/ioutil" "log" - "net" "os" - "os/exec" - "strconv" "strings" - "syscall" ) -type DockerInitArgs struct { - user string - gateway string - ip string - workDir string - privileged bool - env []string - args []string - mtu int -} - -func setupHostname(args *DockerInitArgs) error { - hostname := getEnv(args, "HOSTNAME") - if hostname == "" { - return nil - } - return setHostname(hostname) -} - -// Setup networking -func setupNetworking(args *DockerInitArgs) error { - if args.ip != "" { - // eth0 - iface, err := net.InterfaceByName("eth0") - if err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - ip, ipNet, err := 
net.ParseCIDR(args.ip) - if err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - if err := netlink.NetworkLinkAddIp(iface, ip, ipNet); err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - if err := netlink.NetworkSetMTU(iface, args.mtu); err != nil { - return fmt.Errorf("Unable to set MTU: %v", err) - } - if err := netlink.NetworkLinkUp(iface); err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - - // loopback - iface, err = net.InterfaceByName("lo") - if err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - if err := netlink.NetworkLinkUp(iface); err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - } - if args.gateway != "" { - gw := net.ParseIP(args.gateway) - if gw == nil { - return fmt.Errorf("Unable to set up networking, %s is not a valid gateway IP", args.gateway) - } - - if err := netlink.AddDefaultGw(gw); err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - } - - return nil -} - -// Setup working directory -func setupWorkingDirectory(args *DockerInitArgs) error { - if args.workDir == "" { - return nil - } - if err := syscall.Chdir(args.workDir); err != nil { - return fmt.Errorf("Unable to change dir to %v: %v", args.workDir, err) - } - return nil -} - -// Takes care of dropping privileges to the desired user -func changeUser(args *DockerInitArgs) error { - if args.user == "" { - return nil - } - userent, err := utils.UserLookup(args.user) - if err != nil { - return fmt.Errorf("Unable to find user %v: %v", args.user, err) - } - - uid, err := strconv.Atoi(userent.Uid) - if err != nil { - return fmt.Errorf("Invalid uid: %v", userent.Uid) - } - gid, err := strconv.Atoi(userent.Gid) - if err != nil { - return fmt.Errorf("Invalid gid: %v", userent.Gid) - } - - if err := syscall.Setgid(gid); err != nil { - return fmt.Errorf("setgid failed: %v", err) - } - if err := syscall.Setuid(uid); err != nil { - return 
fmt.Errorf("setuid failed: %v", err) - } - - return nil -} - -func setupCapabilities(args *DockerInitArgs) error { - - if args.privileged { - return nil - } - - drop := []capability.Cap{ - capability.CAP_SETPCAP, - capability.CAP_SYS_MODULE, - capability.CAP_SYS_RAWIO, - capability.CAP_SYS_PACCT, - capability.CAP_SYS_ADMIN, - capability.CAP_SYS_NICE, - capability.CAP_SYS_RESOURCE, - capability.CAP_SYS_TIME, - capability.CAP_SYS_TTY_CONFIG, - capability.CAP_MKNOD, - capability.CAP_AUDIT_WRITE, - capability.CAP_AUDIT_CONTROL, - capability.CAP_MAC_OVERRIDE, - capability.CAP_MAC_ADMIN, - } - - c, err := capability.NewPid(os.Getpid()) - if err != nil { - return err - } - - c.Unset(capability.CAPS|capability.BOUNDS, drop...) - - if err := c.Apply(capability.CAPS | capability.BOUNDS); err != nil { - return err - } - return nil -} - // Clear environment pollution introduced by lxc-start -func setupEnv(args *DockerInitArgs) { +func setupEnv(args *execdriver.InitArgs) { os.Clearenv() - for _, kv := range args.env { + for _, kv := range args.Env { parts := strings.SplitN(kv, "=", 2) if len(parts) == 1 { parts = append(parts, "") @@ -169,50 +25,19 @@ func setupEnv(args *DockerInitArgs) { } } -func getEnv(args *DockerInitArgs, key string) string { - for _, kv := range args.env { - parts := strings.SplitN(kv, "=", 2) - if parts[0] == key && len(parts) == 2 { - return parts[1] - } - } - return "" -} - -func executeProgram(args *DockerInitArgs) error { +func executeProgram(args *execdriver.InitArgs) error { setupEnv(args) - - if err := setupHostname(args); err != nil { - return err - } - - if err := setupNetworking(args); err != nil { - return err - } - - if err := setupCapabilities(args); err != nil { - return err - } - - if err := setupWorkingDirectory(args); err != nil { - return err - } - - if err := changeUser(args); err != nil { - return err - } - - path, err := exec.LookPath(args.args[0]) + dockerInitFct, err := execdriver.GetInitFunc(args.Driver) if err != nil { - 
log.Printf("Unable to locate %v", args.args[0]) - os.Exit(127) - } - - if err := syscall.Exec(path, args.args, os.Environ()); err != nil { panic(err) } + return dockerInitFct(args) + + if args.Driver == "lxc" { + // Will never reach + } else if args.Driver == "chroot" { + } - // Will never reach here return nil } @@ -225,13 +50,16 @@ func SysInit() { os.Exit(1) } - // Get cmdline arguments - user := flag.String("u", "", "username or uid") - gateway := flag.String("g", "", "gateway address") - ip := flag.String("i", "", "ip address") - workDir := flag.String("w", "", "workdir") - privileged := flag.Bool("privileged", false, "privileged mode") - mtu := flag.Int("mtu", 1500, "interface mtu") + var ( + // Get cmdline arguments + user = flag.String("u", "", "username or uid") + gateway = flag.String("g", "", "gateway address") + ip = flag.String("i", "", "ip address") + workDir = flag.String("w", "", "workdir") + privileged = flag.Bool("privileged", false, "privileged mode") + mtu = flag.Int("mtu", 1500, "interface mtu") + driver = flag.String("driver", "", "exec driver") + ) flag.Parse() // Get env @@ -247,15 +75,16 @@ func SysInit() { // Propagate the plugin-specific container env variable env = append(env, "container="+os.Getenv("container")) - args := &DockerInitArgs{ - user: *user, - gateway: *gateway, - ip: *ip, - workDir: *workDir, - privileged: *privileged, - env: env, - args: flag.Args(), - mtu: *mtu, + args := &execdriver.InitArgs{ + User: *user, + Gateway: *gateway, + Ip: *ip, + WorkDir: *workDir, + Privileged: *privileged, + Env: env, + Args: flag.Args(), + Mtu: *mtu, + Driver: *driver, } if err := executeProgram(args); err != nil { diff --git a/utils.go b/utils.go index 3eb1eac045..e3ba08d51c 100644 --- a/utils.go +++ b/utils.go @@ -5,9 +5,10 @@ import ( "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/pkg/namesgenerator" "github.com/dotcloud/docker/utils" - "io/ioutil" + "io" "strconv" "strings" + "sync/atomic" ) type Change struct { @@ 
-328,20 +329,6 @@ func parseLink(rawLink string) (map[string]string, error) { return utils.PartParser("name:alias", rawLink) } -func RootIsShared() bool { - if data, err := ioutil.ReadFile("/proc/self/mountinfo"); err == nil { - for _, line := range strings.Split(string(data), "\n") { - cols := strings.Split(line, " ") - if len(cols) >= 6 && cols[4] == "/" { - return strings.HasPrefix(cols[6], "shared") - } - } - } - - // No idea, probably safe to assume so - return true -} - type checker struct { runtime *Runtime } @@ -354,3 +341,28 @@ func (c *checker) Exists(name string) bool { func generateRandomName(runtime *Runtime) (string, error) { return namesgenerator.GenerateRandomName(&checker{runtime}) } + +// Read an io.Reader and call a function when it returns EOF +func EofReader(r io.Reader, callback func()) *eofReader { + return &eofReader{ + Reader: r, + callback: callback, + } +} + +type eofReader struct { + io.Reader + gotEOF int32 + callback func() +} + +func (r *eofReader) Read(p []byte) (n int, err error) { + n, err = r.Reader.Read(p) + if err == io.EOF { + // Use atomics to make the gotEOF check threadsafe + if atomic.CompareAndSwapInt32(&r.gotEOF, 0, 1) { + r.callback() + } + } + return +} diff --git a/utils/fs.go b/utils/fs.go index e710926210..92864e5e16 100644 --- a/utils/fs.go +++ b/utils/fs.go @@ -24,10 +24,12 @@ func TreeSize(dir string) (size int64, err error) { // Check inode to handle hard links correctly inode := fileInfo.Sys().(*syscall.Stat_t).Ino - if _, exists := data[inode]; exists { + // inode is not a uint64 on all platforms. Cast it to avoid issues. + if _, exists := data[uint64(inode)]; exists { return nil } - data[inode] = false + // inode is not a uint64 on all platforms. Cast it to avoid issues. 
+ data[uint64(inode)] = false size += s diff --git a/utils/streamformatter.go b/utils/streamformatter.go index 0c41d0bddd..9345c3cb16 100644 --- a/utils/streamformatter.go +++ b/utils/streamformatter.go @@ -82,3 +82,7 @@ func (sf *StreamFormatter) FormatProgress(id, action string, progress *JSONProgr func (sf *StreamFormatter) Used() bool { return sf.used } + +func (sf *StreamFormatter) Json() bool { + return sf.json +} diff --git a/utils/uname_linux.go b/utils/uname_linux.go index 063f932c99..2f4afb41bd 100644 --- a/utils/uname_linux.go +++ b/utils/uname_linux.go @@ -1,3 +1,5 @@ +// +build amd64 + package utils import ( diff --git a/utils/uname_darwin.go b/utils/uname_unsupported.go similarity index 52% rename from utils/uname_darwin.go rename to utils/uname_unsupported.go index a875e8c600..57b82ecab8 100644 --- a/utils/uname_darwin.go +++ b/utils/uname_unsupported.go @@ -1,3 +1,5 @@ +// +build !linux !amd64 + package utils import ( @@ -9,5 +11,5 @@ type Utsname struct { } func uname() (*Utsname, error) { - return nil, errors.New("Kernel version detection is not available on darwin") + return nil, errors.New("Kernel version detection is available only on linux") } diff --git a/utils/utils.go b/utils/utils.go index e046dfa2a5..542ab49702 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -6,6 +6,7 @@ import ( "crypto/sha256" "encoding/hex" "encoding/json" + "errors" "fmt" "index/suffixarray" "io" @@ -418,6 +419,7 @@ func GetTotalUsedFds() int { // TruncIndex allows the retrieval of string identifiers by any of their unique prefixes. // This is used to retrieve image and container IDs by more convenient shorthand prefixes. 
type TruncIndex struct { + sync.RWMutex index *suffixarray.Index ids map[string]bool bytes []byte @@ -432,6 +434,8 @@ func NewTruncIndex() *TruncIndex { } func (idx *TruncIndex) Add(id string) error { + idx.Lock() + defer idx.Unlock() if strings.Contains(id, " ") { return fmt.Errorf("Illegal character: ' '") } @@ -445,6 +449,8 @@ func (idx *TruncIndex) Add(id string) error { } func (idx *TruncIndex) Delete(id string) error { + idx.Lock() + defer idx.Unlock() if _, exists := idx.ids[id]; !exists { return fmt.Errorf("No such id: %s", id) } @@ -470,6 +476,8 @@ func (idx *TruncIndex) lookup(s string) (int, int, error) { } func (idx *TruncIndex) Get(s string) (string, error) { + idx.RLock() + defer idx.RUnlock() before, after, err := idx.lookup(s) //log.Printf("Get(%s) bytes=|%s| before=|%d| after=|%d|\n", s, idx.bytes, before, after) if err != nil { @@ -549,15 +557,11 @@ type KernelVersionInfo struct { } func (k *KernelVersionInfo) String() string { - flavor := "" - if len(k.Flavor) > 0 { - flavor = fmt.Sprintf("-%s", k.Flavor) - } - return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, flavor) + return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor) } // Compare two KernelVersionInfo struct. 
-// Returns -1 if a < b, = if a == b, 1 it a > b +// Returns -1 if a < b, 0 if a == b, 1 if a > b func CompareKernelVersion(a, b *KernelVersionInfo) int { if a.Kernel < b.Kernel { return -1 @@ -606,41 +610,15 @@ func GetKernelVersion() (*KernelVersionInfo, error) { func ParseRelease(release string) (*KernelVersionInfo, error) { var ( - flavor string - kernel, major, minor int - err error + kernel, major, minor, parsed int + flavor string ) - tmp := strings.SplitN(release, "-", 2) - tmp2 := strings.Split(tmp[0], ".") - - if len(tmp2) > 0 { - kernel, err = strconv.Atoi(tmp2[0]) - if err != nil { - return nil, err - } - } - - if len(tmp2) > 1 { - major, err = strconv.Atoi(tmp2[1]) - if err != nil { - return nil, err - } - } - - if len(tmp2) > 2 { - // Removes "+" because git kernels might set it - minorUnparsed := strings.Trim(tmp2[2], "+") - minor, err = strconv.Atoi(minorUnparsed) - if err != nil { - return nil, err - } - } - - if len(tmp) == 2 { - flavor = tmp[1] - } else { - flavor = "" + // Ignore error from Sscanf to allow an empty flavor. Instead, just + // make sure we got all the version numbers.
+ parsed, _ = fmt.Sscanf(release, "%d.%d.%d%s", &kernel, &major, &minor, &flavor) + if parsed < 3 { + return nil, errors.New("Can't parse kernel version " + release) } return &KernelVersionInfo{ @@ -789,6 +767,8 @@ func ParseHost(defaultHost string, defaultPort int, defaultUnix, addr string) (s case strings.HasPrefix(addr, "tcp://"): proto = "tcp" addr = strings.TrimPrefix(addr, "tcp://") + case strings.HasPrefix(addr, "fd://"): + return addr, nil case addr == "": proto = "unix" addr = defaultUnix @@ -887,122 +867,6 @@ func UserLookup(uid string) (*User, error) { return nil, fmt.Errorf("User not found in /etc/passwd") } -type DependencyGraph struct { - nodes map[string]*DependencyNode -} - -type DependencyNode struct { - id string - deps map[*DependencyNode]bool -} - -func NewDependencyGraph() DependencyGraph { - return DependencyGraph{ - nodes: map[string]*DependencyNode{}, - } -} - -func (graph *DependencyGraph) addNode(node *DependencyNode) string { - if graph.nodes[node.id] == nil { - graph.nodes[node.id] = node - } - return node.id -} - -func (graph *DependencyGraph) NewNode(id string) string { - if graph.nodes[id] != nil { - return id - } - nd := &DependencyNode{ - id: id, - deps: map[*DependencyNode]bool{}, - } - graph.addNode(nd) - return id -} - -func (graph *DependencyGraph) AddDependency(node, to string) error { - if graph.nodes[node] == nil { - return fmt.Errorf("Node %s does not belong to this graph", node) - } - - if graph.nodes[to] == nil { - return fmt.Errorf("Node %s does not belong to this graph", to) - } - - if node == to { - return fmt.Errorf("Dependency loops are forbidden!") - } - - graph.nodes[node].addDependency(graph.nodes[to]) - return nil -} - -func (node *DependencyNode) addDependency(to *DependencyNode) bool { - node.deps[to] = true - return node.deps[to] -} - -func (node *DependencyNode) Degree() int { - return len(node.deps) -} - -// The magic happens here :: -func (graph *DependencyGraph) GenerateTraversalMap() ([][]string, error) { 
- Debugf("Generating traversal map. Nodes: %d", len(graph.nodes)) - result := [][]string{} - processed := map[*DependencyNode]bool{} - // As long as we haven't processed all nodes... - for len(processed) < len(graph.nodes) { - // Use a temporary buffer for processed nodes, otherwise - // nodes that depend on each other could end up in the same round. - tmpProcessed := []*DependencyNode{} - for _, node := range graph.nodes { - // If the node has more dependencies than what we have cleared, - // it won't be valid for this round. - if node.Degree() > len(processed) { - continue - } - // If it's already processed, get to the next one - if processed[node] { - continue - } - // It's not been processed yet and has 0 deps. Add it! - // (this is a shortcut for what we're doing below) - if node.Degree() == 0 { - tmpProcessed = append(tmpProcessed, node) - continue - } - // If at least one dep hasn't been processed yet, we can't - // add it. - ok := true - for dep := range node.deps { - if !processed[dep] { - ok = false - break - } - } - // All deps have already been processed. Add it! - if ok { - tmpProcessed = append(tmpProcessed, node) - } - } - Debugf("Round %d: found %d available nodes", len(result), len(tmpProcessed)) - // If no progress has been made this round, - // that means we have circular dependencies. - if len(tmpProcessed) == 0 { - return nil, fmt.Errorf("Could not find a solution to this dependency graph") - } - round := []string{} - for _, nd := range tmpProcessed { - round = append(round, nd.id) - processed[nd] = true - } - result = append(result, round) - } - return result, nil -} - // An StatusError reports an unsuccessful exit by a command. 
type StatusError struct { Status string @@ -1135,3 +999,19 @@ func CopyFile(src, dst string) (int64, error) { defer df.Close() return io.Copy(df, sf) } + +type readCloserWrapper struct { + io.Reader + closer func() error +} + +func (r *readCloserWrapper) Close() error { + return r.closer() +} + +func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { + return &readCloserWrapper{ + Reader: r, + closer: closer, + } +} diff --git a/utils/utils_test.go b/utils/utils_test.go index 1f23755d11..b0a5acb170 100644 --- a/utils/utils_test.go +++ b/utils/utils_test.go @@ -237,16 +237,16 @@ func TestCompareKernelVersion(t *testing.T) { &KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0}, 1) assertKernelVersion(t, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "0"}, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "16"}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0) assertKernelVersion(t, &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 5}, &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, 1) assertKernelVersion(t, - &KernelVersionInfo{Kernel: 3, Major: 0, Minor: 20, Flavor: "25"}, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "0"}, + &KernelVersionInfo{Kernel: 3, Major: 0, Minor: 20}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, -1) } @@ -407,69 +407,17 @@ func assertParseRelease(t *testing.T, release string, b *KernelVersionInfo, resu if r := CompareKernelVersion(a, b); r != result { t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result) } + if a.Flavor != b.Flavor { + t.Fatalf("Unexpected parsed kernel flavor. 
Found %s, expected %s", a.Flavor, b.Flavor) + } } func TestParseRelease(t *testing.T) { assertParseRelease(t, "3.8.0", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0) - assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54}, 0) - assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: "1"}, 0) - assertParseRelease(t, "3.8.0-19-generic", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "19-generic"}, 0) -} - -func TestDependencyGraphCircular(t *testing.T) { - g1 := NewDependencyGraph() - a := g1.NewNode("a") - b := g1.NewNode("b") - g1.AddDependency(a, b) - g1.AddDependency(b, a) - res, err := g1.GenerateTraversalMap() - if res != nil { - t.Fatalf("Expected nil result") - } - if err == nil { - t.Fatalf("Expected error (circular graph can not be resolved)") - } -} - -func TestDependencyGraph(t *testing.T) { - g1 := NewDependencyGraph() - a := g1.NewNode("a") - b := g1.NewNode("b") - c := g1.NewNode("c") - d := g1.NewNode("d") - g1.AddDependency(b, a) - g1.AddDependency(c, a) - g1.AddDependency(d, c) - g1.AddDependency(d, b) - res, err := g1.GenerateTraversalMap() - - if err != nil { - t.Fatalf("%s", err) - } - - if res == nil { - t.Fatalf("Unexpected nil result") - } - - if len(res) != 3 { - t.Fatalf("Expected map of length 3, found %d instead", len(res)) - } - - if len(res[0]) != 1 || res[0][0] != "a" { - t.Fatalf("Expected [a], found %v instead", res[0]) - } - - if len(res[1]) != 2 { - t.Fatalf("Expected 2 nodes for step 2, found %d", len(res[1])) - } - - if (res[1][0] != "b" && res[1][1] != "b") || (res[1][0] != "c" && res[1][1] != "c") { - t.Fatalf("Expected [b, c], found %v instead", res[1]) - } - - if len(res[2]) != 1 || res[2][0] != "d" { - t.Fatalf("Expected [d], found %v instead", res[2]) - } + assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) + assertParseRelease(t, "3.4.54.longterm-1", 
&KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) + assertParseRelease(t, "3.8.0-19-generic", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "-19-generic"}, 0) + assertParseRelease(t, "3.12.8tag", &KernelVersionInfo{Kernel: 3, Major: 12, Minor: 8, Flavor: "tag"}, 0) } func TestParsePortMapping(t *testing.T) { diff --git a/vendor/MAINTAINERS b/vendor/MAINTAINERS new file mode 120000 index 0000000000..72e53509b2 --- /dev/null +++ b/vendor/MAINTAINERS @@ -0,0 +1 @@ +../hack/MAINTAINERS \ No newline at end of file