diff --git a/.gitignore b/.gitignore index 2a86e41caf..68d2da95bc 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,7 @@ .vagrant* bin docker/docker +*.exe .*.swp a.out *.orig diff --git a/.mailmap b/.mailmap index 47860de4c3..826fae0ead 100644 --- a/.mailmap +++ b/.mailmap @@ -1,8 +1,10 @@ -# Generate AUTHORS: hack/generate-authors.sh +# Generate AUTHORS: project/generate-authors.sh # Tip for finding duplicates (besides scanning the output of AUTHORS for name # duplicates that aren't also email duplicates): scan the output of: # git log --format='%aE - %aN' | sort -uf +# +# For explanation on this file format: man git-shortlog @@ -29,6 +31,7 @@ Andy Smith + Walter Stanish @@ -54,6 +57,7 @@ Jean-Baptiste Dalido + @@ -63,10 +67,13 @@ Jean-Baptiste Dalido + Sven Dowideit Sven Dowideit Sven Dowideit Sven Dowideit <¨SvenDowideit@home.org.au¨> +Sven Dowideit +Sven Dowideit unclejack Alexandr Morozov @@ -97,3 +104,24 @@ Matthew Heon Francisco Carriedo + + + + +Brian Goff + + + +Hollie Teal + + + +Jessica Frazelle Jessie Frazelle + + + + + +Thomas LEVEIL Thomas LÉVEIL + + diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 174afae88a..0000000000 --- a/.travis.yml +++ /dev/null @@ -1,39 +0,0 @@ -# Note: right now we don't use go-specific features of travis. -# Later we might automate "go test" etc. (or do it inside a docker container...?) - -language: go - -go: -# This should match the version in the Dockerfile. - - 1.3.1 -# Test against older versions too, just for a little extra retrocompat. - - 1.2 - -# Let us have pretty experimental Docker-based Travis workers. -# (These spin up much faster than the VM-based ones.) -sudo: false - -# Disable the normal go build. 
-install: - - export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs exclude_graphdriver_devicemapper' # btrfs and devicemapper fail to compile thanks to a couple missing headers (which we can't install thanks to "sudo: false") - - export AUTO_GOPATH=1 -# some of Docker's unit tests don't work inside Travis (yet!), so we purge those test files for now - - rm -f daemon/graphdriver/btrfs/*_test.go # fails to compile (missing header) - - rm -f daemon/graphdriver/devmapper/*_test.go # fails to compile (missing header) - - rm -f daemon/execdriver/lxc/*_test.go # fails to run (missing "lxc-start") - - rm -f daemon/graphdriver/aufs/*_test.go # fails to run ("backing file system is unsupported for this graph driver") - - rm -f daemon/graphdriver/vfs/*_test.go # fails to run (not root, which these tests assume "/var/tmp/... no owned by uid 0") - - rm -f daemon/networkdriver/bridge/*_test.go # fails to run ("Failed to initialize network driver") - - rm -f graph/*_test.go # fails to run ("mkdir /tmp/docker-test.../vfs/dir/foo/etc/postgres: permission denied") - - rm -f pkg/mount/*_test.go # fails to run ("permission denied") - -before_script: - - env | sort - -script: - - hack/make.sh validate-dco - - hack/make.sh validate-gofmt - - DOCKER_CLIENTONLY=1 ./hack/make.sh dynbinary - - ./hack/make.sh dynbinary dyntest-unit - -# vim:set sw=2 ts=2: diff --git a/AUTHORS b/AUTHORS index 43904e9e34..3d3fe3c7bc 100644 --- a/AUTHORS +++ b/AUTHORS @@ -1,69 +1,87 @@ # This file lists all individuals having contributed content to the repository. -# For how it is generated, see `hack/generate-authors.sh`. +# For how it is generated, see `project/generate-authors.sh`. 
Aanand Prasad Aaron Feng Aaron Huslage Abel Muiño +Abhinav Ajgaonkar +Abin Shahab Adam Miller Adam Singer Aditya Adrian Mouat Adrien Folie +Ahmet Alp Balkan AJ Bowen -Al Tobey alambike +Alan Thompson +Albert Callarisa Albert Zhang Aleksa Sarai -Alex Gaynor -Alex Warhawk Alexander Larsson Alexander Shopov -Alexandr Morozov +Alexandr Morozov Alexey Kotlyarov Alexey Shamrin +Alex Gaynor Alexis THOMAS +Alex Warhawk almoehi +Al Tobey +Álvaro Lázaro amangoel +Amit Bakshi AnandkumarPatel -Andre Dublin <81dublin@gmail.com> +Anand Patil Andrea Luzzardi -Andrea Turli +Andreas Köhler Andreas Savvides Andreas Tiefenthaler +Andrea Turli +Andre Dublin <81dublin@gmail.com> Andrew Duckworth Andrew France Andrew Macgregor Andrew Munsell +Andrews Medina Andrew Weiss Andrew Williams -Andrews Medina +Andrey Petrov +Andrey Stolbovsky Andy Chambers andy diller Andy Goldstein Andy Kipp Andy Rothfusz Andy Smith +Andy Wilson Anthony Bishopric Anton Löfgren Anton Nikitin Antony Messerli apocas -Arnaud Porterie +ArikaChen +Arnaud Porterie +Arthur Gautier Asbjørn Enge +averagehuman +Avi Miller Barnaby Gray Barry Allard Bartłomiej Piotrowski bdevloed Ben Firshman +Benjamin Atkin +Benoit Chesneau Ben Sargent Ben Toews Ben Wiklund -Benjamin Atkin -Benoit Chesneau Bernerd Schaefer +Bert Goethals Bhiraj Butala bin liu +Blake Geno Bouke Haarsma Boyd Hemphill Brandon Liu @@ -80,10 +98,13 @@ Brian Shumate Brice Jaglin Briehan Lombaard Bruno Bigras +Bruno Binet Bruno Renié Bryan Bess Bryan Matsuo Bryan Murphy +Burke Libbey +Byung Kang Caleb Spare Calen Pennington Cameron Boehmer @@ -95,56 +116,68 @@ Charlie Lewis Chewey Chia-liang Kao Chris Alfonso +Chris Armstrong +chrismckinnel Chris Snow Chris St. Pierre -chrismckinnel Christian Berendt ChristoperBiscardi -Christophe Troestler Christopher Currie Christopher Rigor +Christophe Troestler Ciro S. 
Costa Clayton Coleman Colin Dunklau Colin Rice Colin Walters Cory Forsyth -cpuguy83 cressie176 Cruceru Calin-Cristian Daan van Berkel +Daehyeok.Mun Dafydd Crosby Dan Buch +Dan Cotora +Dan Griffin Dan Hirsch -Dan Keder -Dan McPherson -Dan Stine -Dan Walsh -Dan Williams +Daniel, Dao Quang Minh Daniel Exner +Daniel Farrell Daniel Garcia Daniel Gasienica +Daniel Menet Daniel Mizyrycki Daniel Norberg Daniel Nordberg Daniel Robinson Daniel Von Fange Daniel YC Lin -Daniel, Dao Quang Minh +Dan Keder +Dan McPherson Danny Berger Danny Yates +Dan Stine +Dan Walsh +Dan Williams Darren Coxall Darren Shepherd David Anderson David Calavera David Corking +Davide Ceretti David Gageot +David Gebler David Mcanulty +David Pelaez David Röthlisberger David Sissitka +Dawn Chen +decadent Deni Bertovic Derek +Derek McGowan Deric Crago +Deshi Xiao Dinesh Subhraveti Djibril Koné dkumor @@ -154,11 +187,13 @@ Dominik Honnef Don Spaulding Doug Davis doug tangren -Dr Nic Williams +dragon788 Dražen Lučanin +Dr Nic Williams Dustin Sallings Edmund Wagner Eiichi Tsukata +Eike Herzbach Eivind Uggedal Elias Probst Emil Hernvall @@ -166,17 +201,19 @@ Emily Rose Eric Hanchrow Eric Lee Eric Myhre -Eric Windisch +Eric Paris Eric Windisch Erik Hollensbe Erik Inge Bolsø +Erik Kristensen Erno Hopearuoho +Eugene Yakubovich eugenkrizo +evanderkoogh Evan Hazlett Evan Krall Evan Phoenix Evan Wies -evanderkoogh Eystein Måløy Stenberg ezbercih Fabio Falci @@ -186,49 +223,60 @@ Faiz Khan Fareed Dudhia Felix Rabe Fernando +Filipe Brandenburger Flavio Castelli FLGMwt Francisco Carriedo Francisco Souza Frank Macreery -Fred Lifton +Frank Rosquin Frederick F. 
Kautz IV Frederik Loeffert +Fred Lifton Freek Kalter Gabe Rosenhouse Gabor Nagy Gabriel Monroy Galen Sampson Gareth Rushgrove +gautam, prasanna Geoffrey Bachelet +George Xie Gereon Frey German DZ Gert van Valkenhoef Giuseppe Mazzotta Gleb Fotengauer-Malinovskiy +Gleb M Borisov Glyn Normington Goffert van Gool +golubbe Graydon Hoare Greg Thornton grunny Guilherme Salgado +Guillaume Dufour Guillaume J. Charmes Gurjeet Singh Guruprasad +Hans Rødtang Harald Albers Harley Laue Hector Castro Henning Sprang Hobofan -Hollie Teal -Hollie Teal -hollietealok +Hollie Teal +Huayi Zhang +Hugo Duncan Hunter Blanks +Hu Tao +Huu Nguyen hyeongkyu.lee Ian Babrou Ian Bull Ian Main Ian Truslove +Igor Dolzhikov ILYA Khlopotov inglesp Isaac Dupree @@ -236,8 +284,8 @@ Isabel Jimenez Isao Jonas Ivan Fraixedes Jack Danger Canty -Jake Moshenko jakedt +Jake Moshenko James Allen James Carr James DeFelice @@ -245,6 +293,7 @@ James Harrison Fisher James Kyle James Mills James Turnbull +Jan Keromnes Jan Pazdziora Jan Toebes Jaroslaw Zabiello @@ -256,31 +305,35 @@ Jason McVetta Jason Plum Jean-Baptiste Barth Jean-Baptiste Dalido +Jean-Paul Calderone Jeff Lindsay -Jeff Welch Jeffrey Bolle +Jeff Welch Jeremy Grosser +Jérôme Petazzoni Jesse Dubay +Jessica Frazelle Jezeniel Zapanta Jilles Oldenbeuving Jim Alateras -Jim Perrin Jimmy Cuadra +Jim Perrin Jiří Župka Joe Beda +Joe Ferguson +Joel Handwell Joe Shaw Joe Van Dyk -Joel Handwell Joffrey F Johan Euphrosine -Johan Rydberg Johannes 'fish' Ziemke +Johan Rydberg John Costa John Feminella John Gardiner Myers +John Gossman John OBrien III John Warwick -Jon Wedaman Jonas Pfenniger Jonathan Boulle Jonathan Camp @@ -288,22 +341,25 @@ Jonathan McCrohan Jonathan Mueller Jonathan Pares Jonathan Rudenberg +Jon Wedaman Joost Cassee Jordan Arentsen Jordan Sissel Joseph Anthony Pasquale Holsten Joseph Hager -Josh Josh Hawn +Josh Josh Poimboeuf +Josiah Kiehl JP +Julian Taylor Julien Barbier Julien Bordellier Julien Dubois Justin Force Justin Plock Justin Simonelis 
-Jérôme Petazzoni +Jyrki Puttonen Karan Lyons Karl Grzeszczak Kato Kazuyoshi @@ -311,57 +367,68 @@ Kawsar Saiyeed Keli Hu Ken Cochrane Ken ICHIKAWA -Kevin "qwazerty" Houdebert Kevin Clark Kevin J. Lynagh Kevin Menard +Kevin "qwazerty" Houdebert Kevin Wallace Keyvan Fatehi kies -Kim BKC Carlbacker kim0 +Kim BKC Carlbacker Kimbro Staken Kiran Gangadharan knappe Kohei Tsuruta +Konrad Kleine Konstantin Pelykh +krrg Kyle Conroy kyu Lachlan Coote +Lajos Papp +Lakshan Perera lalyos Lance Chen Lars R. Damerow Laurie Voss leeplay +Lei Jitang Len Weincier +Leszek Kowalski Levi Gross Lewis Peckover Liang-Chi Hsieh +limsy Lokesh Mandvekar Louis Opter lukaspustina lukemarsden +Madhu Venugopal Mahesh Tiyyagura +Malte Janduda Manfred Zabarauskas Manuel Meurer Manuel Woelker Marc Abramowitz Marc Kuo -Marc Tamsky Marco Hennings +Marc Tamsky Marcus Farkas -Marcus Ramberg marcuslinke +Marcus Ramberg Marek Goldmann Marius Voila Mark Allen Mark McGranaghan Marko Mikulicic +Marko Tibold Markus Fix Martijn van Oosterhout Martin Redmond Mason Malone Mateusz Sulima +Mathias Monnerville Mathieu Le Marec - Pasquet Matt Apperson Matt Bachmann @@ -372,17 +439,24 @@ Matthias Klumpp Matthias Kühnle mattymo mattyw -Max Shytikov -Maxim Treskin Maxime Petazzoni +Maxim Treskin +Max Shytikov +Médi-Rémi Hashim meejah +Mengdi Gao +Mert Yazıcıoğlu Michael Brown Michael Crosby Michael Gorsuch +Michael Hudson-Doyle Michael Neale -Michael Prokop -Michael Stapelberg Michaël Pailloncy +Michael Prokop +Michael Scharf +Michael Stapelberg +Michael Thies +Michal Jemala Michiel@unhosted Miguel Angel Fernández Mike Chelen @@ -395,32 +469,40 @@ Mohit Soni Morgante Pell Morten Siebuhr Mrunal Patel +mschurenko +Mustafa Akın Nan Monnand Deng Naoki Orii Nate Jones +Nathan Hsieh Nathan Kleyn Nathan LeClaire Nelson Chen Niall O'Higgins +Nicholas E. Rabenau Nick Payne Nick Stenning Nick Stinemates +Nicolas De loof Nicolas Dudebout +Nicolas Goy Nicolas Kaiser NikolaMandic noducks Nolan Darilek -O.S. 
Tezer +nzwsch OddBloke odk- Oguz Bilgic +Oh Jinkyun Ole Reifschneider Olivier Gambier +O.S. Tezer pandrew Pascal Borreli +Pascal Hartig Patrick Hemmer pattichen -Paul Paul Annesley Paul Bowsher Paul Hammond @@ -428,25 +510,39 @@ Paul Jimenez Paul Lietar Paul Morie Paul Nasrat +Paul Paul Weaver +Pavlos Ratis Peter Bourgon Peter Braden +Peter Ericson +Peter Salvatore Peter Waller -Phil -Phil Spitler +Phil Estes +Philipp Weissensteiner Phillip Alexander +Phil Spitler +Phil Piergiuliano Bossi Pierre-Alain RIVIERE +Pierre Piotr Bogdan +pixelistik +Prasanna Gautam +Przemek Hejman pysqz +Qiang Huang Quentin Brossard r0n22 Rafal Jeczalik +Rafe Colton Rajat Pandit Rajdeep Dua Ralph Bean Ramkumar Ramachandra Ramon van Alteren +Recursive Madman +Remi Rampin Renato Riccieri Santos Zannon rgstephens Rhys Hiltner @@ -455,6 +551,7 @@ Richo Healey Rick Bradley Rick van de Loo Robert Bachmann +Robert Bittle Robert Obryk Roberto G. Hashioka Robin Speekenbrink @@ -470,25 +567,30 @@ Rovanion Luckey Rudolph Gottesheim Ryan Anderson Ryan Aslett +Ryan Detzel Ryan Fowler Ryan O'Donnell Ryan Seto Ryan Thomas -s-ko Sam Alba Sam Bailey Sam J Sharpe Sam Reis Sam Rijs Samuel Andaya +Samuel PHAN satoru Satoshi Amemiya Scott Bessler Scott Collier +Scott Johnston +Scott Walls Sean Cronin Sean P. Kane Sebastiaan van Stijn -Sebastiaan van Stijn +Sébastien Luttringer +Sébastien +Sébastien Stormacq Senthil Kumar Selvaraj SeongJae Park Shane Canon @@ -496,28 +598,30 @@ shaunol Shawn Landden Shawn Siefkas Shih-Yuan Lee +shuai-z Silas Sewell Simon Taranto Sindhu S Sjoerd Langkemper +s-ko Solomon Hykes Song Gao Soulou soulshake Sridatta Thatipamala Sridhar Ratnakumar +Srini Brahmaroutu Steeve Morin Stefan Praszalowicz Stephen Crosby Steven Burgess +Steven Merrill sudosurootdev -Sven Dowideit +Sven Dowideit Sylvain Bellemare -Sébastien -Sébastien Luttringer -Sébastien Stormacq tang0th Tatsuki Sugiura +Ted M. 
Young Tehmasp Chaudhri Thatcher Peskens Thermionix @@ -526,25 +630,32 @@ Thomas Bikeev Thomas Frössman Thomas Hansen Thomas LEVEIL +Thomas Orozco Thomas Schroeter Tianon Gravi Tibor Vass Tim Bosse -Tim Ruffles -Tim Ruffles -Tim Terhorst +Tim Hockin Timothy Hobbs +Tim Ruffles +Tim Smith +Tim Terhorst tjmehta +tjwebb123 +tobe Tobias Bieniek Tobias Gesellchen Tobias Schmidt Tobias Schwab Todd Lunter +Tomasz Lipinski Tom Fotherby Tom Hulihan Tom Maaswinkel Tommaso Visconti +Tonis Tiigi Tony Daws +Torstein Husebø tpng Travis Cline Trent Ogren @@ -560,33 +671,43 @@ Victor Vieux Viktor Vojnovski Vincent Batts Vincent Bernat +Vincent Bernat +Vincent Giersch Vincent Mayers Vincent Woo Vinod Kulkarni +Vishal Doshi Vishnu Kannan Vitor Monteiro Vivek Agarwal +Vivek Dasgupta +Vivek Goyal Vladimir Bulyga Vladimir Kirillov Vladimir Rutsky +Vojtech Vitek (V-Teq) waitingkuo Walter Leibbrandt Walter Stanish +Ward Vandewege WarheadsSE Wes Morgan Will Dietz -Will Rouesnel -Will Weaver William Delanoue William Henry William Riancho William Thurston +Will Rouesnel +Will Weaver wyc Xiuming Chen +xuzhaokui Yang Bai Yasunori Mahata +Yohei Ueda Yurii Rashkovskii Zac Dover +Zach Borboa Zain Memon Zaiste! Zane DeGraffenried @@ -594,4 +715,4 @@ Zilin Du zimbatm Zoltan Tombol zqh -Álvaro Lázaro +尹吉峰 diff --git a/CHANGELOG.md b/CHANGELOG.md index fdc05c106e..346fe4ce2f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -46,7 +46,7 @@ #### Builder - Fix escaping `$` for environment variables - Fix issue with lowercase `onbuild` Dockerfile instruction -- Restrict envrionment variable expansion to `ENV`, `ADD`, `COPY`, `WORKDIR`, `EXPOSE`, `VOLUME` and `USER` +- Restrict environment variable expansion to `ENV`, `ADD`, `COPY`, `WORKDIR`, `EXPOSE`, `VOLUME` and `USER` ## 1.3.0 (2014-10-14) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3ed8bf9d43..77af00e40c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -6,27 +6,36 @@ feels wrong or incomplete. 
## Topics -* [Security Reports](#security-reports) +* [Reporting Security Issues](#reporting-security-issues) * [Design and Cleanup Proposals](#design-and-cleanup-proposals) * [Reporting Issues](#reporting-issues) * [Build Environment](#build-environment) * [Contribution Guidelines](#contribution-guidelines) * [Community Guidelines](#docker-community-guidelines) -## Security Reports +## Reporting Security Issues -Please **DO NOT** file an issue for security related issues. Please send your -reports to [security@docker.com](mailto:security@docker.com) instead. +The Docker maintainers take security very seriously. If you discover a security issue, +please bring it to their attention right away! + +Please send your report privately to [security@docker.com](mailto:security@docker.com), +please **DO NOT** file a public issue. + +Security reports are greatly appreciated and we will publicly thank you for it. We also +like to send gifts - if you're into Docker shwag make sure to let us know :) +We currently do not offer a paid security bounty program, but are not ruling it out in +the future. ## Design and Cleanup Proposals When considering a design proposal, we are looking for: * A description of the problem this design proposal solves -* An issue -- not a pull request -- that describes what you will take action on +* A pull request, not an issue, that modifies the documentation describing + the feature you are proposing, adding new documentation if necessary. * Please prefix your issue with `Proposal:` in the title -* Please review [the existing Proposals](https://github.com/dotcloud/docker/issues?direction=asc&labels=Proposal&page=1&sort=created&state=open) - before reporting a new issue. You can always pair with someone if you both +* Please review [the existing Proposals](https://github.com/docker/docker/pulls?q=is%3Aopen+is%3Apr+label%3AProposal) + before reporting a new one. You can always pair with someone if you both have the same idea. 
When considering a cleanup task, we are looking for: @@ -39,6 +48,10 @@ When considering a cleanup task, we are looking for: ## Reporting Issues +A great way to contribute to the project is to send a detailed report when you +encounter an issue. We always appreciate a well-written, thorough bug report, +and will thank you for it! + When reporting [issues](https://github.com/docker/docker/issues) on GitHub please include your host OS (Ubuntu 12.04, Fedora 19, etc). Please include: @@ -62,7 +75,7 @@ docs](http://docs.docker.com/contributing/devenvironment/). ### Pull requests are always welcome We are always thrilled to receive pull requests, and do our best to -process them as fast as possible. Not sure if that typo is worth a pull +process them as quickly as possible. Not sure if that typo is worth a pull request? Do it! We will appreciate it. If your pull request is not accepted on the first try, don't be @@ -159,7 +172,7 @@ component affected. For example, if a change affects `docs/` and `registry/`, it needs an absolute majority from the maintainers of `docs/` AND, separately, an absolute majority of the maintainers of `registry/`. -For more details see [MAINTAINERS.md](hack/MAINTAINERS.md) +For more details see [MAINTAINERS.md](project/MAINTAINERS.md) ### Sign your work @@ -310,7 +323,7 @@ do need a fair way to deal with people who are making our community suck. will be addressed immediately and are not subject to 3 strikes or forgiveness. -* Contact james@docker.com to report abuse or appeal violations. In the case of +* Contact abuse@docker.com to report abuse or appeal violations. In the case of appeals, we know that mistakes happen, and we'll work with you to come up with a fair solution if there has been a misunderstanding. diff --git a/Dockerfile b/Dockerfile index bd943deebe..af559759b7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -23,7 +23,6 @@ # the case. Therefore, you don't have to disable it anymore. 
# -docker-version 0.6.1 FROM ubuntu:14.04 MAINTAINER Tianon Gravi (@tianon) @@ -69,7 +68,10 @@ RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 ENV DOCKER_CROSSPLATFORMS \ linux/386 linux/arm \ darwin/amd64 darwin/386 \ - freebsd/amd64 freebsd/386 freebsd/arm + freebsd/amd64 freebsd/386 freebsd/arm +# windows is experimental for now +# windows/amd64 windows/386 + # (set an explicit GOARM of 5 for maximum compatibility) ENV GOARM 5 RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done' @@ -104,7 +106,7 @@ RUN useradd --create-home --gid docker unprivilegeduser VOLUME /var/lib/docker WORKDIR /go/src/github.com/docker/docker -ENV DOCKER_BUILDTAGS apparmor selinux +ENV DOCKER_BUILDTAGS apparmor selinux btrfs_noversion # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] diff --git a/Makefile b/Makefile index 842cc18e71..6f76fa4d29 100644 --- a/Makefile +++ b/Makefile @@ -1,20 +1,39 @@ .PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli validate +# env vars passed through directly to Docker's build scripts +# to allow things like `make DOCKER_CLIENTONLY=1 binary` easily +# `docs/sources/contributing/devenvironment.md ` and `project/PACKAGERS.md` have some limited documentation of some of these +DOCKER_ENVS := \ + -e BUILDFLAGS \ + -e DOCKER_CLIENTONLY \ + -e DOCKER_EXECDRIVER \ + -e DOCKER_GRAPHDRIVER \ + -e TESTDIRS \ + -e TESTFLAGS \ + -e TIMEOUT +# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds + # to allow `make BINDDIR=. 
shell` or `make BINDDIR= test` # (default to no bind mount if DOCKER_HOST is set) BINDDIR := $(if $(DOCKER_HOST),,bundles) +DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/docker/docker/$(BINDDIR)") + +# to allow `make DOCSDIR=docs docs-shell` (to create a bind mount in docs) +DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) + # to allow `make DOCSPORT=9000 docs` DOCSPORT := 8000 GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) -GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null) DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH)) DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH)) -DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/docker/docker/$(BINDDIR)") -DOCKER_RUN_DOCKER := docker run --rm -it --privileged -e TIMEOUT -e BUILDFLAGS -e TESTFLAGS -e TESTDIRS -e DOCKER_GRAPHDRIVER -e DOCKER_EXECDRIVER $(DOCKER_MOUNT) "$(DOCKER_IMAGE)" -# to allow `make DOCSDIR=docs docs-shell` -DOCKER_RUN_DOCS := docker run --rm -it $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) -e AWS_S3_BUCKET +DOCKER_RUN_DOCKER := docker run --rm -it --privileged $(DOCKER_ENVS) $(DOCKER_MOUNT) "$(DOCKER_IMAGE)" + +DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET + +# for some docs workarounds (see below in "docs-build" target) +GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null) default: binary @@ -34,7 +53,7 @@ docs-shell: docs-build $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash docs-release: docs-build - $(DOCKER_RUN_DOCS) -e BUILD_ROOT "$(DOCKER_DOCS_IMAGE)" ./release.sh + $(DOCKER_RUN_DOCS) -e OPTIONS -e BUILD_ROOT "$(DOCKER_DOCS_IMAGE)" ./release.sh test: build $(DOCKER_RUN_DOCKER) hack/make.sh binary cross test-unit test-integration test-integration-cli diff --git a/README.md b/README.md index 857cd3c70a..c2273eb656 100644 --- a/README.md +++ b/README.md @@ -178,13 +178,14 @@ Contributing to 
Docker ====================== [![GoDoc](https://godoc.org/github.com/docker/docker?status.png)](https://godoc.org/github.com/docker/docker) -[![Travis](https://travis-ci.org/docker/docker.svg?branch=master)](https://travis-ci.org/docker/docker) +[![Build Status](https://ci.dockerproject.com/github.com/docker/docker/status.svg?branch=master)](https://ci.dockerproject.com/github.com/docker/docker) Want to hack on Docker? Awesome! There are instructions to get you -started [here](CONTRIBUTING.md). +started [here](CONTRIBUTING.md). If you'd like to contribute to the +documentation, please take a look at this [README.md](https://github.com/docker/docker/blob/master/docs/README.md). -They are probably not perfect, please let us know if anything feels -wrong or incomplete. +These instructions are probably not perfect, please let us know if anything +feels wrong or incomplete. ### Legal @@ -201,5 +202,7 @@ For more information, please see http://www.bis.doc.gov Licensing ========= -Docker is licensed under the Apache License, Version 2.0. See LICENSE for full license text. +Docker is licensed under the Apache License, Version 2.0. See +[LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full +license text. 
diff --git a/VERSION b/VERSION index 31e5c84349..456ea726a0 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.3.3 +1.3.3-dev diff --git a/api/MAINTAINERS b/api/MAINTAINERS index e0f18f14f1..96abeae570 100644 --- a/api/MAINTAINERS +++ b/api/MAINTAINERS @@ -1 +1,2 @@ Victor Vieux (@vieux) +Jessie Frazelle (@jfrazelle) diff --git a/api/client/cli.go b/api/client/cli.go index 6bc3fc3507..e54eb8056e 100644 --- a/api/client/cli.go +++ b/api/client/cli.go @@ -3,12 +3,16 @@ package client import ( "crypto/tls" "encoding/json" + "errors" "fmt" "io" + "net" + "net/http" "os" "reflect" "strings" "text/template" + "time" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/term" @@ -34,6 +38,7 @@ type DockerCli struct { isTerminalIn bool // isTerminalOut describes if client's STDOUT is a TTY isTerminalOut bool + transport *http.Transport } var funcMap = template.FuncMap{ @@ -71,11 +76,11 @@ func (cli *DockerCli) Cmd(args ...string) error { method, exists := cli.getMethod(args[0]) if !exists { fmt.Println("Error: Command not found:", args[0]) - return cli.CmdHelp(args[1:]...) + return cli.CmdHelp() } return method(args[1:]...) } - return cli.CmdHelp(args...) + return cli.CmdHelp() } func (cli *DockerCli) Subcmd(name, signature, description string) *flag.FlagSet { @@ -100,6 +105,16 @@ func (cli *DockerCli) LoadConfigFile() (err error) { return err } +func (cli *DockerCli) CheckTtyInput(attachStdin, ttyMode bool) error { + // In order to attach to a container tty, input stream for the client must + // be a tty itself: redirecting or piping the client standard input is + // incompatible with `docker run -t`, `docker exec -t` or `docker attach`. 
+ if ttyMode && attachStdin && !cli.isTerminalIn { + return errors.New("cannot enable tty mode on non tty input") + } + return nil +} + func NewDockerCli(in io.ReadCloser, out, err io.Writer, key libtrust.PrivateKey, proto, addr string, tlsConfig *tls.Config) *DockerCli { var ( inFd uintptr @@ -131,6 +146,23 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, key libtrust.PrivateKey, err = out } + // The transport is created here for reuse during the client session + tr := &http.Transport{ + TLSClientConfig: tlsConfig, + } + + // Why 32? See issue 8035 + timeout := 32 * time.Second + if proto == "unix" { + // no need in compressing for local communications + tr.DisableCompression = true + tr.Dial = func(_, _ string) (net.Conn, error) { + return net.DialTimeout(proto, addr, timeout) + } + } else { + tr.Dial = (&net.Dialer{Timeout: timeout}).Dial + } + return &DockerCli{ proto: proto, addr: addr, @@ -144,5 +176,6 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, key libtrust.PrivateKey, isTerminalOut: isTerminalOut, tlsConfig: tlsConfig, scheme: scheme, + transport: tr, } } diff --git a/api/client/commands.go b/api/client/commands.go index 2c44bb63c5..89e5796bbb 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -5,6 +5,7 @@ import ( "bytes" "encoding/base64" "encoding/json" + "errors" "fmt" "io" "io/ioutil" @@ -17,11 +18,11 @@ import ( "runtime" "strconv" "strings" - "syscall" "text/tabwriter" "text/template" "time" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/api" "github.com/docker/docker/dockerversion" "github.com/docker/docker/engine" @@ -29,7 +30,6 @@ import ( "github.com/docker/docker/nat" "github.com/docker/docker/opts" "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/log" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers/filters" @@ -38,6 +38,7 @@ import ( "github.com/docker/docker/pkg/term" 
"github.com/docker/docker/pkg/timeutils" "github.com/docker/docker/pkg/units" + "github.com/docker/docker/pkg/urlutil" "github.com/docker/docker/registry" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" @@ -47,6 +48,10 @@ const ( tarHeaderSize = 512 ) +var ( + acceptedImageFilterTags = map[string]struct{}{"dangling": {}} +) + func (cli *DockerCli) CmdHelp(args ...string) error { if len(args) > 1 { method, exists := cli.getMethod(args[:2]...) @@ -77,6 +82,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error { noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image") rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build") forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers, even after unsuccessful builds") + pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image") if err := cmd.Parse(args); err != nil { return nil } @@ -110,13 +116,13 @@ func (cli *DockerCli) CmdBuild(args ...string) error { } else { context = ioutil.NopCloser(buf) } - } else if utils.IsURL(cmd.Arg(0)) && (!utils.IsGIT(cmd.Arg(0)) || !hasGit) { + } else if urlutil.IsURL(cmd.Arg(0)) && (!urlutil.IsGitURL(cmd.Arg(0)) || !hasGit) { isRemote = true } else { root := cmd.Arg(0) - if utils.IsGIT(root) { + if urlutil.IsGitURL(root) { remoteURL := cmd.Arg(0) - if !strings.HasPrefix(remoteURL, "git://") && !strings.HasPrefix(remoteURL, "git@") && !utils.IsURL(remoteURL) { + if !urlutil.IsGitTransport(remoteURL) { remoteURL = "https://" + remoteURL } @@ -143,6 +149,11 @@ func (cli *DockerCli) CmdBuild(args ...string) error { return fmt.Errorf("Error reading .dockerignore: '%s'", err) } for _, pattern := range strings.Split(string(ignore), "\n") { + pattern = strings.TrimSpace(pattern) + if pattern == "" { + continue + } + pattern = filepath.Clean(pattern) ok, err := filepath.Match(pattern, "Dockerfile") if err != 
nil { return fmt.Errorf("Bad .dockerignore pattern: '%s', error: %s", pattern, err) @@ -169,7 +180,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error { // FIXME: ProgressReader shouldn't be this annoying to use if context != nil { sf := utils.NewStreamFormatter(false) - body = utils.ProgressReader(context, 0, cli.err, sf, true, "", "Sending build context to Docker daemon") + body = utils.ProgressReader(context, 0, cli.out, sf, true, "", "Sending build context to Docker daemon") } // Send the build context v := &url.Values{} @@ -208,6 +219,9 @@ func (cli *DockerCli) CmdBuild(args ...string) error { v.Set("forcerm", "1") } + if *pull { + v.Set("pull", "1") + } cli.LoadConfigFile() headers := http.Header(make(map[string][]string)) @@ -284,7 +298,10 @@ func (cli *DockerCli) CmdLogin(args ...string) error { // the password or email from the config file, so prompt them if username != authconfig.Username { if password == "" { - oldState, _ := term.SaveState(cli.inFd) + oldState, err := term.SaveState(cli.inFd) + if err != nil { + return err + } fmt.Fprintf(cli.out, "Password: ") term.DisableEcho(cli.inFd, oldState) @@ -467,33 +484,69 @@ func (cli *DockerCli) CmdInfo(args ...string) error { } out.Close() - fmt.Fprintf(cli.out, "Containers: %d\n", remoteInfo.GetInt("Containers")) - fmt.Fprintf(cli.out, "Images: %d\n", remoteInfo.GetInt("Images")) - fmt.Fprintf(cli.out, "Storage Driver: %s\n", remoteInfo.Get("Driver")) - var driverStatus [][2]string - if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil { - return err + if remoteInfo.Exists("Containers") { + fmt.Fprintf(cli.out, "Containers: %d\n", remoteInfo.GetInt("Containers")) } - for _, pair := range driverStatus { - fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1]) + if remoteInfo.Exists("Images") { + fmt.Fprintf(cli.out, "Images: %d\n", remoteInfo.GetInt("Images")) + } + if remoteInfo.Exists("Driver") { + fmt.Fprintf(cli.out, "Storage Driver: %s\n", remoteInfo.Get("Driver")) + } + if 
remoteInfo.Exists("DriverStatus") { + var driverStatus [][2]string + if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil { + return err + } + for _, pair := range driverStatus { + fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1]) + } + } + if remoteInfo.Exists("ExecutionDriver") { + fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver")) + } + if remoteInfo.Exists("KernelVersion") { + fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion")) + } + if remoteInfo.Exists("OperatingSystem") { + fmt.Fprintf(cli.out, "Operating System: %s\n", remoteInfo.Get("OperatingSystem")) + } + if remoteInfo.Exists("NCPU") { + fmt.Fprintf(cli.out, "CPUs: %d\n", remoteInfo.GetInt("NCPU")) + } + if remoteInfo.Exists("MemTotal") { + fmt.Fprintf(cli.out, "Total Memory: %s\n", units.BytesSize(float64(remoteInfo.GetInt64("MemTotal")))) + } + if remoteInfo.Exists("Name") { + fmt.Fprintf(cli.out, "Name: %s\n", remoteInfo.Get("Name")) + } + if remoteInfo.Exists("ID") { + fmt.Fprintf(cli.out, "ID: %s\n", remoteInfo.Get("ID")) } - fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver")) - fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion")) - fmt.Fprintf(cli.out, "Operating System: %s\n", remoteInfo.Get("OperatingSystem")) if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" { - fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug")) + if remoteInfo.Exists("Debug") { + fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug")) + } fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "") - fmt.Fprintf(cli.out, "Fds: %d\n", remoteInfo.GetInt("NFd")) - fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines")) - fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener")) - + if remoteInfo.Exists("NFd") { + fmt.Fprintf(cli.out, "Fds: %d\n", remoteInfo.GetInt("NFd")) + } + if 
remoteInfo.Exists("NGoroutines") { + fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines")) + } + if remoteInfo.Exists("NEventsListener") { + fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener")) + } if initSha1 := remoteInfo.Get("InitSha1"); initSha1 != "" { fmt.Fprintf(cli.out, "Init SHA1: %s\n", initSha1) } if initPath := remoteInfo.Get("InitPath"); initPath != "" { fmt.Fprintf(cli.out, "Init Path: %s\n", initPath) } + if root := remoteInfo.Get("DockerRootDir"); root != "" { + fmt.Fprintf(cli.out, "Docker Root Dir: %s\n", root) + } } if len(remoteInfo.GetList("IndexServerAddress")) != 0 { @@ -504,15 +557,22 @@ func (cli *DockerCli) CmdInfo(args ...string) error { fmt.Fprintf(cli.out, "Registry: %v\n", remoteInfo.GetList("IndexServerAddress")) } } - if !remoteInfo.GetBool("MemoryLimit") { + if remoteInfo.Exists("MemoryLimit") && !remoteInfo.GetBool("MemoryLimit") { fmt.Fprintf(cli.err, "WARNING: No memory limit support\n") } - if !remoteInfo.GetBool("SwapLimit") { + if remoteInfo.Exists("SwapLimit") && !remoteInfo.GetBool("SwapLimit") { fmt.Fprintf(cli.err, "WARNING: No swap limit support\n") } - if !remoteInfo.GetBool("IPv4Forwarding") { + if remoteInfo.Exists("IPv4Forwarding") && !remoteInfo.GetBool("IPv4Forwarding") { fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n") } + if remoteInfo.Exists("Labels") { + fmt.Fprintln(cli.out, "Labels:") + for _, attribute := range remoteInfo.GetList("Labels") { + fmt.Fprintf(cli.out, " %s\n", attribute) + } + } + return nil } @@ -575,7 +635,7 @@ func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { signal.CatchAll(sigc) go func() { for s := range sigc { - if s == syscall.SIGCHLD { + if s == signal.SIGCHLD { continue } var sig string @@ -586,7 +646,7 @@ func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { } } if sig == "" { - log.Errorf("Unsupported signal: %d. Discarding.", s) + log.Errorf("Unsupported signal: %v. 
Discarding.", s) } if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, false)); err != nil { log.Debugf("Error sending signal: %s", err) @@ -614,18 +674,20 @@ func (cli *DockerCli) CmdStart(args ...string) error { return nil } + hijacked := make(chan io.Closer) + if *attach || *openStdin { if cmd.NArg() > 1 { return fmt.Errorf("You cannot start and attach multiple containers at once.") } - steam, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false) + stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false) if err != nil { return err } env := engine.Env{} - if err := env.Decode(steam); err != nil { + if err := env.Decode(stream); err != nil { return err } config := env.GetSubEnv("Config") @@ -650,8 +712,24 @@ func (cli *DockerCli) CmdStart(args ...string) error { v.Set("stderr", "1") cErr = promise.Go(func() error { - return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, nil, nil) + return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, hijacked, nil) }) + } else { + close(hijacked) + } + + // Acknowledge the hijack before starting + select { + case closer := <-hijacked: + // Make sure that the hijack gets closed when returning (results + // in closing the hijack chan and freeing server's goroutines) + if closer != nil { + defer closer.Close() + } + case err := <-cErr: + if err != nil { + return err + } } var encounteredError error @@ -681,7 +759,16 @@ func (cli *DockerCli) CmdStart(args ...string) error { log.Errorf("Error monitoring TTY size: %s", err) } } - return <-cErr + if attchErr := <-cErr; attchErr != nil { + return attchErr + } + _, status, err := getExitCode(cli, cmd.Arg(0)) + if err != nil { + return err + } + if status != 0 { + return &utils.StatusError{StatusCode: status} + } } return nil } @@ -798,7 +885,7 @@ func (cli *DockerCli) CmdInspect(args ...string) error { 
// Remove trailing ',' indented.Truncate(indented.Len() - 1) } - indented.WriteByte(']') + indented.WriteString("]\n") if tmpl == nil { if _, err := io.Copy(cli.out, indented); err != nil { @@ -857,13 +944,13 @@ func (cli *DockerCli) CmdPort(args ...string) error { return nil } - steam, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false) + stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false) if err != nil { return err } env := engine.Env{} - if err := env.Decode(steam); err != nil { + if err := env.Decode(stream); err != nil { return err } ports := nat.PortMap{} @@ -1195,7 +1282,7 @@ func (cli *DockerCli) CmdPull(args ...string) error { ) taglessRemote, tag := parsers.ParseRepositoryTag(remote) if tag == "" && !*allTags { - newRemote = taglessRemote + ":latest" + newRemote = taglessRemote + ":" + graph.DEFAULTTAG } if tag != "" && *allTags { return fmt.Errorf("tag can't be used with --all-tags/-a") @@ -1244,7 +1331,7 @@ func (cli *DockerCli) CmdPull(args ...string) error { } func (cli *DockerCli) CmdImages(args ...string) error { - cmd := cli.Subcmd("images", "[NAME]", "List images") + cmd := cli.Subcmd("images", "[REPOSITORY]", "List images") quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (by default filter out the intermediate image layers)") noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") @@ -1274,6 +1361,12 @@ func (cli *DockerCli) CmdImages(args ...string) error { } } + for name := range imageFilterArgs { + if _, ok := acceptedImageFilterTags[name]; !ok { + return fmt.Errorf("Invalid filter '%s'", name) + } + } + matchName := cmd.Arg(0) // FIXME: --viz and --tree are deprecated. Remove them in a future version. 
if *flViz || *flTree { @@ -1483,7 +1576,7 @@ func (cli *DockerCli) CmdPs(args ...string) error { cmd = cli.Subcmd("ps", "", "List containers") quiet = cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs") - size = cmd.Bool([]string{"s", "-size"}, false, "Display sizes") + size = cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes") all = cmd.Bool([]string{"a", "-all"}, false, "Show all containers. Only running containers are shown by default.") noTrunc = cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") nLatest = cmd.Bool([]string{"l", "-latest"}, false, "Show only the latest created container, include non-running ones.") @@ -1692,6 +1785,10 @@ func (cli *DockerCli) CmdEvents(args ...string) error { cmd := cli.Subcmd("events", "", "Get real time events from the server") since := cmd.String([]string{"#since", "-since"}, "", "Show all events created since timestamp") until := cmd.String([]string{"-until"}, "", "Stream events until this timestamp") + + flFilter := opts.NewListOpts(nil) + cmd.Var(&flFilter, []string{"f", "-filter"}, "Provide filter values (i.e. 'event=stop')") + if err := cmd.Parse(args); err != nil { return nil } @@ -1701,9 +1798,20 @@ func (cli *DockerCli) CmdEvents(args ...string) error { return nil } var ( - v = url.Values{} - loc = time.FixedZone(time.Now().Zone()) + v = url.Values{} + loc = time.FixedZone(time.Now().Zone()) + eventFilterArgs = filters.Args{} ) + + // Consolidate all filter flags, and sanity check them early. + // They'll get process in the daemon/server. 
+ for _, f := range flFilter.GetAll() { + var err error + eventFilterArgs, err = filters.ParseFlag(f, eventFilterArgs) + if err != nil { + return err + } + } var setTime = func(key, value string) { format := timeutils.RFC3339NanoFixed if len(value) < len(format) { @@ -1721,6 +1829,13 @@ func (cli *DockerCli) CmdEvents(args ...string) error { if *until != "" { setTime("until", *until) } + if len(eventFilterArgs) > 0 { + filterJson, err := filters.ToParam(eventFilterArgs) + if err != nil { + return err + } + v.Set("filters", filterJson) + } if err := cli.stream("GET", "/events?"+v.Encode(), nil, cli.out, nil); err != nil { return err } @@ -1797,13 +1912,13 @@ func (cli *DockerCli) CmdLogs(args ...string) error { } name := cmd.Arg(0) - steam, _, err := cli.call("GET", "/containers/"+name+"/json", nil, false) + stream, _, err := cli.call("GET", "/containers/"+name+"/json", nil, false) if err != nil { return err } env := engine.Env{} - if err := env.Decode(steam); err != nil { + if err := env.Decode(stream); err != nil { return err } @@ -1827,7 +1942,7 @@ func (cli *DockerCli) CmdAttach(args ...string) error { var ( cmd = cli.Subcmd("attach", "CONTAINER", "Attach to a running container") noStdin = cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach STDIN") - proxy = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process (even in non-TTY mode). SIGCHLD, SIGKILL, and SIGSTOP are not proxied.") + proxy = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process (non-TTY mode only). 
SIGCHLD, SIGKILL, and SIGSTOP are not proxied.") ) if err := cmd.Parse(args); err != nil { @@ -1859,6 +1974,10 @@ func (cli *DockerCli) CmdAttach(args ...string) error { tty = config.GetBool("Tty") ) + if err := cli.CheckTtyInput(!*noStdin, tty); err != nil { + return err + } + if tty && cli.isTerminalOut { if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil { log.Debugf("Error monitoring TTY size: %s", err) @@ -1994,7 +2113,7 @@ func (cli *DockerCli) pullImageCustomOut(image string, out io.Writer) error { repos, tag := parsers.ParseRepositoryTag(image) // pull only the image tagged 'latest' if no tag was specified if tag == "" { - tag = "latest" + tag = graph.DEFAULTTAG } v.Set("fromImage", repos) v.Set("tag", tag) @@ -2083,7 +2202,11 @@ func (cli *DockerCli) createContainer(config *runconfig.Config, hostConfig *runc stream, statusCode, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), mergedConfig, false) //if image not found try to pull it if statusCode == 404 { - fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", config.Image) + repo, tag := parsers.ParseRepositoryTag(config.Image) + if tag == "" { + tag = graph.DEFAULTTAG + } + fmt.Fprintf(cli.err, "Unable to find image '%s:%s' locally\n", repo, tag) // we don't want to write to stdout anything apart from container.ID if err = cli.pullImageCustomOut(config.Image, cli.err); err != nil { @@ -2124,7 +2247,7 @@ func (cli *DockerCli) CmdCreate(args ...string) error { flName = cmd.String([]string{"-name"}, "", "Assign a name to the container") ) - config, hostConfig, cmd, err := runconfig.Parse(cmd, args, nil) + config, hostConfig, cmd, err := runconfig.Parse(cmd, args) if err != nil { return err } @@ -2151,7 +2274,7 @@ func (cli *DockerCli) CmdRun(args ...string) error { var ( flAutoRemove = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits (incompatible with -d)") flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run 
the container in the background and print the new container ID") - flSigProxy = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy received signals to the process (even in non-TTY mode). SIGCHLD, SIGSTOP, and SIGKILL are not proxied.") + flSigProxy = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied.") flName = cmd.String([]string{"#name", "-name"}, "", "Assign a name to the container") flAttach *opts.ListOpts @@ -2160,7 +2283,7 @@ func (cli *DockerCli) CmdRun(args ...string) error { ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: --rm and -d") ) - config, hostConfig, cmd, err := runconfig.Parse(cmd, args, nil) + config, hostConfig, cmd, err := runconfig.Parse(cmd, args) if err != nil { return err } @@ -2169,7 +2292,11 @@ func (cli *DockerCli) CmdRun(args ...string) error { return nil } - if *flDetach { + if !*flDetach { + if err := cli.CheckTtyInput(config.AttachStdin, config.Tty); err != nil { + return err + } + } else { if fl := cmd.Lookup("attach"); fl != nil { flAttach = fl.Value.(*opts.ListOpts) if flAttach.Len() != 0 { @@ -2186,7 +2313,7 @@ func (cli *DockerCli) CmdRun(args ...string) error { config.StdinOnce = false } - // Disable flSigProxy in case on TTY + // Disable flSigProxy when in TTY mode sigProxy := *flSigProxy if config.Tty { sigProxy = false @@ -2208,7 +2335,7 @@ func (cli *DockerCli) CmdRun(args ...string) error { ) if !config.AttachStdout && !config.AttachStderr { - // Make this asynchrone in order to let the client write to stdin before having to read the ID + // Make this asynchronous to allow the client to write to stdin before having to read the ID waitDisplayId = make(chan struct{}) go func() { defer close(waitDisplayId) @@ -2220,7 +2347,7 @@ func (cli *DockerCli) CmdRun(args ...string) error { return ErrConflictRestartPolicyAndAutoRemove } - // We need to instanciate the chan because the select needs it. 
It can + // We need to instantiate the chan because the select needs it. It can // be closed but can't be uninitialized. hijacked := make(chan io.Closer) @@ -2267,8 +2394,8 @@ func (cli *DockerCli) CmdRun(args ...string) error { // Acknowledge the hijack before starting select { case closer := <-hijacked: - // Make sure that hijack gets closed when returning. (result - // in closing hijack chan and freeing server's goroutines. + // Make sure that the hijack gets closed when returning (results + // in closing the hijack chan and freeing server's goroutines) if closer != nil { defer closer.Close() } @@ -2280,7 +2407,7 @@ func (cli *DockerCli) CmdRun(args ...string) error { } //start the container - if _, _, err = readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/start", hostConfig, false)); err != nil { + if _, _, err = readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/start", nil, false)); err != nil { return err } @@ -2320,15 +2447,15 @@ func (cli *DockerCli) CmdRun(args ...string) error { return err } } else { + // No Autoremove: Simply retrieve the exit code if !config.Tty { - // In non-tty mode, we can't dettach, so we know we need to wait. + // In non-TTY mode, we can't detach, so we must wait for container exit if status, err = waitForExit(cli, runResult.Get("Id")); err != nil { return err } } else { - // In TTY mode, there is a race. If the process dies too slowly, the state can be update after the getExitCode call - // and result in a wrong exit code. 
- // No Autoremove: Simply retrieve the exit code + // In TTY mode, there is a race: if the process dies too slowly, the state could + // be updated after the getExitCode call and result in the wrong exit code being reported if _, status, err = getExitCode(cli, runResult.Get("Id")); err != nil { return err } @@ -2402,7 +2529,10 @@ func (cli *DockerCli) CmdSave(args ...string) error { if err != nil { return err } + } else if cli.isTerminalOut { + return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.") } + if len(cmd.Args()) == 1 { image := cmd.Arg(0) if err := cli.stream("GET", "/images/"+image+"/get", nil, output, nil); err != nil { @@ -2450,7 +2580,7 @@ func (cli *DockerCli) CmdLoad(args ...string) error { } func (cli *DockerCli) CmdExec(args ...string) error { - cmd := cli.Subcmd("exec", "CONTAINER COMMAND [ARG...]", "Run a command in an existing container") + cmd := cli.Subcmd("exec", "CONTAINER COMMAND [ARG...]", "Run a command in a running container") execConfig, err := runconfig.ParseExec(cmd, args) if err != nil { @@ -2478,10 +2608,16 @@ func (cli *DockerCli) CmdExec(args ...string) error { return nil } - if execConfig.Detach { + if !execConfig.Detach { + if err := cli.CheckTtyInput(execConfig.AttachStdin, execConfig.Tty); err != nil { + return err + } + } else { if _, _, err := readBody(cli.call("POST", "/exec/"+execID+"/start", execConfig, false)); err != nil { return err } + // For now don't print this - wait for when we support exec wait() + // fmt.Fprintf(cli.out, "%s\n", execID) return nil } @@ -2544,5 +2680,14 @@ func (cli *DockerCli) CmdExec(args ...string) error { return err } + var status int + if _, status, err = getExecExitCode(cli, execID); err != nil { + return err + } + + if status != 0 { + return &utils.StatusError{StatusCode: status} + } + return nil } diff --git a/api/client/hijack.go b/api/client/hijack.go index d0b5e93ef0..617a0b3f61 100644 --- a/api/client/hijack.go +++ b/api/client/hijack.go @@ -13,9 
+13,9 @@ import ( "strings" "time" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/api" "github.com/docker/docker/dockerversion" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/stdcopy" "github.com/docker/docker/pkg/term" diff --git a/api/client/utils.go b/api/client/utils.go index 58b730bd1b..8de571bf4d 100644 --- a/api/client/utils.go +++ b/api/client/utils.go @@ -8,20 +8,18 @@ import ( "fmt" "io" "io/ioutil" - "net" "net/http" "net/url" "os" gosignal "os/signal" "strconv" "strings" - "syscall" - "time" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/api" "github.com/docker/docker/dockerversion" "github.com/docker/docker/engine" - "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/signal" "github.com/docker/docker/pkg/stdcopy" "github.com/docker/docker/pkg/term" "github.com/docker/docker/registry" @@ -33,22 +31,7 @@ var ( ) func (cli *DockerCli) HTTPClient() *http.Client { - tr := &http.Transport{ - TLSClientConfig: cli.tlsConfig, - Dial: func(network, addr string) (net.Conn, error) { - // Why 32? See issue 8035 - return net.DialTimeout(cli.proto, cli.addr, 32*time.Second) - }, - } - if cli.proto == "unix" { - // XXX workaround for net/http Transport which caches connections, but is - // intended for tcp connections, not unix sockets. - tr.DisableKeepAlives = true - - // no need in compressing for local communications - tr.DisableCompression = true - } - return &http.Client{Transport: tr} + return &http.Client{Transport: cli.transport} } func (cli *DockerCli) encodeData(data interface{}) (*bytes.Buffer, error) { @@ -113,7 +96,12 @@ func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo b if strings.Contains(err.Error(), "connection refused") { return nil, -1, ErrConnectionRefused } - return nil, -1, err + + if cli.tlsConfig == nil { + return nil, -1, fmt.Errorf("%v. 
Are you trying to connect to a TLS-enabled daemon without TLS?", err) + } + return nil, -1, fmt.Errorf("An error occurred trying to connect: %v", err) + } if resp.StatusCode < 200 || resp.StatusCode >= 400 { @@ -228,7 +216,7 @@ func waitForExit(cli *DockerCli, containerId string) (int, error) { // getExitCode perform an inspect on the container. It returns // the running state and the exit code. func getExitCode(cli *DockerCli, containerId string) (bool, int, error) { - steam, _, err := cli.call("GET", "/containers/"+containerId+"/json", nil, false) + stream, _, err := cli.call("GET", "/containers/"+containerId+"/json", nil, false) if err != nil { // If we can't connect, then the daemon probably died. if err != ErrConnectionRefused { @@ -238,7 +226,7 @@ func getExitCode(cli *DockerCli, containerId string) (bool, int, error) { } var result engine.Env - if err := result.Decode(steam); err != nil { + if err := result.Decode(stream); err != nil { return false, -1, err } @@ -246,11 +234,31 @@ func getExitCode(cli *DockerCli, containerId string) (bool, int, error) { return state.GetBool("Running"), state.GetInt("ExitCode"), nil } +// getExecExitCode perform an inspect on the exec command. It returns +// the running state and the exit code. +func getExecExitCode(cli *DockerCli, execId string) (bool, int, error) { + stream, _, err := cli.call("GET", "/exec/"+execId+"/json", nil, false) + if err != nil { + // If we can't connect, then the daemon probably died. 
+ if err != ErrConnectionRefused { + return false, -1, err + } + return false, -1, nil + } + + var result engine.Env + if err := result.Decode(stream); err != nil { + return false, -1, err + } + + return result.GetBool("Running"), result.GetInt("ExitCode"), nil +} + func (cli *DockerCli) monitorTtySize(id string, isExec bool) error { cli.resizeTty(id, isExec) sigchan := make(chan os.Signal, 1) - gosignal.Notify(sigchan, syscall.SIGWINCH) + gosignal.Notify(sigchan, signal.SIGWINCH) go func() { for _ = range sigchan { cli.resizeTty(id, isExec) diff --git a/api/common.go b/api/common.go index 3eecaa0455..71e72f69e0 100644 --- a/api/common.go +++ b/api/common.go @@ -3,16 +3,19 @@ package api import ( "fmt" "mime" + "os" + "path" "strings" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/version" + "github.com/docker/libtrust" ) const ( - APIVERSION version.Version = "1.15" + APIVERSION version.Version = "1.16" DEFAULTHTTPHOST = "127.0.0.1" DEFAULTUNIXSOCKET = "/var/run/docker.sock" ) @@ -47,3 +50,25 @@ func MatchesContentType(contentType, expectedType string) bool { } return err == nil && mimetype == expectedType } + +// LoadOrCreateTrustKey attempts to load the libtrust key at the given path, +// otherwise generates a new one +func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { + err := os.MkdirAll(path.Dir(trustKeyPath), 0700) + if err != nil { + return nil, err + } + trustKey, err := libtrust.LoadKeyFile(trustKeyPath) + if err == libtrust.ErrKeyFileDoesNotExist { + trustKey, err = libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("Error generating key: %s", err) + } + if err := libtrust.SaveKey(trustKeyPath, trustKey); err != nil { + return nil, fmt.Errorf("Error saving key file: %s", err) + } + } else if err != nil { + return nil, fmt.Errorf("Error loading key file: %s", err) + } + 
return trustKey, nil +} diff --git a/api/server/server.go b/api/server/server.go index 93b8b60a8f..629ad0ba02 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -3,8 +3,7 @@ package server import ( "bufio" "bytes" - "crypto/tls" - "crypto/x509" + "encoding/base64" "encoding/json" "expvar" @@ -19,14 +18,17 @@ import ( "strings" "syscall" + "crypto/tls" + "crypto/x509" + "code.google.com/p/go.net/websocket" "github.com/docker/libcontainer/user" "github.com/gorilla/mux" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/api" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/listenbuffer" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/stdcopy" "github.com/docker/docker/pkg/systemd" @@ -39,6 +41,18 @@ var ( activationLock chan struct{} ) +type HttpServer struct { + srv *http.Server + l net.Listener +} + +func (s *HttpServer) Serve() error { + return s.srv.Serve(s.l) +} +func (s *HttpServer) Close() error { + return s.l.Close() +} + type HttpApiFunc func(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error func hijackServer(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) { @@ -51,6 +65,18 @@ func hijackServer(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) { return conn, conn, nil } +func closeStreams(streams ...interface{}) { + for _, stream := range streams { + if tcpc, ok := stream.(interface { + CloseWrite() error + }); ok { + tcpc.CloseWrite() + } else if closer, ok := stream.(io.Closer); ok { + closer.Close() + } + } +} + // Check to make sure request's Content-Type is application/json func checkForJson(r *http.Request) error { ct := r.Header.Get("Content-Type") @@ -92,17 +118,18 @@ func httpError(w http.ResponseWriter, err error) { // FIXME: this is brittle and should not be necessary. 
// If we need to differentiate between different possible error types, we should // create appropriate error types with clearly defined meaning. - if strings.Contains(err.Error(), "No such") { + errStr := strings.ToLower(err.Error()) + if strings.Contains(errStr, "no such") { statusCode = http.StatusNotFound - } else if strings.Contains(err.Error(), "Bad parameter") { + } else if strings.Contains(errStr, "bad parameter") { statusCode = http.StatusBadRequest - } else if strings.Contains(err.Error(), "Conflict") { + } else if strings.Contains(errStr, "conflict") { statusCode = http.StatusConflict - } else if strings.Contains(err.Error(), "Impossible") { + } else if strings.Contains(errStr, "impossible") { statusCode = http.StatusNotAcceptable - } else if strings.Contains(err.Error(), "Wrong login/password") { + } else if strings.Contains(errStr, "wrong login/password") { statusCode = http.StatusUnauthorized - } else if strings.Contains(err.Error(), "hasn't been activated") { + } else if strings.Contains(errStr, "hasn't been activated") { statusCode = http.StatusForbidden } @@ -300,6 +327,7 @@ func getEvents(eng *engine.Engine, version version.Version, w http.ResponseWrite streamJSON(job, w, true) job.Setenv("since", r.Form.Get("since")) job.Setenv("until", r.Form.Get("until")) + job.Setenv("filters", r.Form.Get("filters")) return job.Run() } @@ -855,20 +883,7 @@ func postContainersAttach(eng *engine.Engine, version version.Version, w http.Re if err != nil { return err } - defer func() { - if tcpc, ok := inStream.(*net.TCPConn); ok { - tcpc.CloseWrite() - } else { - inStream.Close() - } - }() - defer func() { - if tcpc, ok := outStream.(*net.TCPConn); ok { - tcpc.CloseWrite() - } else if closer, ok := outStream.(io.Closer); ok { - closer.Close() - } - }() + defer closeStreams(inStream, outStream) var errStream io.Writer @@ -941,6 +956,15 @@ func getContainersByName(eng *engine.Engine, version version.Version, w http.Res return job.Run() } +func getExecByID(eng 
*engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter 'id'") + } + var job = eng.Job("execInspect", vars["id"]) + streamJSON(job, w, false) + return job.Run() +} + func getImagesByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") @@ -1001,6 +1025,9 @@ func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWrite } else { job.Setenv("rm", r.FormValue("rm")) } + if r.FormValue("pull") == "1" && version.GreaterThanOrEqualTo("1.16") { + job.Setenv("pull", "1") + } job.Stdin.Add(r.Body) job.Setenv("remote", r.FormValue("remote")) job.Setenv("t", r.FormValue("t")) @@ -1050,7 +1077,7 @@ func postContainersCopy(eng *engine.Engine, version version.Version, w http.Resp w.Header().Set("Content-Type", "application/x-tar") if err := job.Run(); err != nil { log.Errorf("%s", err.Error()) - if strings.Contains(err.Error(), "No such container") { + if strings.Contains(strings.ToLower(err.Error()), "no such container") { w.WriteHeader(http.StatusNotFound) } else if strings.Contains(err.Error(), "no such file or directory") { return fmt.Errorf("Could not find the file %s in container %s", origResource, vars["name"]) @@ -1106,21 +1133,7 @@ func postContainerExecStart(eng *engine.Engine, version version.Version, w http. 
if err != nil { return err } - - defer func() { - if tcpc, ok := inStream.(*net.TCPConn); ok { - tcpc.CloseWrite() - } else { - inStream.Close() - } - }() - defer func() { - if tcpc, ok := outStream.(*net.TCPConn); ok { - tcpc.CloseWrite() - } else if closer, ok := outStream.(io.Closer); ok { - closer.Close() - } - }() + defer closeStreams(inStream, outStream) var errStream io.Writer @@ -1166,7 +1179,7 @@ func optionsHandler(eng *engine.Engine, version version.Version, w http.Response } func writeCorsHeaders(w http.ResponseWriter, r *http.Request) { w.Header().Add("Access-Control-Allow-Origin", "*") - w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept") + w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth") w.Header().Add("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS") } @@ -1231,6 +1244,7 @@ func AttachProfiler(router *mux.Router) { router.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) router.HandleFunc("/debug/pprof/profile", pprof.Profile) router.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + router.HandleFunc("/debug/pprof/block", pprof.Handler("block").ServeHTTP) router.HandleFunc("/debug/pprof/heap", pprof.Handler("heap").ServeHTTP) router.HandleFunc("/debug/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP) router.HandleFunc("/debug/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP) @@ -1262,6 +1276,7 @@ func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion st "/containers/{name:.*}/top": getContainersTop, "/containers/{name:.*}/logs": getContainersLogs, "/containers/{name:.*}/attach/ws": wsContainersAttach, + "/exec/{id:.*}/json": getExecByID, }, "POST": { "/auth": postAuth, @@ -1333,9 +1348,14 @@ func ServeRequest(eng *engine.Engine, apiversion version.Version, w http.Respons return nil } -// ServeFD creates an http.Server and sets it up to serve given a socket activated +// serveFd 
creates an http.Server and sets it up to serve given a socket activated // argument. -func ServeFd(addr string, handle http.Handler) error { +func serveFd(addr string, job *engine.Job) error { + r, err := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version")) + if err != nil { + return err + } + ls, e := systemd.ListenFD(addr) if e != nil { return e @@ -1353,7 +1373,7 @@ func ServeFd(addr string, handle http.Handler) error { for i := range ls { listener := ls[i] go func() { - httpSrv := http.Server{Handler: handle} + httpSrv := http.Server{Handler: r} chErrors <- httpSrv.Serve(listener) }() } @@ -1369,7 +1389,11 @@ func ServeFd(addr string, handle http.Handler) error { } func lookupGidByName(nameOrGid string) (int, error) { - groups, err := user.ParseGroupFilter(func(g *user.Group) bool { + groupFile, err := user.GetGroupFile() + if err != nil { + return -1, err + } + groups, err := user.ParseGroupFileFilter(groupFile, func(g user.Group) bool { return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid }) if err != nil { @@ -1381,6 +1405,41 @@ func lookupGidByName(nameOrGid string) (int, error) { return -1, fmt.Errorf("Group %s not found", nameOrGid) } +func setupTls(cert, key, ca string, l net.Listener) (net.Listener, error) { + tlsCert, err := tls.LoadX509KeyPair(cert, key) + if err != nil { + return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. 
Key encrypted?", + cert, key, err) + } + tlsConfig := &tls.Config{ + NextProtos: []string{"http/1.1"}, + Certificates: []tls.Certificate{tlsCert}, + // Avoid fallback on insecure SSL protocols + MinVersion: tls.VersionTLS10, + } + + if ca != "" { + certPool := x509.NewCertPool() + file, err := ioutil.ReadFile(ca) + if err != nil { + return nil, fmt.Errorf("Couldn't read CA certificate: %s", err) + } + certPool.AppendCertsFromPEM(file) + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + tlsConfig.ClientCAs = certPool + } + + return tls.NewListener(l, tlsConfig), nil +} + +func newListener(proto, addr string, bufferRequests bool) (net.Listener, error) { + if bufferRequests { + return listenbuffer.NewListenBuffer(proto, addr, activationLock) + } + + return net.Listen(proto, addr) +} + func changeGroup(addr string, nameOrGid string) error { gid, err := lookupGidByName(nameOrGid) if err != nil { @@ -1391,99 +1450,95 @@ func changeGroup(addr string, nameOrGid string) error { return os.Chown(addr, 0, gid) } -// ListenAndServe sets up the required http.Server and gets it listening for -// each addr passed in and does protocol specific checking. 
-func ListenAndServe(proto, addr string, job *engine.Job) error { - var l net.Listener +func setSocketGroup(addr, group string) error { + if group == "" { + return nil + } + + if err := changeGroup(addr, group); err != nil { + if group != "docker" { + return err + } + log.Debugf("Warning: could not chgrp %s to docker: %v", addr, err) + } + + return nil +} + +func setupUnixHttp(addr string, job *engine.Job) (*HttpServer, error) { r, err := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version")) if err != nil { - return err + return nil, err } - if proto == "fd" { - return ServeFd(addr, r) + if err := syscall.Unlink(addr); err != nil && !os.IsNotExist(err) { + return nil, err } + mask := syscall.Umask(0777) + defer syscall.Umask(mask) - if proto == "unix" { - if err := syscall.Unlink(addr); err != nil && !os.IsNotExist(err) { - return err - } - } - - var oldmask int - if proto == "unix" { - oldmask = syscall.Umask(0777) - } - - if job.GetenvBool("BufferRequests") { - l, err = listenbuffer.NewListenBuffer(proto, addr, activationLock) - } else { - l, err = net.Listen(proto, addr) - } - - if proto == "unix" { - syscall.Umask(oldmask) - } + l, err := newListener("unix", addr, job.GetenvBool("BufferRequests")) if err != nil { - return err + return nil, err } - if proto != "unix" && (job.GetenvBool("Tls") || job.GetenvBool("TlsVerify")) { - tlsCert := job.Getenv("TlsCert") - tlsKey := job.Getenv("TlsKey") - cert, err := tls.LoadX509KeyPair(tlsCert, tlsKey) - if err != nil { - return fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. 
Key encrypted?", - tlsCert, tlsKey, err) - } - tlsConfig := &tls.Config{ - NextProtos: []string{"http/1.1"}, - Certificates: []tls.Certificate{cert}, - // Avoid fallback on insecure SSL protocols - MinVersion: tls.VersionTLS10, - } + if err := setSocketGroup(addr, job.Getenv("SocketGroup")); err != nil { + return nil, err + } + + if err := os.Chmod(addr, 0660); err != nil { + return nil, err + } + + return &HttpServer{&http.Server{Addr: addr, Handler: r}, l}, nil +} + +func setupTcpHttp(addr string, job *engine.Job) (*HttpServer, error) { + if !strings.HasPrefix(addr, "127.0.0.1") && !job.GetenvBool("TlsVerify") { + log.Infof("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") + } + + r, err := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version")) + if err != nil { + return nil, err + } + + l, err := newListener("tcp", addr, job.GetenvBool("BufferRequests")) + if err != nil { + return nil, err + } + + if job.GetenvBool("Tls") || job.GetenvBool("TlsVerify") { + var tlsCa string if job.GetenvBool("TlsVerify") { - certPool := x509.NewCertPool() - file, err := ioutil.ReadFile(job.Getenv("TlsCa")) - if err != nil { - return fmt.Errorf("Couldn't read CA certificate: %s", err) - } - certPool.AppendCertsFromPEM(file) - - tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert - tlsConfig.ClientCAs = certPool + tlsCa = job.Getenv("TlsCa") + } + l, err = setupTls(job.Getenv("TlsCert"), job.Getenv("TlsKey"), tlsCa, l) + if err != nil { + return nil, err } - l = tls.NewListener(l, tlsConfig) } + return &HttpServer{&http.Server{Addr: addr, Handler: r}, l}, nil +} +// NewServer sets up the required Server and does protocol specific checking. 
+func NewServer(proto, addr string, job *engine.Job) (Server, error) { // Basic error and sanity checking switch proto { + case "fd": + return nil, serveFd(addr, job) case "tcp": - if !strings.HasPrefix(addr, "127.0.0.1") && !job.GetenvBool("TlsVerify") { - log.Infof("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") - } + return setupTcpHttp(addr, job) case "unix": - socketGroup := job.Getenv("SocketGroup") - if socketGroup != "" { - if err := changeGroup(addr, socketGroup); err != nil { - if socketGroup == "docker" { - // if the user hasn't explicitly specified the group ownership, don't fail on errors. - log.Debugf("Warning: could not chgrp %s to docker: %s", addr, err.Error()) - } else { - return err - } - } - - } - if err := os.Chmod(addr, 0660); err != nil { - return err - } + return setupUnixHttp(addr, job) default: - return fmt.Errorf("Invalid protocol format.") + return nil, fmt.Errorf("Invalid protocol format.") } +} - httpSrv := http.Server{Addr: addr, Handler: r} - return httpSrv.Serve(l) +type Server interface { + Serve() error + Close() error } // ServeApi loops through all of the protocols sent in to docker and spawns @@ -1505,7 +1560,12 @@ func ServeApi(job *engine.Job) engine.Status { } go func() { log.Infof("Listening for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1]) - chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job) + srv, err := NewServer(protoAddrParts[0], protoAddrParts[1], job) + if err != nil { + chErrors <- err + return + } + chErrors <- srv.Serve() }() } diff --git a/builder/dispatchers.go b/builder/dispatchers.go index 2184e48a81..db7476c5ed 100644 --- a/builder/dispatchers.go +++ b/builder/dispatchers.go @@ -14,8 +14,8 @@ import ( "regexp" "strings" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/nat" - "github.com/docker/docker/pkg/log" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/runconfig" ) @@ -31,21 +31,39 @@ func 
nullDispatch(b *Builder, args []string, attributes map[string]bool, origina // in the dockerfile available from the next statement on via ${foo}. // func env(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) != 2 { - return fmt.Errorf("ENV accepts two arguments") + if len(args) == 0 { + return fmt.Errorf("ENV is missing arguments") } - fullEnv := fmt.Sprintf("%s=%s", args[0], args[1]) + if len(args)%2 != 0 { + // should never get here, but just in case + return fmt.Errorf("Bad input to ENV, too many args") + } - for i, envVar := range b.Config.Env { - envParts := strings.SplitN(envVar, "=", 2) - if args[0] == envParts[0] { - b.Config.Env[i] = fullEnv - return b.commit("", b.Config.Cmd, fmt.Sprintf("ENV %s", fullEnv)) + commitStr := "ENV" + + for j := 0; j < len(args); j++ { + // name ==> args[j] + // value ==> args[j+1] + newVar := args[j] + "=" + args[j+1] + "" + commitStr += " " + newVar + + gotOne := false + for i, envVar := range b.Config.Env { + envParts := strings.SplitN(envVar, "=", 2) + if envParts[0] == args[j] { + b.Config.Env[i] = newVar + gotOne = true + break + } } + if !gotOne { + b.Config.Env = append(b.Config.Env, newVar) + } + j++ } - b.Config.Env = append(b.Config.Env, fullEnv) - return b.commit("", b.Config.Cmd, fmt.Sprintf("ENV %s", fullEnv)) + + return b.commit("", b.Config.Cmd, commitStr) } // MAINTAINER some text @@ -97,6 +115,12 @@ func from(b *Builder, args []string, attributes map[string]bool, original string name := args[0] image, err := b.Daemon.Repositories().LookupImage(name) + if b.Pull { + image, err = b.pullImage(name) + if err != nil { + return err + } + } if err != nil { if b.Daemon.Graph().IsNotExist(err) { image, err = b.pullImage(name) @@ -183,7 +207,7 @@ func run(b *Builder, args []string, attributes map[string]bool, original string) runCmd.SetOutput(ioutil.Discard) runCmd.Usage = nil - config, _, _, err := runconfig.Parse(runCmd, append([]string{b.image}, args...), nil) + config, 
_, _, err := runconfig.Parse(runCmd, append([]string{b.image}, args...)) if err != nil { return err } diff --git a/builder/evaluator.go b/builder/evaluator.go index 7884d36ac2..3d9ebb162c 100644 --- a/builder/evaluator.go +++ b/builder/evaluator.go @@ -27,10 +27,10 @@ import ( "path" "strings" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/builder/parser" "github.com/docker/docker/daemon" "github.com/docker/docker/engine" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/pkg/tarsum" "github.com/docker/docker/registry" "github.com/docker/docker/runconfig" @@ -90,6 +90,7 @@ type Builder struct { // controls how images and containers are handled between steps. Remove bool ForceRemove bool + Pull bool AuthConfig *registry.AuthConfig AuthConfigFile *registry.ConfigFile diff --git a/builder/internals.go b/builder/internals.go index fa8b9f703c..c1fd617a56 100644 --- a/builder/internals.go +++ b/builder/internals.go @@ -9,6 +9,7 @@ import ( "fmt" "io" "io/ioutil" + "net/http" "net/url" "os" "path" @@ -18,17 +19,17 @@ import ( "syscall" "time" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/builder/parser" "github.com/docker/docker/daemon" imagepkg "github.com/docker/docker/image" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/pkg/parsers" - "github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/tarsum" + "github.com/docker/docker/pkg/urlutil" "github.com/docker/docker/registry" "github.com/docker/docker/utils" ) @@ -217,7 +218,7 @@ func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath stri origPath = strings.TrimPrefix(origPath, "./") // In the remote/URL case, download it and gen its hashcode - if utils.IsURL(origPath) { + if urlutil.IsURL(origPath) { if !allowRemote { return fmt.Errorf("Source can't be a URL for %s", 
cmdName) } @@ -257,8 +258,21 @@ func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath stri fmt.Fprintf(b.OutStream, "\n") tmpFile.Close() - // Remove the mtime of the newly created tmp file - if err := system.UtimesNano(tmpFileName, make([]syscall.Timespec, 2)); err != nil { + // Set the mtime to the Last-Modified header value if present + // Otherwise just remove atime and mtime + times := make([]syscall.Timespec, 2) + + lastMod := resp.Header.Get("Last-Modified") + if lastMod != "" { + mTime, err := http.ParseTime(lastMod) + // If we can't parse it then just let it default to 'zero' + // otherwise use the parsed time value + if err == nil { + times[1] = syscall.NsecToTimespec(mTime.UnixNano()) + } + } + + if err := system.UtimesNano(tmpFileName, times); err != nil { return err } @@ -514,25 +528,19 @@ func (b *Builder) create() (*daemon.Container, error) { } func (b *Builder) run(c *daemon.Container) error { - var errCh chan error - if b.Verbose { - errCh = promise.Go(func() error { - // FIXME: call the 'attach' job so that daemon.Attach can be made private - // - // FIXME (LK4D4): Also, maybe makes sense to call "logs" job, it is like attach - // but without hijacking for stdin. Also, with attach there can be race - // condition because of some output already was printed before it. 
- return <-b.Daemon.Attach(&c.StreamConfig, c.Config.OpenStdin, c.Config.StdinOnce, c.Config.Tty, nil, nil, b.OutStream, b.ErrStream) - }) - } - //start the container if err := c.Start(); err != nil { return err } - if errCh != nil { - if err := <-errCh; err != nil { + if b.Verbose { + logsJob := b.Engine.Job("logs", c.ID) + logsJob.Setenv("follow", "1") + logsJob.Setenv("stdout", "1") + logsJob.Setenv("stderr", "1") + logsJob.Stdout.Add(b.OutStream) + logsJob.Stderr.Set(b.ErrStream) + if err := logsJob.Run(); err != nil { return err } } @@ -641,37 +649,45 @@ func (b *Builder) addContext(container *daemon.Container, orig, dest string, dec resPath = path.Join(destPath, path.Base(origPath)) } - return fixPermissions(resPath, 0, 0) + return fixPermissions(origPath, resPath, 0, 0, destExists) } -func copyAsDirectory(source, destination string, destinationExists bool) error { +func copyAsDirectory(source, destination string, destExisted bool) error { if err := chrootarchive.CopyWithTar(source, destination); err != nil { return err } + return fixPermissions(source, destination, 0, 0, destExisted) +} - if destinationExists { - files, err := ioutil.ReadDir(source) +func fixPermissions(source, destination string, uid, gid int, destExisted bool) error { + // If the destination didn't already exist, or the destination isn't a + // directory, then we should Lchown the destination. Otherwise, we shouldn't + // Lchown the destination. + destStat, err := os.Stat(destination) + if err != nil { + // This should *never* be reached, because the destination must've already + // been created while untar-ing the context. + return err + } + doChownDestination := !destExisted || !destStat.IsDir() + + // We Walk on the source rather than on the destination because we don't + // want to change permissions on things we haven't created or modified. + return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error { + // Do not alter the walk root iff. 
it existed before, as it doesn't fall under + // the domain of "things we should chown". + if !doChownDestination && (source == fullpath) { + return nil + } + + // Path is prefixed by source: substitute with destination instead. + cleaned, err := filepath.Rel(source, fullpath) if err != nil { return err } - for _, file := range files { - if err := fixPermissions(filepath.Join(destination, file.Name()), 0, 0); err != nil { - return err - } - } - return nil - } - - return fixPermissions(destination, 0, 0) -} - -func fixPermissions(destination string, uid, gid int) error { - return filepath.Walk(destination, func(path string, info os.FileInfo, err error) error { - if err := os.Lchown(path, uid, gid); err != nil && !os.IsNotExist(err) { - return err - } - return nil + fullpath = path.Join(destination, cleaned) + return os.Lchown(fullpath, uid, gid) }) } diff --git a/builder/job.go b/builder/job.go index 555232c9ae..20299d490a 100644 --- a/builder/job.go +++ b/builder/job.go @@ -5,13 +5,13 @@ import ( "io/ioutil" "os" "os/exec" - "strings" "github.com/docker/docker/daemon" "github.com/docker/docker/engine" "github.com/docker/docker/graph" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/urlutil" "github.com/docker/docker/registry" "github.com/docker/docker/utils" ) @@ -36,6 +36,7 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status { noCache = job.GetenvBool("nocache") rm = job.GetenvBool("rm") forceRm = job.GetenvBool("forcerm") + pull = job.GetenvBool("pull") authConfig = ®istry.AuthConfig{} configFile = ®istry.ConfigFile{} tag string @@ -58,8 +59,8 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status { if remoteURL == "" { context = ioutil.NopCloser(job.Stdin) - } else if utils.IsGIT(remoteURL) { - if !strings.HasPrefix(remoteURL, "git://") { + } else if urlutil.IsGitURL(remoteURL) { + if !urlutil.IsGitTransport(remoteURL) { remoteURL = "https://" + remoteURL } root, err := 
ioutil.TempDir("", "docker-build-git") @@ -77,7 +78,7 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status { return job.Error(err) } context = c - } else if utils.IsURL(remoteURL) { + } else if urlutil.IsURL(remoteURL) { f, err := utils.Download(remoteURL) if err != nil { return job.Error(err) @@ -112,6 +113,7 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status { UtilizeCache: !noCache, Remove: rm, ForceRemove: forceRm, + Pull: pull, OutOld: job.Stdout, StreamFormatter: sf, AuthConfig: authConfig, @@ -124,7 +126,7 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status { } if repoName != "" { - b.Daemon.Repositories().Set(repoName, tag, id, false) + b.Daemon.Repositories().Set(repoName, tag, id, true) } return engine.StatusOK } diff --git a/builder/parser/line_parsers.go b/builder/parser/line_parsers.go index 358e2f73a0..abde85d292 100644 --- a/builder/parser/line_parsers.go +++ b/builder/parser/line_parsers.go @@ -12,6 +12,7 @@ import ( "fmt" "strconv" "strings" + "unicode" ) var ( @@ -41,17 +42,139 @@ func parseSubCommand(rest string) (*Node, map[string]bool, error) { // parse environment like statements. Note that this does *not* handle // variable interpolation, which will be handled in the evaluator. func parseEnv(rest string) (*Node, map[string]bool, error) { - node := &Node{} - rootnode := node - strs := TOKEN_WHITESPACE.Split(rest, 2) + // This is kind of tricky because we need to support the old + // variant: ENV name value + // as well as the new one: ENV name=value ... + // The trigger to know which one is being used will be whether we hit + // a space or = first. 
space ==> old, "=" ==> new - if len(strs) < 2 { - return nil, nil, fmt.Errorf("ENV must have two arguments") + const ( + inSpaces = iota // looking for start of a word + inWord + inQuote + ) + + words := []string{} + phase := inSpaces + word := "" + quote := '\000' + blankOK := false + var ch rune + + for pos := 0; pos <= len(rest); pos++ { + if pos != len(rest) { + ch = rune(rest[pos]) + } + + if phase == inSpaces { // Looking for start of word + if pos == len(rest) { // end of input + break + } + if unicode.IsSpace(ch) { // skip spaces + continue + } + phase = inWord // found it, fall thru + } + if (phase == inWord || phase == inQuote) && (pos == len(rest)) { + if blankOK || len(word) > 0 { + words = append(words, word) + } + break + } + if phase == inWord { + if unicode.IsSpace(ch) { + phase = inSpaces + if blankOK || len(word) > 0 { + words = append(words, word) + + // Look for = and if no there assume + // we're doing the old stuff and + // just read the rest of the line + if !strings.Contains(word, "=") { + word = strings.TrimSpace(rest[pos:]) + words = append(words, word) + break + } + } + word = "" + blankOK = false + continue + } + if ch == '\'' || ch == '"' { + quote = ch + blankOK = true + phase = inQuote + continue + } + if ch == '\\' { + if pos+1 == len(rest) { + continue // just skip \ at end + } + pos++ + ch = rune(rest[pos]) + } + word += string(ch) + continue + } + if phase == inQuote { + if ch == quote { + phase = inWord + continue + } + if ch == '\\' { + if pos+1 == len(rest) { + phase = inWord + continue // just skip \ at end + } + pos++ + ch = rune(rest[pos]) + } + word += string(ch) + } } - node.Value = strs[0] - node.Next = &Node{} - node.Next.Value = strs[1] + if len(words) == 0 { + return nil, nil, fmt.Errorf("ENV must have some arguments") + } + + // Old format (ENV name value) + var rootnode *Node + + if !strings.Contains(words[0], "=") { + node := &Node{} + rootnode = node + strs := TOKEN_WHITESPACE.Split(rest, 2) + + if len(strs) < 2 { 
+ return nil, nil, fmt.Errorf("ENV must have two arguments") + } + + node.Value = strs[0] + node.Next = &Node{} + node.Next.Value = strs[1] + } else { + var prevNode *Node + for i, word := range words { + if !strings.Contains(word, "=") { + return nil, nil, fmt.Errorf("Syntax error - can't find = in %q. Must be of the form: name=value", word) + } + parts := strings.SplitN(word, "=", 2) + + name := &Node{} + value := &Node{} + + name.Next = value + name.Value = parts[0] + value.Value = parts[1] + + if i == 0 { + rootnode = name + } else { + prevNode.Next = name + } + prevNode = value + } + } return rootnode, nil, nil } diff --git a/builder/parser/parser.go b/builder/parser/parser.go index 6b0ab7ab8c..ad42a1586e 100644 --- a/builder/parser/parser.go +++ b/builder/parser/parser.go @@ -103,10 +103,6 @@ func Parse(rwc io.Reader) (*Node, error) { for scanner.Scan() { scannedLine := strings.TrimLeftFunc(scanner.Text(), unicode.IsSpace) - if stripComments(scannedLine) == "" { - continue - } - line, child, err := parseLine(scannedLine) if err != nil { return nil, err @@ -129,6 +125,12 @@ func Parse(rwc io.Reader) (*Node, error) { break } } + if child == nil && line != "" { + line, child, err = parseLine(line) + if err != nil { + return nil, err + } + } } if child != nil { diff --git a/builder/parser/testfiles-negative/env_equals_env/Dockerfile b/builder/parser/testfiles-negative/env_no_value/Dockerfile similarity index 50% rename from builder/parser/testfiles-negative/env_equals_env/Dockerfile rename to builder/parser/testfiles-negative/env_no_value/Dockerfile index 08675148ae..1d65578794 100644 --- a/builder/parser/testfiles-negative/env_equals_env/Dockerfile +++ b/builder/parser/testfiles-negative/env_no_value/Dockerfile @@ -1,3 +1,3 @@ FROM busybox -ENV PATH=PATH +ENV PATH diff --git a/builder/parser/testfiles/docker/Dockerfile b/builder/parser/testfiles/docker/Dockerfile index fba1d8b993..de6ebca8f7 100644 --- a/builder/parser/testfiles/docker/Dockerfile +++ 
b/builder/parser/testfiles/docker/Dockerfile @@ -23,7 +23,6 @@ # the case. Therefore, you don't have to disable it anymore. # -docker-version 0.6.1 FROM ubuntu:14.04 MAINTAINER Tianon Gravi (@tianon) diff --git a/builder/parser/testfiles/docker/result b/builder/parser/testfiles/docker/result index db74e869be..80f219ecb4 100644 --- a/builder/parser/testfiles/docker/result +++ b/builder/parser/testfiles/docker/result @@ -1,4 +1,3 @@ -(docker-version) (from "ubuntu:14.04") (maintainer "Tianon Gravi (@tianon)") (run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq apt-utils aufs-tools automake btrfs-tools build-essential curl dpkg-sig git iptables libapparmor-dev libcap-dev libsqlite3-dev lxc=1.0* mercurial pandoc parallel reprepro ruby1.9.1 ruby1.9.1-dev s3cmd=1.1.0* --no-install-recommends") diff --git a/builder/parser/testfiles/env/Dockerfile b/builder/parser/testfiles/env/Dockerfile new file mode 100644 index 0000000000..bb78503cce --- /dev/null +++ b/builder/parser/testfiles/env/Dockerfile @@ -0,0 +1,15 @@ +FROM ubuntu +ENV name value +ENV name=value +ENV name=value name2=value2 +ENV name="value value1" +ENV name=value\ value2 +ENV name="value'quote space'value2" +ENV name='value"double quote"value2' +ENV name=value\ value2 name2=value2\ value3 +ENV name=value \ + name1=value1 \ + name2="value2a \ + value2b" \ + name3="value3a\n\"value3b\"" \ + name4="value4a\\nvalue4b" \ diff --git a/builder/parser/testfiles/env/result b/builder/parser/testfiles/env/result new file mode 100644 index 0000000000..a473d0fa39 --- /dev/null +++ b/builder/parser/testfiles/env/result @@ -0,0 +1,10 @@ +(from "ubuntu") +(env "name" "value") +(env "name" "value") +(env "name" "value" "name2" "value2") +(env "name" "value value1") +(env "name" "value value2") +(env "name" "value'quote space'value2") +(env "name" "value\"double quote\"value2") +(env "name" "value value2" "name2" "value2 value3") +(env "name" "value" "name1" "value1" "name2" "value2a value2b" "name3" 
"value3an\"value3b\"" "name4" "value4a\\nvalue4b") diff --git a/contrib/check-config.sh b/contrib/check-config.sh index afaabbc956..72e3108fe1 100755 --- a/contrib/check-config.sh +++ b/contrib/check-config.sh @@ -76,7 +76,7 @@ check_flags() { for flag in "$@"; do echo "- $(check_flag "$flag")" done -} +} if [ ! -e "$CONFIG" ]; then wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config..." @@ -135,7 +135,7 @@ flags=( DEVPTS_MULTIPLE_INSTANCES CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED MACVLAN VETH BRIDGE - NF_NAT_IPV4 IP_NF_TARGET_MASQUERADE + NF_NAT_IPV4 IP_NF_FILTER IP_NF_TARGET_MASQUERADE NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK} NF_NAT NF_NAT_NEEDED ) @@ -153,16 +153,20 @@ check_flags "${flags[@]}" echo '- Storage Drivers:' { echo '- "'$(wrap_color 'aufs' blue)'":' - check_flags AUFS_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY | sed 's/^/ /' + check_flags AUFS_FS | sed 's/^/ /' if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)" fi + check_flags EXT4_FS_POSIX_ACL EXT4_FS_SECURITY | sed 's/^/ /' echo '- "'$(wrap_color 'btrfs' blue)'":' check_flags BTRFS_FS | sed 's/^/ /' echo '- "'$(wrap_color 'devicemapper' blue)'":' check_flags BLK_DEV_DM DM_THIN_PROVISIONING EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY | sed 's/^/ /' + + echo '- "'$(wrap_color 'overlay' blue)'":' + check_flags OVERLAY_FS | sed 's/^/ /' } | sed 's/^/ /' echo diff --git a/contrib/completion/MAINTAINERS b/contrib/completion/MAINTAINERS new file mode 100644 index 0000000000..03ee2dde3d --- /dev/null +++ b/contrib/completion/MAINTAINERS @@ -0,0 +1,2 @@ +Tianon Gravi (@tianon) +Jessie Frazelle (@jfrazelle) diff --git a/contrib/completion/bash/docker b/contrib/completion/bash/docker index cc16d4825f..5364944faf 100755 --- a/contrib/completion/bash/docker +++ b/contrib/completion/bash/docker @@ -1,8 +1,8 @@ -#!bash +#!/bin/bash # # bash 
completion file for core docker commands # -# This script provides supports completion of: +# This script provides completion of: # - commands and their options # - container ids and names # - image repos and tags @@ -11,9 +11,9 @@ # To enable the completions either: # - place this file in /etc/bash_completion.d # or -# - copy this file and add the line below to your .bashrc after -# bash completion features are loaded -# . docker.bash +# - copy this file to e.g. ~/.docker-completion.sh and add the line +# below to your .bashrc after bash completion features are loaded +# . ~/.docker-completion.sh # # Note: # Currently, the completions will not work if the docker daemon is not @@ -99,13 +99,60 @@ __docker_pos_first_nonflag() { echo $counter } +__docker_resolve_hostname() { + command -v host >/dev/null 2>&1 || return + COMPREPLY=( $(host 2>/dev/null "${cur%:}" | awk '/has address/ {print $4}') ) +} + +__docker_capabilities() { + # The list of capabilities is defined in types.go, ALL was added manually. 
+ COMPREPLY=( $( compgen -W " + ALL + AUDIT_CONTROL + AUDIT_WRITE + BLOCK_SUSPEND + CHOWN + DAC_OVERRIDE + DAC_READ_SEARCH + FOWNER + FSETID + IPC_LOCK + IPC_OWNER + KILL + LEASE + LINUX_IMMUTABLE + MAC_ADMIN + MAC_OVERRIDE + MKNOD + NET_ADMIN + NET_BIND_SERVICE + NET_BROADCAST + NET_RAW + SETFCAP + SETGID + SETPCAP + SETUID + SYS_ADMIN + SYS_BOOT + SYS_CHROOT + SYSLOG + SYS_MODULE + SYS_NICE + SYS_PACCT + SYS_PTRACE + SYS_RAWIO + SYS_RESOURCE + SYS_TIME + SYS_TTY_CONFIG + WAKE_ALARM + " -- "$cur" ) ) +} + _docker_docker() { case "$prev" in -H) return ;; - *) - ;; esac case "$cur" in @@ -138,8 +185,6 @@ _docker_build() { __docker_image_repos_and_tags return ;; - *) - ;; esac case "$cur" in @@ -160,8 +205,6 @@ _docker_commit() { -m|--message|-a|--author|--run) return ;; - *) - ;; esac case "$cur" in @@ -222,7 +265,7 @@ _docker_create() { __docker_containers_all return ;; - -v|--volume) + -v|--volume|--device) case "$cur" in *:*) # TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine) @@ -255,19 +298,72 @@ _docker_create() { esac return ;; - --entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf) + --add-host) + case "$cur" in + *:) + __docker_resolve_hostname + return + ;; + esac + ;; + --cap-add|--cap-drop) + __docker_capabilities return ;; - *) + --net) + case "$cur" in + container:*) + local cur=${cur#*:} + __docker_containers_all + ;; + *) + COMPREPLY=( $( compgen -W "bridge none container: host" -- "$cur") ) + if [ "${COMPREPLY[*]}" = "container:" ] ; then + compopt -o nospace + fi + ;; + esac + return + ;; + --restart) + case "$cur" in + on-failure:*) + ;; + *) + COMPREPLY=( $( compgen -W "no on-failure on-failure: always" -- "$cur") ) + ;; + esac + return + ;; + --security-opt) + case "$cur" in + label:*:*) + ;; + label:*) + local cur=${cur##*:} + COMPREPLY=( $( compgen -W "user: role: type: level: disable" -- 
"$cur") ) + if [ "${COMPREPLY[*]}" != "disable" ] ; then + compopt -o nospace + fi + ;; + *) + COMPREPLY=( $( compgen -W "label apparmor" -S ":" -- "$cur") ) + compopt -o nospace + ;; + esac + return + ;; + --entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf|--dns-search) + return ;; esac case "$cur" in -*) - COMPREPLY=( $( compgen -W "-n --networking --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir -c --cpu-shares --name -a --attach -v --volume --link -e --env -p --publish --expose --dns --volumes-from --lxc-conf" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "--privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir --cpuset -c --cpu-shares --name -a --attach -v --volume --link -e --env --env-file -p --publish --expose --dns --volumes-from --lxc-conf --security-opt --add-host --cap-add --cap-drop --device --dns-search --net --restart" -- "$cur" ) ) ;; *) - local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf') + local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--env-file|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf|--security-opt|--add-host|--cap-add|--cap-drop|--device|--dns-search|--net|--restart') if [ $cword -eq $counter ]; then __docker_image_repos_and_tags_and_ids @@ -288,16 +384,12 @@ _docker_events() { --since) return ;; - *) - ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--since" -- "$cur" ) ) ;; - *) - ;; esac } @@ -376,8 +468,6 @@ _docker_inspect() { -f|--format) return ;; - *) - ;; esac 
case "$cur" in @@ -403,16 +493,12 @@ _docker_login() { -u|--username|-p|--password|-e|--email) return ;; - *) - ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "-u --username -p --password -e --email" -- "$cur" ) ) ;; - *) - ;; esac } @@ -452,16 +538,12 @@ _docker_ps() { -n) return ;; - *) - ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "-q --quiet -s --size -a --all --no-trunc -l --latest --since --before -n" -- "$cur" ) ) ;; - *) - ;; esac } @@ -470,8 +552,6 @@ _docker_pull() { -t|--tag) return ;; - *) - ;; esac case "$cur" in @@ -499,8 +579,6 @@ _docker_restart() { -t|--time) return ;; - *) - ;; esac case "$cur" in @@ -520,7 +598,6 @@ _docker_rm() { return ;; *) - local force= for arg in "${COMP_WORDS[@]}"; do case "$arg" in -f|--force) @@ -553,7 +630,7 @@ _docker_run() { __docker_containers_all return ;; - -v|--volume) + -v|--volume|--device) case "$cur" in *:*) # TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine) @@ -586,20 +663,72 @@ _docker_run() { esac return ;; - --entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf) + --add-host) + case "$cur" in + *:) + __docker_resolve_hostname + return + ;; + esac + ;; + --cap-add|--cap-drop) + __docker_capabilities return ;; - *) + --net) + case "$cur" in + container:*) + local cur=${cur#*:} + __docker_containers_all + ;; + *) + COMPREPLY=( $( compgen -W "bridge none container: host" -- "$cur") ) + if [ "${COMPREPLY[*]}" = "container:" ] ; then + compopt -o nospace + fi + ;; + esac + return + ;; + --restart) + case "$cur" in + on-failure:*) + ;; + *) + COMPREPLY=( $( compgen -W "no on-failure on-failure: always" -- "$cur") ) + ;; + esac + return + ;; + --security-opt) + case "$cur" in + label:*:*) + ;; + label:*) + local cur=${cur##*:} + COMPREPLY=( $( compgen -W "user: role: type: level: disable" -- "$cur") ) + if [ "${COMPREPLY[*]}" != "disable" ] ; 
then + compopt -o nospace + fi + ;; + *) + COMPREPLY=( $( compgen -W "label apparmor" -S ":" -- "$cur") ) + compopt -o nospace + ;; + esac + return + ;; + --entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf|--dns-search) + return ;; esac case "$cur" in -*) - COMPREPLY=( $( compgen -W "--rm -d --detach -n --networking --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir --cpuset -c --cpu-shares --sig-proxy --name -a --attach -v --volume --link -e --env -p --publish --expose --dns --volumes-from --lxc-conf --security-opt" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "--rm -d --detach --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir --cpuset -c --cpu-shares --sig-proxy --name -a --attach -v --volume --link -e --env --env-file -p --publish --expose --dns --volumes-from --lxc-conf --security-opt --add-host --cap-add --cap-drop --device --dns-search --net --restart" -- "$cur" ) ) ;; *) - - local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf|--security-opt') + local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--env-file|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf|--security-opt|--add-host|--cap-add|--cap-drop|--device|--dns-search|--net|--restart') if [ $cword -eq $counter ]; then __docker_image_repos_and_tags_and_ids @@ -620,16 +749,12 @@ _docker_search() { -s|--stars) return ;; - *) - ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--no-trunc --automated -s --stars" -- "$cur" ) ) ;; - *) - ;; 
esac } @@ -649,8 +774,6 @@ _docker_stop() { -t|--time) return ;; - *) - ;; esac case "$cur" in @@ -752,7 +875,7 @@ _docker() { local cur prev words cword _get_comp_words_by_ref -n : cur prev words cword - local command='docker' + local command='docker' cpos=0 local counter=1 while [ $counter -lt $cword ]; do case "${words[$counter]}" in diff --git a/contrib/completion/fish/docker.fish b/contrib/completion/fish/docker.fish index 48b0279cee..a082adc02c 100644 --- a/contrib/completion/fish/docker.fish +++ b/contrib/completion/fish/docker.fish @@ -53,7 +53,7 @@ complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force docker to use specific DNS servers' complete -c docker -f -n '__fish_docker_no_subcommand' -s e -l exec-driver -d 'Force the docker runtime to use a specific exec driver' complete -c docker -f -n '__fish_docker_no_subcommand' -s g -l graph -d 'Path to use as the root of the docker runtime' -complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Enable inter-container communication' +complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Allow unrestricted inter-container and Docker daemon host communication' complete -c docker -f -n '__fish_docker_no_subcommand' -l ip -d 'Default IP address to use when binding container ports' complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-forward -d 'Disable enabling of net.ipv4.ip_forward' complete -c docker -f -n '__fish_docker_no_subcommand' -l iptables -d "Disable docker's addition of iptables rules" @@ -67,7 +67,7 @@ complete -c docker -f -n '__fish_docker_no_subcommand' -s v -l version -d 'Print # attach complete -c docker -f -n '__fish_docker_no_subcommand' -a attach -d 'Attach to a running container' complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l no-stdin -d 'Do not attach stdin' -complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l sig-proxy -d 
'Proxify all received signal to the process (even in non-tty mode)' +complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l sig-proxy -d 'Proxify all received signal to the process (non-TTY mode only)' complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -a '(__fish_print_docker_containers running)' -d "Container" # build @@ -185,7 +185,7 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s l -l latest -d ' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s n -d 'Show n last created containers, include non-running ones.' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l no-trunc -d "Don't truncate output" complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s q -l quiet -d 'Only display numeric IDs' -complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s s -l size -d 'Display sizes' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s s -l size -d 'Display total file sizes' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l since -d 'Show only containers created since Id or Name, include non-running ones.' 
# pull @@ -237,7 +237,7 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l name -d 'Assign complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s p -l publish -d "Publish a container's port to the host (format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort) (use 'docker port' to see the actual mapping)" complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l privileged -d 'Give extended privileges to this container' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l rm -d 'Automatically remove the container when it exits (incompatible with -d)' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l sig-proxy -d 'Proxify all received signal to the process (even in non-tty mode)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l sig-proxy -d 'Proxify all received signal to the process (non-TTY mode only)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s t -l tty -d 'Allocate a pseudo-tty' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s u -l user -d 'Username or UID' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s v -l volume -d 'Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)' diff --git a/contrib/completion/zsh/_docker b/contrib/completion/zsh/_docker index aff59ee77c..9104f385d7 100644 --- a/contrib/completion/zsh/_docker +++ b/contrib/completion/zsh/_docker @@ -177,7 +177,9 @@ __docker_commands () { if ( [[ ${+_docker_subcommands} -eq 0 ]] || _cache_invalid docker_subcommands) \ && ! 
_retrieve_cache docker_subcommands; then - _docker_subcommands=(${${${${(f)"$(_call_program commands docker 2>&1)"}[5,-1]}## #}/ ##/:}) + local -a lines + lines=(${(f)"$(_call_program commands docker 2>&1)"}) + _docker_subcommands=(${${${lines[$((${lines[(i)Commands:]} + 1)),${lines[(I) *]}]}## #}/ ##/:}) _docker_subcommands=($_docker_subcommands 'help:Show help for a command') _store_cache docker_subcommands _docker_subcommands fi @@ -190,22 +192,23 @@ __docker_subcommand () { (attach) _arguments \ '--no-stdin[Do not attach stdin]' \ - '--sig-proxy[Proxify all received signal]' \ + '--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]' \ ':containers:__docker_runningcontainers' ;; (build) _arguments \ - '--force-rm[Always remove intermediate containers, even after unsuccessful builds]' \ + '--force-rm[Always remove intermediate containers]' \ '--no-cache[Do not use cache when building the image]' \ - '-q[Suppress verbose build output]' \ + {-q,--quiet}'[Suppress verbose build output]' \ '--rm[Remove intermediate containers after a successful build]' \ - '-t:repository:__docker_repositories_with_tags' \ + {-t,--tag=-}'[Repository, name and tag to be applied]:repository:__docker_repositories_with_tags' \ ':path or URL:_directories' ;; (commit) _arguments \ - '--author=-[Author]:author: ' \ - '-m[Commit message]:message: ' \ + {-a,--author=-}'[Author]:author: ' \ + {-m,--message=-}'[Commit message]:message: ' \ + {-p,--pause}'[Pause container during commit]' \ '--run=-[Configuration automatically applied when the image is run]:configuration: ' \ ':container:__docker_containers' \ ':repository:__docker_repositories_with_tags' @@ -224,60 +227,40 @@ __docker_subcommand () { ;; esac ;; - (create) - _arguments \ - '-P[Publish all exposed ports to the host]' \ - '-a[Attach to stdin, stdout or stderr]' \ - '-c=-[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)' \ - '--cidfile=-[Write the container ID to the file]:CID file:_files' \ 
- '*--dns=-[Set custom dns servers]:dns server: ' \ - '*-e=-[Set environment variables]:environment variable: ' \ - '--entrypoint=-[Overwrite the default entrypoint of the image]:entry point: ' \ - '*--expose=-[Expose a port from the container without publishing it]: ' \ - '-h=-[Container host name]:hostname:_hosts' \ - '-i[Keep stdin open even if not attached]' \ - '--link=-[Add link to another container]:link:->link' \ - '--lxc-conf=-[Add custom lxc options]:lxc options: ' \ - '-m=-[Memory limit (in bytes)]:limit: ' \ - '--name=-[Container name]:name: ' \ - '*-p=-[Expose a container'"'"'s port to the host]:port:_ports' \ - '--privileged[Give extended privileges to this container]' \ - '-t[Allocate a pseudo-tty]' \ - '-u=-[Username or UID]:user:_users' \ - '*-v=-[Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)]:volume: '\ - '--volumes-from=-[Mount volumes from the specified container]:volume: ' \ - '-w=-[Working directory inside the container]:directory:_directories' \ - '(-):images:__docker_images' \ - '(-):command: _command_names -e' \ - '*::arguments: _normal' (diff|export) _arguments '*:containers:__docker_containers' ;; + (events) + _arguments \ + '--since=-[Events created since this timestamp]:timestamp: ' \ + '--until=-[Events created until this timestamp]:timestamp: ' + ;; (exec) _arguments \ - '-d[Detached mode: leave the container running in the background]' \ - '-i[Keep stdin open even if not attached]' \ - '-t[Allocate a pseudo-tty]' \ + {-d,--detach}'[Detached mode: leave the container running in the background]' \ + {-i,--interactive}'[Keep stdin open even if not attached]' \ + {-t,--tty}'[Allocate a pseudo-tty]' \ ':containers:__docker_runningcontainers' ;; (history) _arguments \ '--no-trunc[Do not truncate output]' \ - '-q[Only show numeric IDs]' \ + {-q,--quiet}'[Only show numeric IDs]' \ '*:images:__docker_images' ;; (images) _arguments \ - '-a[Show all images]' \ + {-a,--all}'[Show all images]' \ + 
'*'{-f,--filter=-}'[Filter values]:filter: ' \ '--no-trunc[Do not truncate output]' \ - '-q[Only show numeric IDs]' \ + {-q,--quiet}'[Only show numeric IDs]' \ '--tree[Output graph in tree format]' \ '--viz[Output graph in graphviz format]' \ ':repository:__docker_repositories' ;; (inspect) _arguments \ - '--format=-[Format the output using the given go template]:template: ' \ + {-f,--format=-}'[Format the output using the given go template]:template: ' \ '*:containers:__docker_containers' ;; (import) @@ -298,20 +281,29 @@ __docker_subcommand () { '3:file:_files' ;; (kill) - _arguments '*:containers:__docker_runningcontainers' + _arguments \ + {-s,--signal=-}'[Signal to send]:signal:_signals' \ + '*:containers:__docker_runningcontainers' ;; (load) + _arguments \ + {-i,--input=-}'[Read from tar archive file]:tar:_files' ;; (login) _arguments \ - '-e[Email]:email: ' \ - '-p[Password]:password: ' \ - '-u[Username]:username: ' \ + {-e,--email=-}'[Email]:email: ' \ + {-p,--password=-}'[Password]:password: ' \ + {-u,--user=-}'[Username]:username: ' \ + ':server: ' + ;; + (logout) + _arguments \ ':server: ' ;; (logs) _arguments \ - '-f[Follow log output]' \ + {-f,--follow}'[Follow log output]' \ + {-t,--timestamps}'[Show timestamps]' \ '*:containers:__docker_containers' ;; (port) @@ -319,24 +311,32 @@ __docker_subcommand () { '1:containers:__docker_runningcontainers' \ '2:port:_ports' ;; + (pause|unpause) + _arguments \ + '1:containers:__docker_runningcontainers' + ;; (start) _arguments \ - '-a[Attach container'"'"'s stdout/stderr and forward all signals]' \ - '-i[Attach container'"'"'s stding]' \ + {-a,--attach}'[Attach container'"'"'s stdout/stderr and forward all signals]' \ + {-i,--interactive}'[Attach container'"'"'s stding]' \ '*:containers:__docker_stoppedcontainers' ;; (rm) _arguments \ - '--link[Remove the specified link and not the underlying container]' \ - '-v[Remove the volumes associated to the container]' \ + {-f,--force}'[Force removal]' \ + 
{-l,--link}'[Remove the specified link and not the underlying container]' \ + {-v,--volumes}'[Remove the volumes associated to the container]' \ '*:containers:__docker_stoppedcontainers' ;; (rmi) _arguments \ + {-f,--force}'[Force removal]' \ + '--no-prune[Do not delete untagged parents]' \ '*:images:__docker_images' ;; (restart|stop) - _arguments '-t[Number of seconds to try to stop for before killing the container]:seconds to before killing:(1 5 10 30 60)' \ + _arguments \ + {-t,--time=-}'[Number of seconds to try to stop for before killing the container]:seconds to before killing:(1 5 10 30 60)' \ '*:containers:__docker_runningcontainers' ;; (top) @@ -352,47 +352,58 @@ __docker_subcommand () { ;; (ps) _arguments \ - '-a[Show all containers]' \ + {-a,--all}'[Show all containers]' \ '--before=-[Show only container created before...]:containers:__docker_containers' \ - '-l[Show only the latest created container]' \ + '*'{-f,--filter=-}'[Filter values]:filter: ' \ + {-l,--latest}'[Show only the latest created container]' \ '-n[Show n last created containers, include non-running one]:n:(1 5 10 25 50)' \ '--no-trunc[Do not truncate output]' \ - '-q[Only show numeric IDs]' \ - '-s[Display sizes]' \ + {-q,--quiet}'[Only show numeric IDs]' \ + {-s,--size}'[Display total file sizes]' \ '--since=-[Show only containers created since...]:containers:__docker_containers' ;; (tag) _arguments \ - '-f[force]'\ + {-f,--force}'[force]'\ ':image:__docker_images'\ ':repository:__docker_repositories_with_tags' ;; - (run) + (create|run) _arguments \ - '-P[Publish all exposed ports to the host]' \ - '-a[Attach to stdin, stdout or stderr]' \ - '-c[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)' \ + {-a,--attach}'[Attach to stdin, stdout or stderr]' \ + '*--add-host=-[Add a custom host-to-IP mapping]:host\:ip mapping: ' \ + {-c,--cpu-shares=-}'[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)' \ + '*--cap-add=-[Add Linux capabilities]:capability: 
' \ + '*--cap-drop=-[Drop Linux capabilities]:capability: ' \ '--cidfile=-[Write the container ID to the file]:CID file:_files' \ - '-d[Detached mode: leave the container running in the background]' \ + '--cpuset=-[CPUs in which to allow execution]:CPU set: ' \ + {-d,--detach}'[Detached mode: leave the container running in the background]' \ + '*--device=-[Add a host device to the container]:device:_files' \ '*--dns=-[Set custom dns servers]:dns server: ' \ - '*-e[Set environment variables]:environment variable: ' \ + '*--dns-search=-[Set custom DNS search domains]:dns domains: ' \ + '*'{-e,--environment=-}'[Set environment variables]:environment variable: ' \ '--entrypoint=-[Overwrite the default entrypoint of the image]:entry point: ' \ + '*--env-file=-[Read environment variables from a file]:environment file:_files' \ '*--expose=-[Expose a port from the container without publishing it]: ' \ - '-h[Container host name]:hostname:_hosts' \ - '-i[Keep stdin open even if not attached]' \ - '--link=-[Add link to another container]:link:->link' \ - '--lxc-conf=-[Add custom lxc options]:lxc options: ' \ + {-h,--hostname=-}'[Container host name]:hostname:_hosts' \ + {-i,--interactive}'[Keep stdin open even if not attached]' \ + '*--link=-[Add link to another container]:link:->link' \ + '*--lxc-conf=-[Add custom lxc options]:lxc options: ' \ '-m[Memory limit (in bytes)]:limit: ' \ '--name=-[Container name]:name: ' \ - '*-p[Expose a container'"'"'s port to the host]:port:_ports' \ + '--net=-[Network mode]:network mode:(bridge none container: host)' \ + {-P,--publish-all}'[Publish all exposed ports]' \ + '*'{-p,--publish=-}'[Expose a container'"'"'s port to the host]:port:_ports' \ '--privileged[Give extended privileges to this container]' \ + '--restart=-[Restart policy]:restart policy:(no on-failure always)' \ '--rm[Remove intermediate containers when it exits]' \ - '--sig-proxy[Proxify all received signal]' \ - '-t[Allocate a pseudo-tty]' \ - '-u[Username or 
UID]:user:_users' \ - '*-v[Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)]:volume: '\ - '--volumes-from=-[Mount volumes from the specified container]:volume: ' \ - '-w[Working directory inside the container]:directory:_directories' \ + '*--security-opt=-[Security options]:security option: ' \ + '--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]' \ + {-t,--tty}'[Allocate a pseudo-tty]' \ + {-u,--user=-}'[Username or UID]:user:_users' \ + '*-v[Bind mount a volume]:volume: '\ + '*--volumes-from=-[Mount volumes from the specified container]:volume: ' \ + {-w,--workdir=-}'[Working directory inside the container]:directory:_directories' \ '(-):images:__docker_images' \ '(-):command: _command_names -e' \ '*::arguments: _normal' @@ -416,6 +427,7 @@ __docker_subcommand () { ;; (save) _arguments \ + {-o,--output=-}'[Write to file]:file:_files' \ ':images:__docker_images' ;; (wait) diff --git a/contrib/desktop-integration/chromium/Dockerfile b/contrib/desktop-integration/chromium/Dockerfile index 0e0a7ce90e..5cacd1f999 100644 --- a/contrib/desktop-integration/chromium/Dockerfile +++ b/contrib/desktop-integration/chromium/Dockerfile @@ -20,8 +20,6 @@ # docker run --volumes-from chromium-data -v /tmp/.X11-unix:/tmp/.X11-unix \ # -e DISPLAY=unix$DISPLAY chromium -DOCKER_VERSION 1.3 - # Base docker image FROM debian:jessie MAINTAINER Jessica Frazelle diff --git a/contrib/desktop-integration/gparted/Dockerfile b/contrib/desktop-integration/gparted/Dockerfile index 6db1d24098..e76e658973 100644 --- a/contrib/desktop-integration/gparted/Dockerfile +++ b/contrib/desktop-integration/gparted/Dockerfile @@ -17,8 +17,6 @@ # -e DISPLAY=unix$DISPLAY gparted # -DOCKER-VERSION 1.3 - # Base docker image FROM debian:jessie MAINTAINER Jessica Frazelle diff --git a/contrib/docker-device-tool/device_tool.go b/contrib/docker-device-tool/device_tool.go index 23d19f0237..ffc34a54e0 100644 --- 
a/contrib/docker-device-tool/device_tool.go +++ b/contrib/docker-device-tool/device_tool.go @@ -3,12 +3,15 @@ package main import ( "flag" "fmt" - "github.com/docker/docker/daemon/graphdriver/devmapper" "os" "path" "sort" "strconv" "strings" + + log "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver/devmapper" + "github.com/docker/docker/pkg/devicemapper" ) func usage() { @@ -60,6 +63,7 @@ func main() { if *flDebug { os.Setenv("DEBUG", "1") + log.SetLevel(log.DebugLevel) } if flag.NArg() < 1 { @@ -69,7 +73,7 @@ func main() { args := flag.Args() home := path.Join(*root, "devicemapper") - devices, err := devmapper.NewDeviceSet(home, false) + devices, err := devmapper.NewDeviceSet(home, false, nil) if err != nil { fmt.Println("Can't initialize device mapper: ", err) os.Exit(1) @@ -142,7 +146,7 @@ func main() { usage() } - err := devices.RemoveDevice(args[1]) + err := devicemapper.RemoveDevice(args[1]) if err != nil { fmt.Println("Can't remove device: ", err) os.Exit(1) @@ -153,7 +157,7 @@ func main() { usage() } - err := devices.MountDevice(args[1], args[2], false) + err := devices.MountDevice(args[1], args[2], "") if err != nil { fmt.Println("Can't create snap device: ", err) os.Exit(1) diff --git a/contrib/host-integration/Dockerfile.dev b/contrib/host-integration/Dockerfile.dev index 1c0fbd8323..c8df852899 100644 --- a/contrib/host-integration/Dockerfile.dev +++ b/contrib/host-integration/Dockerfile.dev @@ -2,8 +2,6 @@ # This Dockerfile will create an image that allows to generate upstart and # systemd scripts (more to come) # -# docker-version 0.6.2 -# FROM ubuntu:12.10 MAINTAINER Guillaume J. 
Charmes diff --git a/contrib/init/systemd/MAINTAINERS b/contrib/init/systemd/MAINTAINERS index 760a76d6fe..b9ba55b3fb 100644 --- a/contrib/init/systemd/MAINTAINERS +++ b/contrib/init/systemd/MAINTAINERS @@ -1,2 +1,3 @@ Lokesh Mandvekar (@lsm5) Brandon Philips (@philips) +Jessie Frazelle (@jfrazelle) diff --git a/contrib/init/upstart/MAINTAINERS b/contrib/init/upstart/MAINTAINERS new file mode 100644 index 0000000000..03ee2dde3d --- /dev/null +++ b/contrib/init/upstart/MAINTAINERS @@ -0,0 +1,2 @@ +Tianon Gravi (@tianon) +Jessie Frazelle (@jfrazelle) diff --git a/contrib/mkimage-arch.sh b/contrib/mkimage-arch.sh index e83b2b6731..35cb1617d5 100755 --- a/contrib/mkimage-arch.sh +++ b/contrib/mkimage-arch.sh @@ -60,6 +60,6 @@ mknod -m 600 $DEV/initctl p mknod -m 666 $DEV/ptmx c 5 2 ln -sf /proc/self/fd $DEV/fd -tar --numeric-owner -C $ROOTFS -c . | docker import - archlinux +tar --numeric-owner --xattrs --acls -C $ROOTFS -c . | docker import - archlinux docker run -i -t archlinux echo Success. rm -rf $ROOTFS diff --git a/contrib/mkimage-yum.sh b/contrib/mkimage-yum.sh index f21a63a225..80f7b4956f 100755 --- a/contrib/mkimage-yum.sh +++ b/contrib/mkimage-yum.sh @@ -57,7 +57,7 @@ mknod -m 666 "$target"/dev/tty0 c 4 0 mknod -m 666 "$target"/dev/urandom c 1 9 mknod -m 666 "$target"/dev/zero c 1 5 -yum -c "$yum_config" --installroot="$target" --setopt=tsflags=nodocs \ +yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \ --setopt=group_package_types=mandatory -y groupinstall Core yum -c "$yum_config" --installroot="$target" -y clean all diff --git a/contrib/mkimage/debootstrap b/contrib/mkimage/debootstrap index fcda497839..65f154aa95 100755 --- a/contrib/mkimage/debootstrap +++ b/contrib/mkimage/debootstrap @@ -15,9 +15,12 @@ done suite="$1" shift +# allow for DEBOOTSTRAP=qemu-debootstrap ./mkimage.sh ... 
+: ${DEBOOTSTRAP:=debootstrap} + ( set -x - debootstrap "${before[@]}" "$suite" "$rootfsDir" "$@" + $DEBOOTSTRAP "${before[@]}" "$suite" "$rootfsDir" "$@" ) # now for some Docker-specific tweaks diff --git a/daemon/MAINTAINERS b/daemon/MAINTAINERS index 434aad9d57..9360465f2d 100644 --- a/daemon/MAINTAINERS +++ b/daemon/MAINTAINERS @@ -3,4 +3,5 @@ Victor Vieux (@vieux) Michael Crosby (@crosbymichael) Cristian Staretu (@unclejack) Tibor Vass (@tiborvass) +Vishnu Kannan (@vishh) volumes.go: Brian Goff (@cpuguy83) diff --git a/daemon/attach.go b/daemon/attach.go index 7ccaadf442..599b272472 100644 --- a/daemon/attach.go +++ b/daemon/attach.go @@ -6,10 +6,10 @@ import ( "os" "time" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/jsonlog" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/pkg/promise" "github.com/docker/docker/utils" ) @@ -83,7 +83,6 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status { var ( cStdin io.ReadCloser cStdout, cStderr io.Writer - cStdinCloser io.Closer ) if stdin { @@ -94,7 +93,6 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status { io.Copy(w, job.Stdin) }() cStdin = r - cStdinCloser = job.Stdin } if stdout { cStdout = job.Stdout @@ -103,7 +101,7 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status { cStderr = job.Stderr } - <-daemon.Attach(&container.StreamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, cStdin, cStdinCloser, cStdout, cStderr) + <-daemon.attach(&container.StreamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, cStdin, cStdout, cStderr) // If we are in stdinonce mode, wait for the process to end // otherwise, simply return if container.Config.StdinOnce && !container.Config.Tty { @@ -113,13 +111,7 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status { return engine.StatusOK } 
-// FIXME: this should be private, and every outside subsystem -// should go through the "container_attach" job. But that would require -// that job to be properly documented, as well as the relationship between -// Attach and ContainerAttach. -// -// This method is in use by builder/builder.go. -func (daemon *Daemon) Attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error { +func (daemon *Daemon) attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error { var ( cStdout, cStderr io.ReadCloser nJobs int @@ -136,10 +128,10 @@ func (daemon *Daemon) Attach(streamConfig *StreamConfig, openStdin, stdinOnce, t go func() { log.Debugf("attach: stdin: begin") defer log.Debugf("attach: stdin: end") - // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr if stdinOnce && !tty { defer cStdin.Close() } else { + // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr defer func() { if cStdout != nil { cStdout.Close() @@ -179,9 +171,6 @@ func (daemon *Daemon) Attach(streamConfig *StreamConfig, openStdin, stdinOnce, t if stdinOnce && stdin != nil { defer stdin.Close() } - if stdinCloser != nil { - defer stdinCloser.Close() - } _, err := io.Copy(stdout, cStdout) if err == io.ErrClosedPipe { err = nil @@ -195,9 +184,6 @@ func (daemon *Daemon) Attach(streamConfig *StreamConfig, openStdin, stdinOnce, t } else { // Point stdout of container to a no-op writer. 
go func() { - if stdinCloser != nil { - defer stdinCloser.Close() - } if cStdout, err := streamConfig.StdoutPipe(); err != nil { log.Errorf("attach: stdout pipe: %s", err) } else { @@ -219,9 +205,6 @@ func (daemon *Daemon) Attach(streamConfig *StreamConfig, openStdin, stdinOnce, t if stdinOnce && stdin != nil { defer stdin.Close() } - if stdinCloser != nil { - defer stdinCloser.Close() - } _, err := io.Copy(stderr, cStderr) if err == io.ErrClosedPipe { err = nil @@ -235,10 +218,6 @@ func (daemon *Daemon) Attach(streamConfig *StreamConfig, openStdin, stdinOnce, t } else { // Point stderr at a no-op writer. go func() { - if stdinCloser != nil { - defer stdinCloser.Close() - } - if cStderr, err := streamConfig.StderrPipe(); err != nil { log.Errorf("attach: stdout pipe: %s", err) } else { @@ -257,8 +236,6 @@ func (daemon *Daemon) Attach(streamConfig *StreamConfig, openStdin, stdinOnce, t } }() - // FIXME: how to clean up the stdin goroutine without the unwanted side effect - // of closing the passed stdin? Add an intermediary io.Pipe? 
for i := 0; i < nJobs; i++ { log.Debugf("attach: waiting for job %d/%d", i+1, nJobs) if err := <-errors; err != nil { diff --git a/daemon/config.go b/daemon/config.go index e45e73b99d..4d9041e895 100644 --- a/daemon/config.go +++ b/daemon/config.go @@ -40,6 +40,8 @@ type Config struct { DisableNetwork bool EnableSelinuxSupport bool Context map[string][]string + TrustKeyPath string + Labels []string } // InstallFlags adds command-line options to the top-level flag parser for @@ -57,7 +59,7 @@ func (config *Config) InstallFlags() { flag.StringVar(&config.BridgeIface, []string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge\nuse 'none' to disable container networking") flag.StringVar(&config.FixedCIDR, []string{"-fixed-cidr"}, "", "IPv4 subnet for fixed IPs (ex: 10.20.0.0/16)\nthis subnet must be nested in the bridge subnet (which is defined by -b or --bip)") opts.ListVar(&config.InsecureRegistries, []string{"-insecure-registry"}, "Enable insecure communication with specified registries (no certificate verification for HTTPS and enable HTTP fallback) (e.g., localhost:5000 or 10.20.0.0/16)") - flag.BoolVar(&config.InterContainerCommunication, []string{"#icc", "-icc"}, true, "Enable inter-container communication") + flag.BoolVar(&config.InterContainerCommunication, []string{"#icc", "-icc"}, true, "Allow unrestricted inter-container and Docker daemon host communication") flag.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", "Force the Docker runtime to use a specific storage driver") flag.StringVar(&config.ExecDriver, []string{"e", "-exec-driver"}, "native", "Force the Docker runtime to use a specific exec driver") flag.BoolVar(&config.EnableSelinuxSupport, []string{"-selinux-enabled"}, false, "Enable selinux support. 
SELinux does not presently support the BTRFS storage driver") @@ -68,6 +70,7 @@ func (config *Config) InstallFlags() { opts.IPListVar(&config.Dns, []string{"#dns", "-dns"}, "Force Docker to use specific DNS servers") opts.DnsSearchListVar(&config.DnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains") opts.MirrorListVar(&config.Mirrors, []string{"-registry-mirror"}, "Specify a preferred Docker registry mirror") + opts.LabelListVar(&config.Labels, []string{"-label"}, "Set key=value labels to the daemon (displayed in `docker info`)") // Localhost is by default considered as an insecure registry // This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker). @@ -78,7 +81,7 @@ func (config *Config) InstallFlags() { config.InsecureRegistries = append(config.InsecureRegistries, "127.0.0.0/8") } -func GetDefaultNetworkMtu() int { +func getDefaultNetworkMtu() int { if iface, err := networkdriver.GetDefaultRouteIface(); err == nil { return iface.MTU } diff --git a/daemon/container.go b/daemon/container.go index 6fd4507972..45658c5830 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -17,6 +17,7 @@ import ( "github.com/docker/libcontainer/devices" "github.com/docker/libcontainer/label" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/engine" "github.com/docker/docker/image" @@ -25,7 +26,6 @@ import ( "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/broadcastwriter" "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/pkg/networkfs/etchosts" "github.com/docker/docker/pkg/networkfs/resolvconf" "github.com/docker/docker/pkg/promise" @@ -102,13 +102,17 @@ func (container *Container) FromDisk() error { return err } - data, err := ioutil.ReadFile(pth) + jsonSource, err := os.Open(pth) if err != nil { return err } + defer jsonSource.Close() + + dec := 
json.NewDecoder(jsonSource) + // Load container settings // udp broke compat of docker.PortMapping, but it's not used when loading a container, we can skip it - if err := json.Unmarshal(data, container); err != nil && !strings.Contains(err.Error(), "docker.PortMapping") { + if err := dec.Decode(container); err != nil && !strings.Contains(err.Error(), "docker.PortMapping") { return err } @@ -229,6 +233,18 @@ func populateCommand(c *Container, env []string) error { return fmt.Errorf("invalid network mode: %s", c.hostConfig.NetworkMode) } + ipc := &execdriver.Ipc{} + + if c.hostConfig.IpcMode.IsContainer() { + ic, err := c.getIpcContainer() + if err != nil { + return err + } + ipc.ContainerID = ic.ID + } else { + ipc.HostIpc = c.hostConfig.IpcMode.IsHost() + } + // Build lists of devices allowed and created within the container. userSpecifiedDevices := make([]*devices.Device, len(c.hostConfig.Devices)) for i, deviceMapping := range c.hostConfig.Devices { @@ -244,7 +260,10 @@ func populateCommand(c *Container, env []string) error { autoCreatedDevices := append(devices.DefaultAutoCreatedDevices, userSpecifiedDevices...) 
// TODO: this can be removed after lxc-conf is fully deprecated - lxcConfig := mergeLxcConfIntoOptions(c.hostConfig) + lxcConfig, err := mergeLxcConfIntoOptions(c.hostConfig) + if err != nil { + return err + } resources := &execdriver.Resources{ Memory: c.Config.Memory, @@ -270,6 +289,7 @@ func populateCommand(c *Container, env []string) error { InitPath: "/.dockerinit", WorkingDir: c.Config.WorkingDir, Network: en, + Ipc: ipc, Resources: resources, AllowedDevices: allowedDevices, AutoCreatedDevices: autoCreatedDevices, @@ -297,6 +317,12 @@ func (container *Container) Start() (err error) { // setup has been cleaned up properly defer func() { if err != nil { + container.setError(err) + // if no one else has set it, make sure we don't leave it at zero + if container.ExitCode == 0 { + container.ExitCode = 128 + } + container.toDisk() container.cleanup() } }() @@ -414,7 +440,7 @@ func (container *Container) buildHostsFiles(IP string) error { } container.HostsPath = hostsPath - extraContent := make(map[string]string) + var extraContent []etchosts.Record children, err := container.daemon.Children(container.Name) if err != nil { @@ -423,15 +449,15 @@ func (container *Container) buildHostsFiles(IP string) error { for linkAlias, child := range children { _, alias := path.Split(linkAlias) - extraContent[alias] = child.NetworkSettings.IPAddress + extraContent = append(extraContent, etchosts.Record{Hosts: alias, IP: child.NetworkSettings.IPAddress}) } for _, extraHost := range container.hostConfig.ExtraHosts { parts := strings.Split(extraHost, ":") - extraContent[parts[0]] = parts[1] + extraContent = append(extraContent, etchosts.Record{Hosts: parts[0], IP: parts[1]}) } - return etchosts.Build(container.HostsPath, IP, container.Config.Hostname, container.Config.Domainname, &extraContent) + return etchosts.Build(container.HostsPath, IP, container.Config.Hostname, container.Config.Domainname, extraContent) } func (container *Container) buildHostnameAndHostsFiles(IP string) error 
{ @@ -455,6 +481,7 @@ func (container *Container) AllocateNetwork() error { ) job := eng.Job("allocate_interface", container.ID) + job.Setenv("RequestedMac", container.Config.MacAddress) if env, err = job.Stdout.AddEnv(); err != nil { return err } @@ -525,7 +552,9 @@ func (container *Container) ReleaseNetwork() { } eng := container.daemon.eng - eng.Job("release_interface", container.ID).Run() + job := eng.Job("release_interface", container.ID) + job.SetenvBool("overrideShutdown", true) + job.Run() container.NetworkSettings = &NetworkSettings{} } @@ -576,6 +605,10 @@ func (container *Container) cleanup() { if err := container.Unmount(); err != nil { log.Errorf("%v: Failed to umount filesystem: %v", container.ID, err) } + + for _, eConfig := range container.execCommands.s { + container.daemon.unregisterExecCommand(eConfig) + } } func (container *Container) KillSig(sig int) error { @@ -691,6 +724,9 @@ func (container *Container) Restart(seconds int) error { } func (container *Container) Resize(h, w int) error { + if !container.IsRunning() { + return fmt.Errorf("Cannot resize container %s, container is not running", container.ID) + } return container.command.ProcessConfig.Terminal.Resize(h, w) } @@ -826,19 +862,25 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) { return nil, err } - var filter []string - basePath, err := container.getResourcePath(resource) if err != nil { container.Unmount() return nil, err } + // Check if this is actually in a volume + for _, mnt := range container.VolumeMounts() { + if len(mnt.MountToPath) > 0 && strings.HasPrefix(resource, mnt.MountToPath[1:]) { + return mnt.Export(resource) + } + } + stat, err := os.Stat(basePath) if err != nil { container.Unmount() return nil, err } + var filter []string if !stat.IsDir() { d, f := path.Split(basePath) basePath = d @@ -965,7 +1007,7 @@ func (container *Container) updateParentsHosts() error { c := container.daemon.Get(cid) if c != nil && 
!container.daemon.config.DisableNetwork && container.hostConfig.NetworkMode.IsPrivate() { if err := etchosts.Update(c.HostsPath, container.NetworkSettings.IPAddress, container.Name[1:]); err != nil { - return fmt.Errorf("Failed to update /etc/hosts in parent container: %v", err) + log.Errorf("Failed to update /etc/hosts in parent container: %v", err) } } } @@ -1228,10 +1270,25 @@ func (container *Container) GetMountLabel() string { return container.MountLabel } +func (container *Container) getIpcContainer() (*Container, error) { + containerID := container.hostConfig.IpcMode.Container() + c := container.daemon.Get(containerID) + if c == nil { + return nil, fmt.Errorf("no such container to join IPC: %s", containerID) + } + if !c.IsRunning() { + return nil, fmt.Errorf("cannot join IPC of a non running container: %s", containerID) + } + return c, nil +} + func (container *Container) getNetworkedContainer() (*Container, error) { parts := strings.SplitN(string(container.hostConfig.NetworkMode), ":", 2) switch parts[0] { case "container": + if len(parts) != 2 { + return nil, fmt.Errorf("no container specified to join network") + } nc := container.daemon.Get(parts[1]) if nc == nil { return nil, fmt.Errorf("no such container to join network: %s", parts[1]) diff --git a/daemon/create.go b/daemon/create.go index e72b0ef206..f9d986491f 100644 --- a/daemon/create.go +++ b/daemon/create.go @@ -1,10 +1,13 @@ package daemon import ( + "fmt" + "github.com/docker/docker/engine" "github.com/docker/docker/graph" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/runconfig" + "github.com/docker/libcontainer/label" ) func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status { @@ -50,12 +53,9 @@ func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status { job.Errorf("IPv4 forwarding is disabled.\n") } container.LogEvent("create") - // FIXME: this is necessary because daemon.Create might return a nil container - // with a non-nil error. 
This should not happen! Once it's fixed we - // can remove this workaround. - if container != nil { - job.Printf("%s\n", container.ID) - } + + job.Printf("%s\n", container.ID) + for _, warning := range buildWarnings { job.Errorf("%s\n", warning) } @@ -80,6 +80,12 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos if warnings, err = daemon.mergeAndVerifyConfig(config, img); err != nil { return nil, nil, err } + if hostConfig != nil && hostConfig.SecurityOpt == nil { + hostConfig.SecurityOpt, err = daemon.GenerateSecurityOpt(hostConfig.IpcMode) + if err != nil { + return nil, nil, err + } + } if container, err = daemon.newContainer(name, config, img); err != nil { return nil, nil, err } @@ -94,8 +100,33 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos return nil, nil, err } } + if err := container.Mount(); err != nil { + return nil, nil, err + } + defer container.Unmount() + if err := container.prepareVolumes(); err != nil { + return nil, nil, err + } if err := container.ToDisk(); err != nil { return nil, nil, err } return container, warnings, nil } + +func (daemon *Daemon) GenerateSecurityOpt(ipcMode runconfig.IpcMode) ([]string, error) { + if ipcMode.IsHost() { + return label.DisableSecOpt(), nil + } + if ipcContainer := ipcMode.Container(); ipcContainer != "" { + c := daemon.Get(ipcContainer) + if c == nil { + return nil, fmt.Errorf("no such container to join IPC: %s", ipcContainer) + } + if !c.IsRunning() { + return nil, fmt.Errorf("cannot join IPC of a non running container: %s", ipcContainer) + } + + return label.DupSecOpt(c.ProcessLabel), nil + } + return nil, nil +} diff --git a/daemon/daemon.go b/daemon/daemon.go index 9f90643c4e..a2e6a79bd6 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -14,6 +14,8 @@ import ( "github.com/docker/libcontainer/label" + log "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" "github.com/docker/docker/daemon/execdriver" 
"github.com/docker/docker/daemon/execdriver/execdrivers" "github.com/docker/docker/daemon/execdriver/lxc" @@ -29,7 +31,6 @@ import ( "github.com/docker/docker/pkg/broadcastwriter" "github.com/docker/docker/pkg/graphdb" "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/pkg/namesgenerator" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers/kernel" @@ -83,6 +84,7 @@ func (c *contStore) List() []*Container { } type Daemon struct { + ID string repository string sysInitPath string containers *contStore @@ -128,6 +130,7 @@ func (daemon *Daemon) Install(eng *engine.Engine) error { "execCreate": daemon.ContainerExecCreate, "execStart": daemon.ContainerExecStart, "execResize": daemon.ContainerExecResize, + "execInspect": daemon.ContainerExecInspect, } { if err := eng.Register(name, method); err != nil { return err @@ -231,7 +234,7 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err log.Debugf("killing old running container %s", container.ID) existingPid := container.Pid - container.SetStopped(0) + container.SetStopped(&execdriver.ExitStatus{0, false}) // We only have to handle this for lxc because the other drivers will ensure that // no processes are left when docker dies @@ -263,7 +266,7 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err log.Debugf("Marking as stopped") - container.SetStopped(-127) + container.SetStopped(&execdriver.ExitStatus{-127, false}) if err := container.ToDisk(); err != nil { return err } @@ -304,7 +307,7 @@ func (daemon *Daemon) restore() error { ) if !debug { - log.Infof("Loading containers: ") + log.Infof("Loading containers: start.") } dir, err := ioutil.ReadDir(daemon.repository) if err != nil { @@ -392,7 +395,8 @@ func (daemon *Daemon) restore() error { } if !debug { - log.Infof(": done.") + fmt.Println() + log.Infof("Loading containers: done.") } return nil @@ -692,6 +696,9 @@ func (daemon *Daemon) 
RegisterLinks(container *Container, hostConfig *runconfig. if child == nil { return fmt.Errorf("Could not get container for %s", parts["name"]) } + if child.hostConfig.NetworkMode.IsHost() { + return runconfig.ErrConflictHostNetworkAndLinks + } if err := daemon.RegisterLink(container, child, parts["alias"]); err != nil { return err } @@ -717,10 +724,8 @@ func NewDaemon(config *Config, eng *engine.Engine) (*Daemon, error) { } func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error) { - // Apply configuration defaults if config.Mtu == 0 { - // FIXME: GetDefaultNetwork Mtu doesn't need to be public anymore - config.Mtu = GetDefaultNetworkMtu() + config.Mtu = getDefaultNetworkMtu() } // Check for mutually incompatible config options if config.BridgeIface != "" && config.BridgeIP != "" { @@ -893,7 +898,13 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error) return nil, err } + trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath) + if err != nil { + return nil, err + } + daemon := &Daemon{ + ID: trustKey.PublicKey().KeyID(), repository: daemonRepo, containers: &contStore{s: make(map[string]*Container)}, execCommands: newExecStore(), @@ -918,7 +929,6 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error) eng.OnShutdown(func() { // FIXME: if these cleanup steps can be called concurrently, register // them as separate handlers to speed up total shutdown time - // FIXME: use engine logging instead of log.Errorf if err := daemon.shutdown(); err != nil { log.Errorf("daemon.shutdown(): %s", err) } @@ -968,6 +978,7 @@ func (daemon *Daemon) Mount(container *Container) error { if container.basefs == "" { container.basefs = dir } else if container.basefs != dir { + daemon.driver.Put(container.ID) return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", daemon.driver, container.ID, container.basefs, dir) } @@ -989,7 +1000,7 @@ func (daemon 
*Daemon) Diff(container *Container) (archive.Archive, error) { return daemon.driver.Diff(container.ID, initID) } -func (daemon *Daemon) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { +func (daemon *Daemon) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) { return daemon.execDriver.Run(c.command, pipes, startCallback) } diff --git a/daemon/daemon_aufs.go b/daemon/daemon_aufs.go index a370a4ce3c..7d4d3c32e9 100644 --- a/daemon/daemon_aufs.go +++ b/daemon/daemon_aufs.go @@ -3,10 +3,10 @@ package daemon import ( + log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/aufs" "github.com/docker/docker/graph" - "github.com/docker/docker/pkg/log" ) // Given the graphdriver ad, if it is aufs, then migrate it. diff --git a/daemon/daemon_overlay.go b/daemon/daemon_overlay.go new file mode 100644 index 0000000000..25d6e80285 --- /dev/null +++ b/daemon/daemon_overlay.go @@ -0,0 +1,7 @@ +// +build !exclude_graphdriver_overlay + +package daemon + +import ( + _ "github.com/docker/docker/daemon/graphdriver/overlay" +) diff --git a/daemon/delete.go b/daemon/delete.go index 77be926c1c..55678f90a1 100644 --- a/daemon/delete.go +++ b/daemon/delete.go @@ -5,8 +5,8 @@ import ( "os" "path" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" - "github.com/docker/docker/pkg/log" ) func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status { diff --git a/daemon/exec.go b/daemon/exec.go index 0ab1c0bf5f..ecdbc58d85 100644 --- a/daemon/exec.go +++ b/daemon/exec.go @@ -9,12 +9,12 @@ import ( "strings" "sync" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/daemon/execdriver/lxc" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/broadcastwriter" "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/log" 
"github.com/docker/docker/pkg/promise" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" @@ -24,6 +24,7 @@ type execConfig struct { sync.Mutex ID string Running bool + ExitCode int ProcessConfig execdriver.ProcessConfig StreamConfig OpenStdin bool @@ -97,7 +98,9 @@ func (d *Daemon) getActiveContainer(name string) (*Container, error) { if !container.IsRunning() { return nil, fmt.Errorf("Container %s is not running", name) } - + if container.IsPaused() { + return nil, fmt.Errorf("Container %s is paused, unpause the container before exec", name) + } return container, nil } @@ -117,13 +120,14 @@ func (d *Daemon) ContainerExecCreate(job *engine.Job) engine.Status { return job.Error(err) } - config := runconfig.ExecConfigFromJob(job) + config, err := runconfig.ExecConfigFromJob(job) + if err != nil { + return job.Error(err) + } entrypoint, args := d.getEntrypointAndArgs(nil, config.Cmd) processConfig := execdriver.ProcessConfig{ - Privileged: config.Privileged, - User: config.User, Tty: config.Tty, Entrypoint: entrypoint, Arguments: args, @@ -155,7 +159,6 @@ func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status { var ( cStdin io.ReadCloser cStdout, cStderr io.Writer - cStdinCloser io.Closer execName = job.Args[0] ) @@ -183,10 +186,10 @@ func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status { r, w := io.Pipe() go func() { defer w.Close() + defer log.Debugf("Closing buffered stdin pipe") io.Copy(w, job.Stdin) }() cStdin = r - cStdinCloser = job.Stdin } if execConfig.OpenStdout { cStdout = job.Stdout @@ -204,12 +207,13 @@ func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status { execConfig.StreamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin } - attachErr := d.Attach(&execConfig.StreamConfig, execConfig.OpenStdin, false, execConfig.ProcessConfig.Tty, cStdin, cStdinCloser, cStdout, cStderr) + attachErr := d.attach(&execConfig.StreamConfig, execConfig.OpenStdin, true, 
execConfig.ProcessConfig.Tty, cStdin, cStdout, cStderr) execErr := make(chan error) - // Remove exec from daemon and container. - defer d.unregisterExecCommand(execConfig) + // Note, the execConfig data will be removed when the container + // itself is deleted. This allows us to query it (for things like + // the exitStatus) even after the cmd is done running. go func() { err := container.Exec(execConfig) @@ -232,7 +236,17 @@ func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status { } func (d *Daemon) Exec(c *Container, execConfig *execConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { - return d.execDriver.Exec(c.command, &execConfig.ProcessConfig, pipes, startCallback) + exitStatus, err := d.execDriver.Exec(c.command, &execConfig.ProcessConfig, pipes, startCallback) + + // On err, make sure we don't leave ExitCode at zero + if err != nil && exitStatus == 0 { + exitStatus = 128 + } + + execConfig.ExitCode = exitStatus + execConfig.Running = false + + return exitStatus, err } func (container *Container) Exec(execConfig *execConfig) error { diff --git a/daemon/execdriver/driver.go b/daemon/execdriver/driver.go index 22e4c4647c..411265814d 100644 --- a/daemon/execdriver/driver.go +++ b/daemon/execdriver/driver.go @@ -40,8 +40,17 @@ type TtyTerminal interface { Master() *os.File } +// ExitStatus provides exit reasons for a container. +type ExitStatus struct { + // The exit code with which the container exited. + ExitCode int + + // Whether the container encountered an OOM. 
+ OOMKilled bool +} + type Driver interface { - Run(c *Command, pipes *Pipes, startCallback StartCallback) (int, error) // Run executes the process and blocks until the process exits and returns the exit code + Run(c *Command, pipes *Pipes, startCallback StartCallback) (ExitStatus, error) // Run executes the process and blocks until the process exits and returns the exit code // Exec executes the process in an existing container, blocks until the process exits and returns the exit code Exec(c *Command, processConfig *ProcessConfig, pipes *Pipes, startCallback StartCallback) (int, error) Kill(c *Command, sig int) error @@ -62,6 +71,12 @@ type Network struct { HostNetworking bool `json:"host_networking"` } +// IPC settings of the container +type Ipc struct { + ContainerID string `json:"container_id"` // id of the container to join ipc. + HostIpc bool `json:"host_ipc"` +} + type NetworkInterface struct { Gateway string `json:"gateway"` IPAddress string `json:"ip"` @@ -106,6 +121,7 @@ type Command struct { WorkingDir string `json:"working_dir"` ConfigPath string `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver Network *Network `json:"network"` + Ipc *Ipc `json:"ipc"` Resources *Resources `json:"resources"` Mounts []Mount `json:"mounts"` AllowedDevices []*devices.Device `json:"allowed_devices"` diff --git a/daemon/execdriver/lxc/MAINTAINERS b/daemon/execdriver/lxc/MAINTAINERS index e9753be645..ac8ff535ff 100644 --- a/daemon/execdriver/lxc/MAINTAINERS +++ b/daemon/execdriver/lxc/MAINTAINERS @@ -1 +1,2 @@ +# the LXC exec driver needs more maintainers and contributions Dinesh Subhraveti (@dineshs-altiscale) diff --git a/daemon/execdriver/lxc/driver.go b/daemon/execdriver/lxc/driver.go index 0809b05c1e..642247c851 100644 --- a/daemon/execdriver/lxc/driver.go +++ b/daemon/execdriver/lxc/driver.go @@ -17,8 +17,8 @@ import ( "github.com/kr/pty" + log "github.com/Sirupsen/logrus" 
"github.com/docker/docker/daemon/execdriver" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/pkg/term" "github.com/docker/docker/utils" "github.com/docker/libcontainer/cgroups" @@ -55,7 +55,7 @@ func (d *driver) Name() string { return fmt.Sprintf("%s-%s", DriverName, version) } -func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { +func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) { var ( term execdriver.Terminal err error @@ -76,20 +76,27 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba }) if err := d.generateEnvConfig(c); err != nil { - return -1, err + return execdriver.ExitStatus{-1, false}, err } configPath, err := d.generateLXCConfig(c) if err != nil { - return -1, err + return execdriver.ExitStatus{-1, false}, err } params := []string{ "lxc-start", "-n", c.ID, "-f", configPath, - "--", - c.InitPath, + } + if c.Network.ContainerID != "" { + params = append(params, + "--share-net", c.Network.ContainerID, + ) } + params = append(params, + "--", + c.InitPath, + ) if c.Network.Interface != nil { params = append(params, "-g", c.Network.Interface.Gateway, @@ -116,14 +123,6 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba params = append(params, "-w", c.WorkingDir) } - if len(c.CapAdd) > 0 { - params = append(params, fmt.Sprintf("-cap-add=%s", strings.Join(c.CapAdd, ":"))) - } - - if len(c.CapDrop) > 0 { - params = append(params, fmt.Sprintf("-cap-drop=%s", strings.Join(c.CapDrop, ":"))) - } - params = append(params, "--", c.ProcessConfig.Entrypoint) params = append(params, c.ProcessConfig.Arguments...) @@ -155,11 +154,11 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba c.ProcessConfig.Args = append([]string{name}, arg...) 
if err := nodes.CreateDeviceNodes(c.Rootfs, c.AutoCreatedDevices); err != nil { - return -1, err + return execdriver.ExitStatus{-1, false}, err } if err := c.ProcessConfig.Start(); err != nil { - return -1, err + return execdriver.ExitStatus{-1, false}, err } var ( @@ -183,7 +182,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba c.ProcessConfig.Process.Kill() c.ProcessConfig.Wait() } - return -1, err + return execdriver.ExitStatus{-1, false}, err } c.ContainerPid = pid @@ -194,7 +193,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba <-waitLock - return getExitCode(c), waitErr + return execdriver.ExitStatus{getExitCode(c), false}, waitErr } /// Return the exit code of the process diff --git a/daemon/execdriver/lxc/init.go b/daemon/execdriver/lxc/init.go index 680f53e1a4..e99502667d 100644 --- a/daemon/execdriver/lxc/init.go +++ b/daemon/execdriver/lxc/init.go @@ -6,7 +6,6 @@ import ( "fmt" "io/ioutil" "log" - "net" "os" "os/exec" "runtime" @@ -14,7 +13,6 @@ import ( "syscall" "github.com/docker/docker/pkg/reexec" - "github.com/docker/libcontainer/netlink" ) // Args provided to the init function for a driver @@ -59,12 +57,7 @@ func setupNamespace(args *InitArgs) error { if err := setupEnv(args); err != nil { return err } - if err := setupHostname(args); err != nil { - return err - } - if err := setupNetworking(args); err != nil { - return err - } + if err := finalizeNamespace(args); err != nil { return err } @@ -138,59 +131,6 @@ func setupEnv(args *InitArgs) error { return nil } -func setupHostname(args *InitArgs) error { - hostname := getEnv(args, "HOSTNAME") - if hostname == "" { - return nil - } - return setHostname(hostname) -} - -// Setup networking -func setupNetworking(args *InitArgs) error { - if args.Ip != "" { - // eth0 - iface, err := net.InterfaceByName("eth0") - if err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - ip, ipNet, err := net.ParseCIDR(args.Ip) 
- if err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - if err := netlink.NetworkLinkAddIp(iface, ip, ipNet); err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - if err := netlink.NetworkSetMTU(iface, args.Mtu); err != nil { - return fmt.Errorf("Unable to set MTU: %v", err) - } - if err := netlink.NetworkLinkUp(iface); err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - - // loopback - iface, err = net.InterfaceByName("lo") - if err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - if err := netlink.NetworkLinkUp(iface); err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - } - if args.Gateway != "" { - gw := net.ParseIP(args.Gateway) - if gw == nil { - return fmt.Errorf("Unable to set up networking, %s is not a valid gateway IP", args.Gateway) - } - - if err := netlink.AddDefaultGw(gw.String(), "eth0"); err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - } - - return nil -} - // Setup working directory func setupWorkingDirectory(args *InitArgs) error { if args.WorkDir == "" { diff --git a/daemon/execdriver/lxc/lxc_init_linux.go b/daemon/execdriver/lxc/lxc_init_linux.go index 625caa1608..78bdd11fb9 100644 --- a/daemon/execdriver/lxc/lxc_init_linux.go +++ b/daemon/execdriver/lxc/lxc_init_linux.go @@ -2,74 +2,19 @@ package lxc import ( "fmt" - "strings" - "syscall" - - "github.com/docker/docker/daemon/execdriver" - "github.com/docker/docker/daemon/execdriver/native/template" "github.com/docker/libcontainer/namespaces" - "github.com/docker/libcontainer/security/capabilities" - "github.com/docker/libcontainer/system" "github.com/docker/libcontainer/utils" ) -func setHostname(hostname string) error { - return syscall.Sethostname([]byte(hostname)) -} - func finalizeNamespace(args *InitArgs) error { if err := utils.CloseExecFrom(3); err != nil { return err } - // We use the native drivers default template so that 
things like caps are consistent - // across both drivers - container := template.New() - - if !args.Privileged { - // drop capabilities in bounding set before changing user - if err := capabilities.DropBoundingSet(container.Capabilities); err != nil { - return fmt.Errorf("drop bounding set %s", err) - } - - // preserve existing capabilities while we change users - if err := system.SetKeepCaps(); err != nil { - return fmt.Errorf("set keep caps %s", err) - } - } - if err := namespaces.SetupUser(args.User); err != nil { return fmt.Errorf("setup user %s", err) } - if !args.Privileged { - if err := system.ClearKeepCaps(); err != nil { - return fmt.Errorf("clear keep caps %s", err) - } - - var ( - adds []string - drops []string - ) - - if args.CapAdd != "" { - adds = strings.Split(args.CapAdd, ":") - } - if args.CapDrop != "" { - drops = strings.Split(args.CapDrop, ":") - } - - caps, err := execdriver.TweakCapabilities(container.Capabilities, adds, drops) - if err != nil { - return err - } - - // drop all other capabilities - if err := capabilities.DropCapabilities(caps); err != nil { - return fmt.Errorf("drop capabilities %s", err) - } - } - if err := setupWorkingDirectory(args); err != nil { return err } diff --git a/daemon/execdriver/lxc/lxc_init_unsupported.go b/daemon/execdriver/lxc/lxc_init_unsupported.go index b3f2ae68eb..97bc8a984c 100644 --- a/daemon/execdriver/lxc/lxc_init_unsupported.go +++ b/daemon/execdriver/lxc/lxc_init_unsupported.go @@ -2,12 +2,6 @@ package lxc -import "github.com/docker/docker/daemon/execdriver" - -func setHostname(hostname string) error { - panic("Not supported on darwin") -} - -func finalizeNamespace(args *execdriver.InitArgs) error { +func finalizeNamespace(args *InitArgs) error { panic("Not supported on darwin") } diff --git a/daemon/execdriver/lxc/lxc_template.go b/daemon/execdriver/lxc/lxc_template.go index 2cd63dc72d..9402c0697c 100644 --- a/daemon/execdriver/lxc/lxc_template.go +++ b/daemon/execdriver/lxc/lxc_template.go @@ -1,11 
+1,12 @@ package lxc import ( + "github.com/docker/docker/daemon/execdriver" + nativeTemplate "github.com/docker/docker/daemon/execdriver/native/template" + "github.com/docker/libcontainer/label" + "os" "strings" "text/template" - - "github.com/docker/docker/daemon/execdriver" - "github.com/docker/libcontainer/label" ) const LxcTemplate = ` @@ -15,6 +16,13 @@ lxc.network.type = veth lxc.network.link = {{.Network.Interface.Bridge}} lxc.network.name = eth0 lxc.network.mtu = {{.Network.Mtu}} +{{if .Network.Interface.IPAddress}} +lxc.network.ipv4 = {{.Network.Interface.IPAddress}}/{{.Network.Interface.IPPrefixLen}} +{{end}} +{{if .Network.Interface.Gateway}} +lxc.network.ipv4.gateway = {{.Network.Interface.Gateway}} +{{end}} +lxc.network.flags = up {{else if .Network.HostNetworking}} lxc.network.type = none {{else}} @@ -70,10 +78,23 @@ lxc.mount.entry = devpts {{escapeFstabSpaces $ROOTFS}}/dev/pts devpts {{formatMo lxc.mount.entry = shm {{escapeFstabSpaces $ROOTFS}}/dev/shm tmpfs {{formatMountLabel "size=65536k,nosuid,nodev,noexec" ""}} 0 0 {{range $value := .Mounts}} +{{$createVal := isDirectory $value.Source}} {{if $value.Writable}} -lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none rbind,rw 0 0 +lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none rbind,rw,create={{$createVal}} 0 0 {{else}} -lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none rbind,ro 0 0 +lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none rbind,ro,create={{$createVal}} 0 0 +{{end}} +{{end}} + +{{if .ProcessConfig.Env}} +lxc.utsname = {{getHostname .ProcessConfig.Env}} +{{end}} + +{{if .ProcessConfig.Privileged}} +# No cap values are needed, as lxc is starting in privileged mode +{{else}} +{{range $value := keepCapabilities .CapAdd .CapDrop}} +lxc.cap.keep = 
{{$value}} {{end}} {{end}} @@ -117,6 +138,33 @@ func escapeFstabSpaces(field string) string { return strings.Replace(field, " ", "\\040", -1) } +func keepCapabilities(adds []string, drops []string) []string { + container := nativeTemplate.New() + caps, err := execdriver.TweakCapabilities(container.Capabilities, adds, drops) + var newCaps []string + for _, cap := range caps { + newCaps = append(newCaps, strings.ToLower(cap)) + } + if err != nil { + return []string{} + } + return newCaps +} + +func isDirectory(source string) string { + f, err := os.Stat(source) + if err != nil { + if os.IsNotExist(err) { + return "dir" + } + return "" + } + if f.IsDir() { + return "dir" + } + return "file" +} + func getMemorySwap(v *execdriver.Resources) int64 { // By default, MemorySwap is set to twice the size of RAM. // If you want to omit MemorySwap, set it to `-1'. @@ -137,12 +185,25 @@ func getLabel(c map[string][]string, name string) string { return "" } +func getHostname(env []string) string { + for _, kv := range env { + parts := strings.SplitN(kv, "=", 2) + if parts[0] == "HOSTNAME" && len(parts) == 2 { + return parts[1] + } + } + return "" +} + func init() { var err error funcMap := template.FuncMap{ "getMemorySwap": getMemorySwap, "escapeFstabSpaces": escapeFstabSpaces, "formatMountLabel": label.FormatMountLabel, + "isDirectory": isDirectory, + "keepCapabilities": keepCapabilities, + "getHostname": getHostname, } LxcTemplateCompiled, err = template.New("lxc").Funcs(funcMap).Parse(LxcTemplate) if err != nil { diff --git a/daemon/execdriver/lxc/lxc_template_unit_test.go b/daemon/execdriver/lxc/lxc_template_unit_test.go index 900700b740..77435114fd 100644 --- a/daemon/execdriver/lxc/lxc_template_unit_test.go +++ b/daemon/execdriver/lxc/lxc_template_unit_test.go @@ -14,6 +14,7 @@ import ( "time" "github.com/docker/docker/daemon/execdriver" + nativeTemplate "github.com/docker/docker/daemon/execdriver/native/template" "github.com/docker/libcontainer/devices" ) @@ -104,6 +105,10 
@@ func TestCustomLxcConfig(t *testing.T) { } func grepFile(t *testing.T, path string, pattern string) { + grepFileWithReverse(t, path, pattern, false) +} + +func grepFileWithReverse(t *testing.T, path string, pattern string, inverseGrep bool) { f, err := os.Open(path) if err != nil { t.Fatal(err) @@ -117,9 +122,15 @@ func grepFile(t *testing.T, path string, pattern string) { for err == nil { line, err = r.ReadString('\n') if strings.Contains(line, pattern) == true { + if inverseGrep { + t.Fatalf("grepFile: pattern \"%s\" found in \"%s\"", pattern, path) + } return } } + if inverseGrep { + return + } t.Fatalf("grepFile: pattern \"%s\" not found in \"%s\"", pattern, path) } @@ -140,3 +151,152 @@ func TestEscapeFstabSpaces(t *testing.T) { } } } + +func TestIsDirectory(t *testing.T) { + tempDir, err := ioutil.TempDir("", "TestIsDir") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempDir) + + tempFile, err := ioutil.TempFile(tempDir, "TestIsDirFile") + if err != nil { + t.Fatal(err) + } + + if isDirectory(tempDir) != "dir" { + t.Logf("Could not identify %s as a directory", tempDir) + t.Fail() + } + + if isDirectory(tempFile.Name()) != "file" { + t.Logf("Could not identify %s as a file", tempFile.Name()) + t.Fail() + } +} + +func TestCustomLxcConfigMounts(t *testing.T) { + root, err := ioutil.TempDir("", "TestCustomLxcConfig") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(root) + tempDir, err := ioutil.TempDir("", "TestIsDir") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempDir) + + tempFile, err := ioutil.TempFile(tempDir, "TestIsDirFile") + if err != nil { + t.Fatal(err) + } + os.MkdirAll(path.Join(root, "containers", "1"), 0777) + + driver, err := NewDriver(root, "", false) + if err != nil { + t.Fatal(err) + } + processConfig := execdriver.ProcessConfig{ + Privileged: false, + } + mounts := []execdriver.Mount{ + { + Source: tempDir, + Destination: tempDir, + Writable: false, + Private: true, + }, + { + Source: tempFile.Name(), + 
Destination: tempFile.Name(), + Writable: true, + Private: true, + }, + } + command := &execdriver.Command{ + ID: "1", + LxcConfig: []string{ + "lxc.utsname = docker", + "lxc.cgroup.cpuset.cpus = 0,1", + }, + Network: &execdriver.Network{ + Mtu: 1500, + Interface: nil, + }, + Mounts: mounts, + ProcessConfig: processConfig, + } + + p, err := driver.generateLXCConfig(command) + if err != nil { + t.Fatal(err) + } + + grepFile(t, p, "lxc.utsname = docker") + grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1") + + grepFile(t, p, fmt.Sprintf("lxc.mount.entry = %s %s none rbind,ro,create=%s 0 0", tempDir, "/"+tempDir, "dir")) + grepFile(t, p, fmt.Sprintf("lxc.mount.entry = %s %s none rbind,rw,create=%s 0 0", tempFile.Name(), "/"+tempFile.Name(), "file")) +} + +func TestCustomLxcConfigMisc(t *testing.T) { + root, err := ioutil.TempDir("", "TestCustomLxcConfig") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(root) + os.MkdirAll(path.Join(root, "containers", "1"), 0777) + driver, err := NewDriver(root, "", false) + if err != nil { + t.Fatal(err) + } + processConfig := execdriver.ProcessConfig{ + Privileged: false, + } + + processConfig.Env = []string{"HOSTNAME=testhost"} + command := &execdriver.Command{ + ID: "1", + LxcConfig: []string{ + "lxc.cgroup.cpuset.cpus = 0,1", + }, + Network: &execdriver.Network{ + Mtu: 1500, + Interface: &execdriver.NetworkInterface{ + Gateway: "10.10.10.1", + IPAddress: "10.10.10.10", + IPPrefixLen: 24, + Bridge: "docker0", + }, + }, + ProcessConfig: processConfig, + CapAdd: []string{"net_admin", "syslog"}, + CapDrop: []string{"kill", "mknod"}, + } + + p, err := driver.generateLXCConfig(command) + if err != nil { + t.Fatal(err) + } + // network + grepFile(t, p, "lxc.network.type = veth") + grepFile(t, p, "lxc.network.link = docker0") + grepFile(t, p, "lxc.network.name = eth0") + grepFile(t, p, "lxc.network.ipv4 = 10.10.10.10/24") + grepFile(t, p, "lxc.network.ipv4.gateway = 10.10.10.1") + grepFile(t, p, "lxc.network.flags = up") + + // 
hostname + grepFile(t, p, "lxc.utsname = testhost") + grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1") + container := nativeTemplate.New() + for _, cap := range container.Capabilities { + cap = strings.ToLower(cap) + if cap != "mknod" && cap != "kill" { + grepFile(t, p, fmt.Sprintf("lxc.cap.keep = %s", cap)) + } + } + grepFileWithReverse(t, p, fmt.Sprintf("lxc.cap.keep = kill"), true) + grepFileWithReverse(t, p, fmt.Sprintf("lxc.cap.keep = mknod"), true) +} diff --git a/daemon/execdriver/native/create.go b/daemon/execdriver/native/create.go index 492247e492..de103eca8a 100644 --- a/daemon/execdriver/native/create.go +++ b/daemon/execdriver/native/create.go @@ -36,6 +36,10 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Config, e container.MountConfig.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != "" container.RestrictSys = true + if err := d.createIpc(container, c); err != nil { + return nil, err + } + if err := d.createNetwork(container, c); err != nil { return nil, err } @@ -124,6 +128,28 @@ func (d *driver) createNetwork(container *libcontainer.Config, c *execdriver.Com return nil } +func (d *driver) createIpc(container *libcontainer.Config, c *execdriver.Command) error { + if c.Ipc.HostIpc { + container.Namespaces["NEWIPC"] = false + return nil + } + + if c.Ipc.ContainerID != "" { + d.Lock() + active := d.activeContainers[c.Ipc.ContainerID] + d.Unlock() + + if active == nil || active.cmd.Process == nil { + return fmt.Errorf("%s is not a valid running container to join", c.Ipc.ContainerID) + } + cmd := active.cmd + + container.IpcNsPath = filepath.Join("/proc", fmt.Sprint(cmd.Process.Pid), "ns", "ipc") + } + + return nil +} + func (d *driver) setPrivileged(container *libcontainer.Config) (err error) { container.Capabilities = capabilities.GetAllCapabilities() container.Cgroups.AllowAllDevices = true diff --git a/daemon/execdriver/native/driver.go b/daemon/execdriver/native/driver.go index 3628d7b575..01455a8101 100644 --- 
a/daemon/execdriver/native/driver.go +++ b/daemon/execdriver/native/driver.go @@ -14,6 +14,7 @@ import ( "sync" "syscall" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/pkg/term" "github.com/docker/libcontainer" @@ -60,11 +61,20 @@ func NewDriver(root, initPath string) (*driver, error) { }, nil } -func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { +func (d *driver) notifyOnOOM(config *libcontainer.Config) (<-chan struct{}, error) { + return fs.NotifyOnOOM(config.Cgroups) +} + +type execOutput struct { + exitCode int + err error +} + +func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) { // take the Command and populate the libcontainer.Config from it container, err := d.createContainer(c) if err != nil { - return -1, err + return execdriver.ExitStatus{-1, false}, err } var term execdriver.Terminal @@ -75,7 +85,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes) } if err != nil { - return -1, err + return execdriver.ExitStatus{-1, false}, err } c.ProcessConfig.Terminal = term @@ -92,40 +102,66 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba ) if err := d.createContainerRoot(c.ID); err != nil { - return -1, err + return execdriver.ExitStatus{-1, false}, err } defer d.cleanContainer(c.ID) if err := d.writeContainerFile(container, c.ID); err != nil { - return -1, err + return execdriver.ExitStatus{-1, false}, err } - return namespaces.Exec(container, c.ProcessConfig.Stdin, c.ProcessConfig.Stdout, c.ProcessConfig.Stderr, c.ProcessConfig.Console, dataPath, args, func(container *libcontainer.Config, console, dataPath, init string, child *os.File, args []string) *exec.Cmd { - c.ProcessConfig.Path = d.initPath - 
c.ProcessConfig.Args = append([]string{ - DriverName, - "-console", console, - "-pipe", "3", - "-root", filepath.Join(d.root, c.ID), - "--", - }, args...) + execOutputChan := make(chan execOutput, 1) + waitForStart := make(chan struct{}) - // set this to nil so that when we set the clone flags anything else is reset - c.ProcessConfig.SysProcAttr = &syscall.SysProcAttr{ - Cloneflags: uintptr(namespaces.GetNamespaceFlags(container.Namespaces)), - } - c.ProcessConfig.ExtraFiles = []*os.File{child} + go func() { + exitCode, err := namespaces.Exec(container, c.ProcessConfig.Stdin, c.ProcessConfig.Stdout, c.ProcessConfig.Stderr, c.ProcessConfig.Console, dataPath, args, func(container *libcontainer.Config, console, dataPath, init string, child *os.File, args []string) *exec.Cmd { + c.ProcessConfig.Path = d.initPath + c.ProcessConfig.Args = append([]string{ + DriverName, + "-console", console, + "-pipe", "3", + "-root", filepath.Join(d.root, c.ID), + "--", + }, args...) - c.ProcessConfig.Env = container.Env - c.ProcessConfig.Dir = container.RootFs + // set this to nil so that when we set the clone flags anything else is reset + c.ProcessConfig.SysProcAttr = &syscall.SysProcAttr{ + Cloneflags: uintptr(namespaces.GetNamespaceFlags(container.Namespaces)), + } + c.ProcessConfig.ExtraFiles = []*os.File{child} - return &c.ProcessConfig.Cmd - }, func() { - if startCallback != nil { - c.ContainerPid = c.ProcessConfig.Process.Pid - startCallback(&c.ProcessConfig, c.ContainerPid) - } - }) + c.ProcessConfig.Env = container.Env + c.ProcessConfig.Dir = container.RootFs + + return &c.ProcessConfig.Cmd + }, func() { + close(waitForStart) + if startCallback != nil { + c.ContainerPid = c.ProcessConfig.Process.Pid + startCallback(&c.ProcessConfig, c.ContainerPid) + } + }) + execOutputChan <- execOutput{exitCode, err} + }() + + select { + case execOutput := <-execOutputChan: + return execdriver.ExitStatus{execOutput.exitCode, false}, execOutput.err + case <-waitForStart: + break + } + + 
oomKill := false + oomKillNotification, err := d.notifyOnOOM(container) + if err == nil { + _, oomKill = <-oomKillNotification + } else { + log.Warnf("WARNING: Your kernel does not support OOM notifications: %s", err) + } + // wait for the container to exit. + execOutput := <-execOutputChan + + return execdriver.ExitStatus{execOutput.exitCode, oomKill}, execOutput.err } func (d *driver) Kill(p *execdriver.Command, sig int) error { diff --git a/daemon/execdriver/native/init.go b/daemon/execdriver/native/init.go index c1c988d934..754d842c3b 100644 --- a/daemon/execdriver/native/init.go +++ b/daemon/execdriver/native/init.go @@ -13,7 +13,6 @@ import ( "github.com/docker/docker/pkg/reexec" "github.com/docker/libcontainer" "github.com/docker/libcontainer/namespaces" - "github.com/docker/libcontainer/syncpipe" ) func init() { @@ -48,12 +47,7 @@ func initializer() { writeError(err) } - syncPipe, err := syncpipe.NewSyncPipeFromFd(0, uintptr(*pipe)) - if err != nil { - writeError(err) - } - - if err := namespaces.Init(container, rootfs, *console, syncPipe, flag.Args()); err != nil { + if err := namespaces.Init(container, rootfs, *console, os.NewFile(uintptr(*pipe), "child"), flag.Args()); err != nil { writeError(err) } diff --git a/daemon/execdriver/native/utils.go b/daemon/execdriver/native/utils.go index e337cf4316..88aefaf382 100644 --- a/daemon/execdriver/native/utils.go +++ b/daemon/execdriver/native/utils.go @@ -3,10 +3,10 @@ package native import ( + "encoding/json" "os" "github.com/docker/libcontainer" - "github.com/docker/libcontainer/syncpipe" ) func findUserArgs() []string { @@ -21,15 +21,9 @@ func findUserArgs() []string { // loadConfigFromFd loads a container's config from the sync pipe that is provided by // fd 3 when running a process func loadConfigFromFd() (*libcontainer.Config, error) { - syncPipe, err := syncpipe.NewSyncPipeFromFd(0, 3) - if err != nil { - return nil, err - } - var config *libcontainer.Config - if err := syncPipe.ReadFromParent(&config); 
err != nil { + if err := json.NewDecoder(os.NewFile(3, "child")).Decode(&config); err != nil { return nil, err } - return config, nil } diff --git a/daemon/graphdriver/aufs/aufs.go b/daemon/graphdriver/aufs/aufs.go index bbaad08e38..55cfd00c1f 100644 --- a/daemon/graphdriver/aufs/aufs.go +++ b/daemon/graphdriver/aufs/aufs.go @@ -30,10 +30,10 @@ import ( "sync" "syscall" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/log" mountpk "github.com/docker/docker/pkg/mount" "github.com/docker/docker/utils" "github.com/docker/libcontainer/label" @@ -99,7 +99,7 @@ func Init(root string, options []string) (graphdriver.Driver, error) { return nil, err } - if err := graphdriver.MakePrivate(root); err != nil { + if err := mountpk.MakePrivate(root); err != nil { return nil, err } @@ -301,6 +301,7 @@ func (a *Driver) Diff(id, parent string) (archive.Archive, error) { // AUFS doesn't need the parent layer to produce a diff. return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ Compression: archive.Uncompressed, + Excludes: []string{".wh..wh.*"}, }) } @@ -412,39 +413,44 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro } }() - if err = a.tryMount(ro, rw, target, mountLabel); err != nil { - if err = a.mountRw(rw, target, mountLabel); err != nil { - return - } + // Mount options are clipped to page size(4096 bytes). If there are more + // layers then these are remounted individually using append. 
- for _, layer := range ro { - data := label.FormatMountLabel(fmt.Sprintf("append:%s=ro+wh", layer), mountLabel) - if err = mount("none", target, "aufs", MsRemount, data); err != nil { - return + b := make([]byte, syscall.Getpagesize()-len(mountLabel)-50) // room for xino & mountLabel + bp := copy(b, fmt.Sprintf("br:%s=rw", rw)) + + firstMount := true + i := 0 + + for { + for ; i < len(ro); i++ { + layer := fmt.Sprintf(":%s=ro+wh", ro[i]) + + if firstMount { + if bp+len(layer) > len(b) { + break + } + bp += copy(b[bp:], layer) + } else { + data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel) + if err = mount("none", target, "aufs", MsRemount, data); err != nil { + return + } } } + + if firstMount { + data := label.FormatMountLabel(fmt.Sprintf("%s,xino=/dev/shm/aufs.xino", string(b[:bp])), mountLabel) + if err = mount("none", target, "aufs", 0, data); err != nil { + return + } + firstMount = false + } + + if i == len(ro) { + break + } } + return } - -// Try to mount using the aufs fast path, if this fails then -// append ro layers. 
-func (a *Driver) tryMount(ro []string, rw, target, mountLabel string) (err error) { - var ( - rwBranch = fmt.Sprintf("%s=rw", rw) - roBranches = fmt.Sprintf("%s=ro+wh:", strings.Join(ro, "=ro+wh:")) - data = label.FormatMountLabel(fmt.Sprintf("br:%v:%v,xino=/dev/shm/aufs.xino", rwBranch, roBranches), mountLabel) - ) - return mount("none", target, "aufs", 0, data) -} - -func (a *Driver) mountRw(rw, target, mountLabel string) error { - data := label.FormatMountLabel(fmt.Sprintf("br:%s,xino=/dev/shm/aufs.xino", rw), mountLabel) - return mount("none", target, "aufs", 0, data) -} - -func rollbackMount(target string, err error) { - if err != nil { - Unmount(target) - } -} diff --git a/daemon/graphdriver/aufs/aufs_test.go b/daemon/graphdriver/aufs/aufs_test.go index 0c940bc9ed..c17a5dcce6 100644 --- a/daemon/graphdriver/aufs/aufs_test.go +++ b/daemon/graphdriver/aufs/aufs_test.go @@ -15,7 +15,8 @@ import ( ) var ( - tmp = path.Join(os.TempDir(), "aufs-tests", "aufs") + tmpOuter = path.Join(os.TempDir(), "aufs-tests") + tmp = path.Join(tmpOuter, "aufs") ) func init() { @@ -641,9 +642,13 @@ func hash(c string) string { return hex.EncodeToString(h.Sum(nil)) } -func TestMountMoreThan42Layers(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) +func testMountMoreThan42Layers(t *testing.T, mountPath string) { + if err := os.MkdirAll(mountPath, 0755); err != nil { + t.Fatal(err) + } + + defer os.RemoveAll(mountPath) + d := testInit(mountPath, t).(*Driver) defer d.Cleanup() var last string var expected int @@ -664,24 +669,24 @@ func TestMountMoreThan42Layers(t *testing.T) { if err := d.Create(current, parent); err != nil { t.Logf("Current layer %d", i) - t.Fatal(err) + t.Error(err) } point, err := d.Get(current, "") if err != nil { t.Logf("Current layer %d", i) - t.Fatal(err) + t.Error(err) } f, err := os.Create(path.Join(point, current)) if err != nil { t.Logf("Current layer %d", i) - t.Fatal(err) + t.Error(err) } f.Close() if i%10 == 0 { if err := 
os.Remove(path.Join(point, parent)); err != nil { t.Logf("Current layer %d", i) - t.Fatal(err) + t.Error(err) } expected-- } @@ -691,13 +696,37 @@ func TestMountMoreThan42Layers(t *testing.T) { // Perform the actual mount for the top most image point, err := d.Get(last, "") if err != nil { - t.Fatal(err) + t.Error(err) } files, err := ioutil.ReadDir(point) if err != nil { - t.Fatal(err) + t.Error(err) } if len(files) != expected { - t.Fatalf("Expected %d got %d", expected, len(files)) + t.Errorf("Expected %d got %d", expected, len(files)) + } +} + +func TestMountMoreThan42Layers(t *testing.T) { + os.RemoveAll(tmpOuter) + testMountMoreThan42Layers(t, tmp) +} + +func TestMountMoreThan42LayersMatchingPathLength(t *testing.T) { + defer os.RemoveAll(tmpOuter) + zeroes := "0" + for { + // This finds a mount path so that when combined into aufs mount options + // 4096 byte boundary would be in between the paths or in permission + // section. For '/tmp' it will use '/tmp/aufs-tests/00000000/aufs' + mountPath := path.Join(tmpOuter, zeroes, "aufs") + pathLength := 77 + len(mountPath) + + if mod := 4095 % pathLength; mod == 0 || mod > pathLength-2 { + t.Logf("Using path: %s", mountPath) + testMountMoreThan42Layers(t, mountPath) + return + } + zeroes += "0" } } diff --git a/daemon/graphdriver/aufs/mount.go b/daemon/graphdriver/aufs/mount.go index fa74e05b07..bb935f6919 100644 --- a/daemon/graphdriver/aufs/mount.go +++ b/daemon/graphdriver/aufs/mount.go @@ -4,7 +4,7 @@ import ( "os/exec" "syscall" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) func Unmount(target string) error { diff --git a/daemon/graphdriver/btrfs/btrfs.go b/daemon/graphdriver/btrfs/btrfs.go index 26102aa1ef..a3964b963c 100644 --- a/daemon/graphdriver/btrfs/btrfs.go +++ b/daemon/graphdriver/btrfs/btrfs.go @@ -40,7 +40,7 @@ func Init(home string, options []string) (graphdriver.Driver, error) { return nil, err } - if err := graphdriver.MakePrivate(home); err != nil { + if err := 
mount.MakePrivate(home); err != nil { return nil, err } @@ -60,7 +60,14 @@ func (d *Driver) String() string { } func (d *Driver) Status() [][2]string { - return nil + status := [][2]string{} + if bv := BtrfsBuildVersion(); bv != "-" { + status = append(status, [2]string{"Build Version", bv}) + } + if lv := BtrfsLibVersion(); lv != -1 { + status = append(status, [2]string{"Library Version", fmt.Sprintf("%d", lv)}) + } + return status } func (d *Driver) Cleanup() error { diff --git a/daemon/graphdriver/btrfs/version.go b/daemon/graphdriver/btrfs/version.go new file mode 100644 index 0000000000..89ed85749d --- /dev/null +++ b/daemon/graphdriver/btrfs/version.go @@ -0,0 +1,24 @@ +// +build linux,!btrfs_noversion + +package btrfs + +/* +#include + +// because around version 3.16, they did not define lib version yet +int my_btrfs_lib_version() { +#ifdef BTRFS_LIB_VERSION + return BTRFS_LIB_VERSION; +#else + return -1; +#endif +} +*/ +import "C" + +func BtrfsBuildVersion() string { + return string(C.BTRFS_BUILD_VERSION) +} +func BtrfsLibVersion() int { + return int(C.BTRFS_LIB_VERSION) +} diff --git a/daemon/graphdriver/btrfs/version_none.go b/daemon/graphdriver/btrfs/version_none.go new file mode 100644 index 0000000000..69a4e51cf8 --- /dev/null +++ b/daemon/graphdriver/btrfs/version_none.go @@ -0,0 +1,13 @@ +// +build linux,btrfs_noversion + +package btrfs + +// TODO(vbatts) remove this work-around once supported linux distros are on +// btrfs utilities of >= 3.16.1 + +func BtrfsBuildVersion() string { + return "-" +} +func BtrfsLibVersion() int { + return -1 +} diff --git a/daemon/graphdriver/btrfs/version_test.go b/daemon/graphdriver/btrfs/version_test.go new file mode 100644 index 0000000000..d96e33f3df --- /dev/null +++ b/daemon/graphdriver/btrfs/version_test.go @@ -0,0 +1,13 @@ +// +build linux + +package btrfs + +import ( + "testing" +) + +func TestBuildVersion(t *testing.T) { + if len(BtrfsBuildVersion()) == 0 { + t.Errorf("expected output from btrfs build 
version, but got empty string") + } +} diff --git a/daemon/graphdriver/devmapper/MAINTAINERS b/daemon/graphdriver/devmapper/MAINTAINERS index 9e629d5fcc..9382fc3a42 100644 --- a/daemon/graphdriver/devmapper/MAINTAINERS +++ b/daemon/graphdriver/devmapper/MAINTAINERS @@ -1 +1,2 @@ Alexander Larsson (@alexlarsson) +Vincent Batts (@vbatts) diff --git a/daemon/graphdriver/devmapper/README.md b/daemon/graphdriver/devmapper/README.md index c42620247b..3b69cef84f 100644 --- a/daemon/graphdriver/devmapper/README.md +++ b/daemon/graphdriver/devmapper/README.md @@ -100,6 +100,25 @@ Here is the list of supported options: ``docker -d --storage-opt dm.mountopt=nodiscard`` + * `dm.thinpooldev` + + Specifies a custom blockdevice to use for the thin pool. + + If using a block device for device mapper storage, ideally lvm2 + would be used to create/manage the thin-pool volume that is then + handed to docker to exclusively create/manage the thin and thin + snapshot volumes needed for its containers. Managing the thin-pool + outside of docker makes for the most feature-rich method of having + docker utilize device mapper thin provisioning as the backing + storage for docker's containers. lvm2-based thin-pool management + feature highlights include: automatic or interactive thin-pool + resize support, dynamically change thin-pool features, automatic + thinp metadata checking when lvm2 activates the thin-pool, etc. + + Example use: + + ``docker -d --storage-opt dm.thinpooldev=/dev/mapper/thin-pool`` + * `dm.datadev` Specifies a custom blockdevice to use for data for the thin pool. 
diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index ccaea0181e..71502a483c 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -18,8 +18,9 @@ import ( "syscall" "time" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/devicemapper" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/units" "github.com/docker/libcontainer/label" @@ -29,9 +30,20 @@ var ( DefaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 DefaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 DefaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 - DefaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors + DefaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors + MaxDeviceId int = 0xffffff // 24 bit, pool limit + DeviceIdMapSz int = (MaxDeviceId + 1) / 8 ) +const deviceSetMetaFile string = "deviceset-metadata" +const transactionMetaFile string = "transaction-metadata" + +type Transaction struct { + OpenTransactionId uint64 `json:"open_transaction_id"` + DeviceIdHash string `json:"device_hash"` + DeviceId int `json:"device_id"` +} + type DevInfo struct { Hash string `json:"-"` DeviceId int `json:"device_id"` @@ -62,13 +74,13 @@ type MetaData struct { } type DeviceSet struct { - MetaData - sync.Mutex // Protects Devices map and serializes calls into libdevmapper - root string - devicePrefix string - TransactionId uint64 - NewTransactionId uint64 - nextDeviceId int + MetaData `json:"-"` + sync.Mutex `json:"-"` // Protects Devices map and serializes calls into libdevmapper + root string + devicePrefix string + TransactionId uint64 `json:"-"` + NextDeviceId int `json:"next_device_id"` + deviceIdMap []byte // Options dataLoopbackSize int64 @@ -81,6 +93,8 @@ type DeviceSet struct { metadataDevice string doBlkDiscard bool thinpBlockSize uint32 + thinPoolDevice string + 
Transaction `json:"-"` } type DiskUsage struct { @@ -138,12 +152,23 @@ func (devices *DeviceSet) metadataFile(info *DevInfo) string { return path.Join(devices.metadataDir(), file) } +func (devices *DeviceSet) transactionMetaFile() string { + return path.Join(devices.metadataDir(), transactionMetaFile) +} + +func (devices *DeviceSet) deviceSetMetaFile() string { + return path.Join(devices.metadataDir(), deviceSetMetaFile) +} + func (devices *DeviceSet) oldMetadataFile() string { return path.Join(devices.loopbackDir(), "json") } func (devices *DeviceSet) getPoolName() string { - return devices.devicePrefix + "-pool" + if devices.thinPoolDevice == "" { + return devices.devicePrefix + "-pool" + } + return devices.thinPoolDevice } func (devices *DeviceSet) getPoolDevName() string { @@ -189,8 +214,16 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { } func (devices *DeviceSet) allocateTransactionId() uint64 { - devices.NewTransactionId = devices.NewTransactionId + 1 - return devices.NewTransactionId + devices.OpenTransactionId = devices.TransactionId + 1 + return devices.OpenTransactionId +} + +func (devices *DeviceSet) updatePoolTransactionId() error { + if err := devicemapper.SetTransactionId(devices.getPoolDevName(), devices.TransactionId, devices.OpenTransactionId); err != nil { + return fmt.Errorf("Error setting devmapper transaction ID: %s", err) + } + devices.TransactionId = devices.OpenTransactionId + return nil } func (devices *DeviceSet) removeMetadata(info *DevInfo) error { @@ -200,11 +233,8 @@ func (devices *DeviceSet) removeMetadata(info *DevInfo) error { return nil } -func (devices *DeviceSet) saveMetadata(info *DevInfo) error { - jsonData, err := json.Marshal(info) - if err != nil { - return fmt.Errorf("Error encoding metadata to json: %s", err) - } +// Given json data and file path, write it to disk +func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error { tmpFile, err := 
ioutil.TempFile(devices.metadataDir(), ".tmp") if err != nil { return fmt.Errorf("Error creating metadata file: %s", err) @@ -223,19 +253,48 @@ func (devices *DeviceSet) saveMetadata(info *DevInfo) error { if err := tmpFile.Close(); err != nil { return fmt.Errorf("Error closing metadata file %s: %s", tmpFile.Name(), err) } - if err := os.Rename(tmpFile.Name(), devices.metadataFile(info)); err != nil { + if err := os.Rename(tmpFile.Name(), filePath); err != nil { return fmt.Errorf("Error committing metadata file %s: %s", tmpFile.Name(), err) } - if devices.NewTransactionId != devices.TransactionId { - if err = setTransactionId(devices.getPoolDevName(), devices.TransactionId, devices.NewTransactionId); err != nil { - return fmt.Errorf("Error setting devmapper transition ID: %s", err) - } - devices.TransactionId = devices.NewTransactionId + return nil +} + +func (devices *DeviceSet) saveMetadata(info *DevInfo) error { + jsonData, err := json.Marshal(info) + if err != nil { + return fmt.Errorf("Error encoding metadata to json: %s", err) + } + if err := devices.writeMetaFile(jsonData, devices.metadataFile(info)); err != nil { + return err } return nil } +func (devices *DeviceSet) markDeviceIdUsed(deviceId int) { + var mask byte + i := deviceId % 8 + mask = 1 << uint(i) + devices.deviceIdMap[deviceId/8] = devices.deviceIdMap[deviceId/8] | mask +} + +func (devices *DeviceSet) markDeviceIdFree(deviceId int) { + var mask byte + i := deviceId % 8 + mask = ^(1 << uint(i)) + devices.deviceIdMap[deviceId/8] = devices.deviceIdMap[deviceId/8] & mask +} + +func (devices *DeviceSet) isDeviceIdFree(deviceId int) bool { + var mask byte + i := deviceId % 8 + mask = (1 << uint(i)) + if (devices.deviceIdMap[deviceId/8] & mask) != 0 { + return false + } + return true +} + func (devices *DeviceSet) lookupDevice(hash string) (*DevInfo, error) { devices.devicesLock.Lock() defer devices.devicesLock.Unlock() @@ -251,13 +310,91 @@ func (devices *DeviceSet) lookupDevice(hash string) (*DevInfo, 
error) { return info, nil } -func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*DevInfo, error) { +func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) error { + + // Skip some of the meta files which are not device files. + if strings.HasSuffix(finfo.Name(), ".migrated") { + log.Debugf("Skipping file %s", path) + return nil + } + + if finfo.Name() == deviceSetMetaFile { + log.Debugf("Skipping file %s", path) + return nil + } + + log.Debugf("Loading data for file %s", path) + + hash := finfo.Name() + if hash == "base" { + hash = "" + } + + dinfo := devices.loadMetadata(hash) + if dinfo == nil { + return fmt.Errorf("Error loading device metadata file %s", hash) + } + + if dinfo.DeviceId > MaxDeviceId { + log.Errorf("Warning: Ignoring Invalid DeviceId=%d", dinfo.DeviceId) + return nil + } + + devices.Lock() + devices.markDeviceIdUsed(dinfo.DeviceId) + devices.Unlock() + + log.Debugf("Added deviceId=%d to DeviceIdMap", dinfo.DeviceId) + return nil +} + +func (devices *DeviceSet) constructDeviceIdMap() error { + log.Debugf("[deviceset] constructDeviceIdMap()") + defer log.Debugf("[deviceset] constructDeviceIdMap() END") + + var scan = func(path string, info os.FileInfo, err error) error { + if err != nil { + log.Debugf("Can't walk the file %s", path) + return nil + } + + // Skip any directories + if info.IsDir() { + return nil + } + + return devices.deviceFileWalkFunction(path, info) + } + + return filepath.Walk(devices.metadataDir(), scan) +} + +func (devices *DeviceSet) unregisterDevice(id int, hash string) error { + log.Debugf("unregisterDevice(%v, %v)", id, hash) + info := &DevInfo{ + Hash: hash, + DeviceId: id, + } + + devices.devicesLock.Lock() + delete(devices.Devices, hash) + devices.devicesLock.Unlock() + + if err := devices.removeMetadata(info); err != nil { + log.Debugf("Error removing meta data: %s", err) + return err + } + + return nil +} + +func (devices *DeviceSet) registerDevice(id int, hash string, 
size uint64, transactionId uint64) (*DevInfo, error) { log.Debugf("registerDevice(%v, %v)", id, hash) info := &DevInfo{ Hash: hash, DeviceId: id, Size: size, - TransactionId: devices.allocateTransactionId(), + TransactionId: transactionId, Initialized: false, devices: devices, } @@ -280,11 +417,11 @@ func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*Dev func (devices *DeviceSet) activateDeviceIfNeeded(info *DevInfo) error { log.Debugf("activateDeviceIfNeeded(%v)", info.Hash) - if devinfo, _ := getInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { + if devinfo, _ := devicemapper.GetInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { return nil } - return activateDevice(devices.getPoolDevName(), info.Name(), info.DeviceId, info.Size) + return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceId, info.Size) } func (devices *DeviceSet) createFilesystem(info *DevInfo) error { @@ -320,19 +457,8 @@ func (devices *DeviceSet) createFilesystem(info *DevInfo) error { return nil } -func (devices *DeviceSet) initMetaData() error { - _, _, _, params, err := getStatus(devices.getPoolName()) - if err != nil { - return err - } - - if _, err := fmt.Sscanf(params, "%d", &devices.TransactionId); err != nil { - return err - } - devices.NewTransactionId = devices.TransactionId - +func (devices *DeviceSet) migrateOldMetaData() error { // Migrate old metadatafile - jsonData, err := ioutil.ReadFile(devices.oldMetadataFile()) if err != nil && !os.IsNotExist(err) { return err @@ -347,11 +473,7 @@ func (devices *DeviceSet) initMetaData() error { for hash, info := range m.Devices { info.Hash = hash - - // If the transaction id is larger than the actual one we lost the device due to some crash - if info.TransactionId <= devices.TransactionId { - devices.saveMetadata(info) - } + devices.saveMetadata(info) } if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil { return err @@ -362,6 
+484,149 @@ func (devices *DeviceSet) initMetaData() error { return nil } +func (devices *DeviceSet) initMetaData() error { + if err := devices.migrateOldMetaData(); err != nil { + return err + } + + _, transactionId, _, _, _, _, err := devices.poolStatus() + if err != nil { + return err + } + + devices.TransactionId = transactionId + + if err := devices.constructDeviceIdMap(); err != nil { + return err + } + + if err := devices.processPendingTransaction(); err != nil { + return err + } + return nil +} + +func (devices *DeviceSet) incNextDeviceId() { + // Ids are 24bit, so wrap around + devices.NextDeviceId = (devices.NextDeviceId + 1) & MaxDeviceId +} + +func (devices *DeviceSet) getNextFreeDeviceId() (int, error) { + devices.incNextDeviceId() + for i := 0; i <= MaxDeviceId; i++ { + if devices.isDeviceIdFree(devices.NextDeviceId) { + devices.markDeviceIdUsed(devices.NextDeviceId) + return devices.NextDeviceId, nil + } + devices.incNextDeviceId() + } + + return 0, fmt.Errorf("Unable to find a free device Id") +} + +func (devices *DeviceSet) createRegisterDevice(hash string) (*DevInfo, error) { + deviceId, err := devices.getNextFreeDeviceId() + if err != nil { + return nil, err + } + + if err := devices.openTransaction(hash, deviceId); err != nil { + log.Debugf("Error opening transaction hash = %s deviceId = %d", hash, deviceId) + devices.markDeviceIdFree(deviceId) + return nil, err + } + + for { + if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceId); err != nil { + if devicemapper.DeviceIdExists(err) { + // Device Id already exists. This should not + // happen. Now we have a mechianism to find + // a free device Id. So something is not right. + // Give a warning and continue. 
+ log.Errorf("Warning: Device Id %d exists in pool but it is supposed to be unused", deviceId) + deviceId, err = devices.getNextFreeDeviceId() + if err != nil { + return nil, err + } + // Save new device id into transaction + devices.refreshTransaction(deviceId) + continue + } + log.Debugf("Error creating device: %s", err) + devices.markDeviceIdFree(deviceId) + return nil, err + } + break + } + + log.Debugf("Registering device (id %v) with FS size %v", deviceId, devices.baseFsSize) + info, err := devices.registerDevice(deviceId, hash, devices.baseFsSize, devices.OpenTransactionId) + if err != nil { + _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) + devices.markDeviceIdFree(deviceId) + return nil, err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(deviceId, hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) + devices.markDeviceIdFree(deviceId) + return nil, err + } + return info, nil +} + +func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *DevInfo) error { + deviceId, err := devices.getNextFreeDeviceId() + if err != nil { + return err + } + + if err := devices.openTransaction(hash, deviceId); err != nil { + log.Debugf("Error opening transaction hash = %s deviceId = %d", hash, deviceId) + devices.markDeviceIdFree(deviceId) + return err + } + + for { + if err := devicemapper.CreateSnapDevice(devices.getPoolDevName(), deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil { + if devicemapper.DeviceIdExists(err) { + // Device Id already exists. This should not + // happen. Now we have a mechianism to find + // a free device Id. So something is not right. + // Give a warning and continue. 
+ log.Errorf("Warning: Device Id %d exists in pool but it is supposed to be unused", deviceId) + deviceId, err = devices.getNextFreeDeviceId() + if err != nil { + return err + } + // Save new device id into transaction + devices.refreshTransaction(deviceId) + continue + } + log.Debugf("Error creating snap device: %s", err) + devices.markDeviceIdFree(deviceId) + return err + } + break + } + + if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size, devices.OpenTransactionId); err != nil { + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) + devices.markDeviceIdFree(deviceId) + log.Debugf("Error registering device: %s", err) + return err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(deviceId, hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) + devices.markDeviceIdFree(deviceId) + return err + } + return nil +} + func (devices *DeviceSet) loadMetadata(hash string) *DevInfo { info := &DevInfo{Hash: hash, devices: devices} @@ -374,11 +639,6 @@ func (devices *DeviceSet) loadMetadata(hash string) *DevInfo { return nil } - // If the transaction id is larger than the actual one we lost the device due to some crash - if info.TransactionId > devices.TransactionId { - return nil - } - return info } @@ -390,31 +650,35 @@ func (devices *DeviceSet) setupBaseImage() error { if oldInfo != nil && !oldInfo.Initialized { log.Debugf("Removing uninitialized base image") - if err := devices.deleteDevice(oldInfo); err != nil { + if err := devices.DeleteDevice(""); err != nil { return err } } - log.Debugf("Initializing base device-manager snapshot") + if devices.thinPoolDevice != "" && oldInfo == nil { + _, transactionId, dataUsed, _, _, _, err := devices.poolStatus() + if err != nil { + return err + } + if dataUsed != 0 { + return fmt.Errorf("Unable to take ownership of thin-pool (%s) that already has used data blocks", + devices.thinPoolDevice) + } + if transactionId != 0 { + return fmt.Errorf("Unable to 
take ownership of thin-pool (%s) with non-zero transaction Id", + devices.thinPoolDevice) + } + } - id := devices.nextDeviceId + log.Debugf("Initializing base device-mapper thin volume") // Create initial device - if err := createDevice(devices.getPoolDevName(), &id); err != nil { - return err - } - - // Ids are 24bit, so wrap around - devices.nextDeviceId = (id + 1) & 0xffffff - - log.Debugf("Registering base device (id %v) with FS size %v", id, devices.baseFsSize) - info, err := devices.registerDevice(id, "", devices.baseFsSize) + info, err := devices.createRegisterDevice("") if err != nil { - _ = deleteDevice(devices.getPoolDevName(), id) return err } - log.Debugf("Creating filesystem on base device-manager snapshot") + log.Debugf("Creating filesystem on base device-mapper thin volume") if err = devices.activateDeviceIfNeeded(info); err != nil { return err @@ -447,11 +711,12 @@ func setCloseOnExec(name string) { } } -func (devices *DeviceSet) log(level int, file string, line int, dmError int, message string) { +func (devices *DeviceSet) DMLog(level int, file string, line int, dmError int, message string) { if level >= 7 { return // Ignore _LOG_DEBUG } + // FIXME(vbatts) push this back into ./pkg/devicemapper/ log.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) } @@ -489,7 +754,7 @@ func (devices *DeviceSet) ResizePool(size int64) error { return fmt.Errorf("Can't shrink file") } - dataloopback := FindLoopDeviceFor(datafile) + dataloopback := devicemapper.FindLoopDeviceFor(datafile) if dataloopback == nil { return fmt.Errorf("Unable to find loopback mount for: %s", datafilename) } @@ -501,7 +766,7 @@ func (devices *DeviceSet) ResizePool(size int64) error { } defer metadatafile.Close() - metadataloopback := FindLoopDeviceFor(metadatafile) + metadataloopback := devicemapper.FindLoopDeviceFor(metadatafile) if metadataloopback == nil { return fmt.Errorf("Unable to find loopback mount for: %s", metadatafilename) } @@ -513,32 +778,166 @@ 
func (devices *DeviceSet) ResizePool(size int64) error { } // Reload size for loopback device - if err := LoopbackSetCapacity(dataloopback); err != nil { + if err := devicemapper.LoopbackSetCapacity(dataloopback); err != nil { return fmt.Errorf("Unable to update loopback capacity: %s", err) } // Suspend the pool - if err := suspendDevice(devices.getPoolName()); err != nil { + if err := devicemapper.SuspendDevice(devices.getPoolName()); err != nil { return fmt.Errorf("Unable to suspend pool: %s", err) } // Reload with the new block sizes - if err := reloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { + if err := devicemapper.ReloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { return fmt.Errorf("Unable to reload pool: %s", err) } // Resume the pool - if err := resumeDevice(devices.getPoolName()); err != nil { + if err := devicemapper.ResumeDevice(devices.getPoolName()); err != nil { return fmt.Errorf("Unable to resume pool: %s", err) } return nil } -func (devices *DeviceSet) initDevmapper(doInit bool) error { - logInit(devices) +func (devices *DeviceSet) loadTransactionMetaData() error { + jsonData, err := ioutil.ReadFile(devices.transactionMetaFile()) + if err != nil { + // There is no active transaction. This will be the case + // during upgrade. 
+ if os.IsNotExist(err) { + devices.OpenTransactionId = devices.TransactionId + return nil + } + return err + } - _, err := getDriverVersion() + json.Unmarshal(jsonData, &devices.Transaction) + return nil +} + +func (devices *DeviceSet) saveTransactionMetaData() error { + jsonData, err := json.Marshal(&devices.Transaction) + if err != nil { + return fmt.Errorf("Error encoding metadata to json: %s", err) + } + + return devices.writeMetaFile(jsonData, devices.transactionMetaFile()) +} + +func (devices *DeviceSet) removeTransactionMetaData() error { + if err := os.RemoveAll(devices.transactionMetaFile()); err != nil { + return err + } + return nil +} + +func (devices *DeviceSet) rollbackTransaction() error { + log.Debugf("Rolling back open transaction: TransactionId=%d hash=%s device_id=%d", devices.OpenTransactionId, devices.DeviceIdHash, devices.DeviceId) + + // A device id might have already been deleted before transaction + // closed. In that case this call will fail. Just leave a message + // in case of failure. + if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceId); err != nil { + log.Errorf("Warning: Unable to delete device: %s", err) + } + + dinfo := &DevInfo{Hash: devices.DeviceIdHash} + if err := devices.removeMetadata(dinfo); err != nil { + log.Errorf("Warning: Unable to remove meta data: %s", err) + } else { + devices.markDeviceIdFree(devices.DeviceId) + } + + if err := devices.removeTransactionMetaData(); err != nil { + log.Errorf("Warning: Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err) + } + + return nil +} + +func (devices *DeviceSet) processPendingTransaction() error { + if err := devices.loadTransactionMetaData(); err != nil { + return err + } + + // If there was open transaction but pool transaction Id is same + // as open transaction Id, nothing to roll back. 
+ if devices.TransactionId == devices.OpenTransactionId { + return nil + } + + // If open transaction Id is less than pool transaction Id, something + // is wrong. Bail out. + if devices.OpenTransactionId < devices.TransactionId { + log.Errorf("Warning: Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionId, devices.TransactionId) + return nil + } + + // Pool transaction Id is not same as open transaction. There is + // a transaction which was not completed. + if err := devices.rollbackTransaction(); err != nil { + return fmt.Errorf("Rolling back open transaction failed: %s", err) + } + + devices.OpenTransactionId = devices.TransactionId + return nil +} + +func (devices *DeviceSet) loadDeviceSetMetaData() error { + jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile()) + if err != nil { + // For backward compatibility return success if file does + // not exist. + if os.IsNotExist(err) { + return nil + } + return err + } + + return json.Unmarshal(jsonData, devices) +} + +func (devices *DeviceSet) saveDeviceSetMetaData() error { + jsonData, err := json.Marshal(devices) + if err != nil { + return fmt.Errorf("Error encoding metadata to json: %s", err) + } + + return devices.writeMetaFile(jsonData, devices.deviceSetMetaFile()) +} + +func (devices *DeviceSet) openTransaction(hash string, DeviceId int) error { + devices.allocateTransactionId() + devices.DeviceIdHash = hash + devices.DeviceId = DeviceId + if err := devices.saveTransactionMetaData(); err != nil { + return fmt.Errorf("Error saving transaction meta data: %s", err) + } + return nil +} + +func (devices *DeviceSet) refreshTransaction(DeviceId int) error { + devices.DeviceId = DeviceId + if err := devices.saveTransactionMetaData(); err != nil { + return fmt.Errorf("Error saving transaction meta data: %s", err) + } + return nil +} + +func (devices *DeviceSet) closeTransaction() error { + if err := devices.updatePoolTransactionId(); err != nil { + log.Debugf("Failed to close 
Transaction") + return err + } + return nil +} + +func (devices *DeviceSet) initDevmapper(doInit bool) error { + // give ourselves to libdm as a log handler + devicemapper.LogInit(devices) + + _, err := devicemapper.GetDriverVersion() if err != nil { // Can't even get driver version, assume not supported return graphdriver.ErrNotSupported @@ -564,11 +963,11 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino) log.Debugf("Generated prefix: %s", devices.devicePrefix) - // Check for the existence of the device -pool + // Check for the existence of the thin-pool device log.Debugf("Checking for existence of the pool '%s'", devices.getPoolName()) - info, err := getInfo(devices.getPoolName()) + info, err := devicemapper.GetInfo(devices.getPoolName()) if info == nil { - log.Debugf("Error device getInfo: %s", err) + log.Debugf("Error device devicemapper.GetInfo: %s", err) return err } @@ -583,7 +982,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { createdLoopback := false // If the pool doesn't exist, create it - if info.Exists == 0 { + if info.Exists == 0 && devices.thinPoolDevice == "" { log.Debugf("Pool doesn't exist. 
Creating it.") var ( @@ -610,7 +1009,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { return err } - dataFile, err = attachLoopDevice(data) + dataFile, err = devicemapper.AttachLoopDevice(data) if err != nil { return err } @@ -641,7 +1040,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { return err } - metadataFile, err = attachLoopDevice(metadata) + metadataFile, err = devicemapper.AttachLoopDevice(metadata) if err != nil { return err } @@ -653,7 +1052,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { } defer metadataFile.Close() - if err := createPool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil { + if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil { return err } } @@ -666,6 +1065,12 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { } } + // Right now this loads only NextDeviceId. If there is more metatadata + // down the line, we might have to move it earlier. 
+ if err = devices.loadDeviceSetMetaData(); err != nil { + return err + } + // Setup the base image if doInit { if err := devices.setupBaseImage(); err != nil { @@ -678,6 +1083,9 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { } func (devices *DeviceSet) AddDevice(hash, baseHash string) error { + log.Debugf("[deviceset] AddDevice() hash=%s basehash=%s", hash, baseHash) + defer log.Debugf("[deviceset] AddDevice END") + baseInfo, err := devices.lookupDevice(baseHash) if err != nil { return err @@ -693,21 +1101,10 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string) error { return fmt.Errorf("device %s already exists", hash) } - deviceId := devices.nextDeviceId - - if err := createSnapDevice(devices.getPoolDevName(), &deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil { - log.Debugf("Error creating snap device: %s", err) + if err := devices.createRegisterSnapDevice(hash, baseInfo); err != nil { return err } - // Ids are 24bit, so wrap around - devices.nextDeviceId = (deviceId + 1) & 0xffffff - - if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size); err != nil { - deleteDevice(devices.getPoolDevName(), deviceId) - log.Debugf("Error registering device: %s", err) - return err - } return nil } @@ -717,13 +1114,13 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error { // on the thin pool when we remove a thinp device, so we do it // manually if err := devices.activateDeviceIfNeeded(info); err == nil { - if err := BlockDeviceDiscard(info.DevName()); err != nil { + if err := devicemapper.BlockDeviceDiscard(info.DevName()); err != nil { log.Debugf("Error discarding block on device: %s (ignoring)", err) } } } - devinfo, _ := getInfo(info.Name()) + devinfo, _ := devicemapper.GetInfo(info.Name()) if devinfo != nil && devinfo.Exists != 0 { if err := devices.removeDeviceAndWait(info.Name()); err != nil { log.Debugf("Error removing device: %s", err) @@ -731,24 +1128,26 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) 
error { } } - if err := deleteDevice(devices.getPoolDevName(), info.DeviceId); err != nil { + if err := devices.openTransaction(info.Hash, info.DeviceId); err != nil { + log.Debugf("Error opening transaction hash = %s deviceId = %d", "", info.DeviceId) + return err + } + + if err := devicemapper.DeleteDevice(devices.getPoolDevName(), info.DeviceId); err != nil { log.Debugf("Error deleting device: %s", err) return err } - devices.allocateTransactionId() - devices.devicesLock.Lock() - delete(devices.Devices, info.Hash) - devices.devicesLock.Unlock() - - if err := devices.removeMetadata(info); err != nil { - devices.devicesLock.Lock() - devices.Devices[info.Hash] = info - devices.devicesLock.Unlock() - log.Debugf("Error removing meta data: %s", err) + if err := devices.unregisterDevice(info.DeviceId, info.Hash); err != nil { return err } + if err := devices.closeTransaction(); err != nil { + return err + } + + devices.markDeviceIdFree(info.DeviceId) + return nil } @@ -771,12 +1170,17 @@ func (devices *DeviceSet) deactivatePool() error { log.Debugf("[devmapper] deactivatePool()") defer log.Debugf("[devmapper] deactivatePool END") devname := devices.getPoolDevName() - devinfo, err := getInfo(devname) + + devinfo, err := devicemapper.GetInfo(devname) if err != nil { return err } + if d, err := devicemapper.GetDeps(devname); err == nil { + // Access to more Debug output + log.Debugf("[devmapper] devicemapper.GetDeps() %s: %#v", devname, d) + } if devinfo.Exists != 0 { - return removeDevice(devname) + return devicemapper.RemoveDevice(devname) } return nil @@ -792,7 +1196,7 @@ func (devices *DeviceSet) deactivateDevice(info *DevInfo) error { log.Errorf("Warning: error waiting for device %s to close: %s", info.Hash, err) } - devinfo, err := getInfo(info.Name()) + devinfo, err := devicemapper.GetInfo(info.Name()) if err != nil { return err } @@ -811,11 +1215,11 @@ func (devices *DeviceSet) removeDeviceAndWait(devname string) error { var err error for i := 0; i < 1000; i++ { - 
err = removeDevice(devname) + err = devicemapper.RemoveDevice(devname) if err == nil { break } - if err != ErrBusy { + if err != devicemapper.ErrBusy { return err } @@ -843,7 +1247,7 @@ func (devices *DeviceSet) waitRemove(devname string) error { defer log.Debugf("[deviceset %s] waitRemove(%s) END", devices.devicePrefix, devname) i := 0 for ; i < 1000; i++ { - devinfo, err := getInfo(devname) + devinfo, err := devicemapper.GetInfo(devname) if err != nil { // If there is an error we assume the device doesn't exist. // The error might actually be something else, but we can't differentiate. @@ -872,7 +1276,7 @@ func (devices *DeviceSet) waitRemove(devname string) error { func (devices *DeviceSet) waitClose(info *DevInfo) error { i := 0 for ; i < 1000; i++ { - devinfo, err := getInfo(info.Name()) + devinfo, err := devicemapper.GetInfo(info.Name()) if err != nil { return err } @@ -893,7 +1297,6 @@ func (devices *DeviceSet) waitClose(info *DevInfo) error { } func (devices *DeviceSet) Shutdown() error { - log.Debugf("[deviceset %s] shutdown()", devices.devicePrefix) log.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root) defer log.Debugf("[deviceset %s] shutdown END", devices.devicePrefix) @@ -937,9 +1340,13 @@ func (devices *DeviceSet) Shutdown() error { } devices.Lock() - if err := devices.deactivatePool(); err != nil { - log.Debugf("Shutdown deactivate pool , error: %s", err) + if devices.thinPoolDevice == "" { + if err := devices.deactivatePool(); err != nil { + log.Debugf("Shutdown deactivate pool , error: %s", err) + } } + + devices.saveDeviceSetMetaData() devices.Unlock() return nil @@ -1060,7 +1467,7 @@ func (devices *DeviceSet) HasActivatedDevice(hash string) bool { devices.Lock() defer devices.Unlock() - devinfo, _ := getInfo(info.Name()) + devinfo, _ := devicemapper.GetInfo(info.Name()) return devinfo != nil && devinfo.Exists != 0 } @@ -1082,7 +1489,7 @@ func (devices *DeviceSet) List() []string { func (devices *DeviceSet) deviceStatus(devName 
string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { var params string - _, sizeInSectors, _, params, err = getStatus(devName) + _, sizeInSectors, _, params, err = devicemapper.GetStatus(devName) if err != nil { return } @@ -1127,7 +1534,7 @@ func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionId, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { var params string - if _, totalSizeInSectors, _, params, err = getStatus(devices.getPoolName()); err == nil { + if _, totalSizeInSectors, _, params, err = devicemapper.GetStatus(devices.getPoolName()); err == nil { _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionId, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) } return @@ -1170,7 +1577,7 @@ func (devices *DeviceSet) Status() *Status { } func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error) { - SetDevDir("/dev") + devicemapper.SetDevDir("/dev") devices := &DeviceSet{ root: root, @@ -1181,6 +1588,7 @@ func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error filesystem: "ext4", doBlkDiscard: true, thinpBlockSize: DefaultThinpBlockSize, + deviceIdMap: make([]byte, DeviceIdMapSz), } foundBlkDiscard := false @@ -1222,6 +1630,8 @@ func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error devices.metadataDevice = val case "dm.datadev": devices.dataDevice = val + case "dm.thinpooldev": + devices.thinPoolDevice = strings.TrimPrefix(val, "/dev/mapper/") case "dm.blkdiscard": foundBlkDiscard = true devices.doBlkDiscard, err = strconv.ParseBool(val) @@ -1241,7 +1651,7 @@ func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error } // By default, don't do blk discard hack on raw devices, its rarely useful and is expensive - if !foundBlkDiscard && devices.dataDevice != "" { + if !foundBlkDiscard && (devices.dataDevice != "" || 
devices.thinPoolDevice != "") { devices.doBlkDiscard = false } diff --git a/daemon/graphdriver/devmapper/devmapper_test.go b/daemon/graphdriver/devmapper/devmapper_test.go index 167261999e..6cb7572384 100644 --- a/daemon/graphdriver/devmapper/devmapper_test.go +++ b/daemon/graphdriver/devmapper/devmapper_test.go @@ -3,8 +3,9 @@ package devmapper import ( - "github.com/docker/docker/daemon/graphdriver/graphtest" "testing" + + "github.com/docker/docker/daemon/graphdriver/graphtest" ) func init() { @@ -12,6 +13,9 @@ func init() { DefaultDataLoopbackSize = 300 * 1024 * 1024 DefaultMetaDataLoopbackSize = 200 * 1024 * 1024 DefaultBaseFsSize = 300 * 1024 * 1024 + if err := graphtest.InitLoopbacks(); err != nil { + panic(err) + } } // This avoids creating a new driver for each test if all tests are run diff --git a/daemon/graphdriver/devmapper/driver.go b/daemon/graphdriver/devmapper/driver.go index 8f9de85d4e..b20f3e5450 100644 --- a/daemon/graphdriver/devmapper/driver.go +++ b/daemon/graphdriver/devmapper/driver.go @@ -8,8 +8,9 @@ import ( "os" "path" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/devicemapper" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/units" ) @@ -34,7 +35,7 @@ func Init(home string, options []string) (graphdriver.Driver, error) { return nil, err } - if err := graphdriver.MakePrivate(home); err != nil { + if err := mount.MakePrivate(home); err != nil { return nil, err } @@ -63,7 +64,7 @@ func (d *Driver) Status() [][2]string { {"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(int64(s.Metadata.Used)))}, {"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(int64(s.Metadata.Total)))}, } - if vStr, err := GetLibraryVersion(); err == nil { + if vStr, err := devicemapper.GetLibraryVersion(); err == nil { status = append(status, [2]string{"Library Version", vStr}) } return status diff --git a/daemon/graphdriver/driver.go 
b/daemon/graphdriver/driver.go index 91040db97a..95479bf64f 100644 --- a/daemon/graphdriver/driver.go +++ b/daemon/graphdriver/driver.go @@ -7,7 +7,6 @@ import ( "path" "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/mount" ) type FsMagic uint64 @@ -81,6 +80,8 @@ var ( "btrfs", "devicemapper", "vfs", + // experimental, has to be enabled manually for now + "overlay", } ErrNotSupported = errors.New("driver not supported") @@ -139,18 +140,3 @@ func New(root string, options []string) (driver Driver, err error) { } return nil, fmt.Errorf("No supported storage backend found") } - -func MakePrivate(mountPoint string) error { - mounted, err := mount.Mounted(mountPoint) - if err != nil { - return err - } - - if !mounted { - if err := mount.Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { - return err - } - } - - return mount.ForceMount("", mountPoint, "none", "private") -} diff --git a/daemon/graphdriver/fsdiff.go b/daemon/graphdriver/fsdiff.go index 14e27d60c7..48852a5631 100644 --- a/daemon/graphdriver/fsdiff.go +++ b/daemon/graphdriver/fsdiff.go @@ -1,13 +1,15 @@ +// +build daemon + package graphdriver import ( "fmt" "time" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/utils" ) diff --git a/daemon/graphdriver/graphtest/graphtest.go b/daemon/graphdriver/graphtest/graphtest.go index 6407e1205d..67f15c594d 100644 --- a/daemon/graphdriver/graphtest/graphtest.go +++ b/daemon/graphdriver/graphtest/graphtest.go @@ -1,6 +1,7 @@ package graphtest import ( + "fmt" "io/ioutil" "os" "path" @@ -20,6 +21,46 @@ type Driver struct { refCount int } +// InitLoopbacks ensures that the loopback devices are properly created within +// the system running the device mapper tests. 
+func InitLoopbacks() error { + stat_t, err := getBaseLoopStats() + if err != nil { + return err + } + // create atleast 8 loopback files, ya, that is a good number + for i := 0; i < 8; i++ { + loopPath := fmt.Sprintf("/dev/loop%d", i) + // only create new loopback files if they don't exist + if _, err := os.Stat(loopPath); err != nil { + if mkerr := syscall.Mknod(loopPath, + uint32(stat_t.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil { + return mkerr + } + os.Chown(loopPath, int(stat_t.Uid), int(stat_t.Gid)) + } + } + return nil +} + +// getBaseLoopStats inspects /dev/loop0 to collect uid,gid, and mode for the +// loop0 device on the system. If it does not exist we assume 0,0,0660 for the +// stat data +func getBaseLoopStats() (*syscall.Stat_t, error) { + loop0, err := os.Stat("/dev/loop0") + if err != nil { + if os.IsNotExist(err) { + return &syscall.Stat_t{ + Uid: 0, + Gid: 0, + Mode: 0660, + }, nil + } + return nil, err + } + return loop0.Sys().(*syscall.Stat_t), nil +} + func newDriver(t *testing.T, name string) *Driver { root, err := ioutil.TempDir("/var/tmp", "docker-graphtest-") if err != nil { @@ -33,7 +74,7 @@ func newDriver(t *testing.T, name string) *Driver { d, err := graphdriver.GetDriver(name, root, nil) if err != nil { if err == graphdriver.ErrNotSupported || err == graphdriver.ErrPrerequisites { - t.Skip("Driver %s not supported", name) + t.Skipf("Driver %s not supported", name) } t.Fatal(err) } diff --git a/daemon/graphdriver/overlay/copy.go b/daemon/graphdriver/overlay/copy.go new file mode 100644 index 0000000000..ae6bee517b --- /dev/null +++ b/daemon/graphdriver/overlay/copy.go @@ -0,0 +1,157 @@ +// +build linux + +package overlay + +import ( + "fmt" + "io" + "os" + "path/filepath" + "syscall" + + "github.com/docker/docker/pkg/system" +) + +type CopyFlags int + +const ( + CopyHardlink CopyFlags = 1 << iota +) + +func copyRegular(srcPath, dstPath string, mode os.FileMode) error { + srcFile, err := os.Open(srcPath) 
+ if err != nil { + return err + } + defer srcFile.Close() + + dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE, mode) + if err != nil { + return err + } + defer dstFile.Close() + + _, err = io.Copy(dstFile, srcFile) + + return err +} + +func copyXattr(srcPath, dstPath, attr string) error { + data, err := system.Lgetxattr(srcPath, attr) + if err != nil { + return err + } + if data != nil { + if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil { + return err + } + } + return nil +} + +func copyDir(srcDir, dstDir string, flags CopyFlags) error { + err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(srcDir, srcPath) + if err != nil { + return err + } + + dstPath := filepath.Join(dstDir, relPath) + if err != nil { + return err + } + + stat, ok := f.Sys().(*syscall.Stat_t) + if !ok { + return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath) + } + + switch f.Mode() & os.ModeType { + case 0: // Regular file + if flags&CopyHardlink != 0 { + if err := os.Link(srcPath, dstPath); err != nil { + return err + } + } else { + if err := copyRegular(srcPath, dstPath, f.Mode()); err != nil { + return err + } + } + + case os.ModeDir: + if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) { + return err + } + + case os.ModeSymlink: + link, err := os.Readlink(srcPath) + if err != nil { + return err + } + + if err := os.Symlink(link, dstPath); err != nil { + return err + } + + case os.ModeNamedPipe: + fallthrough + case os.ModeSocket: + if err := syscall.Mkfifo(dstPath, stat.Mode); err != nil { + return err + } + + case os.ModeDevice: + if err := syscall.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil { + return err + } + + default: + return fmt.Errorf("Unknown file type for %s\n", srcPath) + } + + if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil { + return err + } + + if err := 
copyXattr(srcPath, dstPath, "security.capability"); err != nil { + return err + } + + // We need to copy this attribute if it appears in an overlay upper layer, as + // this function is used to copy those. It is set by overlay if a directory + // is removed and then re-created and should not inherit anything from the + // same dir in the lower dir. + if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil { + return err + } + + isSymlink := f.Mode()&os.ModeSymlink != 0 + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if !isSymlink { + if err := os.Chmod(dstPath, f.Mode()); err != nil { + return err + } + } + + ts := []syscall.Timespec{stat.Atim, stat.Mtim} + // syscall.UtimesNano doesn't support a NOFOLLOW flag atm, and + if !isSymlink { + if err := system.UtimesNano(dstPath, ts); err != nil { + return err + } + } else { + if err := system.LUtimesNano(dstPath, ts); err != nil { + return err + } + } + return nil + }) + return err +} diff --git a/daemon/graphdriver/overlay/overlay.go b/daemon/graphdriver/overlay/overlay.go new file mode 100644 index 0000000000..2569ccb6d1 --- /dev/null +++ b/daemon/graphdriver/overlay/overlay.go @@ -0,0 +1,370 @@ +// +build linux + +package overlay + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "sync" + "syscall" + + log "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/libcontainer/label" +) + +// This is a small wrapper over the NaiveDiffWriter that lets us have a custom +// implementation of ApplyDiff() + +var ( + ErrApplyDiffFallback = fmt.Errorf("Fall back to normal ApplyDiff") +) + +type ApplyDiffProtoDriver interface { + graphdriver.ProtoDriver + ApplyDiff(id, parent string, diff archive.ArchiveReader) (bytes int64, err error) +} + +type naiveDiffDriverWithApply 
struct { + graphdriver.Driver + applyDiff ApplyDiffProtoDriver +} + +func NaiveDiffDriverWithApply(driver ApplyDiffProtoDriver) graphdriver.Driver { + return &naiveDiffDriverWithApply{ + Driver: graphdriver.NaiveDiffDriver(driver), + applyDiff: driver, + } +} + +func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff archive.ArchiveReader) (int64, error) { + b, err := d.applyDiff.ApplyDiff(id, parent, diff) + if err == ErrApplyDiffFallback { + return d.Driver.ApplyDiff(id, parent, diff) + } + return b, err +} + +// This backend uses the overlay union filesystem for containers +// plus hard link file sharing for images. + +// Each container/image can have a "root" subdirectory which is a plain +// filesystem hierarchy, or they can use overlay. + +// If they use overlay there is a "upper" directory and a "lower-id" +// file, as well as "merged" and "work" directories. The "upper" +// directory has the upper layer of the overlay, and "lower-id" contains +// the id of the parent whose "root" directory shall be used as the lower +// layer in the overlay. The overlay itself is mounted in the "merged" +// directory, and the "work" dir is needed for overlay to work. + +// When a overlay layer is created there are two cases, either the +// parent has a "root" dir, then we start out with a empty "upper" +// directory overlaid on the parents root. This is typically the +// case with the init layer of a container which is based on an image. +// If there is no "root" in the parent, we inherit the lower-id from +// the parent and start by making a copy if the parents "upper" dir. +// This is typically the case for a container layer which copies +// its parent -init upper layer. + +// Additionally we also have a custom implementation of ApplyLayer +// which makes a recursive copy of the parent "root" layer using +// hardlinks to share file data, and then applies the layer on top +// of that. 
This means all child images share file (but not directory) +// data with the parent. + +type ActiveMount struct { + count int + path string + mounted bool +} +type Driver struct { + home string + sync.Mutex // Protects concurrent modification to active + active map[string]*ActiveMount +} + +func init() { + graphdriver.Register("overlay", Init) +} + +func Init(home string, options []string) (graphdriver.Driver, error) { + if err := supportsOverlay(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + // Create the driver home dir + if err := os.MkdirAll(home, 0755); err != nil && !os.IsExist(err) { + return nil, err + } + + d := &Driver{ + home: home, + active: make(map[string]*ActiveMount), + } + + return NaiveDiffDriverWithApply(d), nil +} + +func supportsOverlay() error { + // We can try to modprobe overlay first before looking at + // proc/filesystems for when overlay is supported + exec.Command("modprobe", "overlay").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if s.Text() == "nodev\toverlay" { + return nil + } + } + log.Error("'overlay' not found as a supported filesystem on this host. 
Please ensure kernel is new enough and has overlay support loaded.") + return graphdriver.ErrNotSupported +} + +func (d *Driver) String() string { + return "overlay" +} + +func (d *Driver) Status() [][2]string { + return nil +} + +func (d *Driver) Cleanup() error { + return nil +} + +func (d *Driver) Create(id string, parent string) (retErr error) { + dir := d.dir(id) + if err := os.MkdirAll(path.Dir(dir), 0700); err != nil { + return err + } + if err := os.Mkdir(dir, 0700); err != nil { + return err + } + + defer func() { + // Clean up on failure + if retErr != nil { + os.RemoveAll(dir) + } + }() + + // Toplevel images are just a "root" dir + if parent == "" { + if err := os.Mkdir(path.Join(dir, "root"), 0755); err != nil { + return err + } + return nil + } + + parentDir := d.dir(parent) + + // Ensure parent exists + if _, err := os.Lstat(parentDir); err != nil { + return err + } + + // If parent has a root, just do a overlay to it + parentRoot := path.Join(parentDir, "root") + + if s, err := os.Lstat(parentRoot); err == nil { + if err := os.Mkdir(path.Join(dir, "upper"), s.Mode()); err != nil { + return err + } + if err := os.Mkdir(path.Join(dir, "work"), 0700); err != nil { + return err + } + if err := os.Mkdir(path.Join(dir, "merged"), 0700); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), []byte(parent), 0666); err != nil { + return err + } + return nil + } + + // Otherwise, copy the upper and the lower-id from the parent + + lowerId, err := ioutil.ReadFile(path.Join(parentDir, "lower-id")) + if err != nil { + return err + } + + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), lowerId, 0666); err != nil { + return err + } + + parentUpperDir := path.Join(parentDir, "upper") + s, err := os.Lstat(parentUpperDir) + if err != nil { + return err + } + + upperDir := path.Join(dir, "upper") + if err := os.Mkdir(upperDir, s.Mode()); err != nil { + return err + } + if err := os.Mkdir(path.Join(dir, "work"), 0700); err != nil 
{ + return err + } + if err := os.Mkdir(path.Join(dir, "merged"), 0700); err != nil { + return err + } + + return copyDir(parentUpperDir, upperDir, 0) +} + +func (d *Driver) dir(id string) string { + return path.Join(d.home, id) +} + +func (d *Driver) Remove(id string) error { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return err + } + return os.RemoveAll(dir) +} + +func (d *Driver) Get(id string, mountLabel string) (string, error) { + // Protect the d.active from concurrent access + d.Lock() + defer d.Unlock() + + mount := d.active[id] + if mount != nil { + mount.count++ + return mount.path, nil + } else { + mount = &ActiveMount{count: 1} + } + + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return "", err + } + + // If id has a root, just return it + rootDir := path.Join(dir, "root") + if _, err := os.Stat(rootDir); err == nil { + mount.path = rootDir + d.active[id] = mount + return mount.path, nil + } + + lowerId, err := ioutil.ReadFile(path.Join(dir, "lower-id")) + if err != nil { + return "", err + } + lowerDir := path.Join(d.dir(string(lowerId)), "root") + upperDir := path.Join(dir, "upper") + workDir := path.Join(dir, "work") + mergedDir := path.Join(dir, "merged") + + opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir) + if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil { + return "", err + } + mount.path = mergedDir + mount.mounted = true + d.active[id] = mount + + return mount.path, nil +} + +func (d *Driver) Put(id string) { + // Protect the d.active from concurrent access + d.Lock() + defer d.Unlock() + + mount := d.active[id] + if mount == nil { + log.Debugf("Put on a non-mounted device %s", id) + return + } + + mount.count-- + if mount.count > 0 { + return + } + + if mount.mounted { + if err := syscall.Unmount(mount.path, 0); err != nil { + log.Debugf("Failed to unmount %s overlay: %v", id, err) + } + } + + 
delete(d.active, id) +} + +func (d *Driver) ApplyDiff(id string, parent string, diff archive.ArchiveReader) (bytes int64, err error) { + dir := d.dir(id) + + if parent == "" { + return 0, ErrApplyDiffFallback + } + + parentRootDir := path.Join(d.dir(parent), "root") + if _, err := os.Stat(parentRootDir); err != nil { + return 0, ErrApplyDiffFallback + } + + // We now know there is a parent, and it has a "root" directory containing + // the full root filesystem. We can just hardlink it and apply the + // layer. This relies on two things: + // 1) ApplyDiff is only run once on a clean (no writes to upper layer) container + // 2) ApplyDiff doesn't do any in-place writes to files (would break hardlinks) + // These are all currently true and are not expected to break + + tmpRootDir, err := ioutil.TempDir(dir, "tmproot") + if err != nil { + return 0, err + } + defer func() { + if err != nil { + os.RemoveAll(tmpRootDir) + } else { + os.RemoveAll(path.Join(dir, "upper")) + os.RemoveAll(path.Join(dir, "work")) + os.RemoveAll(path.Join(dir, "merged")) + os.RemoveAll(path.Join(dir, "lower-id")) + } + }() + + if err = copyDir(parentRootDir, tmpRootDir, CopyHardlink); err != nil { + return 0, err + } + + if err := chrootarchive.ApplyLayer(tmpRootDir, diff); err != nil { + return 0, err + } + + rootDir := path.Join(dir, "root") + if err := os.Rename(tmpRootDir, rootDir); err != nil { + return 0, err + } + + changes, err := archive.ChangesDirs(rootDir, parentRootDir) + if err != nil { + return 0, err + } + + return archive.ChangesSize(rootDir, changes), nil +} + +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} diff --git a/daemon/graphdriver/overlay/overlay_test.go b/daemon/graphdriver/overlay/overlay_test.go new file mode 100644 index 0000000000..88194e4ff8 --- /dev/null +++ b/daemon/graphdriver/overlay/overlay_test.go @@ -0,0 +1,28 @@ +package overlay + +import ( + "github.com/docker/docker/daemon/graphdriver/graphtest" + "testing" 
+) + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestOverlaySetup and TestOverlayTeardown +func TestOverlaySetup(t *testing.T) { + graphtest.GetDriver(t, "overlay") +} + +func TestOverlayCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "overlay") +} + +func TestOverlayCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "overlay") +} + +func TestOverlayCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "overlay") +} + +func TestOverlayTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/daemon/image_delete.go b/daemon/image_delete.go index 332db7b4c0..b0b0c3a023 100644 --- a/daemon/image_delete.go +++ b/daemon/image_delete.go @@ -133,6 +133,9 @@ func (daemon *Daemon) canDeleteImage(imgID string, force bool) error { for _, container := range daemon.List() { parent, err := daemon.Repositories().LookupImage(container.Image) if err != nil { + if daemon.Graph().IsNotExist(err) { + return nil + } return err } diff --git a/daemon/info.go b/daemon/info.go index 3d3c9ba6ca..518722b6c2 100644 --- a/daemon/info.go +++ b/daemon/info.go @@ -4,11 +4,12 @@ import ( "os" "runtime" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/dockerversion" "github.com/docker/docker/engine" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/docker/pkg/parsers/operatingsystem" + "github.com/docker/docker/pkg/system" "github.com/docker/docker/registry" "github.com/docker/docker/utils" ) @@ -37,6 +38,11 @@ func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status { operatingSystem += " (containerized)" } + meminfo, err := system.ReadMemInfo() + if err != nil { + log.Errorf("Could not read system memory info: %v", err) + } + // if we still have the original dockerinit binary from before we copied it locally, let's return the path to that, since that's more intuitive (the copied path is trivial to derive by hand given 
VERSION) initPath := utils.DockerInitPath("") if initPath == "" { @@ -50,6 +56,7 @@ func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status { return job.Error(err) } v := &engine.Env{} + v.Set("ID", daemon.ID) v.SetInt("Containers", len(daemon.List())) v.SetInt("Images", imgcount) v.Set("Driver", daemon.GraphDriver().String()) @@ -67,6 +74,13 @@ func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status { v.Set("IndexServerAddress", registry.IndexServerAddress()) v.Set("InitSha1", dockerversion.INITSHA1) v.Set("InitPath", initPath) + v.SetInt("NCPU", runtime.NumCPU()) + v.SetInt64("MemTotal", meminfo.MemTotal) + v.Set("DockerRootDir", daemon.Config().Root) + if hostname, err := os.Hostname(); err == nil { + v.Set("Name", hostname) + } + v.SetList("Labels", daemon.Config().Labels) if _, err := v.WriteTo(job.Stdout); err != nil { return job.Error(err) } diff --git a/daemon/inspect.go b/daemon/inspect.go index cf2ed644d0..a6ff2de693 100644 --- a/daemon/inspect.go +++ b/daemon/inspect.go @@ -65,3 +65,21 @@ func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status { } return job.Errorf("No such container: %s", name) } + +func (daemon *Daemon) ContainerExecInspect(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("usage: %s ID", job.Name) + } + id := job.Args[0] + eConfig, err := daemon.getExecConfig(id) + if err != nil { + return job.Error(err) + } + + b, err := json.Marshal(*eConfig) + if err != nil { + return job.Error(err) + } + job.Stdout.Write(b) + return engine.StatusOK +} diff --git a/daemon/list.go b/daemon/list.go index 25360b679e..29d7298fc2 100644 --- a/daemon/list.go +++ b/daemon/list.go @@ -28,7 +28,6 @@ func (daemon *Daemon) Containers(job *engine.Job) engine.Status { size = job.GetenvBool("size") psFilters filters.Args filt_exited []int - filt_status []string ) outs := engine.NewTable("Created", 0) @@ -46,8 +45,6 @@ func (daemon *Daemon) Containers(job *engine.Job) engine.Status { } } - filt_status, _ = 
psFilters["status"] - names := map[string][]string{} daemon.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error { names[e.ID()] = append(names[e.ID()], p) @@ -76,6 +73,15 @@ func (daemon *Daemon) Containers(job *engine.Job) engine.Status { if !container.Running && !all && n <= 0 && since == "" && before == "" { return nil } + + if !psFilters.Match("name", container.Name) { + return nil + } + + if !psFilters.Match("id", container.ID) { + return nil + } + if before != "" && !foundBefore { if container.ID == beforeCont.ID { foundBefore = true @@ -102,10 +108,9 @@ func (daemon *Daemon) Containers(job *engine.Job) engine.Status { return nil } } - for _, status := range filt_status { - if container.State.StateString() != strings.ToLower(status) { - return nil - } + + if !psFilters.Match("status", container.State.StateString()) { + return nil } displayed++ out := &engine.Env{} diff --git a/daemon/logs.go b/daemon/logs.go index b4df401efd..6c9373f737 100644 --- a/daemon/logs.go +++ b/daemon/logs.go @@ -7,10 +7,11 @@ import ( "io" "os" "strconv" + "sync" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/jsonlog" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/pkg/tailfile" "github.com/docker/docker/pkg/timeutils" ) @@ -112,24 +113,36 @@ func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status { } if follow && container.IsRunning() { errors := make(chan error, 2) + wg := sync.WaitGroup{} + if stdout { + wg.Add(1) stdoutPipe := container.StdoutLogPipe() defer stdoutPipe.Close() go func() { errors <- jsonlog.WriteLog(stdoutPipe, job.Stdout, format) + wg.Done() }() } if stderr { + wg.Add(1) stderrPipe := container.StderrLogPipe() defer stderrPipe.Close() go func() { errors <- jsonlog.WriteLog(stderrPipe, job.Stderr, format) + wg.Done() }() } - err := <-errors - if err != nil { - log.Errorf("%s", err) + + wg.Wait() + close(errors) + + for err := range errors { + if err != nil { + 
log.Errorf("%s", err) + } } + } return engine.StatusOK } diff --git a/daemon/monitor.go b/daemon/monitor.go index b5dd741012..12a6996330 100644 --- a/daemon/monitor.go +++ b/daemon/monitor.go @@ -6,8 +6,8 @@ import ( "sync" "time" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/runconfig" ) @@ -100,7 +100,7 @@ func (m *containerMonitor) Close() error { func (m *containerMonitor) Start() error { var ( err error - exitStatus int + exitStatus execdriver.ExitStatus // this variable indicates where we in execution flow: // before Run or after afterRun bool @@ -110,7 +110,7 @@ func (m *containerMonitor) Start() error { defer func() { if afterRun { m.container.Lock() - m.container.setStopped(exitStatus) + m.container.setStopped(&exitStatus) defer m.container.Unlock() } m.Close() @@ -138,6 +138,7 @@ func (m *containerMonitor) Start() error { // if we receive an internal error from the initial start of a container then lets // return it instead of entering the restart loop if m.container.RestartCount == 0 { + m.container.ExitCode = -1 m.resetContainer(false) return err @@ -149,10 +150,10 @@ func (m *containerMonitor) Start() error { // here container.Lock is already lost afterRun = true - m.resetMonitor(err == nil && exitStatus == 0) + m.resetMonitor(err == nil && exitStatus.ExitCode == 0) - if m.shouldRestart(exitStatus) { - m.container.SetRestarting(exitStatus) + if m.shouldRestart(exitStatus.ExitCode) { + m.container.SetRestarting(&exitStatus) m.container.LogEvent("die") m.resetContainer(true) @@ -163,10 +164,12 @@ func (m *containerMonitor) Start() error { // we need to check this before reentering the loop because the waitForNextRestart could have // been terminated by a request from a user if m.shouldStop { + m.container.ExitCode = exitStatus.ExitCode return err } continue } + m.container.ExitCode = exitStatus.ExitCode m.container.LogEvent("die") m.resetContainer(true) 
return err @@ -206,7 +209,7 @@ func (m *containerMonitor) waitForNextRestart() { // shouldRestart checks the restart policy and applies the rules to determine if // the container's process should be restarted -func (m *containerMonitor) shouldRestart(exitStatus int) bool { +func (m *containerMonitor) shouldRestart(exitCode int) bool { m.mux.Lock() defer m.mux.Unlock() @@ -225,7 +228,7 @@ func (m *containerMonitor) shouldRestart(exitStatus int) bool { return false } - return exitStatus != 0 + return exitCode != 0 } return false diff --git a/daemon/networkdriver/bridge/driver.go b/daemon/networkdriver/bridge/driver.go index e05a2c21a5..e0467b6bd7 100644 --- a/daemon/networkdriver/bridge/driver.go +++ b/daemon/networkdriver/bridge/driver.go @@ -4,16 +4,17 @@ import ( "fmt" "io/ioutil" "net" - "strings" + "os" + "strconv" "sync" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/networkdriver" "github.com/docker/docker/daemon/networkdriver/ipallocator" - "github.com/docker/docker/daemon/networkdriver/portallocator" "github.com/docker/docker/daemon/networkdriver/portmapper" "github.com/docker/docker/engine" + "github.com/docker/docker/nat" "github.com/docker/docker/pkg/iptables" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/pkg/networkfs/resolvconf" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/libcontainer/netlink" @@ -104,8 +105,8 @@ func InitDriver(job *engine.Job) engine.Status { if !usingDefaultBridge { return job.Error(err) } - // If the iface is not found, try to create it - if err := createBridge(bridgeIP); err != nil { + // If the bridge interface is not found (or has no address), try to create it and/or add an address + if err := configureBridge(bridgeIP); err != nil { return job.Error(err) } @@ -193,7 +194,7 @@ func setupIPTables(addr net.Addr, icc, ipmasq bool) error { if output, err := iptables.Raw(append([]string{"-I"}, natArgs...)...); err != nil { return fmt.Errorf("Unable to enable network bridge NAT: 
%s", err) } else if len(output) != 0 { - return fmt.Errorf("Error iptables postrouting: %s", output) + return &iptables.ChainError{Chain: "POSTROUTING", Output: output} } } } @@ -234,7 +235,7 @@ func setupIPTables(addr net.Addr, icc, ipmasq bool) error { if output, err := iptables.Raw(append([]string{"-I"}, outgoingArgs...)...); err != nil { return fmt.Errorf("Unable to allow outgoing packets: %s", err) } else if len(output) != 0 { - return fmt.Errorf("Error iptables allow outgoing: %s", output) + return &iptables.ChainError{Chain: "FORWARD outgoing", Output: output} } } @@ -245,16 +246,18 @@ func setupIPTables(addr net.Addr, icc, ipmasq bool) error { if output, err := iptables.Raw(append([]string{"-I"}, existingArgs...)...); err != nil { return fmt.Errorf("Unable to allow incoming packets: %s", err) } else if len(output) != 0 { - return fmt.Errorf("Error iptables allow incoming: %s", output) + return &iptables.ChainError{Chain: "FORWARD incoming", Output: output} } } return nil } -// CreateBridgeIface creates a network bridge interface on the host system with the name `ifaceName`, -// and attempts to configure it with an address which doesn't conflict with any other interface on the host. -// If it can't find an address which doesn't conflict, it will return an error. -func createBridge(bridgeIP string) error { +// configureBridge attempts to create and configure a network bridge interface named `bridgeIface` on the host +// If bridgeIP is empty, it will try to find a non-conflicting IP from the Docker-specified private ranges +// If the bridge `bridgeIface` already exists, it will only perform the IP address association with the existing +// bridge (fixes issue #8444) +// If an address which doesn't conflict with existing interfaces can't be found, an error is returned. 
+func configureBridge(bridgeIP string) error { nameservers := []string{} resolvConf, _ := resolvconf.Get() // we don't check for an error here, because we don't really care @@ -295,7 +298,10 @@ func createBridge(bridgeIP string) error { log.Debugf("Creating bridge %s with network %s", bridgeIface, ifaceAddr) if err := createBridgeIface(bridgeIface); err != nil { - return err + // the bridge may already exist, therefore we can ignore an "exists" error + if !os.IsExist(err) { + return err + } } iface, err := net.InterfaceByName(bridgeIface) @@ -461,22 +467,13 @@ func AllocatePort(job *engine.Job) engine.Status { if host, err = portmapper.Map(container, ip, hostPort); err == nil { break } - - if allocerr, ok := err.(portallocator.ErrPortAlreadyAllocated); ok { - // There is no point in immediately retrying to map an explicitly - // chosen port. - if hostPort != 0 { - job.Logf("Failed to bind %s for container address %s: %s", allocerr.IPPort(), container.String(), allocerr.Error()) - break - } - - // Automatically chosen 'free' port failed to bind: move on the next. - job.Logf("Failed to bind %s for container address %s. Trying another port.", allocerr.IPPort(), container.String()) - } else { - // some other error during mapping - job.Logf("Received an unexpected error during port allocation: %s", err.Error()) + // There is no point in immediately retrying to map an explicitly + // chosen port. 
+ if hostPort != 0 { + job.Logf("Failed to allocate and map port %d: %s", hostPort, err) break } + job.Logf("Failed to allocate and map port: %s, retry: %d", err, i+1) } if err != nil { @@ -509,18 +506,13 @@ func LinkContainers(job *engine.Job) engine.Status { ignoreErrors = job.GetenvBool("IgnoreErrors") ports = job.GetenvList("Ports") ) - split := func(p string) (string, string) { - parts := strings.Split(p, "/") - return parts[0], parts[1] - } - - for _, p := range ports { - port, proto := split(p) + for _, value := range ports { + port := nat.Port(value) if output, err := iptables.Raw(action, "FORWARD", "-i", bridgeIface, "-o", bridgeIface, - "-p", proto, + "-p", port.Proto(), "-s", parentIP, - "--dport", port, + "--dport", strconv.Itoa(port.Int()), "-d", childIP, "-j", "ACCEPT"); !ignoreErrors && err != nil { return job.Error(err) @@ -530,9 +522,9 @@ func LinkContainers(job *engine.Job) engine.Status { if output, err := iptables.Raw(action, "FORWARD", "-i", bridgeIface, "-o", bridgeIface, - "-p", proto, + "-p", port.Proto(), "-s", childIP, - "--sport", port, + "--sport", strconv.Itoa(port.Int()), "-d", parentIP, "-j", "ACCEPT"); !ignoreErrors && err != nil { return job.Error(err) diff --git a/daemon/networkdriver/ipallocator/allocator.go b/daemon/networkdriver/ipallocator/allocator.go index a1aaabbdfe..a8625c0300 100644 --- a/daemon/networkdriver/ipallocator/allocator.go +++ b/daemon/networkdriver/ipallocator/allocator.go @@ -1,31 +1,38 @@ package ipallocator import ( - "encoding/binary" "errors" + "math/big" "net" "sync" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/networkdriver" ) // allocatedMap is thread-unsafe set of allocated IP type allocatedMap struct { - p map[uint32]struct{} - last uint32 - begin uint32 - end uint32 + p map[string]struct{} + last *big.Int + begin *big.Int + end *big.Int } func newAllocatedMap(network *net.IPNet) *allocatedMap { firstIP, lastIP := networkdriver.NetworkRange(network) - begin := ipToInt(firstIP) + 
2 - end := ipToInt(lastIP) - 1 + begin := big.NewInt(0).Add(ipToBigInt(firstIP), big.NewInt(1)) + end := big.NewInt(0).Sub(ipToBigInt(lastIP), big.NewInt(1)) + + // if IPv4 network, then allocation range starts at begin + 1 because begin is bridge IP + if len(firstIP) == 4 { + begin = begin.Add(begin, big.NewInt(1)) + } + return &allocatedMap{ - p: make(map[uint32]struct{}), + p: make(map[string]struct{}), begin: begin, end: end, - last: begin - 1, // so first allocated will be begin + last: big.NewInt(0).Sub(begin, big.NewInt(1)), // so first allocated will be begin } } @@ -56,13 +63,16 @@ func RegisterSubnet(network *net.IPNet, subnet *net.IPNet) error { } n := newAllocatedMap(network) beginIP, endIP := networkdriver.NetworkRange(subnet) - begin, end := ipToInt(beginIP)+1, ipToInt(endIP)-1 - if !(begin >= n.begin && end <= n.end && begin < end) { + begin := big.NewInt(0).Add(ipToBigInt(beginIP), big.NewInt(1)) + end := big.NewInt(0).Sub(ipToBigInt(endIP), big.NewInt(1)) + + // Check that subnet is within network + if !(begin.Cmp(n.begin) >= 0 && end.Cmp(n.end) <= 0 && begin.Cmp(end) == -1) { return ErrBadSubnet } - n.begin = begin - n.end = end - n.last = begin - 1 + n.begin.Set(begin) + n.end.Set(end) + n.last.Sub(begin, big.NewInt(1)) allocatedIPs[key] = n return nil } @@ -93,28 +103,25 @@ func ReleaseIP(network *net.IPNet, ip net.IP) error { lock.Lock() defer lock.Unlock() if allocated, exists := allocatedIPs[network.String()]; exists { - pos := ipToInt(ip) - delete(allocated.p, pos) + delete(allocated.p, ip.String()) } return nil } func (allocated *allocatedMap) checkIP(ip net.IP) (net.IP, error) { - pos := ipToInt(ip) - - // Verify that the IP address has not been already allocated. - if _, ok := allocated.p[pos]; ok { + if _, ok := allocated.p[ip.String()]; ok { return nil, ErrIPAlreadyAllocated } + pos := ipToBigInt(ip) // Verify that the IP address is within our network range. 
- if pos < allocated.begin || pos > allocated.end { + if pos.Cmp(allocated.begin) == -1 || pos.Cmp(allocated.end) == 1 { return nil, ErrIPOutOfRange } // Register the IP. - allocated.p[pos] = struct{}{} - allocated.last = pos + allocated.p[ip.String()] = struct{}{} + allocated.last.Set(pos) return ip, nil } @@ -122,29 +129,38 @@ func (allocated *allocatedMap) checkIP(ip net.IP) (net.IP, error) { // return an available ip if one is currently available. If not, // return the next available ip for the nextwork func (allocated *allocatedMap) getNextIP() (net.IP, error) { - for pos := allocated.last + 1; pos != allocated.last; pos++ { - if pos > allocated.end { - pos = allocated.begin + pos := big.NewInt(0).Set(allocated.last) + allRange := big.NewInt(0).Sub(allocated.end, allocated.begin) + for i := big.NewInt(0); i.Cmp(allRange) <= 0; i.Add(i, big.NewInt(1)) { + pos.Add(pos, big.NewInt(1)) + if pos.Cmp(allocated.end) == 1 { + pos.Set(allocated.begin) } - if _, ok := allocated.p[pos]; ok { + if _, ok := allocated.p[bigIntToIP(pos).String()]; ok { continue } - allocated.p[pos] = struct{}{} - allocated.last = pos - return intToIP(pos), nil + allocated.p[bigIntToIP(pos).String()] = struct{}{} + allocated.last.Set(pos) + return bigIntToIP(pos), nil } return nil, ErrNoAvailableIPs } -// Converts a 4 bytes IP into a 32 bit integer -func ipToInt(ip net.IP) uint32 { - return binary.BigEndian.Uint32(ip.To4()) +// Converts a 4 bytes IP into a 128 bit integer +func ipToBigInt(ip net.IP) *big.Int { + x := big.NewInt(0) + if ip4 := ip.To4(); ip4 != nil { + return x.SetBytes(ip4) + } + if ip6 := ip.To16(); ip6 != nil { + return x.SetBytes(ip6) + } + + log.Errorf("ipToBigInt: Wrong IP length! 
%s", ip) + return nil } -// Converts 32 bit integer into a 4 bytes IP address -func intToIP(n uint32) net.IP { - b := make([]byte, 4) - binary.BigEndian.PutUint32(b, n) - ip := net.IP(b) - return ip +// Converts 128 bit integer into a 4 bytes IP address +func bigIntToIP(v *big.Int) net.IP { + return net.IP(v.Bytes()) } diff --git a/daemon/networkdriver/ipallocator/allocator_test.go b/daemon/networkdriver/ipallocator/allocator_test.go index 056c13b647..8e0e853fac 100644 --- a/daemon/networkdriver/ipallocator/allocator_test.go +++ b/daemon/networkdriver/ipallocator/allocator_test.go @@ -2,6 +2,7 @@ package ipallocator import ( "fmt" + "math/big" "net" "testing" ) @@ -10,6 +11,46 @@ func reset() { allocatedIPs = networkSet{} } +func TestConversion(t *testing.T) { + ip := net.ParseIP("127.0.0.1") + i := ipToBigInt(ip) + if i.Cmp(big.NewInt(0x7f000001)) != 0 { + t.Fatal("incorrect conversion") + } + conv := bigIntToIP(i) + if !ip.Equal(conv) { + t.Error(conv.String()) + } +} + +func TestConversionIPv6(t *testing.T) { + ip := net.ParseIP("2a00:1450::1") + ip2 := net.ParseIP("2a00:1450::2") + ip3 := net.ParseIP("2a00:1450::1:1") + i := ipToBigInt(ip) + val, success := big.NewInt(0).SetString("2a001450000000000000000000000001", 16) + if !success { + t.Fatal("Hex-String to BigInt conversion failed.") + } + if i.Cmp(val) != 0 { + t.Fatal("incorrent conversion") + } + + conv := bigIntToIP(i) + conv2 := bigIntToIP(big.NewInt(0).Add(i, big.NewInt(1))) + conv3 := bigIntToIP(big.NewInt(0).Add(i, big.NewInt(0x10000))) + + if !ip.Equal(conv) { + t.Error("2a00:1450::1 should be equal to " + conv.String()) + } + if !ip2.Equal(conv2) { + t.Error("2a00:1450::2 should be equal to " + conv2.String()) + } + if !ip3.Equal(conv3) { + t.Error("2a00:1450::1:1 should be equal to " + conv3.String()) + } +} + func TestRequestNewIps(t *testing.T) { defer reset() network := &net.IPNet{ @@ -19,6 +60,7 @@ func TestRequestNewIps(t *testing.T) { var ip net.IP var err error + for i := 2; i < 10; i++ { 
ip, err = RequestIP(network, nil) if err != nil { @@ -29,7 +71,39 @@ func TestRequestNewIps(t *testing.T) { t.Fatalf("Expected ip %s got %s", expected, ip.String()) } } - value := intToIP(ipToInt(ip) + 1).String() + value := bigIntToIP(big.NewInt(0).Add(ipToBigInt(ip), big.NewInt(1))).String() + if err := ReleaseIP(network, ip); err != nil { + t.Fatal(err) + } + ip, err = RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + if ip.String() != value { + t.Fatalf("Expected to receive the next ip %s got %s", value, ip.String()) + } +} + +func TestRequestNewIpV6(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, + Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask + } + + var ip net.IP + var err error + for i := 1; i < 10; i++ { + ip, err = RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + if expected := fmt.Sprintf("2a00:1450::%d", i); ip.String() != expected { + t.Fatalf("Expected ip %s got %s", expected, ip.String()) + } + } + value := bigIntToIP(big.NewInt(0).Add(ipToBigInt(ip), big.NewInt(1))).String() if err := ReleaseIP(network, ip); err != nil { t.Fatal(err) } @@ -59,6 +133,23 @@ func TestReleaseIp(t *testing.T) { } } +func TestReleaseIpV6(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, + Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask + } + + ip, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + if err := ReleaseIP(network, ip); err != nil { + t.Fatal(err) + } +} + func TestGetReleasedIp(t *testing.T) { defer reset() network := &net.IPNet{ @@ -97,6 +188,44 @@ func TestGetReleasedIp(t *testing.T) { } } +func TestGetReleasedIpV6(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, + Mask: 
[]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0}, + } + + ip, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + value := ip.String() + if err := ReleaseIP(network, ip); err != nil { + t.Fatal(err) + } + + for i := 0; i < 253; i++ { + _, err = RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + err = ReleaseIP(network, ip) + if err != nil { + t.Fatal(err) + } + } + + ip, err = RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + if ip.String() != value { + t.Fatalf("Expected to receive same ip %s got %s", value, ip.String()) + } +} + func TestRequestSpecificIp(t *testing.T) { defer reset() network := &net.IPNet{ @@ -122,15 +251,28 @@ func TestRequestSpecificIp(t *testing.T) { } } -func TestConversion(t *testing.T) { - ip := net.ParseIP("127.0.0.1") - i := ipToInt(ip) - if i == 0 { - t.Fatal("converted to zero") +func TestRequestSpecificIpV6(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, + Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask } - conv := intToIP(i) - if !ip.Equal(conv) { - t.Error(conv.String()) + + ip := net.ParseIP("2a00:1450::5") + + // Request a "good" IP. + if _, err := RequestIP(network, ip); err != nil { + t.Fatal(err) + } + + // Request the same IP again. + if _, err := RequestIP(network, ip); err != ErrIPAlreadyAllocated { + t.Fatalf("Got the same IP twice: %#v", err) + } + + // Request an out of range IP. 
+ if _, err := RequestIP(network, net.ParseIP("2a00:1500::1")); err != ErrIPOutOfRange { + t.Fatalf("Got an out of range IP: %#v", err) } } @@ -144,6 +286,7 @@ func TestIPAllocator(t *testing.T) { } gwIP, n, _ := net.ParseCIDR("127.0.0.1/29") + network := &net.IPNet{IP: gwIP, Mask: n.Mask} // Pool after initialisation (f = free, u = used) // 2(f) - 3(f) - 4(f) - 5(f) - 6(f) @@ -237,13 +380,13 @@ func TestAllocateFirstIP(t *testing.T) { } firstIP := network.IP.To4().Mask(network.Mask) - first := ipToInt(firstIP) + 1 + first := big.NewInt(0).Add(ipToBigInt(firstIP), big.NewInt(1)) ip, err := RequestIP(network, nil) if err != nil { t.Fatal(err) } - allocated := ipToInt(ip) + allocated := ipToBigInt(ip) if allocated == first { t.Fatalf("allocated ip should not equal first ip: %d == %d", first, allocated) @@ -289,6 +432,65 @@ func TestAllocateAllIps(t *testing.T) { } assertIPEquals(t, first, again) + + // ensure that alloc.last == alloc.begin won't result in dead loop + if _, err := RequestIP(network, nil); err != ErrNoAvailableIPs { + t.Fatal(err) + } + + // Test by making alloc.last the only free ip and ensure we get it back + // #1. first of the range, (alloc.last == ipToInt(first) already) + if err := ReleaseIP(network, first); err != nil { + t.Fatal(err) + } + + ret, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + assertIPEquals(t, first, ret) + + // #2. last of the range, note that current is the last one + last := net.IPv4(192, 168, 0, 254) + setLastTo(t, network, last) + + ret, err = RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + assertIPEquals(t, last, ret) + + // #3. middle of the range + mid := net.IPv4(192, 168, 0, 7) + setLastTo(t, network, mid) + + ret, err = RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + assertIPEquals(t, mid, ret) +} + +// make sure the pool is full when calling setLastTo. 
+// we don't cheat here +func setLastTo(t *testing.T, network *net.IPNet, ip net.IP) { + if err := ReleaseIP(network, ip); err != nil { + t.Fatal(err) + } + + ret, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + assertIPEquals(t, ip, ret) + + if err := ReleaseIP(network, ip); err != nil { + t.Fatal(err) + } } func TestAllocateDifferentSubnets(t *testing.T) { @@ -301,11 +503,24 @@ func TestAllocateDifferentSubnets(t *testing.T) { IP: []byte{127, 0, 0, 1}, Mask: []byte{255, 255, 255, 0}, } + network3 := &net.IPNet{ + IP: []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, + Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask + } + network4 := &net.IPNet{ + IP: []byte{0x2a, 0x00, 0x16, 0x32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, + Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask + } expectedIPs := []net.IP{ 0: net.IPv4(192, 168, 0, 2), 1: net.IPv4(192, 168, 0, 3), 2: net.IPv4(127, 0, 0, 2), 3: net.IPv4(127, 0, 0, 3), + 4: net.ParseIP("2a00:1450::1"), + 5: net.ParseIP("2a00:1450::2"), + 6: net.ParseIP("2a00:1450::3"), + 7: net.ParseIP("2a00:1632::1"), + 8: net.ParseIP("2a00:1632::2"), } ip11, err := RequestIP(network1, nil) @@ -324,11 +539,37 @@ func TestAllocateDifferentSubnets(t *testing.T) { if err != nil { t.Fatal(err) } + ip31, err := RequestIP(network3, nil) + if err != nil { + t.Fatal(err) + } + ip32, err := RequestIP(network3, nil) + if err != nil { + t.Fatal(err) + } + ip33, err := RequestIP(network3, nil) + if err != nil { + t.Fatal(err) + } + ip41, err := RequestIP(network4, nil) + if err != nil { + t.Fatal(err) + } + ip42, err := RequestIP(network4, nil) + if err != nil { + t.Fatal(err) + } assertIPEquals(t, expectedIPs[0], ip11) assertIPEquals(t, expectedIPs[1], ip12) assertIPEquals(t, expectedIPs[2], ip21) assertIPEquals(t, expectedIPs[3], ip22) + assertIPEquals(t, expectedIPs[4], ip31) + assertIPEquals(t, expectedIPs[5], ip32) + 
assertIPEquals(t, expectedIPs[6], ip33) + assertIPEquals(t, expectedIPs[7], ip41) + assertIPEquals(t, expectedIPs[8], ip42) } + func TestRegisterBadTwice(t *testing.T) { defer reset() network := &net.IPNet{ @@ -378,6 +619,7 @@ func TestAllocateFromRange(t *testing.T) { IP: []byte{192, 168, 0, 8}, Mask: []byte{255, 255, 255, 248}, } + if err := RegisterSubnet(network, subnet); err != nil { t.Fatal(err) } diff --git a/daemon/networkdriver/network_test.go b/daemon/networkdriver/network_test.go index d655cb30e4..1a6336b5de 100644 --- a/daemon/networkdriver/network_test.go +++ b/daemon/networkdriver/network_test.go @@ -122,9 +122,6 @@ func TestNetworkRange(t *testing.T) { if !last.Equal(net.ParseIP("192.168.0.255")) { t.Error(last.String()) } - if size := NetworkSize(network.Mask); size != 256 { - t.Error(size) - } // Class A test _, network, _ = net.ParseCIDR("10.0.0.1/8") @@ -135,9 +132,6 @@ func TestNetworkRange(t *testing.T) { if !last.Equal(net.ParseIP("10.255.255.255")) { t.Error(last.String()) } - if size := NetworkSize(network.Mask); size != 16777216 { - t.Error(size) - } // Class A, random IP address _, network, _ = net.ParseCIDR("10.1.2.3/8") @@ -158,9 +152,6 @@ func TestNetworkRange(t *testing.T) { if !last.Equal(net.ParseIP("10.1.2.3")) { t.Error(last.String()) } - if size := NetworkSize(network.Mask); size != 1 { - t.Error(size) - } // 31bit mask _, network, _ = net.ParseCIDR("10.1.2.3/31") @@ -171,9 +162,6 @@ func TestNetworkRange(t *testing.T) { if !last.Equal(net.ParseIP("10.1.2.3")) { t.Error(last.String()) } - if size := NetworkSize(network.Mask); size != 2 { - t.Error(size) - } // 26bit mask _, network, _ = net.ParseCIDR("10.1.2.3/26") @@ -184,7 +172,4 @@ func TestNetworkRange(t *testing.T) { if !last.Equal(net.ParseIP("10.1.2.63")) { t.Error(last.String()) } - if size := NetworkSize(network.Mask); size != 64 { - t.Error(size) - } } diff --git a/daemon/networkdriver/portallocator/portallocator.go b/daemon/networkdriver/portallocator/portallocator.go 
index d4fcc6e725..3414d11e7a 100644 --- a/daemon/networkdriver/portallocator/portallocator.go +++ b/daemon/networkdriver/portallocator/portallocator.go @@ -14,7 +14,8 @@ type portMap struct { func newPortMap() *portMap { return &portMap{ - p: map[int]struct{}{}, + p: map[int]struct{}{}, + last: EndPortRange, } } @@ -135,13 +136,9 @@ func ReleaseAll() error { } func (pm *portMap) findPort() (int, error) { - if pm.last == 0 { - pm.p[BeginPortRange] = struct{}{} - pm.last = BeginPortRange - return BeginPortRange, nil - } - - for port := pm.last + 1; port != pm.last; port++ { + port := pm.last + for i := 0; i <= EndPortRange-BeginPortRange; i++ { + port++ if port > EndPortRange { port = BeginPortRange } diff --git a/daemon/networkdriver/portallocator/portallocator_test.go b/daemon/networkdriver/portallocator/portallocator_test.go index 9869c332e9..72581f1040 100644 --- a/daemon/networkdriver/portallocator/portallocator_test.go +++ b/daemon/networkdriver/portallocator/portallocator_test.go @@ -134,6 +134,19 @@ func TestAllocateAllPorts(t *testing.T) { if newPort != port { t.Fatalf("Expected port %d got %d", port, newPort) } + + // now pm.last == newPort, release it so that it's the only free port of + // the range, and ensure we get it back + if err := ReleasePort(defaultIP, "tcp", newPort); err != nil { + t.Fatal(err) + } + port, err = RequestPort(defaultIP, "tcp", 0) + if err != nil { + t.Fatal(err) + } + if newPort != port { + t.Fatalf("Expected port %d got %d", newPort, port) + } } func BenchmarkAllocatePorts(b *testing.B) { @@ -214,3 +227,19 @@ func TestPortAllocation(t *testing.T) { t.Fatal("Requesting a dynamic port should never allocate a used port") } } + +func TestNoDuplicateBPR(t *testing.T) { + defer reset() + + if port, err := RequestPort(defaultIP, "tcp", BeginPortRange); err != nil { + t.Fatal(err) + } else if port != BeginPortRange { + t.Fatalf("Expected port %d got %d", BeginPortRange, port) + } + + if port, err := RequestPort(defaultIP, "tcp", 0); err 
!= nil { + t.Fatal(err) + } else if port == BeginPortRange { + t.Fatalf("Acquire(0) allocated the same port twice: %d", port) + } +} diff --git a/daemon/networkdriver/portmapper/mapper.go b/daemon/networkdriver/portmapper/mapper.go index 24ca0d892f..4bf8cd142c 100644 --- a/daemon/networkdriver/portmapper/mapper.go +++ b/daemon/networkdriver/portmapper/mapper.go @@ -6,9 +6,9 @@ import ( "net" "sync" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/networkdriver/portallocator" "github.com/docker/docker/pkg/iptables" - "github.com/docker/docker/pkg/log" ) type mapping struct { diff --git a/daemon/networkdriver/portmapper/proxy.go b/daemon/networkdriver/portmapper/proxy.go index af20469ed8..5d0aa0be0d 100644 --- a/daemon/networkdriver/portmapper/proxy.go +++ b/daemon/networkdriver/portmapper/proxy.go @@ -130,7 +130,12 @@ func (p *proxyCommand) Start() error { r.Read(buf) if string(buf) != "0\n" { - errStr, _ := ioutil.ReadAll(r) + errStr, err := ioutil.ReadAll(r) + if err != nil { + errchan <- fmt.Errorf("Error reading exit status from userland proxy: %v", err) + return + } + errchan <- fmt.Errorf("Error starting userland proxy: %s", errStr) return } @@ -140,7 +145,7 @@ func (p *proxyCommand) Start() error { select { case err := <-errchan: return err - case <-time.After(1 * time.Second): + case <-time.After(16 * time.Second): return fmt.Errorf("Timed out proxy starting the userland proxy") } } diff --git a/daemon/networkdriver/utils.go b/daemon/networkdriver/utils.go index 410d6010c4..07d95445a0 100644 --- a/daemon/networkdriver/utils.go +++ b/daemon/networkdriver/utils.go @@ -1,7 +1,6 @@ package networkdriver import ( - "encoding/binary" "errors" "fmt" "net" @@ -56,25 +55,21 @@ func NetworkOverlaps(netX *net.IPNet, netY *net.IPNet) bool { // Calculates the first and last IP addresses in an IPNet func NetworkRange(network *net.IPNet) (net.IP, net.IP) { - var ( - netIP = network.IP.To4() - firstIP = netIP.Mask(network.Mask) - lastIP = net.IPv4(0, 0, 
0, 0).To4() - ) + var netIP net.IP + if network.IP.To4() != nil { + netIP = network.IP.To4() + } else if network.IP.To16() != nil { + netIP = network.IP.To16() + } else { + return nil, nil + } - for i := 0; i < len(lastIP); i++ { + lastIP := make([]byte, len(netIP), len(netIP)) + + for i := 0; i < len(netIP); i++ { lastIP[i] = netIP[i] | ^network.Mask[i] } - return firstIP, lastIP -} - -// Given a netmask, calculates the number of available hosts -func NetworkSize(mask net.IPMask) int32 { - m := net.IPv4Mask(0, 0, 0, 0) - for i := 0; i < net.IPv4len; i++ { - m[i] = ^mask[i] - } - return int32(binary.BigEndian.Uint32(m)) + 1 + return netIP.Mask(network.Mask), net.IP(lastIP) } // Return the IPv4 address of a network interface @@ -90,7 +85,7 @@ func GetIfaceAddr(name string) (net.Addr, error) { var addrs4 []net.Addr for _, addr := range addrs { ip := (addr.(*net.IPNet)).IP - if ip4 := ip.To4(); len(ip4) == net.IPv4len { + if ip4 := ip.To4(); ip4 != nil { addrs4 = append(addrs4, addr) } } diff --git a/daemon/state.go b/daemon/state.go index b7dc149959..3aba57090f 100644 --- a/daemon/state.go +++ b/daemon/state.go @@ -5,6 +5,7 @@ import ( "sync" "time" + "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/pkg/units" ) @@ -13,8 +14,10 @@ type State struct { Running bool Paused bool Restarting bool + OOMKilled bool Pid int ExitCode int + Error string // contains last known error when starting the container StartedAt time.Time FinishedAt time.Time waitChan chan struct{} @@ -137,6 +140,7 @@ func (s *State) SetRunning(pid int) { } func (s *State) setRunning(pid int) { + s.Error = "" s.Running = true s.Paused = false s.Restarting = false @@ -147,25 +151,26 @@ func (s *State) setRunning(pid int) { s.waitChan = make(chan struct{}) } -func (s *State) SetStopped(exitCode int) { +func (s *State) SetStopped(exitStatus *execdriver.ExitStatus) { s.Lock() - s.setStopped(exitCode) + s.setStopped(exitStatus) s.Unlock() } -func (s *State) setStopped(exitCode int) { 
+func (s *State) setStopped(exitStatus *execdriver.ExitStatus) { s.Running = false s.Restarting = false s.Pid = 0 s.FinishedAt = time.Now().UTC() - s.ExitCode = exitCode + s.ExitCode = exitStatus.ExitCode + s.OOMKilled = exitStatus.OOMKilled close(s.waitChan) // fire waiters for stop s.waitChan = make(chan struct{}) } // SetRestarting is when docker hanldes the auto restart of containers when they are // in the middle of a stop and being restarted again -func (s *State) SetRestarting(exitCode int) { +func (s *State) SetRestarting(exitStatus *execdriver.ExitStatus) { s.Lock() // we should consider the container running when it is restarting because of // all the checks in docker around rm/stop/etc @@ -173,12 +178,20 @@ func (s *State) SetRestarting(exitCode int) { s.Restarting = true s.Pid = 0 s.FinishedAt = time.Now().UTC() - s.ExitCode = exitCode + s.ExitCode = exitStatus.ExitCode + s.OOMKilled = exitStatus.OOMKilled close(s.waitChan) // fire waiters for stop s.waitChan = make(chan struct{}) s.Unlock() } +// setError sets the container's error state. 
This is useful when we want to +// know the error that occurred when container transits to another state +// when inspecting it +func (s *State) setError(err error) { + s.Error = err.Error() +} + func (s *State) IsRestarting() bool { s.Lock() res := s.Restarting diff --git a/daemon/state_test.go b/daemon/state_test.go index 35524356a3..32c005cf2e 100644 --- a/daemon/state_test.go +++ b/daemon/state_test.go @@ -4,6 +4,8 @@ import ( "sync/atomic" "testing" "time" + + "github.com/docker/docker/daemon/execdriver" ) func TestStateRunStop(t *testing.T) { @@ -47,7 +49,7 @@ func TestStateRunStop(t *testing.T) { atomic.StoreInt64(&exit, int64(exitCode)) close(stopped) }() - s.SetStopped(i) + s.SetStopped(&execdriver.ExitStatus{i, false}) if s.IsRunning() { t.Fatal("State is running") } diff --git a/daemon/utils.go b/daemon/utils.go index 9c43236e0b..6202e6d961 100644 --- a/daemon/utils.go +++ b/daemon/utils.go @@ -1,6 +1,7 @@ package daemon import ( + "errors" "fmt" "strings" @@ -32,9 +33,9 @@ func migratePortMappings(config *runconfig.Config, hostConfig *runconfig.HostCon return nil } -func mergeLxcConfIntoOptions(hostConfig *runconfig.HostConfig) []string { +func mergeLxcConfIntoOptions(hostConfig *runconfig.HostConfig) ([]string, error) { if hostConfig == nil { - return nil + return nil, nil } out := []string{} @@ -44,10 +45,13 @@ func mergeLxcConfIntoOptions(hostConfig *runconfig.HostConfig) []string { for _, pair := range lxcConf { // because lxc conf gets the driver name lxc.XXXX we need to trim it off // and let the lxc driver add it back later if needed + if !strings.Contains(pair.Key, ".") { + return nil, errors.New("Illegal Key passed into LXC Configurations") + } parts := strings.SplitN(pair.Key, ".", 2) out = append(out, fmt.Sprintf("%s=%s", parts[1], pair.Value)) } } - return out + return out, nil } diff --git a/daemon/utils_test.go b/daemon/utils_test.go index 7748b86022..8a2fa719ed 100644 --- a/daemon/utils_test.go +++ b/daemon/utils_test.go @@ -14,7 +14,10 
@@ func TestMergeLxcConfig(t *testing.T) { }, } - out := mergeLxcConfIntoOptions(hostConfig) + out, err := mergeLxcConfIntoOptions(hostConfig) + if err != nil { + t.Fatalf("Failed to merge Lxc Config ", err) + } cpuset := out[0] if expected := "cgroups.cpuset=1,2"; cpuset != expected { diff --git a/daemon/volumes.go b/daemon/volumes.go index f98baa1c78..46ae5588af 100644 --- a/daemon/volumes.go +++ b/daemon/volumes.go @@ -2,6 +2,7 @@ package daemon import ( "fmt" + "io" "io/ioutil" "os" "path/filepath" @@ -9,11 +10,12 @@ import ( "strings" "syscall" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/volumes" + "github.com/docker/libcontainer/label" ) type Mount struct { @@ -24,6 +26,18 @@ type Mount struct { copyData bool } +func (mnt *Mount) Export(resource string) (io.ReadCloser, error) { + var name string + if resource == mnt.MountToPath[1:] { + name = filepath.Base(resource) + } + path, err := filepath.Rel(mnt.MountToPath[1:], resource) + if err != nil { + return nil, err + } + return mnt.volume.Export(path, name) +} + func (container *Container) prepareVolumes() error { if container.Volumes == nil || len(container.Volumes) == 0 { container.Volumes = make(map[string]string) @@ -234,6 +248,12 @@ func (container *Container) setupMounts() error { mounts = append(mounts, execdriver.Mount{Source: container.HostsPath, Destination: "/etc/hosts", Writable: true, Private: true}) } + for _, m := range mounts { + if err := label.SetFileLabel(m.Source, container.MountLabel); err != nil { + return err + } + } + // Mount user specified volumes // Note, these are not private because you may want propagation of (un)mounts from host // volumes. 
For instance if you use -v /usr:/usr and the host later mounts /usr/share you diff --git a/docker/client.go b/docker/client.go index 27001cc557..cde1a6d3dc 100644 --- a/docker/client.go +++ b/docker/client.go @@ -3,7 +3,7 @@ package main import ( - "log" + "log" // see gh#8745, client needs to use go log pkg ) const CanDaemon = false diff --git a/docker/daemon.go b/docker/daemon.go index 8b5826f344..3128f7ee55 100644 --- a/docker/daemon.go +++ b/docker/daemon.go @@ -3,8 +3,7 @@ package main import ( - "log" - + log "github.com/Sirupsen/logrus" "github.com/docker/docker/builder" "github.com/docker/docker/builtins" "github.com/docker/docker/daemon" @@ -35,6 +34,8 @@ func mainDaemon() { eng := engine.New() signal.Trap(eng.Shutdown) + daemonCfg.TrustKeyPath = *flTrustKey + // Load builtins if err := builtins.Register(eng); err != nil { log.Fatal(err) @@ -53,6 +54,13 @@ func mainDaemon() { if err != nil { log.Fatal(err) } + log.Infof("docker daemon: %s %s; execdriver: %s; graphdriver: %s", + dockerversion.VERSION, + dockerversion.GITCOMMIT, + d.ExecutionDriver().Name(), + d.GraphDriver().String(), + ) + if err := d.Install(eng); err != nil { log.Fatal(err) } @@ -66,13 +74,6 @@ func mainDaemon() { log.Fatal(err) } }() - // TODO actually have a resolved graphdriver to show? - log.Printf("docker daemon: %s %s; execdriver: %s; graphdriver: %s", - dockerversion.VERSION, - dockerversion.GITCOMMIT, - daemonCfg.ExecDriver, - daemonCfg.GraphDriver, - ) // Serve api job := eng.Job("serveapi", flHosts...) 
diff --git a/docker/docker.go b/docker/docker.go index 12900ecb18..3137f5c99f 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -5,10 +5,10 @@ import ( "crypto/x509" "fmt" "io/ioutil" - "log" "os" "strings" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/api" "github.com/docker/docker/api/client" "github.com/docker/docker/dockerversion" @@ -28,6 +28,7 @@ func main() { if reexec.Init() { return } + flag.Parse() // FIXME: validate daemon flags here @@ -35,8 +36,22 @@ func main() { showVersion() return } + + if *flLogLevel != "" { + lvl, err := log.ParseLevel(*flLogLevel) + if err != nil { + log.Fatalf("Unable to parse logging level: %s", *flLogLevel) + } + initLogging(lvl) + } else { + initLogging(log.InfoLevel) + } + + // -D, --debug, -l/--log-level=debug processing + // When/if -D is removed this block can be deleted if *flDebug { os.Setenv("DEBUG", "1") + initLogging(log.DebugLevel) } if len(flHosts) == 0 { @@ -68,9 +83,14 @@ func main() { ) tlsConfig.InsecureSkipVerify = true + // Regardless of whether the user sets it to true or false, if they + // specify --tlsverify at all then we need to turn on tls + if flag.IsSet("-tlsverify") { + *flTls = true + } + // If we should verify the server, we need to load a trusted ca if *flTlsVerify { - *flTls = true certPool := x509.NewCertPool() file, err := ioutil.ReadFile(*flCa) if err != nil { diff --git a/docker/flags.go b/docker/flags.go index 61081ec996..6601b4fe8a 100644 --- a/docker/flags.go +++ b/docker/flags.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "path/filepath" + "runtime" "github.com/docker/docker/opts" flag "github.com/docker/docker/pkg/mflag" @@ -16,17 +17,25 @@ var ( func init() { if dockerCertPath == "" { - dockerCertPath = filepath.Join(os.Getenv("HOME"), ".docker") + dockerCertPath = filepath.Join(getHomeDir(), ".docker") } } +func getHomeDir() string { + if runtime.GOOS == "windows" { + return os.Getenv("USERPROFILE") + } + return os.Getenv("HOME") +} + var ( flVersion = 
flag.Bool([]string{"v", "-version"}, false, "Print version information and quit") flDaemon = flag.Bool([]string{"d", "-daemon"}, false, "Enable daemon mode") flDebug = flag.Bool([]string{"D", "-debug"}, false, "Enable debug mode") flSocketGroup = flag.String([]string{"G", "-group"}, "docker", "Group to assign the unix socket specified by -H when running in daemon mode\nuse '' (the empty string) to disable setting of a group") + flLogLevel = flag.String([]string{"l", "-log-level"}, "info", "Set the logging level") flEnableCors = flag.Bool([]string{"#api-enable-cors", "-api-enable-cors"}, false, "Enable CORS headers in the remote API") - flTls = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by tls-verify flags") + flTls = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by --tlsverify flag") flTlsVerify = flag.Bool([]string{"-tlsverify"}, dockerTlsVerify, "Use TLS and verify the remote (daemon: verify client, client: verify daemon)") // these are initialized in init() below since their default values depend on dockerCertPath which isn't fully initialized until init() runs @@ -62,7 +71,7 @@ func init() { {"create", "Create a new container"}, {"diff", "Inspect changes on a container's filesystem"}, {"events", "Get real time events from the server"}, - {"exec", "Run a command in an existing container"}, + {"exec", "Run a command in a running container"}, {"export", "Stream the contents of a container as a tar archive"}, {"history", "Show the history of an image"}, {"images", "List images"}, diff --git a/docker/log.go b/docker/log.go new file mode 100644 index 0000000000..cdbbd4408f --- /dev/null +++ b/docker/log.go @@ -0,0 +1,12 @@ +package main + +import ( + "os" + + log "github.com/Sirupsen/logrus" +) + +func initLogging(lvl log.Level) { + log.SetOutput(os.Stderr) + log.SetLevel(lvl) +} diff --git a/docs/Dockerfile b/docs/Dockerfile index 3c58193b99..d801ec2130 100644 --- a/docs/Dockerfile +++ b/docs/Dockerfile @@ -1,49 +1,59 @@ # # See the top level 
Makefile in https://github.com/docker/docker for usage. # -FROM debian:jessie -MAINTAINER Sven Dowideit (@SvenDowideit) +FROM debian:jessie +MAINTAINER Sven Dowideit (@SvenDowideit) -RUN apt-get update && apt-get install -y make python-pip python-setuptools vim-tiny git gettext python-dev libssl-dev +RUN apt-get update \ + && apt-get install -y \ + gettext \ + git \ + libssl-dev \ + make \ + python-dev \ + python-pip \ + python-setuptools \ + vim-tiny -RUN pip install mkdocs +RUN pip install mkdocs # add MarkdownTools to get transclusion # (future development) -#RUN easy_install -U setuptools -#RUN pip install MarkdownTools2 +#RUN easy_install -U setuptools +#RUN pip install MarkdownTools2 # this version works, the current versions fail in different ways -RUN pip install awscli==1.4.4 pyopenssl==0.12 - -# make sure the git clone is not an old cache - we've published old versions a few times now -ENV CACHE_BUST Jul2014 +RUN pip install awscli==1.4.4 pyopenssl==0.12 # get my sitemap.xml branch of mkdocs and use that for now -RUN git clone https://github.com/SvenDowideit/mkdocs &&\ - cd mkdocs/ &&\ - git checkout docker-markdown-merge &&\ - ./setup.py install +# commit hash of the newest commit of SvenDowideit/mkdocs on +# docker-markdown-merge branch, it is used to break docker cache +# see: https://github.com/SvenDowideit/mkdocs/tree/docker-markdown-merge +RUN git clone -b docker-markdown-merge https://github.com/SvenDowideit/mkdocs \ + && cd mkdocs/ \ + && git checkout ad32549c452963b8854951d6783f4736c0f7c5d5 \ + && ./setup.py install -ADD . /docs -ADD MAINTAINERS /docs/sources/humans.txt -WORKDIR /docs +COPY . /docs +COPY MAINTAINERS /docs/sources/humans.txt +WORKDIR /docs -RUN VERSION=$(cat /docs/VERSION) &&\ - MAJOR_MINOR="${VERSION%.*}" &&\ - for i in $(seq $MAJOR_MINOR -0.1 1.0) ; do echo "
  • Version v$i
  • " ; done > /docs/sources/versions.html_fragment &&\ - GIT_BRANCH=$(cat /docs/GIT_BRANCH) &&\ - GITCOMMIT=$(cat /docs/GITCOMMIT) &&\ - AWS_S3_BUCKET=$(cat /docs/AWS_S3_BUCKET) &&\ - BUILD_DATE=$(date) &&\ - sed -i "s/\$VERSION/$VERSION/g" /docs/theme/mkdocs/base.html &&\ - sed -i "s/\$MAJOR_MINOR/v$MAJOR_MINOR/g" /docs/theme/mkdocs/base.html &&\ - sed -i "s/\$GITCOMMIT/$GITCOMMIT/g" /docs/theme/mkdocs/base.html &&\ - sed -i "s/\$GIT_BRANCH/$GIT_BRANCH/g" /docs/theme/mkdocs/base.html &&\ - sed -i "s/\$BUILD_DATE/$BUILD_DATE/g" /docs/theme/mkdocs/base.html &&\ - sed -i "s/\$AWS_S3_BUCKET/$AWS_S3_BUCKET/g" /docs/theme/mkdocs/base.html +RUN VERSION=$(cat VERSION) \ + && MAJOR_MINOR="${VERSION%.*}" \ + && for i in $(seq $MAJOR_MINOR -0.1 1.0); do \ + echo "
  • Version v$i
  • "; \ + done > sources/versions.html_fragment \ + && GIT_BRANCH=$(cat GIT_BRANCH) \ + && GITCOMMIT=$(cat GITCOMMIT) \ + && AWS_S3_BUCKET=$(cat AWS_S3_BUCKET) \ + && BUILD_DATE=$(date) \ + && sed -i "s/\$VERSION/$VERSION/g" theme/mkdocs/base.html \ + && sed -i "s/\$MAJOR_MINOR/v$MAJOR_MINOR/g" theme/mkdocs/base.html \ + && sed -i "s/\$GITCOMMIT/$GITCOMMIT/g" theme/mkdocs/base.html \ + && sed -i "s/\$GIT_BRANCH/$GIT_BRANCH/g" theme/mkdocs/base.html \ + && sed -i "s/\$BUILD_DATE/$BUILD_DATE/g" theme/mkdocs/base.html \ + && sed -i "s/\$AWS_S3_BUCKET/$AWS_S3_BUCKET/g" theme/mkdocs/base.html -# note, EXPOSE is only last because of https://github.com/docker/docker/issues/3525 -EXPOSE 8000 +EXPOSE 8000 -CMD ["mkdocs", "serve"] +CMD ["mkdocs", "serve"] diff --git a/docs/MAINTAINERS b/docs/MAINTAINERS index d07b531d72..ecf56752c2 100644 --- a/docs/MAINTAINERS +++ b/docs/MAINTAINERS @@ -1,4 +1,3 @@ Fred Lifton (@fredlf) James Turnbull (@jamtur01) Sven Dowideit (@SvenDowideit) -O.S. Tezer (@OSTezer) diff --git a/docs/README.md b/docs/README.md index 27ed7eef11..de3999ba78 100755 --- a/docs/README.md +++ b/docs/README.md @@ -11,9 +11,8 @@ development) branch maps to the "master" documentation. ## Contributing -- Follow the contribution guidelines ([see - `../CONTRIBUTING.md`](../CONTRIBUTING.md)). -- [Remember to sign your work!](../CONTRIBUTING.md#sign-your-work) +Be sure to follow the [contribution guidelines](../CONTRIBUTING.md). +In particular, [remember to sign your work!](../CONTRIBUTING.md#sign-your-work) ## Getting Started @@ -41,26 +40,10 @@ to the menu definition in the `docs/mkdocs.yml` file. ## Style guide -The documentation is written with paragraphs wrapped at 80 column lines to make -it easier for terminal use. - -### Examples - -When writing examples, give the user hints by making them resemble what they see -in their shell: - -- Indent shell examples by 4 spaces so they get rendered as code. 
-- Start typed commands with `$ ` (dollar space), so that they are easily - differentiated from program output. -- Program output has no prefix. -- Comments begin with `# ` (hash space). -- In-container shell commands begin with `$$ ` (dollar dollar space). - -### Images - -When you need to add images, try to make them as small as possible (e.g., as -gifs). Usually images should go in the same directory as the `.md` file which -references them, or in a subdirectory if one already exists. +If you have questions about how to write for Docker's documentation (e.g., +questions about grammar, syntax, formatting, styling, language, or tone) please +see the [style guide](sources/contributing/docs_style-guide.md). If something +isn't clear in the guide, please submit a PR to help us improve it. ## Working using GitHub's file editor @@ -73,11 +56,11 @@ work!](../CONTRIBUTING.md#sign-your-work) ## Branches -**There are two branches related to editing docs**: `master` and a `docs` -branch. You should always edit the documentation on a local branch of the `master` +**There are two branches related to editing docs**: `master` and `docs`. You +should always edit the documentation on a local branch of the `master` branch, and send a PR against `master`. -That way your edits will automatically get included in later releases, and docs +That way your fixes will automatically get included in later releases, and docs maintainers can easily cherry-pick your changes into the `docs` release branch. In the rare case where your change is not forward-compatible, you may need to base your changes on the `docs` branch. @@ -95,8 +78,10 @@ found between Docker code releases. ## Publishing Documentation -To publish a copy of the documentation you need to have Docker up and running on your -machine. You'll also need a `docs/awsconfig` file containing AWS settings to deploy to. +To publish a copy of the documentation you need to have Docker up and running on +your machine. 
You'll also need a `docs/awsconfig` file containing the settings +you need to access the AWS bucket you'll be deploying to. + The release script will create an s3 if needed, and will then push the files to it. [profile dowideit-docs] aws_access_key_id = IHOIUAHSIDH234rwf.... @@ -115,7 +100,8 @@ also update the root docs pages by running make AWS_S3_BUCKET=dowideit-docs BUILD_ROOT=yes docs-release -> **Note:** if you are using Boot2Docker on OSX and the above command returns an error, +> **Note:** +> if you are using Boot2Docker on OSX and the above command returns an error, > `Post http:///var/run/docker.sock/build?rm=1&t=docker-docs%3Apost-1.2.0-docs_update-2: > dial unix /var/run/docker.sock: no such file or directory', you need to set the Docker > host. Run `$(boot2docker shellinit)` to see the correct variable to set. The command @@ -145,8 +131,8 @@ Once the PR has the needed `LGTM`s, merge it, then publish to our beta server to test: git fetch upstream - git checkout post-1.2.0-docs-update-1 - git reset --hard upstream/post-1.2.0-docs-update-1 + git checkout docs + git reset --hard upstream/docs make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release Then go to http://beta-docs.docker.io.s3-website-us-west-2.amazonaws.com/ @@ -155,6 +141,8 @@ to view your results and make sure what you published is what you wanted. When you're happy with it, publish the docs to our live site: make AWS_S3_BUCKET=docs.docker.com BUILD_ROOT=yes docs-release + +Test the uncached version of the live docs at http://docs.docker.com.s3-website-us-east-1.amazonaws.com/ Note that the new docs will not appear live on the site until the cache (a complex, distributed CDN system) is flushed. This requires someone with S3 keys. 
Contact Docker diff --git a/docs/docs-update.py b/docs/docs-update.py index b605aeccb2..586bde482d 100755 --- a/docs/docs-update.py +++ b/docs/docs-update.py @@ -104,6 +104,11 @@ def update_man_pages(): re.MULTILINE | re.DOTALL ) + options_re = re.compile( + r".*# OPTIONS(.*?)# (HISTORY|EXAMPLES?).*", + re.MULTILINE | re.DOTALL + ) + example_re = re.compile( r".*# EXAMPLES?(.*)# HISTORY.*", re.MULTILINE | re.DOTALL @@ -116,8 +121,12 @@ def update_man_pages(): for command in cmds: print "COMMAND: "+command + if command == "": + print "SKIPPING" + continue history = "" description = "" + original_options = "" examples = "" if os.path.isfile("docs/man/docker-"+command+".1.md"): intext = open("docs/man/docker-"+command+".1.md", "r") @@ -126,6 +135,10 @@ def update_man_pages(): match = desc_re.match(txt) if match: description = match.group(1) + match = options_re.match(txt) + if match: + original_options = match.group(1) + #print "MATCHED OPTIONS\n" + original_options match = example_re.match(txt) if match: examples = match.group(1) @@ -148,7 +161,7 @@ def update_man_pages(): help_string = e.output last_key = "" - for l in str(help).split("\n"): + for l in str(help_string).split("\n"): l = l.rstrip() if l != "": match = re.match("Usage: docker {}(.*)".format(command), l) @@ -170,7 +183,7 @@ def update_man_pages(): # replace [OPTIONS] with the list of params options = "" - match = re.match("\[OPTIONS\](.*)", usage) + match = re.match("\[OPTIONS\]\s*(.*)", usage) if match: usage = match.group(1) @@ -178,11 +191,13 @@ def update_man_pages(): # TODO: sort without the `-`'s for key in sorted(params.keys(), key=lambda s: s.lower()): # split on commas, remove --?.*=.*, put in *'s mumble + flags = [] ps = [] opts = [] for k in key_params[key].split(","): match = re.match("(-+)([A-Za-z-0-9]*)(?:=(.*))?", k.lstrip()) if match: + flags.append("{}{}".format(match.group(1), match.group(2))) p = "**{}{}**".format(match.group(1), match.group(2)) o = "**{}{}**".format(match.group(1), 
match.group(2)) if match.group(3): @@ -203,7 +218,25 @@ def update_man_pages(): else: print "nomatch:{}".format(k) new_usage = "{}\n[{}]".format(new_usage, "|".join(ps)) + options = "{}{}\n {}\n\n".format(options, ", ".join(opts), params[key]) + + # look at the original options documentation and if its hand written, add it too. + print "SVEN_re: "+flags[0] + singleoption_re = re.compile( + r".*[\r\n]\*\*"+flags[0]+"\*\*([^\r\n]*)[\r\n]+(.*?)[\r\n](\*\*-|# [A-Z]|\*\*[A-Z]+\*\*).*", + #r""+flags[0]+"(.*)(^\*\*-.*)?", + re.MULTILINE | re.DOTALL + ) + match = singleoption_re.match(original_options) + if match: + info = match.group(2).strip() + print "MATCHED: " + match.group(1).strip() + if info != params[key].strip(): + #info = re.sub(params[key].strip(), '', info, flags=re.MULTILINE) + print "INFO changed: " +info + options = "{} {}\n\n".format(options, info.strip()) + if new_usage != "": new_usage = "{}\n".format(new_usage.strip()) usage = new_usage + usage @@ -230,8 +263,8 @@ def update_man_pages(): ".*{}.*".format(date_string), re.MULTILINE | re.DOTALL ) - if not recent_history_re.match(history): - outtext.write("{}, updated by Sven Dowideit \n".format(date_string)) +# if not recent_history_re.match(history): +# outtext.write("{}, updated by Sven Dowideit \n".format(date_string)) outtext.close() # main diff --git a/docs/man/docker-attach.1.md b/docs/man/docker-attach.1.md index 7deda6c75e..19fbaceb4a 100644 --- a/docs/man/docker-attach.1.md +++ b/docs/man/docker-attach.1.md @@ -8,7 +8,7 @@ docker-attach - Attach to a running container **docker attach** [**--no-stdin**[=*false*]] [**--sig-proxy**[=*true*]] - CONTAINER +CONTAINER # DESCRIPTION If you **docker run** a container in detached mode (**-d**), you can reattach to @@ -20,12 +20,15 @@ container, or `CTRL-\` to get a stacktrace of the Docker client when it quits. When you detach from a container the exit code will be returned to the client. 
+It is forbidden to redirect the standard input of a docker attach command while +attaching to a tty-enabled container (i.e.: launched with -t`). + # OPTIONS **--no-stdin**=*true*|*false* Do not attach STDIN. The default is *false*. **--sig-proxy**=*true*|*false* - Proxy all received signals to the process (even in non-TTY mode). SIGCHLD, SIGKILL, and SIGSTOP are not proxied. The default is *true*. + Proxy all received signals to the process (non-TTY mode only). SIGCHLD, SIGKILL, and SIGSTOP are not proxied. The default is *true*. # EXAMPLES diff --git a/docs/man/docker-build.1.md b/docs/man/docker-build.1.md index c562660b6f..67d7343af3 100644 --- a/docs/man/docker-build.1.md +++ b/docs/man/docker-build.1.md @@ -11,7 +11,7 @@ docker-build - Build a new image from the source code at PATH [**-q**|**--quiet**[=*false*]] [**--rm**[=*true*]] [**-t**|**--tag**[=*TAG*]] - PATH | URL | - +PATH | URL | - # DESCRIPTION This will read the Dockerfile from the directory specified in **PATH**. diff --git a/docs/man/docker-commit.1.md b/docs/man/docker-commit.1.md index 31edcc0397..0d1d5406cf 100644 --- a/docs/man/docker-commit.1.md +++ b/docs/man/docker-commit.1.md @@ -9,7 +9,7 @@ docker-commit - Create a new image from a container's changes [**-a**|**--author**[=*AUTHOR*]] [**-m**|**--message**[=*MESSAGE*]] [**-p**|**--pause**[=*true*]] - CONTAINER [REPOSITORY[:TAG]] +CONTAINER [REPOSITORY[:TAG]] # DESCRIPTION Using an existing container's name or ID you can create a new image. 
diff --git a/docs/man/docker-create.1.md b/docs/man/docker-create.1.md index c5ed0349c4..a83873794a 100644 --- a/docs/man/docker-create.1.md +++ b/docs/man/docker-create.1.md @@ -22,21 +22,24 @@ docker-create - Create a new container [**--expose**[=*[]*]] [**-h**|**--hostname**[=*HOSTNAME*]] [**-i**|**--interactive**[=*false*]] +[**--ipc**[=*IPC*]] [**--link**[=*[]*]] [**--lxc-conf**[=*[]*]] [**-m**|**--memory**[=*MEMORY*]] +[**--mac-address**[=*MAC-ADDRESS*]] [**--name**[=*NAME*]] [**--net**[=*"bridge"*]] [**-P**|**--publish-all**[=*false*]] [**-p**|**--publish**[=*[]*]] [**--privileged**[=*false*]] [**--restart**[=*RESTART*]] +[**--security-opt**[=*[]*]] [**-t**|**--tty**[=*false*]] [**-u**|**--user**[=*USER*]] [**-v**|**--volume**[=*[]*]] [**--volumes-from**[=*[]*]] [**-w**|**--workdir**[=*WORKDIR*]] - IMAGE [COMMAND] [ARG...] +IMAGE [COMMAND] [ARG...] # OPTIONS **-a**, **--attach**=[] @@ -61,10 +64,10 @@ docker-create - Create a new container CPUs in which to allow execution (0-3, 0,1) **--device**=[] - Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc) + Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm) **--dns-search**=[] - Set custom DNS search domains + Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain) **--dns**=[] Set custom DNS servers @@ -79,7 +82,7 @@ docker-create - Create a new container Read in a line delimited file of environment variables **--expose**=[] - Expose a port from the container without publishing it to your host + Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host **-h**, **--hostname**="" Container host name @@ -87,6 +90,11 @@ docker-create - Create a new container **-i**, **--interactive**=*true*|*false* Keep STDIN open even if not attached. The default is *false*. 
+**--ipc**="" + Default is to create a private IPC namespace (POSIX SysV IPC) for the container + 'container:': reuses another container shared memory, semaphores and message queues + 'host': use the host shared memory,semaphores and message queues inside the container. Note: the host mode gives the container full access to local shared memory and is therefore considered insecure. + **--link**=[] Add link to another container in the form of name:alias @@ -96,6 +104,9 @@ docker-create - Create a new container **-m**, **--memory**="" Memory limit (format: , where unit = b, k, m or g) +**--mac-address**="" + Container MAC address (e.g. 92:d0:c6:0a:29:33) + **--name**="" Assign a name to the container @@ -120,6 +131,9 @@ docker-create - Create a new container **--restart**="" Restart policy to apply when a container exits (no, on-failure[:max-retry], always) +**--security-opt**=[] + Security Options + **-t**, **--tty**=*true*|*false* Allocate a pseudo-TTY. The default is *false*. @@ -138,3 +152,4 @@ docker-create - Create a new container # HISTORY August 2014, updated by Sven Dowideit September 2014, updated by Sven Dowideit +November 2014, updated by Sven Dowideit diff --git a/docs/man/docker-exec.1.md b/docs/man/docker-exec.1.md index d5ec1265bd..3db296ed76 100644 --- a/docs/man/docker-exec.1.md +++ b/docs/man/docker-exec.1.md @@ -1,6 +1,6 @@ % DOCKER(1) Docker User Manuals % Docker Community -% SEPT 2014 +% JUNE 2014 # NAME docker-exec - Run a command in a running container @@ -9,21 +9,30 @@ docker-exec - Run a command in a running container [**-d**|**--detach**[=*false*]] [**-i**|**--interactive**[=*false*]] [**-t**|**--tty**[=*false*]] - CONTAINER COMMAND [ARG...] +CONTAINER COMMAND [ARG...] # DESCRIPTION Run a process in a running container. -# Options +The command started using `docker exec` will only run while the container's primary +process (`PID 1`) is running, and will not be restarted if the container is restarted. 
+If the container is paused, then the `docker exec` command will wait until the +container is unpaused, and then run + +# OPTIONS **-d**, **--detach**=*true*|*false* - Detached mode. This runs the new process in the background. + Detached mode: run command in the background. The default is *false*. **-i**, **--interactive**=*true*|*false* - When set to true, keep STDIN open even if not attached. The default is false. + Keep STDIN open even if not attached. The default is *false*. **-t**, **--tty**=*true*|*false* - When set to true Docker can allocate a pseudo-tty and attach to the standard -input of the process. This can be used, for example, to run a throwaway -interactive shell. The default value is false. + Allocate a pseudo-TTY. The default is *false*. + +The **-t** option is incompatible with a redirection of the docker client +standard input. + +# HISTORY +November 2014, updated by Sven Dowideit diff --git a/docs/man/docker-history.1.md b/docs/man/docker-history.1.md index ddb164e50b..65ec9cd173 100644 --- a/docs/man/docker-history.1.md +++ b/docs/man/docker-history.1.md @@ -8,7 +8,7 @@ docker-history - Show the history of an image **docker history** [**--no-trunc**[=*false*]] [**-q**|**--quiet**[=*false*]] - IMAGE +IMAGE # DESCRIPTION diff --git a/docs/man/docker-images.1.md b/docs/man/docker-images.1.md index c572ee674b..6c9e6a60b5 100644 --- a/docs/man/docker-images.1.md +++ b/docs/man/docker-images.1.md @@ -10,7 +10,7 @@ docker-images - List images [**-f**|**--filter**[=*[]*]] [**--no-trunc**[=*false*]] [**-q**|**--quiet**[=*false*]] - [NAME] +[REPOSITORY] # DESCRIPTION This command lists the images stored in the local Docker repository. @@ -58,25 +58,6 @@ used in builds use **-a**: docker images -a -## List images dependency tree hierarchy - -To list the images in the local repository (not the registry) in a dependency -tree format, use the **-t** option. 
- - docker images -t - -This displays a staggered hierarchy tree where the less indented image is -the oldest with dependent image layers branching inward (to the right) on -subsequent lines. The newest or top level image layer is listed last in -any tree branch. - -## List images in GraphViz format - -To display the list in a format consumable by a GraphViz tools run with -**-v**. For example to produce a .png graph file of the hierarchy use: - - docker images --viz | dot -Tpng -o docker.png - ## Listing only the shortened image IDs Listing just the shortened image IDs. This can be useful for some automated diff --git a/docs/man/docker-info.1.md b/docs/man/docker-info.1.md index bf64a7b543..0547b44b07 100644 --- a/docs/man/docker-info.1.md +++ b/docs/man/docker-info.1.md @@ -37,6 +37,8 @@ Here is a sample output: Execution Driver: native-0.2 Kernel Version: 3.13.0-24-generic Operating System: Ubuntu 14.04 LTS + CPUs: 1 + Total Memory: 2 GiB # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) diff --git a/docs/man/docker-kill.1.md b/docs/man/docker-kill.1.md index 3c8d59e6d5..d1d0ee7ad6 100644 --- a/docs/man/docker-kill.1.md +++ b/docs/man/docker-kill.1.md @@ -7,7 +7,7 @@ docker-kill - Kill a running container using SIGKILL or a specified signal # SYNOPSIS **docker kill** [**-s**|**--signal**[=*"KILL"*]] - CONTAINER [CONTAINER...] +CONTAINER [CONTAINER...] 
# DESCRIPTION diff --git a/docs/man/docker-login.1.md b/docs/man/docker-login.1.md index c269353079..e367050be2 100644 --- a/docs/man/docker-login.1.md +++ b/docs/man/docker-login.1.md @@ -9,7 +9,7 @@ docker-login - Register or log in to a Docker registry server, if no server is s [**-e**|**--email**[=*EMAIL*]] [**-p**|**--password**[=*PASSWORD*]] [**-u**|**--username**[=*USERNAME*]] - [SERVER] +[SERVER] # DESCRIPTION Register or Login to a docker registry server, if no server is diff --git a/docs/man/docker-port.1.md b/docs/man/docker-port.1.md index 97cc61b7e5..8c4c870dc2 100644 --- a/docs/man/docker-port.1.md +++ b/docs/man/docker-port.1.md @@ -5,11 +5,15 @@ docker-port - List port mappings for the CONTAINER, or lookup the public-facing port that is NAT-ed to the PRIVATE_PORT # SYNOPSIS -**docker port** CONTAINER [PRIVATE_PORT[/PROTO]] +**docker port** +CONTAINER [PRIVATE_PORT[/PROTO]] # DESCRIPTION List port mappings for the CONTAINER, or lookup the public-facing port that is NAT-ed to the PRIVATE_PORT +# OPTIONS +There are no available options. + # EXAMPLES You can find out all the ports mapped by not specifying a `PRIVATE_PORT`, or ask for just a specific mapping: @@ -30,3 +34,4 @@ ask for just a specific mapping: # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) June 2014, updated by Sven Dowideit +November 2014, updated by Sven Dowideit diff --git a/docs/man/docker-ps.1.md b/docs/man/docker-ps.1.md index bf22d87da5..d34d98396e 100644 --- a/docs/man/docker-ps.1.md +++ b/docs/man/docker-ps.1.md @@ -32,6 +32,7 @@ the running containers. **-f**, **--filter**=[] Provide filter values. Valid filters: exited= - containers with exit code of + status=(restarting|running|paused|exited) **-l**, **--latest**=*true*|*false* Show only the latest created container, include non-running ones. The default is *false*. @@ -46,7 +47,7 @@ the running containers. Only display numeric IDs. The default is *false*. 
**-s**, **--size**=*true*|*false* - Display sizes. The default is *false*. + Display total file sizes. The default is *false*. **--since**="" Show only containers created since Id or Name, include non-running ones. @@ -74,3 +75,4 @@ April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit August 2014, updated by Sven Dowideit +November 2014, updated by Sven Dowideit diff --git a/docs/man/docker-restart.1.md b/docs/man/docker-restart.1.md index 2a08caa5e8..9a22688000 100644 --- a/docs/man/docker-restart.1.md +++ b/docs/man/docker-restart.1.md @@ -7,7 +7,7 @@ docker-restart - Restart a running container # SYNOPSIS **docker restart** [**-t**|**--time**[=*10*]] - CONTAINER [CONTAINER...] +CONTAINER [CONTAINER...] # DESCRIPTION Restart each container listed. diff --git a/docs/man/docker-rm.1.md b/docs/man/docker-rm.1.md index bae6a7ea8c..b8f31bd687 100644 --- a/docs/man/docker-rm.1.md +++ b/docs/man/docker-rm.1.md @@ -9,7 +9,7 @@ docker-rm - Remove one or more containers [**-f**|**--force**[=*false*]] [**-l**|**--link**[=*false*]] [**-v**|**--volumes**[=*false*]] - CONTAINER [CONTAINER...] +CONTAINER [CONTAINER...] 
# DESCRIPTION diff --git a/docs/man/docker-run.1.md b/docs/man/docker-run.1.md index e3d846749d..44c5545084 100644 --- a/docs/man/docker-run.1.md +++ b/docs/man/docker-run.1.md @@ -23,24 +23,26 @@ docker-run - Run a command in a new container [**--expose**[=*[]*]] [**-h**|**--hostname**[=*HOSTNAME*]] [**-i**|**--interactive**[=*false*]] -[**--security-opt**[=*[]*]] +[**--ipc**[=*IPC*]] [**--link**[=*[]*]] [**--lxc-conf**[=*[]*]] [**-m**|**--memory**[=*MEMORY*]] +[**--mac-address**[=*MAC-ADDRESS*]] [**--name**[=*NAME*]] [**--net**[=*"bridge"*]] [**-P**|**--publish-all**[=*false*]] [**-p**|**--publish**[=*[]*]] [**--privileged**[=*false*]] -[**--restart**[=*POLICY*]] +[**--restart**[=*RESTART*]] [**--rm**[=*false*]] +[**--security-opt**[=*[]*]] [**--sig-proxy**[=*true*]] [**-t**|**--tty**[=*false*]] [**-u**|**--user**[=*USER*]] [**-v**|**--volume**[=*[]*]] [**--volumes-from**[=*[]*]] [**-w**|**--workdir**[=*WORKDIR*]] - IMAGE [COMMAND] [ARG...] +IMAGE [COMMAND] [ARG...] # DESCRIPTION @@ -57,21 +59,26 @@ all image dependencies, from the repository in the same way running **docker pull** IMAGE, before it starts the container from that image. # OPTIONS +**-a**, **--attach**=[] + Attach to STDIN, STDOUT or STDERR. -**-a**, **--attach**=*stdin*|*stdout*|*stderr* - Attach to stdin, stdout or stderr. In foreground mode (the default when -**-d** is not specified), **docker run** can start the process in the container + In foreground mode (the default when **-d** +is not specified), **docker run** can start the process in the container and attach the console to the process’s standard input, output, and standard error. It can even pretend to be a TTY (this is what most commandline executables expect) and pass along signals. The **-a** option can be set for each of stdin, stdout, and stderr. -**--add-host**=*hostname*:*ip* +**--add-host**=[] + Add a custom host-to-IP mapping (host:ip) + Add a line to /etc/hosts. The format is hostname:ip. 
The **--add-host** option can be set multiple times. **-c**, **--cpu-shares**=0 - CPU shares in relative weight. You can increase the priority of a container + CPU shares (relative weight) + + You can increase the priority of a container with the -c option. By default, all containers run at the same priority and get the same proportion of CPU cycles, but you can tell the kernel to give more shares of CPU time to one or more containers when you start them via **docker @@ -90,33 +97,40 @@ run**. CPUs in which to allow execution (0-3, 0,1) **-d**, **--detach**=*true*|*false* - Detached mode. This runs the container in the background. It outputs the new -container's ID and any error messages. At any time you can run **docker ps** in + Detached mode: run the container in the background and print the new container ID. The default is *false*. + + At any time you can run **docker ps** in the other shell to view a list of the running containers. You can reattach to a detached container with **docker attach**. If you choose to run a container in the detached mode, then you cannot use the **-rm** option. When attached in the tty mode, you can detach from a running container without stopping the process by pressing the keys CTRL-P CTRL-Q. + **--device**=[] - Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc) + Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm) **--dns-search**=[] - Set custom DNS search domains + Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain) -**--dns**=*IP-address* - Set custom DNS servers. This option can be used to override the DNS +**--dns**=[] + Set custom DNS servers + + This option can be used to override the DNS configuration passed to the container. Typically this is necessary when the host DNS configuration is invalid for the container (e.g., 127.0.0.1). When this is the case the **--dns** flags is necessary for every run. 
-**-e**, **--env**=*environment* - Set environment variables. This option allows you to specify arbitrary +**-e**, **--env**=[] + Set environment variables + + This option allows you to specify arbitrary environment variables that are available for the process that will be launched inside of the container. +**--entrypoint**="" + Overwrite the default ENTRYPOINT of the image -**--entrypoint**=*command* This option allows you to overwrite the default entrypoint of the image that is set in the Dockerfile. The ENTRYPOINT of an image is similar to a COMMAND because it specifies what executable to run when the container starts, but it is @@ -131,28 +145,28 @@ ENTRYPOINT. **--env-file**=[] Read in a line delimited file of environment variables -**--expose**=*port* - Expose a port from the container without publishing it to your host. A -containers port can be exposed to other containers in three ways: 1) The -developer can expose the port using the EXPOSE parameter of the Dockerfile, 2) -the operator can use the **--expose** option with **docker run**, or 3) the -container can be started with the **--link**. +**--expose**=[] + Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host + +**-h**, **--hostname**="" + Container host name -**-h**, **--hostname**=*hostname* Sets the container host name that is available inside the container. **-i**, **--interactive**=*true*|*false* + Keep STDIN open even if not attached. The default is *false*. + When set to true, keep stdin open even if not attached. The default is false. 
-**--security-opt**=*secdriver*:*name*:*value* - "label:user:USER" : Set the label user for the container - "label:role:ROLE" : Set the label role for the container - "label:type:TYPE" : Set the label type for the container - "label:level:LEVEL" : Set the label level for the container - "label:disable" : Turn off label confinement for the container +**--ipc**="" + Default is to create a private IPC namespace (POSIX SysV IPC) for the container + 'container:': reuses another container shared memory, semaphores and message queues + 'host': use the host shared memory,semaphores and message queues inside the container. Note: the host mode gives the container full access to local shared memory and is therefore considered insecure. -**--link**=*name*:*alias* - Add link to another container. The format is name:alias. If the operator +**--link**=[] + Add link to another container in the form of name:alias + + If the operator uses **--link** when starting the new client container, then the client container can access the exposed port via a private networking interface. Docker will set some environment variables in the client container to help indicate @@ -161,7 +175,9 @@ which interface and port to use. **--lxc-conf**=[] (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" -**-m**, **--memory**=*memory-limit* +**-m**, **--memory**="" + Memory limit (format: , where unit = b, k, m or g) + Allows you to constrain the memory available to a container. If the host supports swap memory, then the -m memory setting can be larger than physical RAM. If a limit of 0 is specified, the container's memory is not limited. The @@ -169,15 +185,23 @@ actual limit may be rounded up to a multiple of the operating system's page size, if it is not already. The memory limit should be formatted as follows: ``, where unit = b, k, m or g. -**--name**=*name* - Assign a name to the container. 
The operator can identify a container in -three ways: +**--mac-address**="" + Container MAC address (e.g. 92:d0:c6:0a:29:33) + + Remember that the MAC address in an Ethernet network must be unique. +The IPv6 link-local address will be based on the device's MAC address +according to RFC4862. + +**--name**="" + Assign a name to the container + + The operator can identify a container in three ways: UUID long identifier (“f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778”) UUID short identifier (“f78375b1c487”) Name (“jonah”) -The UUID identifiers come from the Docker daemon, and if a name is not assigned + The UUID identifiers come from the Docker daemon, and if a name is not assigned to the container with **--name** then the daemon will also generate a random string name. The name is useful when defining links (see **--link**) (or any other place you need to identify a container). This works for both background @@ -191,90 +215,97 @@ and foreground Docker containers. 'host': use the host network stack inside the container. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure. **-P**, **--publish-all**=*true*|*false* + Publish all exposed ports to the host interfaces. The default is *false*. + When set to true publish all exposed ports to the host interfaces. The default is false. If the operator uses -P (or -p) then Docker will make the exposed port accessible on the host and the ports will be available to any -client that can reach the host. When using -P, Docker will bind the exposed -ports to a random port on the host between 49153 and 65535. To find the +client that can reach the host. When using -P, Docker will bind the exposed +ports to a random port on the host between 49153 and 65535. To find the mapping between the host ports and the exposed ports, use **docker port**. 
**-p**, **--publish**=[] - Publish a container's port to the host (format: ip:hostPort:containerPort | -ip::containerPort | hostPort:containerPort | containerPort) (use **docker port** to see the -actual mapping) + Publish a container's port to the host + format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort + (use 'docker port' to see the actual mapping) **--privileged**=*true*|*false* - Give extended privileges to this container. By default, Docker containers are + Give extended privileges to this container. The default is *false*. + + By default, Docker containers are “unprivileged” (=false) and cannot, for example, run a Docker daemon inside the Docker container. This is because by default a container is not allowed to access any devices. A “privileged” container is given access to all devices. -When the operator executes **docker run --privileged**, Docker will enable access + When the operator executes **docker run --privileged**, Docker will enable access to all devices on the host as well as set some configuration in AppArmor to allow the container nearly all the same access to the host as processes running outside of a container on the host. +**--restart**="" + Restart policy to apply when a container exits (no, on-failure[:max-retry], always) **--rm**=*true*|*false* Automatically remove the container when it exits (incompatible with -d). The default is *false*. +**--security-opt**=[] + Security Options + + "label:user:USER" : Set the label user for the container + "label:role:ROLE" : Set the label role for the container + "label:type:TYPE" : Set the label type for the container + "label:level:LEVEL" : Set the label level for the container + "label:disable" : Turn off label confinement for the container + **--sig-proxy**=*true*|*false* - Proxy received signals to the process (even in non-TTY mode). SIGCHLD, SIGSTOP, and SIGKILL are not proxied. The default is *true*. 
+ Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied. The default is *true*. **-t**, **--tty**=*true*|*false* + Allocate a pseudo-TTY. The default is *false*. + When set to true Docker can allocate a pseudo-tty and attach to the standard input of any container. This can be used, for example, to run a throwaway interactive shell. The default is value is false. +The **-t** option is incompatible with a redirection of the docker client +standard input. + **-u**, **--user**="" Username or UID +**-v**, **--volume**=[] + Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container) -**-v**, **--volume**=*volume*[:ro|:rw] - Bind mount a volume to the container. - -The **-v** option can be used one or + The **-v** option can be used one or more times to add one or more mounts to a container. These mounts can then be -used in other containers using the **--volumes-from** option. +used in other containers using the **--volumes-from** option. -The volume may be optionally suffixed with :ro or :rw to mount the volumes in + The volume may be optionally suffixed with :ro or :rw to mount the volumes in read-only or read-write mode, respectively. By default, the volumes are mounted read-write. See examples. -**--volumes-from**=*container-id*[:ro|:rw] +**--volumes-from**=[] + Mount volumes from the specified container(s) + Will mount volumes from the specified container identified by container-id. Once a volume is mounted in a one container it can be shared with other containers using the **--volumes-from** option when running those other containers. The volumes can be shared even if the original container with the -mount is not running. +mount is not running. -The container ID may be optionally suffixed with :ro or -:rw to mount the volumes in read-only or read-write mode, respectively. 
By -default, the volumes are mounted in the same mode (read write or read only) as + The container ID may be optionally suffixed with :ro or +:rw to mount the volumes in read-only or read-write mode, respectively. By +default, the volumes are mounted in the same mode (read write or read only) as the reference container. +**-w**, **--workdir**="" + Working directory inside the container -**-w**, **--workdir**=*directory* - Working directory inside the container. The default working directory for + The default working directory for running binaries within a container is the root directory (/). The developer can set a different default with the Dockerfile WORKDIR instruction. The operator can override the working directory by using the **-w** option. - -**IMAGE** - The image name or ID. You can specify a version of an image you'd like to run - the container with by adding image:tag to the command. For example, - `docker run ubuntu:14.04`. - - - -**COMMAND** - The command or program to run inside the image. - - -**ARG** - The arguments for the command to be run in the container. 
- # EXAMPLES ## Exposing log messages from the container to the host's log @@ -304,6 +335,71 @@ you’d like to connect instead, as in: # docker run -a stdin -a stdout -i -t fedora /bin/bash +## Sharing IPC between containers + +Using shm_server.c available here: http://www.cs.cf.ac.uk/Dave/C/node27.html + +Testing `--ipc=host` mode: + +Host shows a shared memory segment with 7 pids attached, happens to be from httpd: + +``` + $ sudo ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status + 0x01128e25 0 root 600 1000 7 +``` + +Now run a regular container, and it correctly does NOT see the shared memory segment from the host: + +``` + $ sudo docker run -it shm ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status +``` + +Run a container with the new `--ipc=host` option, and it now sees the shared memory segment from the host httpd: + + ``` + $ sudo docker run -it --ipc=host shm ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status + 0x01128e25 0 root 600 1000 7 +``` +Testing `--ipc=container:CONTAINERID` mode: + +Start a container with a program to create a shared memory segment: +``` + sudo docker run -it shm bash + $ sudo shm/shm_server & + $ sudo ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status + 0x0000162e 0 root 666 27 1 +``` +Create a 2nd container correctly shows no shared memory segment from 1st container: +``` + $ sudo docker run shm ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status +``` + +Create a 3rd container using the new --ipc=container:CONTAINERID option, now it shows the shared memory segment from the first: + +``` + $ sudo docker run -it --ipc=container:ed735b2264ac shm ipcs -m + $ sudo ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status + 0x0000162e 0 root 666 27 1 +``` + ## Linking Containers The 
link feature allows multiple containers to communicate with each other. For diff --git a/docs/man/docker-save.1.md b/docs/man/docker-save.1.md index ea78475b51..c02ffb101a 100644 --- a/docs/man/docker-save.1.md +++ b/docs/man/docker-save.1.md @@ -2,12 +2,12 @@ % Docker Community % JUNE 2014 # NAME -docker-save - Save an image to a tar archive (streamed to STDOUT by default) +docker-save - Save an image(s) to a tar archive (streamed to STDOUT by default) # SYNOPSIS **docker save** [**-o**|**--output**[=*OUTPUT*]] -IMAGE +IMAGE [IMAGE...] # DESCRIPTION Produces a tarred repository to the standard output stream. Contains all @@ -35,3 +35,4 @@ fedora image to a fedora-latest.tar: April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit +November 2014, updated by Sven Dowideit diff --git a/docs/man/docker-stop.1.md b/docs/man/docker-stop.1.md index 0cc19918c3..1b73e387e8 100644 --- a/docs/man/docker-stop.1.md +++ b/docs/man/docker-stop.1.md @@ -7,7 +7,7 @@ docker-stop - Stop a running container by sending SIGTERM and then SIGKILL after # SYNOPSIS **docker stop** [**-t**|**--time**[=*10*]] - CONTAINER [CONTAINER...] +CONTAINER [CONTAINER...] # DESCRIPTION Stop a running container (Send SIGTERM, and then SIGKILL after diff --git a/docs/man/docker-tag.1.md b/docs/man/docker-tag.1.md index a42ebe7702..e8550ec55d 100644 --- a/docs/man/docker-tag.1.md +++ b/docs/man/docker-tag.1.md @@ -7,7 +7,7 @@ docker-tag - Tag an image into a repository # SYNOPSIS **docker tag** [**-f**|**--force**[=*false*]] - IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG] +IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG] # DESCRIPTION This will give a new alias to an image in the repository. 
This refers to the diff --git a/docs/man/docker.1.md b/docs/man/docker.1.md index 26f5c2133a..e07687c18d 100644 --- a/docs/man/docker.1.md +++ b/docs/man/docker.1.md @@ -54,7 +54,7 @@ unix://[/path/to/socket] to use. IPv4 subnet for fixed IPs (ex: 10.20.0.0/16); this subnet must be nested in the bridge subnet (which is defined by \-b or \-\-bip) **--icc**=*true*|*false* - Enable inter\-container communication. Default is true. + Allow unrestricted inter\-container and Docker daemon host communication. If disabled, containers can still be linked together using **--link** option (see **docker-run(1)**). Default is true. **--ip**="" Default IP address to use when binding container ports. Default is `0.0.0.0`. @@ -65,18 +65,27 @@ unix://[/path/to/socket] to use. **--iptables**=*true*|*false* Disable Docker's addition of iptables rules. Default is true. +**-l**, **--log-level**="*debug*|*info*|*error*|*fatal*"" + Set the logging level. Default is `info`. + +**--label**="[]" + Set key=value labels to the daemon (displayed in `docker info`) + **--mtu**=VALUE Set the containers network mtu. Default is `1500`. **-p**="" Path to use for daemon PID file. Default is `/var/run/docker.pid` -**--registry-mirror=:// +**--registry-mirror**=:// Prepend a registry mirror to be used for image pulls. May be specified multiple times. **-s**="" Force the Docker runtime to use a specific storage driver. +**--storage-opt**=[] + Set storage driver options. See STORAGE DRIVER OPTIONS. + **-v**=*true*|*false* Print version information and quit. Default is false. @@ -196,13 +205,87 @@ inside it) **docker-wait(1)** Block until a container stops, then print its exit code -# EXAMPLES +# STORAGE DRIVER OPTIONS -For specific examples please see the man page for the specific Docker command. -For example: +Options to storage backend can be specified with **--storage-opt** flags. The +only backend which currently takes options is *devicemapper*. Therefore use these +flags with **-s=**devicemapper. 
+ +Here is the list of *devicemapper* options: + +#### dm.basesize +Specifies the size to use when creating the base device, which limits the size +of images and containers. The default value is 10G. Note, thin devices are +inherently "sparse", so a 10G device which is mostly empty doesn't use 10 GB +of space on the pool. However, the filesystem will use more space for the empty +case the larger the device is. **Warning**: This value affects the system-wide +"base" empty filesystem that may already be initialized and inherited by pulled +images. + +#### dm.loopdatasize +Specifies the size to use when creating the loopback file for the "data" +device which is used for the thin pool. The default size is 100G. Note that the +file is sparse, so it will not initially take up this much space. + +#### dm.loopmetadatasize +Specifies the size to use when creating the loopback file for the "metadata" +device which is used for the thin pool. The default size is 2G. Note that the +file is sparse, so it will not initially take up this much space. + +#### dm.fs +Specifies the filesystem type to use for the base device. The supported +options are "ext4" and "xfs". The default is "ext4". + +#### dm.mkfsarg +Specifies extra mkfs arguments to be used when creating the base device. + +#### dm.mountopt +Specifies extra mount options used when mounting the thin devices. + +#### dm.datadev +Specifies a custom blockdevice to use for data for the thin pool. + +If using a block device for device mapper storage, ideally both datadev and +metadatadev should be specified to completely avoid using the loopback device. + +#### dm.metadatadev +Specifies a custom blockdevice to use for metadata for the thin pool. + +For best performance the metadata should be on a different spindle than the +data, or even better on an SSD. + +If setting up a new metadata pool it is required to be valid. 
This can be +achieved by zeroing the first 4k to indicate empty metadata, like this: + + dd if=/dev/zero of=/dev/metadata_dev bs=4096 count=1 + +#### dm.blocksize +Specifies a custom blocksize to use for the thin pool. The default blocksize +is 64K. + +#### dm.blkdiscard +Enables or disables the use of blkdiscard when removing devicemapper devices. +This is enabled by default (only) if using loopback devices and is required to +re-sparsify the loopback file on image/container removal. + +Disabling this on loopback can lead to *much* faster container removal times, +but will prevent the space used in the `/var/lib/docker` directory from being returned to +the system for other use when containers are removed. + +# EXAMPLES +Launching docker daemon with *devicemapper* backend with particular block devices +for data and metadata: + + docker -d -s=devicemapper \ + --storage-opt dm.datadev=/dev/vdb \ + --storage-opt dm.metadatadev=/dev/vdc \ + --storage-opt dm.basesize=20G + +#### Client +For specific client examples please see the man page for the specific Docker +command. For example: man docker run # HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) based - on docker.com source material and internal work. +April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 25f84b5a08..06f9064d96 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -17,7 +17,6 @@ use_absolute_urls: true # theme: docker theme_dir: ./theme/mkdocs/ theme_center_lead: false -include_search: true copyright: Copyright © 2014, Docker, Inc. 
google_analytics: ['UA-6096819-11', 'docker.io'] @@ -99,6 +98,7 @@ pages: - ['articles/ambassador_pattern_linking.md', 'Articles', 'Cross-Host linking using ambassador containers'] - ['articles/runmetrics.md', 'Articles', 'Runtime metrics'] - ['articles/b2d_volume_resize.md', 'Articles', 'Increasing a Boot2Docker volume'] +- ['articles/systemd.md', 'Articles', 'Controlling and configuring Docker using Systemd'] # Reference - ['reference/index.md', '**HIDDEN**'] @@ -113,6 +113,7 @@ pages: - ['reference/api/registry_api_client_libraries.md', 'Reference', 'Docker Registry API Client Libraries'] - ['reference/api/hub_registry_spec.md', 'Reference', 'Docker Hub and Registry Spec'] - ['reference/api/docker_remote_api.md', 'Reference', 'Docker Remote API'] +- ['reference/api/docker_remote_api_v1.16.md', 'Reference', 'Docker Remote API v1.16'] - ['reference/api/docker_remote_api_v1.15.md', 'Reference', 'Docker Remote API v1.15'] - ['reference/api/docker_remote_api_v1.14.md', 'Reference', 'Docker Remote API v1.14'] - ['reference/api/docker_remote_api_v1.13.md', 'Reference', 'Docker Remote API v1.13'] @@ -148,3 +149,4 @@ pages: - ['contributing/index.md', '**HIDDEN**'] - ['contributing/contributing.md', 'Contribute', 'Contributing'] - ['contributing/devenvironment.md', 'Contribute', 'Development environment'] +- ['contributing/docs_style-guide.md', 'Contribute', 'Documentation style guide'] diff --git a/docs/release.sh b/docs/release.sh index cdb1a94c82..8df8960c75 100755 --- a/docs/release.sh +++ b/docs/release.sh @@ -14,6 +14,8 @@ If you're publishing the current release's documentation, also set `BUILD_ROOT=y make AWS_S3_BUCKET=docs-stage.docker.com docs-release will then push the documentation site to your s3 bucket. 
+ + Note: you can add `OPTIONS=--dryrun` to see what will be done without sending to the server EOF exit 1 } @@ -22,7 +24,7 @@ EOF VERSION=$(cat VERSION) -if [ "$$AWS_S3_BUCKET" == "docs.docker.com" ]; then +if [ "$AWS_S3_BUCKET" == "docs.docker.com" ]; then if [ "${VERSION%-dev}" != "$VERSION" ]; then echo "Please do not push '-dev' documentation to docs.docker.com ($VERSION)" exit 1 @@ -86,39 +88,18 @@ upload_current_documentation() { # a really complicated way to send only the files we want # if there are too many in any one set, aws s3 sync seems to fall over with 2 files to go # versions.html_fragment - endings=( json txt html xml css js gif png JPG ttf svg woff html_fragment ) - for i in ${endings[@]}; do - include="" - for j in ${endings[@]}; do - if [ "$i" != "$j" ];then - include="$include --exclude *.$j" - fi - done - include="--include *.$i $include" + include="--recursive --include \"*.$i\" " echo "uploading *.$i" - run="aws s3 sync --profile $BUCKET --cache-control \"max-age=3600\" --acl public-read \ - $include \ - --exclude *.text* \ - --exclude *.*~ \ - --exclude *Dockerfile \ - --exclude *.DS_Store \ - --exclude *.psd \ - --exclude *.ai \ - --exclude *.eot \ - --exclude *.otf \ - --exclude *.rej \ - --exclude *.rst \ - --exclude *.orig \ - --exclude *.py \ - $src $dst" + run="aws s3 cp $src $dst $OPTIONS --profile $BUCKET --cache-control \"max-age=3600\" --acl public-read $include" + echo "=======================" + echo "$run" echo "=======================" - #echo "$run" - #echo "=======================" $run - done } -setup_s3 +if [ "$OPTIONS" != "--dryrun" ]; then + setup_s3 +fi # Default to only building the version specific docs so we don't clober the latest by accident with old versions if [ "$BUILD_ROOT" == "yes" ]; then diff --git a/docs/sources/articles/b2d_volume_resize.md b/docs/sources/articles/b2d_volume_resize.md index 7d6790965e..1b39b49eda 100644 --- a/docs/sources/articles/b2d_volume_resize.md +++ 
b/docs/sources/articles/b2d_volume_resize.md @@ -28,7 +28,7 @@ it. Using the command line VirtualBox tools, clone the VMDK image to a VDI image: - $ vboxmanage clonehd /full/path/to/boot2docker-hd.vmdk /full/path/to/.vdi -—format VDI -—variant Standard + $ vboxmanage clonehd /full/path/to/boot2docker-hd.vmdk /full/path/to/.vdi --format VDI --variant Standard ## 3. Resize the VDI volume @@ -36,7 +36,7 @@ Choose a size that will be appropriate for your needs. If you’re spinning up a lot of containers, or your containers are particularly large, larger will be better: - $ vboxmanage modifyhd /full/path/to/.vdi —-resize + $ vboxmanage modifyhd /full/path/to/.vdi --resize ## 4. Download a disk partitioning tool ISO diff --git a/docs/sources/articles/certificates.md b/docs/sources/articles/certificates.md index 90d3f1b356..e031676402 100644 --- a/docs/sources/articles/certificates.md +++ b/docs/sources/articles/certificates.md @@ -31,7 +31,7 @@ repository. > **Note:** > If there are multiple certificates, each will be tried in alphabetical -> order. If there is an authentication error (e.g., 403, 5xx, etc.), Docker +> order. If there is an authentication error (e.g., 403, 404, 5xx, etc.), Docker > will continue to try with the next certificate. Our example is set up like this: diff --git a/docs/sources/articles/dockerfile_best-practices.md b/docs/sources/articles/dockerfile_best-practices.md index 31f932d651..85095f1c03 100644 --- a/docs/sources/articles/dockerfile_best-practices.md +++ b/docs/sources/articles/dockerfile_best-practices.md @@ -113,12 +113,6 @@ the command string itself will be used to find a match. Once the cache is invalidated, all subsequent `Dockerfile` commands will generate new images and the cache will not be used. 
- bzr \ - cvs \ - git \ - mercurial \ - subversion - ## The Dockerfile instructions Below you'll find recommendations for the best way to write the @@ -287,7 +281,7 @@ things like: And instead, do something like: - RUN mdkir -p /usr/src/things \ + RUN mkdir -p /usr/src/things \ && curl -SL http://example.com/big.tar.gz \ | tar -xJC /usr/src/things \ && make -C /usr/src/things all @@ -313,8 +307,9 @@ beginning user will then be forced to learn about `ENTRYPOINT` and `--entrypoint`. In order to avoid a situation where commands are run without clear visibility -to the user, make sure your script ends with something like `exec "$@"`. After -the entrypoint completes, the script will transparently bootstrap the command +to the user, make sure your script ends with something like `exec "$@"` (see +[the exec builtin command](http://wiki.bash-hackers.org/commands/builtin/exec)). +After the entrypoint completes, the script will transparently bootstrap the command invoked by the user, making what has been run clear to the user (for example, `docker run -it mysql mysqld --some --flags` will transparently run `mysqld --some --flags` after `ENTRYPOINT` runs `initdb`). diff --git a/docs/sources/articles/host_integration.md b/docs/sources/articles/host_integration.md index 53fc2890e8..89fd2a1f7a 100644 --- a/docs/sources/articles/host_integration.md +++ b/docs/sources/articles/host_integration.md @@ -4,31 +4,51 @@ page_keywords: systemd, upstart, supervisor, docker, documentation, host integra # Automatically Start Containers -You can use your Docker containers with process managers like -`upstart`, `systemd` and `supervisor`. +As of Docker 1.2, +[restart policies](/reference/commandline/cli/#restart-policies) are the +built-in Docker mechanism for restarting containers when they exit. If set, +restart policies will be used when the Docker daemon starts up, as typically +happens after a system boot. 
Restart policies will ensure that linked containers +are started in the correct order. -## Introduction +If restart policies don't suit your needs (i.e., you have non-Docker processes +that depend on Docker containers), you can use a process manager like +[upstart](http://upstart.ubuntu.com/), +[systemd](http://freedesktop.org/wiki/Software/systemd/) or +[supervisor](http://supervisord.org/) instead. -If you want a process manager to manage your containers you will need to -run the docker daemon with the `-r=false` so that docker will not -automatically restart your containers when the host is restarted. + +## Using a Process Manager + +Docker does not set any restart policies by default, but be aware that they will +conflict with most process managers. So don't set restart policies if you are +using a process manager. + +*Note:* Prior to Docker 1.2, restarting of Docker containers had to be +explicitly disabled. Refer to the +[previous version](/v1.1/articles/host_integration/) of this article for the +details on how to do that. When you have finished setting up your image and are happy with your running container, you can then attach a process manager to manage it. -When you run `docker start -a` docker will automatically attach to the +When you run `docker start -a`, Docker will automatically attach to the running container, or start it if needed and forward all signals so that the process manager can detect when a container stops and correctly restart it. Here are a few sample scripts for systemd and upstart to integrate with -docker. +Docker. -## Sample Upstart Script -In this example We've already created a container to run Redis with -`--name redis_server`. To create an upstart script for our container, we -create a file named `/etc/init/redis.conf` and place the following into -it: +## Examples + +The examples below show configuration files for two popular process managers, +upstart and systemd. 
In these examples, we'll assume that we have already +created a container to run Redis with `--name=redis_server`. These files define +a new service that will be started after the docker daemon service has started. + + +### upstart description "Redis container" author "Me" @@ -39,12 +59,8 @@ it: /usr/bin/docker start -a redis_server end script -Next, we have to configure docker so that it's run with the option -`-r=false`. Run the following command: - $ sudo sh -c "echo 'DOCKER_OPTS=\"-r=false\"' >> /etc/default/docker" - -## Sample systemd Script +### systemd [Unit] Description=Redis container diff --git a/docs/sources/articles/networking.md b/docs/sources/articles/networking.md index 59673ecf6f..6587efc522 100644 --- a/docs/sources/articles/networking.md +++ b/docs/sources/articles/networking.md @@ -14,7 +14,7 @@ Docker made the choice `172.17.42.1/16` when I started it a few minutes ago, for example — a 16-bit netmask providing 65,534 addresses for the host machine and its containers. -> **Note:** +> **Note:** > This document discusses advanced networking configuration > and options for Docker. In most cases you won't need this information. > If you're looking to get started with a simpler explanation of Docker @@ -104,6 +104,9 @@ Finally, several networking options can only be provided when calling * `--net=bridge|none|container:NAME_or_ID|host` — see [How Docker networks a container](#container-networking) + * `--mac-address=MACADDRESS...` — see + [How Docker networks a container](#container-networking) + * `-p SPEC` or `--publish=SPEC` — see [Binding container ports](#binding-ports) @@ -170,6 +173,7 @@ Four different options affect container domain name services. When a container process attempts to access `host` and the search domain `example.com` is set, for instance, the DNS logic will not only look up `host` but also `host.example.com`. + Use `--dns-search=.` if you don't wish to set the search domain. 
Note that Docker, in the absence of either of the last two options above, will make `/etc/resolv.conf` inside of each container look like @@ -536,9 +540,15 @@ The steps with which Docker configures a container are: separate and unique network interface namespace, there are no physical interfaces with which this name could collide. -4. Give the container's `eth0` a new IP address from within the +4. Set the interface's MAC address according to the `--mac-address` + parameter or generate a random one. + +5. Give the container's `eth0` a new IP address from within the bridge's range of network addresses, and set its default route to - the IP address that the Docker host owns on the bridge. + the IP address that the Docker host owns on the bridge. If available + the IP address is generated from the MAC address. This prevents ARP + cache invalidation problems, when a new container comes up with an + IP used in the past by another container with another MAC. With these steps complete, the container now possesses an `eth0` (virtual) network card and will find itself able to communicate with @@ -620,6 +630,7 @@ Docker do all of the configuration: $ sudo ip link set B netns $pid $ sudo ip netns exec $pid ip link set dev B name eth0 + $ sudo ip netns exec $pid ip link set eth0 address 12:34:56:78:9a:bc $ sudo ip netns exec $pid ip link set eth0 up $ sudo ip netns exec $pid ip addr add 172.17.42.99/16 dev eth0 $ sudo ip netns exec $pid ip route add default via 172.17.42.1 diff --git a/docs/sources/articles/registry_mirror.md b/docs/sources/articles/registry_mirror.md index 6cb2b958c3..5d5378e234 100644 --- a/docs/sources/articles/registry_mirror.md +++ b/docs/sources/articles/registry_mirror.md @@ -29,11 +29,11 @@ There are two steps to set up and use a local registry mirror. 
You will need to pass the `--registry-mirror` option to your Docker daemon on startup: - docker --registry-mirror=http:// -d + sudo docker --registry-mirror=http:// -d For example, if your mirror is serving on `http://10.0.0.2:5000`, you would run: - docker --registry-mirror=http://10.0.0.2:5000 -d + sudo docker --registry-mirror=http://10.0.0.2:5000 -d **NOTE:** Depending on your local host setup, you may be able to add the @@ -47,7 +47,7 @@ You will need to start a local registry mirror service. The functionality. For example, to run a local registry mirror that serves on port `5000` and mirrors the content at `registry-1.docker.io`: - docker run -p 5000:5000 \ + sudo docker run -p 5000:5000 \ -e STANDALONE=false \ -e MIRROR_SOURCE=https://registry-1.docker.io \ -e MIRROR_SOURCE_INDEX=https://index.docker.io registry @@ -57,7 +57,7 @@ port `5000` and mirrors the content at `registry-1.docker.io`: With your mirror running, pull an image that you haven't pulled before (using `time` to time it): - $ time docker pull node:latest + $ time sudo docker pull node:latest Pulling repository node [...] @@ -71,7 +71,7 @@ Now, remove the image from your local machine: Finally, re-pull the image: - $ time docker pull node:latest + $ time sudo docker pull node:latest Pulling repository node [...] diff --git a/docs/sources/articles/systemd.md b/docs/sources/articles/systemd.md new file mode 100644 index 0000000000..141deac306 --- /dev/null +++ b/docs/sources/articles/systemd.md @@ -0,0 +1,105 @@ +page_title: Controlling and configuring Docker using Systemd +page_description: Controlling and configuring Docker using Systemd +page_keywords: docker, daemon, systemd, configuration + +# Controlling and configuring Docker using Systemd + +Many Linux distributions use systemd to start the Docker daemon. This document +shows a few examples of how to customise Docker's settings. + +## Starting the Docker daemon + +Once Docker is installed, you will need to start the Docker daemon. 
+ + $ sudo systemctl start docker + # or on older distributions, you may need to use + $ sudo service docker start + +If you want Docker to start at boot, you should also: + + $ sudo systemctl enable docker + # or on older distributions, you may need to use + $ sudo chkconfig docker on + +## Custom Docker daemon options + +There are a number of ways to configure the daemon flags and environment variables +for your Docker daemon. + +If the `docker.service` file is set to use an `EnvironmentFile` +(often pointing to `/etc/sysconfig/docker`) then you can modify the +referenced file. + +Or, you may need to edit the `docker.service` file, which can be in `/usr/lib/systemd/system` +or `/etc/systemd/system`. + +### Runtime directory and storage driver + +You may want to control the disk space used for Docker images, containers +and volumes by moving it to a separate partition. + +In this example, we'll assume that your `docker.service` file looks something like: + + [Unit] + Description=Docker Application Container Engine + Documentation=http://docs.docker.com + After=network.target docker.socket + Requires=docker.socket + + [Service] + Type=notify + EnvironmentFile=-/etc/sysconfig/docker + ExecStart=/usr/bin/docker -d -H fd:// $OPTIONS + LimitNOFILE=1048576 + LimitNPROC=1048576 + + [Install] + Also=docker.socket + +This will allow us to add extra flags to the `/etc/sysconfig/docker` file by +setting `OPTIONS`: + + OPTIONS="--graph /mnt/docker-data --storage btrfs" + +You can also set other environment variables in this file, for example, the +`HTTP_PROXY` environment variables described below. + +### HTTP Proxy + +This example overrides the default `docker.service` file. + +If you are behind a HTTP proxy server, for example in corporate settings, +you will need to add this configuration in the Docker systemd service file. 
+ +First, create a systemd drop-in directory for the docker service: + + mkdir /etc/systemd/system/docker.service.d + +Now create a file called `/etc/systemd/system/docker.service.d/http-proxy.conf` +that adds the `HTTP_PROXY` environment variable: + + [Service] + Environment="HTTP_PROXY=http://proxy.example.com:80/" + +If you have internal Docker registries that you need to contact without +proxying you can specify them via the `NO_PROXY` environment variable: + + Environment="HTTP_PROXY=http://proxy.example.com:80/" "NO_PROXY=localhost,127.0.0.0/8,docker-registry.somecorporation.com" + +Flush changes: + + $ sudo systemctl daemon-reload + +Restart Docker: + + $ sudo systemctl restart docker + +## Manually creating the systemd unit files + +When installing the binary without a package, you may want +to integrate Docker with systemd. For this, simply install the two unit files +(service and socket) from [the github +repository](https://github.com/docker/docker/tree/master/contrib/init/systemd) +to `/etc/systemd/system`. + + diff --git a/docs/sources/contributing/contributing.md b/docs/sources/contributing/contributing.md index 7d65a0479c..850b01ce12 100644 --- a/docs/sources/contributing/contributing.md +++ b/docs/sources/contributing/contributing.md @@ -21,4 +21,4 @@ https://github.com/docker/docker/blob/master/docs/Dockerfile) specifies the tools and versions used to build the Documentation. Further interesting details can be found in the [Packaging hints]( -https://github.com/docker/docker/blob/master/hack/PACKAGERS.md). +https://github.com/docker/docker/blob/master/project/PACKAGERS.md). 
diff --git a/docs/sources/contributing/devenvironment.md b/docs/sources/contributing/devenvironment.md index ee120a79c8..f39dec6708 100644 --- a/docs/sources/contributing/devenvironment.md +++ b/docs/sources/contributing/devenvironment.md @@ -63,7 +63,14 @@ To create the Docker binary, run this command: $ sudo make binary -This will create the Docker binary in `./bundles/-dev/binary/` +This will create the Docker binary in `./bundles/-dev/binary/`. If you +do not see files in the `./bundles` directory in your host, your `BINDDIR` +setting is not set quite right. You want to run the following command: + + $ sudo make BINDDIR=. binary + +If you are on a non-Linux platform, e.g., OSX, you'll want to run `make cross` +or `make BINDDIR=. cross`. ### Using your built Docker binary diff --git a/docs/sources/contributing/docs_style-guide.md b/docs/sources/contributing/docs_style-guide.md new file mode 100644 index 0000000000..6ff3dfd1cf --- /dev/null +++ b/docs/sources/contributing/docs_style-guide.md @@ -0,0 +1,276 @@ +page_title: Style Guide for Docker Documentation +page_description: Style guide for Docker documentation describing standards and conventions for contributors +page_keywords: style, guide, docker, documentation + +# Docker documentation: style & grammar conventions + +## Style standards + +Over time, different publishing communities have written standards for the style +and grammar they prefer in their publications. These standards are called +[style guides](http://en.wikipedia.org/wiki/Style_guide). Generally, Docker’s +documentation uses the standards described in the +[Associated Press's (AP) style guide](http://en.wikipedia.org/wiki/AP_Stylebook). +If a question about syntactical, grammatical, or lexical practice comes up, +refer to the AP guide first. If you don’t have a copy of (or online subscription +to) the AP guide, you can almost always find an answer to a specific question by +searching the web. 
If you can’t find an answer, please ask a +[maintainer](https://github.com/docker/docker/blob/master/docs/MAINTAINERS) and +we will find the answer. + +That said, please don't get too hung up on using correct style. We'd rather have +you submit good information that doesn't conform to the guide than no +information at all. Docker's tech writers are always happy to help you with the +prose, and we promise not to judge or use a red pen! + +> **Note:** +> The documentation is written with paragraphs wrapped at 80 column lines to +> make it easier for terminal use. You can probably set up your favorite text +> editor to do this automatically for you. + +### Prose style + +In general, try to write simple, declarative prose. We prefer short, +single-clause sentences and brief three-to-five sentence paragraphs. Try to +choose vocabulary that is straightforward and precise. Avoid creating new terms, +using obscure terms or, in particular, using a lot of jargon. For example, use +"use" instead of leveraging "leverage". + +That said, don’t feel like you have to write for localization or for +English-as-a-second-language (ESL) speakers specifically. Assume you are writing +for an ordinary speaker of English with a basic university education. If your +prose is simple, clear, and straightforward it will translate readily. + +One way to think about this is to assume Docker’s users are generally university +educated and read at at least a "16th" grade level (meaning they have a +university degree). You can use a [readability +tester](https://readability-score.com/) to help guide your judgement. For +example, the readability score for the phrase "Containers should be ephemeral" +is around the 13th grade level (first year at university), and so is acceptable. + +In all cases, we prefer clear, concise communication over stilted, formal +language. Don't feel like you have to write documentation that "sounds like +technical writing." 
+ +### Metaphor and figurative language + +One exception to the "don’t write directly for ESL" rule is to avoid the use of +metaphor or other +[figurative language](http://en.wikipedia.org/wiki/Literal_and_figurative_language) to +describe things. There are too many cultural and social issues that can prevent +a reader from correctly interpreting a metaphor. + +## Specific conventions + +Below are some specific recommendations (and a few deviations) from AP style +that we use in our docs. + +### Contractions + +As long as your prose does not become too slangy or informal, it's perfectly +acceptable to use contractions in our documentation. Make sure to use +apostrophes correctly. + +### Use of dashes in a sentence. + +Dashes refers to the en dash (–) and the em dash (—). Dashes can be used to +separate parenthetical material. + +Usage Example: This is an example of a Docker client – which uses the Big Widget +to run – and does x, y, and z. + +Use dashes cautiously and consider whether commas or parentheses would work just +as well. We always emphasize short, succinct sentences. + +More info from the always handy [Grammar Girl site](http://www.quickanddirtytips.com/education/grammar/dashes-parentheses-and-commas). + +### Pronouns + +It's okay to use first and second person pronouns. Specifically, use "we" to +refer to Docker and "you" to refer to the user. For example, "We built the +`exec` command so you can resize a TTY session." + +As much as possible, avoid using gendered pronouns ("he" and "she", etc.). +Either recast the sentence so the pronoun is not needed or, less preferably, +use "they" instead. If you absolutely can't get around using a gendered pronoun, +pick one and stick to it. Which one you choose is up to you. One common +convention is to use the pronoun of the author's gender, but if you prefer to +default to "he" or "she", that's fine too. + +### Capitalization + +#### In general + +Only proper nouns should be capitalized in body text. 
In general, strive to be +as strict as possible in applying this rule. Avoid using capitals for emphasis +or to denote "specialness". + +The word "Docker" should always be capitalized when referring to either the +company or the technology. The only exception is when the term appears in a code +sample. + +#### Starting sentences + +Because code samples should always be written exactly as they would appear +on-screen, you should avoid starting sentences with a code sample. + +#### In headings + +Headings take sentence capitalization, meaning that only the first letter is +capitalized (and words that would normally be capitalized in a sentence, e.g., +"Docker"). Do not use Title Case (i.e., capitalizing every word) for headings. Generally, we adhere to [AP style +for titles](http://www.quickanddirtytips.com/education/grammar/capitalizing-titles). + +## Periods + +We prefer one space after a period at the end of a sentence, not two. + +See [lists](#lists) below for how to punctuate list items. + +### Abbreviations and acronyms + +* Exempli gratia (e.g.) and id est ( i.e.): these should always have periods and +are always followed by a comma. + +* Acronyms are pluralized by simply adding "s", e.g., PCs, OSs. + +* On first use on a given page, the complete term should be used, with the +abbreviation or acronym in parentheses. E.g., Red Hat Enterprise Linux (RHEL). +The exception is common, non-technical acronyms like AKA or ASAP. Note that +acronyms other than i.e. and e.g. are capitalized. + +* Other than "e.g." and "i.e." (as discussed above), acronyms do not take +periods, PC not P.C. + + +### Lists + +When writing lists, keep the following in mind: + +Use bullets when the items being listed are independent of each other and the +order of presentation is not important. + +Use numbers for steps that have to happen in order or if you have mentioned the +list in introductory text. 
For example, if you wrote "There are three config +settings available for SSL, as follows:", you would number each config setting +in the subsequent list. + +In all lists, if an item is a complete sentence, it should end with a +period. Otherwise, we prefer no terminal punctuation for list items. +Each item in a list should start with a capital. + +### Numbers + +Write out numbers in body text and titles from one to ten. From 11 on, use numerals. + +### Notes + +Use notes sparingly and only to bring things to the reader's attention that are +critical or otherwise deserving of being called out from the body text. Please +format all notes as follows: + + > **Note:** + > One line of note text + > another line of note text + +### Avoid excess use of "i.e." + +Minimize your use of "i.e.". It can add an unnecessary interpretive burden on +the reader. Avoid writing "This is a thing, i.e., it is like this". Just +say what it is: "This thing is …" + +### Preferred usages + +#### Login vs. log in. + +A "login" is a noun (one word), as in "Enter your login". "Log in" is a compound +verb (two words), as in "Log in to the terminal". + +### Oxford comma + +One way in which we differ from AP style is that Docker’s docs use the [Oxford +comma](http://en.wikipedia.org/wiki/Serial_comma) in all cases. That’s our +position on this controversial topic, we won't change our mind, and that’s that! + +### Code and UI text styling + +We require `code font` styling (monospace, sans-serif) for all text that refers +to a command or other input or output from the CLI. This includes file paths +(e.g., `/etc/hosts/docker.conf`). If you enclose text in backticks (`) markdown +will style the text as code. + +Text from a CLI should be quoted verbatim, even if it contains errors or its +style contradicts this guide. You can add "(sic)" after the quote to indicate +the errors are in the quote and are not errors in our docs. 
+ +Text taken from a GUI (e.g., menu text or button text) should appear in "double +quotes". The text should take the exact same capitalisation, etc. as appears in +the GUI. E.g., Click "Continue" to save the settings. + +Text that refers to a keyboard command or hotkey is capitalized (e.g., Ctrl-D). + +When writing CLI examples, give the user hints by making the examples resemble +exactly what they see in their shell: + +* Indent shell examples by 4 spaces so they get rendered as code blocks. +* Start typed commands with `$ ` (dollar space), so that they are easily +differentiated from program output. +* Program output has no prefix. +* Comments begin with # (hash space). +* In-container shell commands, begin with `$$ ` (dollar dollar space). + +Please test all code samples to ensure that they are correct and functional so +that users can successfully cut-and-paste samples directly into the CLI. + +## Pull requests + +The pull request (PR) process is in place so that we can ensure changes made to +the docs are the best changes possible. A good PR will do some or all of the +following: + +* Explain why the change is needed +* Point out potential issues or questions +* Ask for help from experts in the company or the community +* Encourage feedback from core developers and others involved in creating the +software being documented. + +Writing a PR that is singular in focus and has clear objectives will encourage +all of the above. Done correctly, the process allows reviewers (maintainers and +community members) to validate the claims of the documentation and identify +potential problems in communication or presentation. + +### Commit messages + +In order to write clear, useful commit messages, please follow these +[recommendations](http://robots.thoughtbot.com/5-useful-tips-for-a-better-commit-message). + +## Links + +For accessibility and usability reasons, avoid using phrases such as "click +here" for link text. 
Recast your sentence so that the link text describes the +content of the link, as we did in the +["Commit messages" section](#commit-messages) above. + +You can use relative links (../linkeditem) to link to other pages in Docker's +documentation. + +## Graphics + +When you need to add a graphic, try to make the file-size as small as possible. +If you need help reducing file-size of a high-resolution image, feel free to +contact us for help. +Usually, graphics should go in the same directory as the .md file that +references them, or in a subdirectory for images if one already exists. + +The preferred file format for graphics is PNG, but GIF and JPG are also +acceptable. + +If you are referring to a specific part of the UI in an image, use +call-outs (circles and arrows or lines) to highlight what you’re referring to. +Line width for call-outs should not exceed five pixels. The preferred color for +call-outs is red. + +Be sure to include descriptive alt-text for the graphic. This greatly helps +users with accessibility issues. + +Lastly, be sure you have permission to use any included graphics. \ No newline at end of file diff --git a/docs/sources/docker-hub/builds.md b/docs/sources/docker-hub/builds.md index 7bf8b27eb2..5d73e4aae9 100644 --- a/docs/sources/docker-hub/builds.md +++ b/docs/sources/docker-hub/builds.md @@ -209,7 +209,7 @@ repository's full description.The build process will look for a > rewritten the next time the Automated Build has been built. To make changes, > modify the `README.md` from the Git repository. -### Build triggers +## Remote Build triggers If you need a way to trigger Automated Builds outside of GitHub or Bitbucket, you can set up a build trigger. When you turn on the build trigger for an @@ -219,6 +219,16 @@ This will trigger the Automated Build, much as with a GitHub webhook. Build triggers are available under the Settings menu of each Automated Build repo on the Docker Hub. 
+![Build trigger screen](/docker-hub/hub-images/build-trigger.png) + +You can use `curl` to trigger a build: + +``` +$ curl --data "build=true" -X POST https://registry.hub.docker.com/u/svendowideit/testhook/trigger/be579c +82-7c0e-11e4-81c4-0242ac110020/ +OK +``` + > **Note:** > You can only trigger one build at a time and no more than one > every five minutes. If you already have a build pending, or if you @@ -226,53 +236,96 @@ repo on the Docker Hub. > To verify everything is working correctly, check the logs of last > ten triggers on the settings page . -### Webhooks +## Webhooks Automated Builds also include a Webhooks feature. Webhooks can be called -after a successful repository push is made. +after a successful repository push is made. This includes when a new tag is added +to an existing image. The webhook call will generate a HTTP POST with the following JSON payload: ``` { - "push_data":{ - "pushed_at":1385141110, - "images":[ - "imagehash1", - "imagehash2", - "imagehash3" - ], - "pusher":"username" - }, - "repository":{ - "status":"Active", - "description":"my docker repo that does cool things", - "is_automated":false, - "full_description":"This is my full description", - "repo_url":"https://registry.hub.docker.com/u/username/reponame/", - "owner":"username", - "is_official":false, - "is_private":false, - "name":"reponame", - "namespace":"username", - "star_count":1, - "comment_count":1, - "date_created":1370174400, - "dockerfile":"my full dockerfile is listed here", - "repo_name":"username/reponame" - } + "callback_url": "https://registry.hub.docker.com/u/svendowideit/testhook/hook/2141b5bi5i5b02bec211i4eeih0242eg11000a/", + "push_data": { + "images": [], + "pushed_at": 1.417566161e+09, + "pusher": "trustedbuilder" + }, + "repository": { + "comment_count": 0, + "date_created": 1.417494799e+09, + "description": "", + "dockerfile": "#\n# BUILD\u0009\u0009docker build -t svendowideit/apt-cacher .\n# RUN\u0009\u0009docker run -d -p 3142:3142 -name 
apt-cacher-run apt-cacher\n#\n# and then you can run containers with:\n# \u0009\u0009docker run -t -i -rm -e http_proxy http://192.168.1.2:3142/ debian bash\n#\nFROM\u0009\u0009ubuntu\nMAINTAINER\u0009SvenDowideit@home.org.au\n\n\nVOLUME\u0009\u0009[\"/var/cache/apt-cacher-ng\"]\nRUN\u0009\u0009apt-get update ; apt-get install -yq apt-cacher-ng\n\nEXPOSE \u0009\u00093142\nCMD\u0009\u0009chmod 777 /var/cache/apt-cacher-ng ; /etc/init.d/apt-cacher-ng start ; tail -f /var/log/apt-cacher-ng/*\n", + "full_description": "Docker Hub based automated build from a GitHub repo", + "is_official": false, + "is_private": true, + "is_trusted": true, + "name": "testhook", + "namespace": "svendowideit", + "owner": "svendowideit", + "repo_name": "svendowideit/testhook", + "repo_url": "https://registry.hub.docker.com/u/svendowideit/testhook/", + "star_count": 0, + "status": "Active" + } } ``` -Webhooks are available under the Settings menu of each Automated -Build's repo. +Webhooks are available under the Settings menu of each Repository. > **Note:** If you want to test your webhook out we recommend using > a tool like [requestb.in](http://requestb.in/). +### Webhook chains -### Repository links +Webhook chains allow you to chain calls to multiple services. For example, +you can use this to trigger a deployment of your container only after +it has been successfully tested, then update a separate Changelog once the +deployment is complete. +After clicking the "Add webhook" button, simply add as many URLs as necessary +in your chain. + +The first webhook in a chain will be called after a successful push. Subsequent +URLs will be contacted after the callback has been validated. + +### Validating a callback + +In order to validate a callback in a webhook chain, you need to + +1. Retrieve the `callback_url` value in the request's JSON payload. +1. Send a POST request to this URL containing a valid JSON body. 
+ +> **Note**: A chain request will only be considered complete once the last +> callback has been validated. + +To help you debug or simply view the results of your webhook(s), +view the "History" of the webhook available on its settings page. + +### Callback JSON data + +The following parameters are recognized in callback data: + +* `state` (required): Accepted values are `success`, `failure` and `error`. + If the state isn't `success`, the webhook chain will be interrupted. +* `description`: A string containing miscellaneous information that will be + available on the Docker Hub. Maximum 255 characters. +* `context`: A string containing the context of the operation. Can be retrieved + from the Docker Hub. Maximum 100 characters. +* `target_url`: The URL where the results of the operation can be found. Can be + retrieved on the Docker Hub. + +*Example callback payload:* + + { + "state": "success", + "description": "387 tests PASSED", + "context": "Continuous integration by Acme CI", + "target_url": "http://ci.acme.com/results/afd339c1c3d27" + } + +## Repository links Repository links are a way to associate one Automated Build with another. 
If one gets updated,the linking system triggers a rebuild diff --git a/docs/sources/docker-hub/hub-images/build-trigger.png b/docs/sources/docker-hub/hub-images/build-trigger.png new file mode 100644 index 0000000000..25597a27bd Binary files /dev/null and b/docs/sources/docker-hub/hub-images/build-trigger.png differ diff --git a/docs/sources/docker-hub/official_repos.md b/docs/sources/docker-hub/official_repos.md index 5a948c6263..4ec431238b 100644 --- a/docs/sources/docker-hub/official_repos.md +++ b/docs/sources/docker-hub/official_repos.md @@ -60,7 +60,7 @@ should also: * Be named `README-short.txt` * Reside in the repo for the “latest” tag -* Not exceed 200 characters +* Not exceed 100 characters ### A logo diff --git a/docs/sources/docker-hub/repos.md b/docs/sources/docker-hub/repos.md index 42d97d8bf2..0749c0814c 100644 --- a/docs/sources/docker-hub/repos.md +++ b/docs/sources/docker-hub/repos.md @@ -1,6 +1,6 @@ page_title: Repositories and Images on Docker Hub page_description: Repositories and Images on Docker Hub -page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub, docs, documentation +page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub, webhooks, docs, documentation # Repositories and Images on Docker Hub @@ -110,35 +110,80 @@ similar to the example shown below. 
*Example webhook JSON payload:* - { - "push_data":{ - "pushed_at":1385141110, - "images":[ - "imagehash1", - "imagehash2", - "imagehash3" - ], - "pusher":"username" - }, - "repository":{ - "status":"Active", - "description":"my docker repo that does cool things", - "is_automated":false, - "full_description":"This is my full description", - "repo_url":"https://registry.hub.docker.com/u/username/reponame/", - "owner":"username", - "is_official":false, - "is_private":false, - "name":"reponame", - "namespace":"username", - "star_count":1, - "comment_count":1, - "date_created":1370174400, - "dockerfile":"my full dockerfile is listed here", - "repo_name":"username/reponame" - } - } +``` +{ + "callback_url": "https://registry.hub.docker.com/u/svendowideit/busybox/hook/2141bc0cdec4hebec411i4c1g40242eg110020/", + "push_data": { + "images": [], + "pushed_at": 1.417566822e+09, + "pusher": "svendowideit" + }, + "repository": { + "comment_count": 0, + "date_created": 1.417566665e+09, + "description": "", + "full_description": "webhook triggered from a 'docker push'", + "is_official": false, + "is_private": false, + "is_trusted": false, + "name": "busybox", + "namespace": "svendowideit", + "owner": "svendowideit", + "repo_name": "svendowideit/busybox", + "repo_url": "https://registry.hub.docker.com/u/svendowideit/busybox/", + "star_count": 0, + "status": "Active" +} +``` Webhooks allow you to notify people, services and other applications of -new updates to your images and repositories. +new updates to your images and repositories. To get started adding webhooks, +go to the desired repo in the Hub, and click "Webhooks" under the "Settings" +box. +### Webhook chains + +Webhook chains allow you to chain calls to multiple services. For example, +you can use this to trigger a deployment of your container only after +it has been successfully tested, then update a separate Changelog once the +deployment is complete. 
+After clicking the "Add webhook" button, simply add as many URLs as necessary +in your chain. + +The first webhook in a chain will be called after a successful push. Subsequent +URLs will be contacted after the callback has been validated. + +#### Validating a callback + +In order to validate a callback in a webhook chain, you need to + +1. Retrieve the `callback_url` value in the request's JSON payload. +1. Send a POST request to this URL containing a valid JSON body. + +> **Note**: A chain request will only be considered complete once the last +> callback has been validated. + +To help you debug or simply view the results of your webhook(s), +view the "History" of the webhook available on its settings page. + +#### Callback JSON data + +The following parameters are recognized in callback data: + +* `state` (required): Accepted values are `success`, `failure` and `error`. + If the state isn't `success`, the webhook chain will be interrupted. +* `description`: A string containing miscellaneous information that will be + available on the Docker Hub. Maximum 255 characters. +* `context`: A string containing the context of the operation. Can be retrieved + from the Docker Hub. Maximum 100 characters. +* `target_url`: The URL where the results of the operation can be found. Can be + retrieved on the Docker Hub. 
+ +*Example callback payload:* + + { + "state": "success", + "description": "387 tests PASSED", + "context": "Continuous integration by Acme CI", + "target_url": "http://ci.acme.com/results/afd339c1c3d27" + } diff --git a/docs/sources/examples/nodejs_web_app.md b/docs/sources/examples/nodejs_web_app.md index d634251fb8..3a9183e325 100644 --- a/docs/sources/examples/nodejs_web_app.md +++ b/docs/sources/examples/nodejs_web_app.md @@ -59,12 +59,8 @@ Create an empty file called `Dockerfile`: touch Dockerfile Open the `Dockerfile` in your favorite text editor -and add the following line that defines the version of Docker the image -requires to build (this example uses Docker 0.3.4): - # DOCKER-VERSION 0.3.4 - -Next, define the parent image you want to use to build your own image on +Define the parent image you want to use to build your own image on top of. Here, we'll use [CentOS](https://registry.hub.docker.com/_/centos/) (tag: `centos6`) available on the [Docker Hub](https://hub.docker.com/): @@ -108,7 +104,6 @@ defines your runtime, i.e. `node`, and the path to our app, i.e. `src/index.js` Your `Dockerfile` should now look like this: - # DOCKER-VERSION 0.3.4 FROM centos:centos6 # Enable EPEL for Node.js diff --git a/docs/sources/examples/running_ssh_service.md b/docs/sources/examples/running_ssh_service.md index 9f87fb726d..445cfe5257 100644 --- a/docs/sources/examples/running_ssh_service.md +++ b/docs/sources/examples/running_ssh_service.md @@ -46,7 +46,8 @@ the container's port 22 is mapped to: And now you can ssh as `root` on the container's IP address (you can find it with `docker inspect`) or on port `49154` of the Docker daemon's host IP address -(`ip address` or `ifconfig` can tell you that): +(`ip address` or `ifconfig` can tell you that) or `localhost` if on the +Docker daemon host: $ ssh root@192.168.1.2 -p 49154 # The password is ``screencast``. 
@@ -55,15 +56,15 @@ with `docker inspect`) or on port `49154` of the Docker daemon's host IP address ## Environment variables Using the `sshd` daemon to spawn shells makes it complicated to pass environment -variables to the user's shell via the simple Docker mechanisms, as `sshd` scrubs +variables to the user's shell via the normal Docker mechanisms, as `sshd` scrubs the environment before it starts the shell. -If you're setting values in the Dockerfile using `ENV`, you'll need to push them -to a shell initialisation file like the `/etc/profile` example in the Dockerfile +If you're setting values in the `Dockerfile` using `ENV`, you'll need to push them +to a shell initialization file like the `/etc/profile` example in the `Dockerfile` above. If you need to pass`docker run -e ENV=value` values, you will need to write a -short script to do the same before you start `sshd -D` - and then replace the +short script to do the same before you start `sshd -D` and then replace the `CMD` with that script. ## Clean up diff --git a/docs/sources/faq.md b/docs/sources/faq.md index 531afc3ea7..5e16698436 100644 --- a/docs/sources/faq.md +++ b/docs/sources/faq.md @@ -144,7 +144,7 @@ Currently the recommended way to link containers is via the link primitive. You can see details of how to [work with links here](/userguide/dockerlinks). -Also of useful when enabling more flexible service portability is the +Also useful for more flexible service portability is the [Ambassador linking pattern](/articles/ambassador_pattern_linking/). ### How do I run more than one process in a Docker container? 
diff --git a/docs/sources/http-routingtable.md b/docs/sources/http-routingtable.md index 4de7bcd3fa..ff66c7a198 100644 --- a/docs/sources/http-routingtable.md +++ b/docs/sources/http-routingtable.md @@ -44,7 +44,7 @@ [`POST /containers/(id)/wait`](../reference/api/docker_remote_api_v1.9/#post--containers-(id)-wait) ** [`POST /containers/create`](../reference/api/docker_remote_api_v1.9/#post--containers-create) ** [`GET /containers/json`](../reference/api/docker_remote_api_v1.9/#get--containers-json) ** - [`GET /containers/resize`](../reference/api/docker_remote_api_v1.9/#get--containers-resize) ** + [`POST /containers/(id)/resize`](../reference/api/docker_remote_api_v1.9/#get--containers-resize) **   **/events** [`GET /events`](../reference/api/docker_remote_api_v1.9/#get--events) ** diff --git a/docs/sources/index.md b/docs/sources/index.md index 949fe72e0a..993603eb33 100644 --- a/docs/sources/index.md +++ b/docs/sources/index.md @@ -77,8 +77,8 @@ The [Understanding Docker section](introduction/understanding-docker.md) will he ### Installation Guides -The [installation section](/installation/#installation) will show you how to install -Docker on a variety of platforms. +The [installation section](/installation/#installation) will show you how to +install Docker on a variety of platforms. ### Docker User Guide @@ -88,4 +88,12 @@ implementation, check out the [Docker User Guide](/userguide/). ## Release Notes -A summary of the changes in each release in the current series can now be found on the separate [Release Notes page](/release-notes/) +A summary of the changes in each release in the current series can now be found +on the separate [Release Notes page](/release-notes/) + +## Licensing + +Docker is licensed under the Apache License, Version 2.0. See +[LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full +license text. 
+ diff --git a/docs/sources/installation.md b/docs/sources/installation.md index 1c3c726594..7eaabeeefe 100644 --- a/docs/sources/installation.md +++ b/docs/sources/installation.md @@ -16,7 +16,7 @@ techniques for installing Docker all the time. - [Arch Linux](archlinux/) - [CRUX Linux](cruxlinux/) - [Gentoo](gentoolinux/) - - [openSUSE](openSUSE/) + - [openSUSE and SUSE Linux Enterprise](SUSE/) - [FrugalWare](frugalware/) - [Mac OS X](mac/) - [Windows](windows/) diff --git a/docs/sources/installation/openSUSE.md b/docs/sources/installation/SUSE.md similarity index 59% rename from docs/sources/installation/openSUSE.md rename to docs/sources/installation/SUSE.md index 951b8770cc..2a0aa91d9f 100644 --- a/docs/sources/installation/openSUSE.md +++ b/docs/sources/installation/SUSE.md @@ -1,40 +1,39 @@ -page_title: Installation on openSUSE -page_description: Installation instructions for Docker on openSUSE. -page_keywords: openSUSE, virtualbox, docker, documentation, installation +page_title: Installation on openSUSE and SUSE Linux Enterprise +page_description: Installation instructions for Docker on openSUSE and on SUSE Linux Enterprise. +page_keywords: openSUSE, SUSE Linux Enterprise, SUSE, SLE, docker, documentation, installation # openSUSE Docker is available in **openSUSE 12.3 and later**. Please note that due -to the current Docker limitations Docker is able to run only on the **64 -bit** architecture. +to its current limitations Docker is able to run only on **64 bit** architecture. -## Installation +Docker is not part of the official repositories of openSUSE 12.3 and +openSUSE 13.1. Hence it is necessary to add the [Virtualization +repository](https://build.opensuse.org/project/show/Virtualization) from +[OBS](https://build.opensuse.org/) to install the `docker` package. -The `docker` package from the [Virtualization -project](https://build.opensuse.org/project/show/Virtualization) on -[OBS](https://build.opensuse.org/) provides Docker on openSUSE. 
- -To proceed with Docker installation please add the right Virtualization -repository. +Execute one of the following commands to add the Virtualization repository: # openSUSE 12.3 $ sudo zypper ar -f http://download.opensuse.org/repositories/Virtualization/openSUSE_12.3/ Virtualization - $ sudo rpm --import http://download.opensuse.org/repositories/Virtualization/openSUSE_12.3/repodata/repomd.xml.key # openSUSE 13.1 $ sudo zypper ar -f http://download.opensuse.org/repositories/Virtualization/openSUSE_13.1/ Virtualization - $ sudo rpm --import http://download.opensuse.org/repositories/Virtualization/openSUSE_13.1/repodata/repomd.xml.key + +No extra repository is required for openSUSE 13.2 and later. + +# SUSE Linux Enterprise + +Docker is available in **SUSE Linux Enterprise 12 and later**. Please note that +due to its current limitations Docker is able to run only on **64 bit** +architecture. + +# Installation Install the Docker package. $ sudo zypper in docker -It's also possible to install Docker using openSUSE's1-click install. -Just visit [this](http://software.opensuse.org/package/docker) page, -select your openSUSE version and click on the installation link. This -will add the right repository to your system and it will also install -the docker package. - Now that it's installed, let's start the Docker daemon. $ sudo systemctl start docker @@ -71,5 +70,13 @@ hand to ensure the `FW_ROUTE` flag is set to `yes` like so: **Done!** +## Custom daemon options + +If you need to add an HTTP Proxy, set a different directory or partition for the +Docker runtime files, or make other customizations, read our systemd article to +learn how to [customize your systemd Docker daemon options](/articles/systemd/). + +## What's next + Continue with the [User Guide](/userguide/). 
diff --git a/docs/sources/installation/amazon.md b/docs/sources/installation/amazon.md index 3715d5c44f..58d269ad7a 100644 --- a/docs/sources/installation/amazon.md +++ b/docs/sources/installation/amazon.md @@ -4,86 +4,39 @@ page_keywords: amazon ec2, virtualization, cloud, docker, documentation, install # Amazon EC2 -There are several ways to install Docker on AWS EC2: - - - [*Amazon QuickStart (Release Candidate - March 2014)*]( - #amazon-quickstart-release-candidate-march-2014) or - - [*Amazon QuickStart*](#amazon-quickstart) or - - [*Standard Ubuntu Installation*](#standard-ubuntu-installation) +There are several ways to install Docker on AWS EC2. You can use Amazon Linux, which includes the Docker packages in its Software Repository, or opt for any of the other supported Linux images, for example a [*Standard Ubuntu Installation*](#standard-ubuntu-installation). **You'll need an** [AWS account](http://aws.amazon.com/) **first, of course.** -## Amazon QuickStart +## Amazon QuickStart with Amazon Linux AMI 2014.09.1 -1. **Choose an image:** - - Launch the [Create Instance - Wizard](https://console.aws.amazon.com/ec2/v2/home?#LaunchInstanceWizard:) - menu on your AWS Console. - - Click the `Select` button for a 64Bit Ubuntu - image. For example: Ubuntu Server 12.04.3 LTS - - For testing you can use the default (possibly free) - `t1.micro` instance (more info on - [pricing](http://aws.amazon.com/ec2/pricing/)). - - Click the `Next: Configure Instance Details` - button at the bottom right. - -2. **Tell CloudInit to install Docker:** - - When you're on the "Configure Instance Details" step, expand the - "Advanced Details" section. - - Under "User data", select "As text". - - Enter `#include https://get.docker.com` into - the instance *User Data*. - [CloudInit](https://help.ubuntu.com/community/CloudInit) is part - of the Ubuntu image you chose; it will bootstrap Docker by - running the shell script located at this URL. - -3. 
After a few more standard choices where defaults are probably ok, - your AWS Ubuntu instance with Docker should be running! - -**If this is your first AWS instance, you may need to set up your -Security Group to allow SSH.** By default all incoming ports to your new -instance will be blocked by the AWS Security Group, so you might just -get timeouts when you try to connect. - -Installing with `get.docker.com` (as above) will -create a service named `lxc-docker`. It will also -set up a [*docker group*](../binaries/#dockergroup) and you may want to -add the *ubuntu* user to it so that you don't have to use -`sudo` for every Docker command. - -Once you`ve got Docker installed, you're ready to try it out – head on -over to the [User Guide](/userguide). - -## Amazon QuickStart (Release Candidate - March 2014) - -Amazon just published new Docker-ready AMIs (2014.03 Release Candidate). -Docker packages can now be installed from Amazon's provided Software +The latest Amazon Linux AMI, 2014.09.1, is Docker ready. Docker packages can be installed from Amazon's provided Software Repository. 1. **Choose an image:** - Launch the [Create Instance Wizard](https://console.aws.amazon.com/ec2/v2/home?#LaunchInstanceWizard:) menu on your AWS Console. - - Click the `Community AMI` menu option on the - left side - - Search for `2014.03` and select one of the Amazon provided AMI, - for example `amzn-ami-pv-2014.03.rc-0.x86_64-ebs` + - In the Quick Start menu, select the Amazon provided AMI for Amazon Linux 2014.09.1 - For testing you can use the default (possibly free) - `t1.micro` instance (more info on + `t2.micro` instance (more info on [pricing](http://aws.amazon.com/ec2/pricing/)). - Click the `Next: Configure Instance Details` button at the bottom right. - 2. After a few more standard choices where defaults are probably ok, your Amazon Linux instance should be running! 3. SSH to your instance to install Docker : `ssh -i ec2-user@` - 4. 
Once connected to the instance, type `sudo yum install -y docker ; sudo service docker start` to install and start Docker +**If this is your first AWS instance, you may need to set up your Security Group to allow SSH.** By default all incoming ports to your new instance will be blocked by the AWS Security Group, so you might just get timeouts when you try to connect. + +Once you`ve got Docker installed, you're ready to try it out – head on +over to the [User Guide](/userguide). + ## Standard Ubuntu Installation If you want a more hands-on installation, then you can follow the diff --git a/docs/sources/installation/archlinux.md b/docs/sources/installation/archlinux.md index 81cc21fb02..99849c7aa0 100644 --- a/docs/sources/installation/archlinux.md +++ b/docs/sources/installation/archlinux.md @@ -53,3 +53,9 @@ service: To start on system boot: $ sudo systemctl enable docker + +## Custom daemon options + +If you need to add an HTTP Proxy, set a different directory or partition for the +Docker runtime files, or make other customizations, read our systemd article to +learn how to [customize your systemd Docker daemon options](/articles/systemd/). diff --git a/docs/sources/installation/centos.md b/docs/sources/installation/centos.md index 2f7d57d604..707afc959a 100644 --- a/docs/sources/installation/centos.md +++ b/docs/sources/installation/centos.md @@ -45,11 +45,11 @@ to `/etc/systemd/system`. CentOS-7 introduced firewalld, which is a wrapper around iptables and can conflict with Docker. -When firewalld is started or restarted it will remove the `DOCKER` chain +When `firewalld` is started or restarted it will remove the `DOCKER` chain from iptables, preventing Docker from working properly. -When using systemd, firewalld is started before Docker, but if you -start or restart firewalld after Docker, you will have to restart the Docker daemon. 
+When using systemd, `firewalld` is started before Docker, but if you +start or restart `firewalld` after Docker, you will have to restart the Docker daemon. ## Installing Docker - CentOS-6 Please note that this for CentOS-6, this package is part of [Extra Packages @@ -103,7 +103,13 @@ Run a simple bash shell to test the image: $ sudo docker run -i -t centos /bin/bash If everything is working properly, you'll get a simple bash prompt. Type -exit to continue. +`exit` to continue. + +## Custom daemon options + +If you need to add an HTTP Proxy, set a different directory or partition for the +Docker runtime files, or make other customizations, read our systemd article to +learn how to [customize your systemd Docker daemon options](/articles/systemd/). ## Dockerfiles The CentOS Project provides a number of sample Dockerfiles which you may use diff --git a/docs/sources/installation/cruxlinux.md b/docs/sources/installation/cruxlinux.md index 28efde376a..ead4c273ca 100644 --- a/docs/sources/installation/cruxlinux.md +++ b/docs/sources/installation/cruxlinux.md @@ -4,16 +4,14 @@ page_keywords: crux linux, virtualization, Docker, documentation, installation # CRUX Linux -Installing on CRUX Linux can be handled via the ports from [James -Mills](http://prologic.shortcircuit.net.au/) and are included in the +Installing on CRUX Linux can be handled via the contrib ports from +[James Mills](http://prologic.shortcircuit.net.au/) and are included in the official [contrib](http://crux.nu/portdb/?a=repo&q=contrib) ports: - docker -- docker-bin -The `docker` port will install the latest tagged -version of Docker. The `docker-bin` port will -install the latest tagged version of Docker from upstream built binaries. +The `docker` port will build and install the latest tagged version of Docker. 
+ ## Installation @@ -21,22 +19,21 @@ Assuming you have contrib enabled, update your ports tree and install docker (*a # prt-get depinst docker -You can install `docker-bin` instead if you wish to avoid compilation time. - ## Kernel Requirements To have a working **CRUX+Docker** Host you must ensure your Kernel has -the necessary modules enabled for LXC containers to function correctly -and Docker Daemon to work properly. +the necessary modules enabled for the Docker Daemon to function correctly. Please read the `README`: $ prt-get readme docker -The `docker` and `docker-bin` ports install the `contrib/check-config.sh` -script provided by the Docker contributors for checking your kernel -configuration as a suitable Docker Host. +The `docker` port installs the `contrib/check-config.sh` script +provided by the Docker contributors for checking your kernel +configuration as a suitable Docker host. + +To check your Kernel configuration run: $ /usr/share/docker/check-config.sh @@ -51,6 +48,18 @@ To start on system boot: - Edit `/etc/rc.conf` - Put `docker` into the `SERVICES=(...)` array after `net`. +## Images + +There is a CRUX image maintained by [James Mills](http://prologic.shortcircuit.net.au/) +as part of the Docker "Official Library" of images. To use this image simply pull it +or use it as part of your `FROM` line in your `Dockerfile(s)`. + + $ docker pull crux + $ docker run -i -t crux + +There are also user contributed [CRUX based image(s)](https://registry.hub.docker.com/repos/crux/) on the Docker Hub. + + ## Issues If you have any issues please file a bug with the diff --git a/docs/sources/installation/fedora.md b/docs/sources/installation/fedora.md index 9101ef1356..9253144045 100644 --- a/docs/sources/installation/fedora.md +++ b/docs/sources/installation/fedora.md @@ -67,28 +67,11 @@ member of that group in order to contact the `docker -d` process. Adding users to the `docker` group is *not* necessary for Docker versions 1.0 and above. 
-## HTTP Proxy +## Custom daemon options -If you are behind a HTTP proxy server, for example in corporate settings, -you will need to add this configuration in the Docker *systemd service file*. - -Edit file `/usr/lib/systemd/system/docker.service`. Add the following to -section `[Service]` : - - Environment="HTTP_PROXY=http://proxy.example.com:80/" - -If you have internal Docker registries that you need to contact without -proxying you can specify them via the `NO_PROXY` environment variable: - - Environment="HTTP_PROXY=http://proxy.example.com:80/" "NO_PROXY=localhost,127.0.0.0/8,docker-registry.somecorporation.com" - -Flush changes: - - $ systemctl daemon-reload - -Restart Docker: - - $ systemctl start docker +If you need to add an HTTP Proxy, set a different directory or partition for the +Docker runtime files, or make other customizations, read our systemd article to +learn how to [customize your systemd Docker daemon options](/articles/systemd/). ## What next? diff --git a/docs/sources/installation/frugalware.md b/docs/sources/installation/frugalware.md index 2c2f922613..6b4db23b26 100644 --- a/docs/sources/installation/frugalware.md +++ b/docs/sources/installation/frugalware.md @@ -42,3 +42,9 @@ service: To start on system boot: $ sudo systemctl enable lxc-docker + +## Custom daemon options + +If you need to add an HTTP Proxy, set a different directory or partition for the +Docker runtime files, or make other customizations, read our systemd article to +learn how to [customize your systemd Docker daemon options](/articles/systemd/). 
diff --git a/docs/sources/installation/gentoolinux.md b/docs/sources/installation/gentoolinux.md index 39333e63e6..716eab9d82 100644 --- a/docs/sources/installation/gentoolinux.md +++ b/docs/sources/installation/gentoolinux.md @@ -91,3 +91,7 @@ To start the `docker` daemon: To start on system boot: $ sudo systemctl enable docker + +If you need to add an HTTP Proxy, set a different directory or partition for the +Docker runtime files, or make other customizations, read our systemd article to +learn how to [customize your systemd Docker daemon options](/articles/systemd/). diff --git a/docs/sources/installation/mac.md b/docs/sources/installation/mac.md index 0707c56b7b..89fed17115 100644 --- a/docs/sources/installation/mac.md +++ b/docs/sources/installation/mac.md @@ -22,8 +22,8 @@ virtual machine (using VirtualBox) that's all set up to run the Docker daemon. ## Installation 1. Download the latest release of the [Docker for OS X Installer]( - https://github.com/boot2docker/osx-installer/releases) (Look for the green - Boot2Docker-x.x.x.pkg button near the bottom of the page.) + https://github.com/boot2docker/osx-installer/releases/latest) (Look for the + green Boot2Docker-x.x.x.pkg button near the bottom of the page.) 2. Run the installer by double-clicking the downloaded package, which will install a VirtualBox VM, Docker itself, and the Boot2Docker management tool. @@ -55,7 +55,7 @@ for more information. ## Upgrading 1. Download the latest release of the [Docker for OS X Installer]( - https://github.com/boot2docker/osx-installer/releases) + https://github.com/boot2docker/osx-installer/releases/latest) 2. If Boot2Docker is currently running, stop it with `boot2docker stop`. Then, run the installer package, which will update Docker and the Boot2Docker management tool. 
diff --git a/docs/sources/installation/oracle.md b/docs/sources/installation/oracle.md index 05bb3d9808..6d2f782b49 100644 --- a/docs/sources/installation/oracle.md +++ b/docs/sources/installation/oracle.md @@ -75,6 +75,12 @@ and set `enabled=1` in the `[ol6_addons]` or the `[ol7_addons]` stanza. **Done!** +## Custom daemon options + +If you need to add an HTTP Proxy, set a different directory or partition for the +Docker runtime files, or make other customizations, read our systemd article to +learn how to [customize your systemd Docker daemon options](/articles/systemd/). + ## Using the btrfs storage engine Docker on Oracle Linux 6 and 7 supports the use of the btrfs storage engine. @@ -116,5 +122,4 @@ Request at [My Oracle Support](http://support.oracle.com). If you do not have an Oracle Linux Support Subscription, you can use the [Oracle Linux -Forum](https://community.oracle.com/community/server_%26_storage_systems/linux/ -oracle_linux) for community-based support. +Forum](https://community.oracle.com/community/server_%26_storage_systems/linux/oracle_linux) for community-based support. diff --git a/docs/sources/installation/rhel.md b/docs/sources/installation/rhel.md index 74a293b513..59ab049641 100644 --- a/docs/sources/installation/rhel.md +++ b/docs/sources/installation/rhel.md @@ -83,6 +83,13 @@ Now let's verify that Docker is working. Continue with the [User Guide](/userguide/). +## Custom daemon options + +If you need to add an HTTP Proxy, set a different directory or partition for the +Docker runtime files, or make other customizations, read our systemd article to +learn how to [customize your systemd Docker daemon options](/articles/systemd/). + + ## Issues? 
If you have any issues - please report them directly in the diff --git a/docs/sources/installation/ubuntulinux.md b/docs/sources/installation/ubuntulinux.md index efeeeea2e1..09b776f08d 100644 --- a/docs/sources/installation/ubuntulinux.md +++ b/docs/sources/installation/ubuntulinux.md @@ -29,8 +29,9 @@ To install the latest Ubuntu package (may not be the latest Docker release): $ sudo apt-get update $ sudo apt-get install docker.io - $ sudo ln -sf /usr/bin/docker.io /usr/local/bin/docker - $ sudo sed -i '$acomplete -F _docker docker' /etc/bash_completion.d/docker.io + +Then, to enable tab-completion of Docker commands in BASH, either restart BASH or: + $ source /etc/bash_completion.d/docker.io If you'd like to try the latest version of Docker: @@ -202,7 +203,18 @@ Type `exit` to exit **Done!**, now continue with the [User Guide](/userguide/). -### Giving non-root access +### Upgrade + +To install the latest version of Docker, use the standard +`apt-get` method: + + # update your sources list + $ sudo apt-get update + + # install the latest + $ sudo apt-get install lxc-docker + +## Giving non-root access The `docker` daemon always runs as the `root` user, and since Docker version 0.5.2, the `docker` daemon binds to a Unix socket instead of a @@ -221,7 +233,7 @@ alternative group. > **Warning**: > The `docker` group (or the group specified with the `-G` flag) is > `root`-equivalent; see [*Docker Daemon Attack Surface*]( -> /articles/security/#dockersecurity-daemon) details. +> /articles/security/#dockersecurity-daemon) for details. **Example:** @@ -238,17 +250,6 @@ alternative group. 
# If you are in Ubuntu 14.04, use docker.io instead of docker $ sudo service docker restart -### Upgrade - -To install the latest version of docker, use the standard -`apt-get` method: - - # update your sources list - $ sudo apt-get update - - # install the latest - $ sudo apt-get install lxc-docker - ## Memory and Swap Accounting If you want to enable memory and swap accounting, you must add the diff --git a/docs/sources/installation/windows.md b/docs/sources/installation/windows.md index 6220cd6b6e..667ce2935d 100644 --- a/docs/sources/installation/windows.md +++ b/docs/sources/installation/windows.md @@ -21,7 +21,7 @@ virtual machine and runs the Docker daemon. ## Installation -1. Download the latest release of the [Docker for Windows Installer](https://github.com/boot2docker/windows-installer/releases) +1. Download the latest release of the [Docker for Windows Installer](https://github.com/boot2docker/windows-installer/releases/latest) 2. Run the installer, which will install VirtualBox, MSYS-git, the boot2docker Linux ISO, and the Boot2Docker management tool. ![](/installation/images/windows-installer.png) @@ -37,7 +37,7 @@ and the Boot2Docker management tool. ## Upgrading 1. Download the latest release of the [Docker for Windows Installer]( - https://github.com/boot2docker/windows-installer/releases) + https://github.com/boot2docker/windows-installer/releases/latest) 2. Run the installer, which will update the Boot2Docker management tool. diff --git a/docs/sources/reference/api/docker-io_api.md b/docs/sources/reference/api/docker-io_api.md index c21781a42a..a7557bacb5 100644 --- a/docs/sources/reference/api/docker-io_api.md +++ b/docs/sources/reference/api/docker-io_api.md @@ -503,44 +503,3 @@ Status Codes: - **401** – Unauthorized - **403** – Account is not Active - **404** – User not found - -## Search - -If you need to search the index, this is the endpoint you would use. - -`GET /v1/search` - -Search the Index given a search term. 
It accepts - - [GET](http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html#sec9.3) - only. - -**Example request**: - - GET /v1/search?q=search_term HTTP/1.1 - Host: index.docker.io - Accept: application/json - -**Example response**: - - HTTP/1.1 200 OK - Vary: Accept - Content-Type: application/json - - {"query":"search_term", - "num_results": 3, - "results" : [ - {"name": "ubuntu", "description": "An ubuntu image..."}, - {"name": "centos", "description": "A centos image..."}, - {"name": "fedora", "description": "A fedora image..."} - ] - } - -Query Parameters: - -- **q** – what you want to search for - -Status Codes: - -- **200** – no error -- **500** – server error diff --git a/docs/sources/reference/api/docker_remote_api.md b/docs/sources/reference/api/docker_remote_api.md index 39c83743bf..530b15d41a 100644 --- a/docs/sources/reference/api/docker_remote_api.md +++ b/docs/sources/reference/api/docker_remote_api.md @@ -8,8 +8,10 @@ page_keywords: API, Docker, rcli, REST, documentation and the client must have `root` access to interact with the daemon. - If the Docker daemon is set to use an encrypted TCP socket (`--tls`, or `--tlsverify`) as with Boot2Docker 1.3.0, then you need to add extra - parameters to `curl` when making test API requests: + parameters to `curl` or `wget` when making test API requests: `curl --insecure --cert ~/.docker/cert.pem --key ~/.docker/key.pem https://boot2docker:2376/images/json` + or + `wget --no-check-certificate --certificate=$DOCKER_CERT_PATH/cert.pem --private-key=$DOCKER_CERT_PATH/key.pem https://boot2docker:2376/images/json -O - -q` - If a group named `docker` exists on your system, docker will apply ownership of the socket to the group. - The API tends to be REST, but for some complex commands, like attach @@ -28,13 +30,47 @@ page_keywords: API, Docker, rcli, REST, documentation Client applications need to take this into account to ensure they will not break when talking to newer Docker daemons. 
-The current version of the API is v1.15 +The current version of the API is v1.16 Calling `/info` is the same as calling -`/v1.15/info`. +`/v1.16/info`. You can still call an old version of the API using -`/v1.14/info`. +`/v1.15/info`. + +## v1.16 + +### Full Documentation + +[*Docker Remote API v1.16*](/reference/api/docker_remote_api_v1.16/) + +### What's new + +`GET /info` + +**New!** +`info` now returns the number of CPUs available on the machine (`NCPU`), +total memory available (`MemTotal`), a user-friendly name describing the running Docker daemon (`Name`), a unique ID identifying the daemon (`ID`), and +a list of daemon labels (`Labels`). + +`POST /containers/create` + +**New!** +You can set the new container's MAC address explicitly. + +**New!** +Volumes are now initialized when the container is created. + +`POST /containers/(id)/start` + +**New!** +Passing the container's `HostConfig` on start is now deprecated. You should +set this when creating the container. + +`POST /containers/(id)/copy` + +**New!** +You can now copy data which is contained in a volume. ## v1.15 @@ -44,6 +80,12 @@ You can still call an old version of the API using ### What's new +`POST /containers/create` + +**New!** +It is now possible to set a container's HostConfig when creating a container. +Previously this was only available when starting a container. 
+ ## v1.14 ### Full Documentation diff --git a/docs/sources/reference/api/docker_remote_api_v1.10.md b/docs/sources/reference/api/docker_remote_api_v1.10.md index 52bbe2e486..eb3f5cc1e5 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.10.md +++ b/docs/sources/reference/api/docker_remote_api_v1.10.md @@ -363,14 +363,14 @@ Start the container `id` "LxcConf":[{"Key":"lxc.utsname","Value":"docker"}], "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] }, "PublishAllPorts":false, - "Privileged":false + "Privileged":false, "Dns": ["8.8.8.8"], "VolumesFrom": ["parent", "other:ro"] } **Example response**: - HTTP/1.1 204 No Conten + HTTP/1.1 204 No Content Content-Type: text/plain Json Parameters: @@ -519,7 +519,7 @@ Status Codes: `STREAM_TYPE` can be: - 0: stdin (will be written on stdout) -- 1: stdou +- 1: stdout - 2: stderr `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of diff --git a/docs/sources/reference/api/docker_remote_api_v1.11.md b/docs/sources/reference/api/docker_remote_api_v1.11.md index 2368daf4ec..838d199ea7 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.11.md +++ b/docs/sources/reference/api/docker_remote_api_v1.11.md @@ -406,7 +406,7 @@ Start the container `id` **Example response**: - HTTP/1.1 204 No Conten + HTTP/1.1 204 No Content Content-Type: text/plain Json Parameters: @@ -555,7 +555,7 @@ Status Codes: `STREAM_TYPE` can be: - 0: stdin (will be written on stdout) -- 1: stdou +- 1: stdout - 2: stderr `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of diff --git a/docs/sources/reference/api/docker_remote_api_v1.12.md b/docs/sources/reference/api/docker_remote_api_v1.12.md index 8b245f5e9c..f38b018ef9 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.12.md +++ b/docs/sources/reference/api/docker_remote_api_v1.12.md @@ -416,7 +416,7 @@ Start the container `id` **Example response**: - HTTP/1.1 204 No Conten + HTTP/1.1 204 No Content Content-Type: text/plain Json Parameters: @@ -603,7 +603,7 @@ Status Codes: `STREAM_TYPE` 
can be: - 0: stdin (will be written on stdout) -- 1: stdou +- 1: stdout - 2: stderr `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of @@ -1074,7 +1074,7 @@ Query Parameters: - **q** – suppress verbose build output - **nocache** – do not use the cache when building the image - **rm** - remove intermediate containers after a successful build (default behavior) -- **forcerm - always remove intermediate containers (includes rm) +- **forcerm** - always remove intermediate containers (includes rm) Request Headers: @@ -1208,7 +1208,7 @@ Create a new image from a container's changes **Example request**: - POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 + POST /commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 Content-Type: application/json { @@ -1256,7 +1256,7 @@ Query Parameters: - **container** – source container - **repo** – repository - **tag** – tag -- **m** – commit message +- **comment** – commit message - **author** – author (e.g., "John Hannibal Smith <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") diff --git a/docs/sources/reference/api/docker_remote_api_v1.13.md b/docs/sources/reference/api/docker_remote_api_v1.13.md index 1a25da18ae..f5ca931fe7 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.13.md +++ b/docs/sources/reference/api/docker_remote_api_v1.13.md @@ -407,7 +407,7 @@ Start the container `id` **Example response**: - HTTP/1.1 204 No Conten + HTTP/1.1 204 No Content Content-Type: text/plain Json Parameters: @@ -596,7 +596,7 @@ Status Codes: `STREAM_TYPE` can be: - 0: stdin (will be written on stdout) -- 1: stdou +- 1: stdout - 2: stderr `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of @@ -1063,7 +1063,7 @@ Query Parameters: - **q** – suppress verbose build output - **nocache** – do not use the cache when building the image - **rm** - remove intermediate containers after a successful build (default behavior) -- **forcerm - always remove intermediate containers (includes rm) +- **forcerm** - always 
remove intermediate containers (includes rm) Request Headers: @@ -1197,7 +1197,7 @@ Create a new image from a container's changes **Example request**: - POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 + POST /commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 Content-Type: application/json { @@ -1245,7 +1245,7 @@ Query Parameters: - **container** – source container - **repo** – repository - **tag** – tag -- **m** – commit message +- **comment** – commit message - **author** – author (e.g., "John Hannibal Smith <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") diff --git a/docs/sources/reference/api/docker_remote_api_v1.14.md b/docs/sources/reference/api/docker_remote_api_v1.14.md index 0c806bdd2e..a5392f3bc9 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.14.md +++ b/docs/sources/reference/api/docker_remote_api_v1.14.md @@ -601,7 +601,7 @@ Status Codes: `STREAM_TYPE` can be: - 0: stdin (will be written on stdout) -- 1: stdou +- 1: stdout - 2: stderr `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of @@ -1068,7 +1068,7 @@ Query Parameters: - **q** – suppress verbose build output - **nocache** – do not use the cache when building the image - **rm** - remove intermediate containers after a successful build (default behavior) -- **forcerm - always remove intermediate containers (includes rm) +- **forcerm** - always remove intermediate containers (includes rm) Request Headers: @@ -1202,7 +1202,7 @@ Create a new image from a container's changes **Example request**: - POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 + POST /commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 Content-Type: application/json { @@ -1250,7 +1250,7 @@ Query Parameters: - **container** – source container - **repo** – repository - **tag** – tag -- **m** – commit message +- **comment** – commit message - **author** – author (e.g., "John Hannibal Smith <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") diff 
--git a/docs/sources/reference/api/docker_remote_api_v1.15.md b/docs/sources/reference/api/docker_remote_api_v1.15.md index e23fa0ff30..ae265653a3 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.15.md +++ b/docs/sources/reference/api/docker_remote_api_v1.15.md @@ -117,7 +117,6 @@ Create a container "AttachStdin":false, "AttachStdout":true, "AttachStderr":true, - "PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, @@ -125,16 +124,34 @@ Create a container "Cmd":[ "date" ], + "Entrypoint": "", "Image":"base", "Volumes":{ "/tmp": {} }, "WorkingDir":"", "NetworkDisabled": false, + "MacAddress":"12:34:56:78:9a:bc", "ExposedPorts":{ "22/tcp": {} }, - "RestartPolicy": { "Name": "always" } + "SecurityOpts": [""], + "HostConfig": { + "Binds":["/tmp:/tmp"], + "Links":["redis3:redis"], + "LxcConf":{"lxc.utsname":"docker"}, + "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts":false, + "Privileged":false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [] + } } **Example response**: @@ -143,21 +160,78 @@ Create a container Content-Type: application/json { - "Id":"e90e34656806" + "Id":"f91ddc4b01e079c4481a8340bbbeca4dbd33d6e4a10662e499f8eacbb5bf252b" "Warnings":[] } Json Parameters: -- **RestartPolicy** – The behavior to apply when the container exits. The - value is an object with a `Name` property of either `"always"` to - always restart or `"on-failure"` to restart only when the container - exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` - controls the number of times to retry before giving up. - The default is not to restart. (optional) +- **Hostname** - A string value containing the desired hostname to use for the + container. 
+- **Domainname** - A string value containing the desired domain name to use + for the container. +- **User** - A string value containg the user to use inside the container. +- **Memory** - Memory limit in bytes. +- **MemorySwap**- Total memory usage (memory + swap); set `-1` to disable swap. +- **CpuShares** - An integer value containing the CPU Shares for container + (ie. the relative weight vs othercontainers). + **CpuSet** - String value containg the cgroups Cpuset to use. +- **AttachStdin** - Boolean value, attaches to stdin. +- **AttachStdout** - Boolean value, attaches to stdout. +- **AttachStderr** - Boolean value, attaches to stderr. +- **Tty** - Boolean value, Attach standard streams to a tty, including stdin if it is not closed. +- **OpenStdin** - Boolean value, opens stdin, +- **StdinOnce** - Boolean value, close stdin after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `VAR=value` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entrypoint for the container a a string or an array + of strings +- **Image** - String value containing the image name to use for the container - **Volumes** – An object mapping mountpoint paths (strings) inside the container to empty objects. -- **config** – the container's configuration +- **WorkingDir** - A string value containing the working dir for commands to + run in. +- **NetworkDisabled** - Boolean value, when true disables neworking for the + container +- **ExposedPorts** - An object mapping ports to an empty object in the form of: + `"ExposedPorts": { "/: {}" }` +- **SecurityOpts**: A list of string values to customize labels for MLS + systems, such as SELinux. +- **HostConfig** + - **Binds** – A list of volume bindings for this container. 
Each volume + binding is a string of the form `container_path` (to create a new + volume for the container), `host_path:container_path` (to bind-mount + a host path into the container), or `host_path:container_path:ro` + (to make the bind-mount read-only inside the container). + - **Links** - A list of links for the container. Each link entry should be of + of the form "container_name:alias". + - **LxcConf** - LXC specific configurations. These configurations will only + work when using the `lxc` execution driver. + - **PortBindings** - A map of exposed container ports and the host port they + should map to. It should be specified in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. + - **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. + - **Privileged** - Gives the container full access to the host. Specified as + a boolean value. + - **Dns** - A list of dns servers for the container to use. + - **DnsSearch** - A list of DNS search domains + - **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` + - **CapAdd** - A list of kernel capabilties to add to the container. + - **Capdrop** - A list of kernel capabilties to drop from the container. + - **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) + - **NetworkMode** - Sets the networking mode for the container. 
Supported + values are: `bridge`, `host`, and `container:` + - **Devices** - A list of devices to add to the container specified in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` Query Parameters: @@ -399,7 +473,7 @@ Status Codes: `GET /containers/(id)/resize?h=&w=` -Resize the TTY of container `id` +Resize the TTY of container `id` **Example request**: @@ -436,9 +510,13 @@ Start the container `id` "PublishAllPorts":false, "Privileged":false, "Dns": ["8.8.8.8"], + "DnsSearch": [""], "VolumesFrom": ["parent", "other:ro"], "CapAdd": ["NET_ADMIN"], - "CapDrop": ["MKNOD"] + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [] } **Example response**: @@ -452,7 +530,35 @@ Json Parameters: volume for the container), `host_path:container_path` (to bind-mount a host path into the container), or `host_path:container_path:ro` (to make the bind-mount read-only inside the container). -- **hostConfig** – the container's host configuration (optional) +- **Links** - A list of links for the container. Each link entry should be of + of the form "container_name:alias". +- **LxcConf** - LXC specific configurations. These configurations will only + work when using the `lxc` execution driver. +- **PortBindings** - A map of exposed container ports and the host port they + should map to. It should be specified in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. +- **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. +- **Privileged** - Gives the container full access to the host. Specified as + a boolean value. +- **Dns** - A list of dns servers for the container to use. +- **DnsSearch** - A list of DNS search domains +- **VolumesFrom** - A list of volumes to inherit from another container. 
+ Specified in the form `[:]` +- **CapAdd** - A list of kernel capabilties to add to the container. +- **Capdrop** - A list of kernel capabilties to drop from the container. +- **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) +- **NetworkMode** - Sets the networking mode for the container. Supported + values are: `bridge`, `host`, and `container:` +- **Devices** - A list of devices to add to the container specified in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` Status Codes: @@ -635,7 +741,7 @@ Status Codes: `STREAM_TYPE` can be: - 0: stdin (will be written on stdout) -- 1: stdou +- 1: stdout - 2: stderr `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of @@ -807,7 +913,8 @@ Create an image, either by pulling it from the registry or by importing it Query Parameters: - **fromImage** – name of the image to pull -- **fromSrc** – source to import, - means stdin +- **fromSrc** – source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. 
- **repo** – repository - **tag** – tag - **registry** – the registry to pull from @@ -1103,7 +1210,7 @@ Query Parameters: - **q** – suppress verbose build output - **nocache** – do not use the cache when building the image - **rm** - remove intermediate containers after a successful build (default behavior) -- **forcerm - always remove intermediate containers (includes rm) +- **forcerm** - always remove intermediate containers (includes rm) Request Headers: @@ -1237,7 +1344,7 @@ Create a new image from a container's changes **Example request**: - POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 + POST /commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 Content-Type: application/json { @@ -1285,7 +1392,7 @@ Query Parameters: - **container** – source container - **repo** – repository - **tag** – tag -- **m** – commit message +- **comment** – commit message - **author** – author (e.g., "John Hannibal Smith <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") @@ -1455,7 +1562,6 @@ Sets up an exec instance in a running container `id` "Cmd":[ "date" ], - "Container":"e90e34656806", } **Example response**: @@ -1469,7 +1575,12 @@ Sets up an exec instance in a running container `id` Json Parameters: -- **execConfig** ? exec configuration. +- **AttachStdin** - Boolean value, attaches to stdin of the exec command. +- **AttachStdout** - Boolean value, attaches to stdout of the exec command. +- **AttachStderr** - Boolean value, attaches to stderr of the exec command. +- **Tty** - Boolean value to allocate a pseudo-TTY +- **Cmd** - Command to run specified as a string or an array of strings. + Status Codes: @@ -1480,8 +1591,9 @@ Status Codes: `POST /exec/(id)/start` -Starts a previously set up exec instance `id`. If `detach` is true, this API returns after -starting the `exec` command. Otherwise, this API sets up an interactive session with the `exec` command. +Starts a previously set up exec instance `id`. 
If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. **Example request**: @@ -1502,7 +1614,8 @@ starting the `exec` command. Otherwise, this API sets up an interactive session Json Parameters: -- **execConfig** ? exec configuration. +- **Detach** - Detach from the exec command +- **Tty** - Boolean value to allocate a pseudo-TTY Status Codes: diff --git a/docs/sources/reference/api/docker_remote_api_v1.16.md b/docs/sources/reference/api/docker_remote_api_v1.16.md new file mode 100644 index 0000000000..72f5519e1c --- /dev/null +++ b/docs/sources/reference/api/docker_remote_api_v1.16.md @@ -0,0 +1,1749 @@ +page_title: Remote API v1.16 +page_description: API Documentation for Docker +page_keywords: API, Docker, rcli, REST, documentation + +# Docker Remote API v1.16 + +## 1. Brief introduction + + - The Remote API has replaced `rcli`. + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket]( + /articles/basics/#bind-docker-to-another-hostport-or-a-unix-socket). + - The API tends to be REST, but for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `STDOUT`, + `STDIN` and `STDERR`. + +# 2. 
Endpoints + +## 2.1 Containers + +### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Image": "base:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "9cd87474be90", + "Image": "base:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "3176a2479c92", + "Image": "base:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Image": "base:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + } + ] + +Query Parameters: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. 
+- **size** – 1/True/true or 0/False/false, Show the containers + sizes + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname":"", + "Domainname": "", + "User":"", + "Memory":0, + "MemorySwap":0, + "CpuShares": 512, + "Cpuset": "0,1", + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "date" + ], + "Entrypoint": "", + "Image":"base", + "Volumes":{ + "/tmp": {} + }, + "WorkingDir":"", + "NetworkDisabled": false, + "MacAddress":"12:34:56:78:9a:bc", + "ExposedPorts":{ + "22/tcp": {} + }, + "SecurityOpts": [""], + "HostConfig": { + "Binds":["/tmp:/tmp"], + "Links":["redis3:redis"], + "LxcConf":{"lxc.utsname":"docker"}, + "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts":false, + "Privileged":false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [] + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806" + "Warnings":[] + } + +Json Parameters: + +- **Hostname** - A string value containing the desired hostname to use for the + container. +- **Domainname** - A string value containing the desired domain name to use + for the container. +- **User** - A string value containg the user to use inside the container. +- **Memory** - Memory limit in bytes. +- **MemorySwap**- Total memory usage (memory + swap); set `-1` to disable swap. +- **CpuShares** - An integer value containing the CPU Shares for container + (ie. the relative weight vs othercontainers). 
+ **CpuSet** - String value containg the cgroups Cpuset to use. +- **AttachStdin** - Boolean value, attaches to stdin. +- **AttachStdout** - Boolean value, attaches to stdout. +- **AttachStderr** - Boolean value, attaches to stderr. +- **Tty** - Boolean value, Attach standard streams to a tty, including stdin if it is not closed. +- **OpenStdin** - Boolean value, opens stdin, +- **StdinOnce** - Boolean value, close stdin after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `VAR=value` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entrypoint for the container a a string or an array + of strings +- **Image** - String value containing the image name to use for the container +- **Volumes** – An object mapping mountpoint paths (strings) inside the + container to empty objects. +- **WorkingDir** - A string value containing the working dir for commands to + run in. +- **NetworkDisabled** - Boolean value, when true disables neworking for the + container +- **ExposedPorts** - An object mapping ports to an empty object in the form of: + `"ExposedPorts": { "/: {}" }` +- **SecurityOpts**: A list of string values to customize labels for MLS + systems, such as SELinux. +- **HostConfig** + - **Binds** – A list of volume bindings for this container. Each volume + binding is a string of the form `container_path` (to create a new + volume for the container), `host_path:container_path` (to bind-mount + a host path into the container), or `host_path:container_path:ro` + (to make the bind-mount read-only inside the container). + - **Links** - A list of links for the container. Each link entry should be of + of the form "container_name:alias". + - **LxcConf** - LXC specific configurations. These configurations will only + work when using the `lxc` execution driver. + - **PortBindings** - A map of exposed container ports and the host port they + should map to. 
It should be specified in the form
+      `{ /: [{ "HostPort": "" }] }`
+      Take note that `port` is specified as a string and not an integer value.
+    - **PublishAllPorts** - Allocates a random host port for all of a container's
+      exposed ports. Specified as a boolean value.
+    - **Privileged** - Gives the container full access to the host. Specified as
+      a boolean value.
+    - **Dns** - A list of DNS servers for the container to use.
+    - **DnsSearch** - A list of DNS search domains
+    - **VolumesFrom** - A list of volumes to inherit from another container.
+      Specified in the form `[:]`
+    - **CapAdd** - A list of kernel capabilities to add to the container.
+    - **CapDrop** - A list of kernel capabilities to drop from the container.
+    - **RestartPolicy** – The behavior to apply when the container exits. The
+      value is an object with a `Name` property of either `"always"` to
+      always restart or `"on-failure"` to restart only when the container
+      exit code is non-zero. If `on-failure` is used, `MaximumRetryCount`
+      controls the number of times to retry before giving up.
+      The default is not to restart. (optional)
+    - **NetworkMode** - Sets the networking mode for the container. Supported
+      values are: `bridge`, `host`, and `container:`
+    - **Devices** - A list of devices to add to the container specified in the
+      form
+      `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+
+Query Parameters:
+
+- **name** – Assign the specified name to the container. Must
+  match `/?[a-zA-Z0-9_-]+`.
+ +Status Codes: + +- **201** – no error +- **404** – no such container +- **406** – impossible to attach (container not running) +- **500** – server error + +### Inspect a container + +`GET /containers/(id)/json` + +Return low-level information on the container `id` + + +**Example request**: + + GET /containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Created": "2013-05-07T14:51:42.041847+02:00", + "Path": "date", + "Args": [], + "Config": { + "Hostname": "4fa6e0f0c678", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Dns": null, + "Image": "base", + "Volumes": {}, + "VolumesFrom": "", + "WorkingDir":"" + + }, + "State": { + "Running": false, + "Pid": 0, + "ExitCode": 0, + "StartedAt": "2013-05-07T14:51:42.087658+02:01360", + "Ghost": false + }, + "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "NetworkSettings": { + "IpAddress": "", + "IpPrefixLen": 0, + "Gateway": "", + "Bridge": "", + "PortMapping": null + }, + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", + "ResolvConfPath": "/etc/resolv.conf", + "Volumes": {}, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "LxcConf": [], + "Privileged": false, + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "49153" + } + ] + }, + "Links": ["/name:alias"], + "PublishAllPorts": false, + "CapAdd: ["NET_ADMIN"], + "CapDrop: ["MKNOD"] + } + } + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### List processes running inside a container + +`GET /containers/(id)/top` + +List processes running inside the container `id` + +**Example request**: + + 
GET /containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles":[ + "USER", + "PID", + "%CPU", + "%MEM", + "VSZ", + "RSS", + "TTY", + "STAT", + "START", + "TIME", + "COMMAND" + ], + "Processes":[ + ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], + ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] + ] + } + +Query Parameters: + +- **ps_args** – ps arguments to use (e.g., aux) + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Get container logs + +`GET /containers/(id)/logs` + +Get stdout and stderr logs from the container ``id`` + +**Example request**: + + GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + +Query Parameters: + +- **follow** – 1/True/true or 0/False/false, return stream. Default false +- **stdout** – 1/True/true or 0/False/false, show stdout log. Default false +- **stderr** – 1/True/true or 0/False/false, show stderr log. Default false +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default false +- **tail** – Output specified number of lines at the end of logs: `all` or ``. 
Default all + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Inspect changes on a container's filesystem + +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path":"/dev", + "Kind":0 + }, + { + "Path":"/dev/kmsg", + "Kind":1 + }, + { + "Path":"/test", + "Kind":1 + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Export a container + +`GET /containers/(id)/export` + +Export the contents of container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Resize a container TTY + +`POST /containers/(id)/resize?h=&w=` + +Resize the TTY for container with `id`. The container must be restarted for the resize to take effect. 
+ +**Example request**: + + POST /containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +Status Codes: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +### Start a container + +`POST /containers/(id)/start` + +Start the container `id` + +**Example request**: + + POST /containers/(id)/start HTTP/1.1 + Content-Type: application/json + +**Example response**: + + HTTP/1.1 204 No Content + +Json Parameters: + +Status Codes: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +### Stop a container + +`POST /containers/(id)/stop` + +Stop the container `id` + +**Example request**: + + POST /containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +### Restart a container + +`POST /containers/(id)/restart` + +Restart the container `id` + +**Example request**: + + POST /containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Kill a container + +`POST /containers/(id)/kill` + +Kill the container `id` + +**Example request**: + + POST /containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters + +- **signal** - Signal to send to the container: integer or string like "SIGINT". + When not set, SIGKILL is assumed and the call will waits for the container to exit. 
+ +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Pause a container + +`POST /containers/(id)/pause` + +Pause the container `id` + +**Example request**: + + POST /containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Unpause a container + +`POST /containers/(id)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Attach to a container + +`POST /containers/(id)/attach` + +Attach to the container `id` + +**Example request**: + + POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + + **Stream details**: + + When using the TTY setting is enabled in + [`POST /containers/create` + ](../docker_remote_api_v1.9/#post--containers-create "POST /containers/create"), + the stream is the raw data from the process PTY and client's stdin. 
+ When the TTY is disabled, then the stream is multiplexed to separate + stdout and stderr. + + The format is a **Header** and a **Payload** (frame). + + **HEADER** + + The header will contain the information on which stream write the + stream (stdout or stderr). It also contain the size of the + associated frame encoded on the last 4 bytes (uint32). + + It is encoded on the first 8 bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + + `STREAM_TYPE` can be: + +- 0: stdin (will be written on stdout) +- 1: stdout +- 2: stderr + + `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of + the uint32 size encoded as big endian. + + **PAYLOAD** + + The payload is the raw stream. + + **IMPLEMENTATION** + + The simplest way to implement the Attach protocol is the following: + + 1. Read 8 bytes + 2. chose stdout or stderr depending on the first byte + 3. Extract the frame size from the last 4 byets + 4. Read the extracted size and output it on the correct output + 5. Goto 1 + +### Wait a container + +`POST /containers/(id)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode":0} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Remove a container + +`DELETE /containers/(id)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default false +- **force** - 1/True/true or 0/False/false, Kill then remove the container. 
+ Default false + +Status Codes: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Copy files or folders from a container + +`POST /containers/(id)/copy` + +Copy files or folders of container `id` + +**Example request**: + + POST /containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource":"test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +## 2.2 Images + +### List Images + +`GET /images/json` + +**Example request**: + + GET /images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275 + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135 + } + ] + + +Query Parameters: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a json encoded value of the filters (a map[string][]string) to process on the images list. + +### Create an image + +`POST /images/create` + +Create an image, either by pulling it from the registry or by importing it + +**Example request**: + + POST /images/create?fromImage=base HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Pulling..."} + {"status":"Pulling", "progress":"1 B/ 100 B", "progressDetail":{"current":1, "total":100}} + {"error":"Invalid..."} + ... 
+ + When using this endpoint to pull an image from the registry, the + `X-Registry-Auth` header can be used to include + a base64-encoded AuthConfig object. + +Query Parameters: + +- **fromImage** – name of the image to pull +- **fromSrc** – source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. +- **repo** – repository +- **tag** – tag +- **registry** – the registry to pull from + + Request Headers: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +Status Codes: + +- **200** – no error +- **500** – server error + + + +### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /images/base/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Created":"2013-03-23T22:24:18.818426-07:00", + "Container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "ContainerConfig": + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":false, + "AttachStderr":false, + "PortSpecs":null, + "Tty":true, + "OpenStdin":true, + "StdinOnce":false, + "Env":null, + "Cmd": ["/bin/bash"], + "Dns":null, + "Image":"base", + "Volumes":null, + "VolumesFrom":"", + "WorkingDir":"" + }, + "Id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Parent":"27cf784147099545", + "Size": 6824592 + } + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /images/base/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id":"b750fe79269d", + "Created":1364102658, + "CreatedBy":"/bin/bash" + }, + { + "Id":"27cf78414709", + "Created":1364068391, + "CreatedBy":"" + } + ] + +Status 
Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Pushing..."} + {"status":"Pushing", "progress":"1/? (n/a)", "progressDetail":{"current":1}}} + {"error":"Invalid..."} + ... + + If you wish to push an image on to a private registry, that image must already have been tagged + into a repository which references that registry host name and port. This repository name should + then be used in the URL. This mirrors the flow of the CLI. + +**Example request**: + + POST /images/registry.acme.com:5000/test/push HTTP/1.1 + + +Query Parameters: + +- **tag** – the tag to associate with the image on the registry, optional + +Request Headers: + +- **X-Registry-Auth** – include a base64-encoded AuthConfig + object. 
+ +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 OK + +Query Parameters: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +Status Codes: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged":"3e2f21a89f"}, + {"Deleted":"3e2f21a89f"}, + {"Deleted":"53b4f83ac9"} + ] + +Query Parameters: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +Status Codes: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. 
+ +**Example request**: + + GET /images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +Query Parameters: + +- **term** – term to search + +Status Codes: + +- **200** – no error +- **500** – server error + +## 2.3 Misc + +### Build an image from Dockerfile via stdin + +`POST /build` + +Build an image from Dockerfile via stdin + +**Example request**: + + POST /build HTTP/1.1 + + {{ TAR STREAM }} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream":"Step 1..."} + {"stream":"..."} + {"error":"Error...", "errorDetail":{"code": 123, "message": "Error..."}} + + The stream must be a tar archive compressed with one of the + following algorithms: identity (no compression), gzip, bzip2, xz. + + The archive must include a file called `Dockerfile` + at its root. It may include any number of other files, + which will be accessible in the build context (See the [*ADD build + command*](/reference/builder/#dockerbuilder)). + +Query Parameters: + +- **t** – repository name (and optionally a tag) to be applied to + the resulting image in case of success +- **q** – suppress verbose build output +- **nocache** – do not use the cache when building the image +- **pull** - attempt to pull the image even if an older image exists locally +- **rm** - remove intermediate containers after a successful build (default behavior) +- **forcerm** - always remove intermediate containers (includes rm) + + Request Headers: + +- **Content-type** – should be set to `"application/tar"`. 
+- **X-Registry-Config** – base64-encoded ConfigFile objec + +Status Codes: + +- **200** – no error +- **500** – server error + +### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /auth HTTP/1.1 + Content-Type: application/json + + { + "username":"hannibal", + "password:"xxxx", + "email":"hannibal@a-team.com", + "serveraddress":"https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +Status Codes: + +- **200** – no error +- **204** – no error +- **500** – server error + +### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers":11, + "Images":16, + "Driver":"btrfs", + "ExecutionDriver":"native-0.1", + "KernelVersion":"3.12.0-1-amd64" + "NCPU":1, + "MemTotal":2099236864, + "Name":"prod-server-42", + "ID":"7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "Debug":false, + "NFd": 11, + "NGoroutines":21, + "NEventsListener":0, + "InitPath":"/usr/bin/docker", + "IndexServerAddress":["https://index.docker.io/v1/"], + "MemoryLimit":true, + "SwapLimit":false, + "IPv4Forwarding":true, + "Labels":["storage=ssd"] + } + +Status Codes: + +- **200** – no error +- **500** – server error + +### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "ApiVersion":"1.12", + "Version":"0.2.2", + "GitCommit":"5a2a5cc+CHANGES", + "GoVersion":"go1.0.3" + } + +Status Codes: + +- **200** – no error +- **500** – server error + +### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +Status Codes: + +- 
**200** - no error +- **500** - server error + +### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname":"", + "Domainname": "", + "User":"", + "Memory":0, + "MemorySwap":0, + "CpuShares": 512, + "Cpuset": "0,1", + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "date" + ], + "Volumes":{ + "/tmp": {} + }, + "WorkingDir":"", + "NetworkDisabled": false, + "ExposedPorts":{ + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/vnd.docker.raw-stream + + {"Id":"596069db4bf5"} + +Json Parameters: + +- **config** - the container's configuration + +Query Parameters: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") + +Status Codes: + +- **201** – no error +- **404** – no such container +- **500** – server error + +### Monitor Docker's events + +`GET /events` + +Get container events from docker, either in real time via streaming, or via +polling (using since). 
+ +Docker containers will report the following events: + + create, destroy, die, export, kill, pause, restart, start, stop, unpause + +and Docker images will report: + + untag, delete + +**Example request**: + + GET /events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924} + {"status":"start","id":"dfdf82bd3881","from":"base:latest","time":1374067924} + {"status":"stop","id":"dfdf82bd3881","from":"base:latest","time":1374067966} + {"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970} + +Query Parameters: + +- **since** – timestamp used for polling +- **until** – timestamp used for polling +- **filters** – a json encoded value of the filters (a map[string][]string) to process on the event list. + +Status Codes: + +- **200** – no error +- **500** – server error + +### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only tha +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +Status Codes: + +- **200** – no error +- **500** – server error + +### Get a tarball containing all images. + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. 
+ubuntu:latest), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +Status Codes: + +- **200** – no error +- **500** – server error + +### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into the docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /images/load + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +Status Codes: + +- **200** – no error +- **500** – server error + +### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing three files: + +1. `VERSION`: currently `1.0` - the file format version +2. `json`: detailed layer information, similar to `docker inspect layer_id` +3. `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file will contain `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, there will also be a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. 
+ +``` +{"hello-world": + {"latest":"565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +### Exec Create + +`POST /containers/(id)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "Tty":false, + "Cmd":[ + "date" + ], + } + +**Example response**: + + HTTP/1.1 201 OK + Content-Type: application/json + + { + "Id":"f90e34656806" + } + +Json Parameters: + +- **AttachStdin** - Boolean value, attaches to stdin of the exec command. +- **AttachStdout** - Boolean value, attaches to stdout of the exec command. +- **AttachStderr** - Boolean value, attaches to stderr of the exec command. +- **Tty** - Boolean value to allocate a pseudo-TTY +- **Cmd** - Command to run specified as a string or an array of strings. + + +Status Codes: + +- **201** – no error +- **404** – no such container + +### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up exec instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach":false, + "Tty":false, + } + +**Example response**: + + HTTP/1.1 201 OK + Content-Type: application/json + + {{ STREAM }} + +Json Parameters: + +- **Detach** - Detach from the exec command +- **Tty** - Boolean value to allocate a pseudo-TTY + +Status Codes: + +- **201** – no error +- **404** – no such exec instance + + **Stream details**: + Similar to the stream behavior of `POST /container/(id)/attach` API + +### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the tty session used by the exec command `id`. +This API is valid only if `tty` was specified as part of creating and starting the exec command. 
+ +**Example request**: + + POST /exec/e90e34656806/resize HTTP/1.1 + Content-Type: plain/text + +**Example response**: + + HTTP/1.1 201 OK + Content-Type: plain/text + +Query Parameters: + +- **h** – height of tty session +- **w** – width + +Status Codes: + +- **201** – no error +- **404** – no such exec instance + +### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the exec command `id`. + +**Example request**: + + GET /exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: plain/text + + { + "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39", + "Running" : false, + "ExitCode" : 2, + "ProcessConfig" : { + "privileged" : false, + "user" : "", + "tty" : false, + "entrypoint" : "sh", + "arguments" : [ + "-c", + "exit 2" + ] + }, + "OpenStdin" : false, + "OpenStderr" : false, + "OpenStdout" : false, + "Container" : { + "State" : { + "Running" : true, + "Paused" : false, + "Restarting" : false, + "OOMKilled" : false, + "Pid" : 3650, + "ExitCode" : 0, + "Error" : "", + "StartedAt" : "2014-11-17T22:26:03.717657531Z", + "FinishedAt" : "0001-01-01T00:00:00Z" + }, + "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c", + "Created" : "2014-11-17T22:26:03.626304998Z", + "Path" : "date", + "Args" : [], + "Config" : { + "Hostname" : "8f177a186b97", + "Domainname" : "", + "User" : "", + "Memory" : 0, + "MemorySwap" : 0, + "CpuShares" : 0, + "Cpuset" : "", + "AttachStdin" : false, + "AttachStdout" : false, + "AttachStderr" : false, + "PortSpecs" : null, + "ExposedPorts" : null, + "Tty" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], + "Cmd" : [ + "date" + ], + "Image" : "ubuntu", + "Volumes" : null, + "WorkingDir" : "", + "Entrypoint" : null, + "NetworkDisabled" : false, + "MacAddress" : "", + "OnBuild" : null, + "SecurityOpt" : 
null + }, + "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5", + "NetworkSettings" : { + "IPAddress" : "172.17.0.2", + "IPPrefixLen" : 16, + "MacAddress" : "02:42:ac:11:00:02", + "Gateway" : "172.17.42.1", + "Bridge" : "docker0", + "PortMapping" : null, + "Ports" : {} + }, + "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf", + "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname", + "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts", + "Name" : "/test", + "Driver" : "aufs", + "ExecDriver" : "native-0.2", + "MountLabel" : "", + "ProcessLabel" : "", + "AppArmorProfile" : "", + "RestartCount" : 0, + "Volumes" : {}, + "VolumesRW" : {} + } + } + +Status Codes: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +# 3. Going further + +## 3.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it + - Then retry to create the container + +- Start the container + +- If you are not in detached mode: +- Attach to the container, using logs=1 (to have stdout and + stderr from the container's start) and stream=1 + +- If in detached mode or only stdin is attached: +- Display the container's id + +## 3.2 Hijacking + +In this version of the API, /attach, uses hijacking to transport stdin, +stdout and stderr on the same socket. This might change in the future. + +## 3.3 CORS Requests + +To enable cross origin requests to the remote api add the flag +"--api-enable-cors" when running docker in daemon mode. 
+ + $ docker -d -H="192.168.1.9:2375" --api-enable-cors diff --git a/docs/sources/reference/api/docker_remote_api_v1.2.md b/docs/sources/reference/api/docker_remote_api_v1.2.md index 8da486cf94..4a518aea90 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.2.md +++ b/docs/sources/reference/api/docker_remote_api_v1.2.md @@ -979,4 +979,4 @@ To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode. > docker -d -H="[tcp://192.168.1.9:2375](tcp://192.168.1.9:2375)" -> –api-enable-cors +> -api-enable-cors diff --git a/docs/sources/reference/api/docker_remote_api_v1.3.md b/docs/sources/reference/api/docker_remote_api_v1.3.md index 087262d7c8..7ae7462bf9 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.3.md +++ b/docs/sources/reference/api/docker_remote_api_v1.3.md @@ -325,7 +325,7 @@ Start the container `id` **Example response**: - HTTP/1.1 204 No Conten + HTTP/1.1 204 No Content Content-Type: text/plain Json Parameters: @@ -1064,4 +1064,4 @@ stdout and stderr on the same socket. This might change in the future. To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode. 
-> docker -d -H="192.168.1.9:2375" –api-enable-cors +> docker -d -H="192.168.1.9:2375" -api-enable-cors diff --git a/docs/sources/reference/api/docker_remote_api_v1.4.md b/docs/sources/reference/api/docker_remote_api_v1.4.md index bfd739f8df..5c0a015cc1 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.4.md +++ b/docs/sources/reference/api/docker_remote_api_v1.4.md @@ -341,7 +341,7 @@ Start the container `id` **Example response**: - HTTP/1.1 204 No Conten + HTTP/1.1 204 No Content Content-Type: text/plain Json Parameters: diff --git a/docs/sources/reference/api/docker_remote_api_v1.5.md b/docs/sources/reference/api/docker_remote_api_v1.5.md index 1c6b15df70..56245c303d 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.5.md +++ b/docs/sources/reference/api/docker_remote_api_v1.5.md @@ -338,7 +338,7 @@ Start the container `id` **Example response**: - HTTP/1.1 204 No Conten + HTTP/1.1 204 No Content Content-Type: text/plain Json Parameters: diff --git a/docs/sources/reference/api/docker_remote_api_v1.6.md b/docs/sources/reference/api/docker_remote_api_v1.6.md index 39d87f38f6..9055b24712 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.6.md +++ b/docs/sources/reference/api/docker_remote_api_v1.6.md @@ -393,7 +393,7 @@ Start the container `id` **Example response**: - HTTP/1.1 204 No Conten + HTTP/1.1 204 No Content Content-Type: text/plain Json Parameters: @@ -545,7 +545,7 @@ Status Codes: `STREAM_TYPE` can be: - 0: stdin (will be written on stdout) -- 1: stdou +- 1: stdout - 2: stderr `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of diff --git a/docs/sources/reference/api/docker_remote_api_v1.7.md b/docs/sources/reference/api/docker_remote_api_v1.7.md index 6e5387a80e..2f07b2b698 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.7.md +++ b/docs/sources/reference/api/docker_remote_api_v1.7.md @@ -348,7 +348,7 @@ Start the container `id` **Example response**: - HTTP/1.1 204 No Conten + HTTP/1.1 204 No Content Content-Type: 
text/plain Json Parameters: @@ -490,7 +490,7 @@ Status Codes: `STREAM_TYPE` can be: - 0: stdin (will be written on stdout) -- 1: stdou +- 1: stdout - 2: stderr `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of diff --git a/docs/sources/reference/api/docker_remote_api_v1.8.md b/docs/sources/reference/api/docker_remote_api_v1.8.md index 36c92a4aee..faaa71397e 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.8.md +++ b/docs/sources/reference/api/docker_remote_api_v1.8.md @@ -385,7 +385,7 @@ Start the container `id` **Example response**: - HTTP/1.1 204 No Conten + HTTP/1.1 204 No Content Content-Type: text/plain Json Parameters: @@ -538,7 +538,7 @@ Status Codes: `STREAM_TYPE` can be: - 0: stdin (will be written on stdout) -- 1: stdou +- 1: stdout - 2: stderr `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of diff --git a/docs/sources/reference/api/docker_remote_api_v1.9.md b/docs/sources/reference/api/docker_remote_api_v1.9.md index 7cac380109..4c7301ee18 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.9.md +++ b/docs/sources/reference/api/docker_remote_api_v1.9.md @@ -364,28 +364,6 @@ Status Codes: - **404** – no such container - **500** – server error -### Resize a container TTY - -`GET /containers/(id)/resize?h=&w=` - -Resize the TTY of container `id` - -**Example request**: - - GET /containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 - -**Example response**: - - HTTP/1.1 200 OK - Content-Length: 0 - Content-Type: text/plain; charset=utf-8 - -Status Codes: - -- **200** – no error -- **404** – No such container -- **500** – bad file descriptor - ### Start a container `POST /containers/(id)/start` @@ -407,7 +385,7 @@ Start the container `id` **Example response**: - HTTP/1.1 204 No Conten + HTTP/1.1 204 No Content Content-Type: text/plain Json Parameters: @@ -564,7 +542,7 @@ Status Codes: `STREAM_TYPE` can be: - 0: stdin (will be written on stdout) -- 1: stdou +- 1: stdout - 2: stderr `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of diff --git 
a/docs/sources/reference/api/hub_registry_spec.md b/docs/sources/reference/api/hub_registry_spec.md index 853eda4aee..26d4ffca30 100644 --- a/docs/sources/reference/api/hub_registry_spec.md +++ b/docs/sources/reference/api/hub_registry_spec.md @@ -458,7 +458,7 @@ on a private network without having to rely on an external entity controlled by Docker Inc. In this case, the registry will be launched in a special mode -(–standalone? ne? –no-index?). In this mode, the only thing which changes is +(-standalone? ne? -no-index?). In this mode, the only thing which changes is that Registry will never contact the Docker Hub to verify a token. It will be the Registry owner responsibility to authenticate the user who pushes (or even pulls) an image using any mechanism (HTTP auth, IP based, @@ -579,13 +579,19 @@ The following naming restrictions apply: ### Get all tags: -GET /v1/repositories///tags + GET /v1/repositories///tags **Return**: HTTP 200 - { "latest": - "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f", - “0.1.1”: - “b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087” } + [ + { + "layer": "9e89cc6f", + "name": "latest" + }, + { + "layer": "b486531f", + "name": "0.1.1", + } + ] **4.3.2 Read the content of a tag (resolve the image id):** diff --git a/docs/sources/reference/api/registry_api.md b/docs/sources/reference/api/registry_api.md index d6130bf767..43a463cd5e 100644 --- a/docs/sources/reference/api/registry_api.md +++ b/docs/sources/reference/api/registry_api.md @@ -14,9 +14,9 @@ page_keywords: API, Docker, index, registry, REST, documentation service using tokens - It supports different storage backends (S3, cloud files, local FS) - It doesn't have a local database - - It will be open-sourced at some point + - The registry is open source: [Docker Registry](https://github.com/docker/docker-registry) -We expect that there will be multiple registries out there. To help to + We expect that there will be multiple registries out there. 
To help to grasp the context, here are some examples of registries: - **sponsor registry**: such a registry is provided by a third-party @@ -501,6 +501,47 @@ Status Codes: - **401** – Requires authorization - **404** – Repository not found +## Search + +If you need to search the index, this is the endpoint you would use. + +`GET /v1/search` + +Search the Index given a search term. It accepts + + [GET](http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html#sec9.3) + only. + +**Example request**: + + GET /v1/search?q=search_term HTTP/1.1 + Host: index.docker.io + Accept: application/json + +**Example response**: + + HTTP/1.1 200 OK + Vary: Accept + Content-Type: application/json + + {"query":"search_term", + "num_results": 3, + "results" : [ + {"name": "ubuntu", "description": "An ubuntu image..."}, + {"name": "centos", "description": "A centos image..."}, + {"name": "fedora", "description": "A fedora image..."} + ] + } + +Query Parameters: + +- **q** – what you want to search for + +Status Codes: + +- **200** – no error +- **500** – server error + ## Status ### Status check for registry diff --git a/docs/sources/reference/api/remote_api_client_libraries.md b/docs/sources/reference/api/remote_api_client_libraries.md index 71bd2ebfc1..bff2fa30cf 100644 --- a/docs/sources/reference/api/remote_api_client_libraries.md +++ b/docs/sources/reference/api/remote_api_client_libraries.md @@ -131,7 +131,7 @@ will add the libraries here. Python docker-py - https://github.com/dotcloud/docker-py + https://github.com/docker/docker-py Active diff --git a/docs/sources/reference/builder.md b/docs/sources/reference/builder.md index 2678a87a19..adc308c9d6 100644 --- a/docs/sources/reference/builder.md +++ b/docs/sources/reference/builder.md @@ -14,11 +14,12 @@ successively. This page discusses the specifics of all the instructions you can use in your `Dockerfile`. 
To further help you write a clear, readable, maintainable `Dockerfile`, we've also written a [`Dockerfile` Best Practices -guide](/articles/dockerfile_best-practices). +guide](/articles/dockerfile_best-practices). Lastly, you can test your +Dockerfile knowledge with the [Dockerfile tutorial](/userguide/level1). ## Usage -To [*build*](../commandline/cli/#cli-build) an image from a source repository, +To [*build*](/reference/commandline/cli/#build) an image from a source repository, create a description file called `Dockerfile` at the root of your repository. This file will describe the steps to assemble the image. @@ -104,6 +105,47 @@ be treated as an argument. This allows statements like: Here is the set of instructions you can use in a `Dockerfile` for building images. +### Environment Replacement + +**Note:** prior to 1.3, `Dockerfile` environment variables were handled +similarly, in that they would be replaced as described below. However, there +was no formal definition on as to which instructions handled environment +replacement at the time. After 1.3 this behavior will be preserved and +canonical. + +Environment variables (declared with the `ENV` statement) can also be used in +certain instructions as variables to be interpreted by the `Dockerfile`. Escapes +are also handled for including variable-like syntax into a statement literally. + +Environment variables are notated in the `Dockerfile` either with +`$variable_name` or `${variable_name}`. They are treated equivalently and the +brace syntax is typically used to address issues with variable names with no +whitespace, like `${foo}_bar`. + +Escaping is possible by adding a `\` before the variable: `\$foo` or `\${foo}`, +for example, will translate to `$foo` and `${foo}` literals respectively. + +Example (parsed representation is displayed after the `#`): + + FROM busybox + ENV foo /bar + WORKDIR ${foo} # WORKDIR /bar + ADD . $foo # ADD . 
/bar + COPY \$foo /quux # COPY $foo /quux + +The instructions that handle environment variables in the `Dockerfile` are: + +* `ENV` +* `ADD` +* `COPY` +* `WORKDIR` +* `EXPOSE` +* `VOLUME` +* `USER` + +`ONBUILD` instructions are **NOT** supported for environment replacement, even +the instructions above. + ## The `.dockerignore` file If a file named `.dockerignore` exists in the source repository, then it @@ -196,9 +238,9 @@ commands using a base image that does not contain `/bin/sh`. > **Note**: > Unlike the *shell* form, the *exec* form does not invoke a command shell. > This means that normal shell processing does not happen. For example, -> `CMD [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. +> `RUN [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. > If you want shell processing then either use the *shell* form or execute -> a shell directly, for example: `CMD [ "sh", "-c", "echo", "$HOME" ]`. +> a shell directly, for example: `RUN [ "sh", "-c", "echo", "$HOME" ]`. The cache for `RUN` instructions isn't invalidated automatically during the next build. The cache for an instruction like @@ -287,19 +329,47 @@ default specified in `CMD`. The `EXPOSE` instructions informs Docker that the container will listen on the specified network ports at runtime. Docker uses this information to interconnect containers using links (see the [Docker User -Guide](/userguide/dockerlinks)). Note that `EXPOSE` only works for -inter-container links. It doesn't make ports accessible from the host. To -expose ports to the host, at runtime, -[use the `-p` flag](/userguide/dockerlinks). +Guide](/userguide/dockerlinks)) and to determine which ports to expose to the +host when [using the -P flag](/reference/run/#expose-incoming-ports). +**Note:** +`EXPOSE` doesn't define which ports can be exposed to the host or make ports +accessible from the host by default. 
To expose ports to the host, at runtime, +[use the `-p` flag](/userguide/dockerlinks) or +[the -P flag](/reference/run/#expose-incoming-ports). ## ENV ENV + ENV = ... The `ENV` instruction sets the environment variable `` to the value ``. This value will be passed to all future `RUN` instructions. This is functionally equivalent to prefixing the command with `=` +The `ENV` instruction has two forms. The first form, `ENV `, +will set a single variable to a value. The entire string after the first +space will be treated as the `` - including characters such as +spaces and quotes. + +The second form, `ENV = ...`, allows for multiple variables to +be set at one time. Notice that the second form uses the equals sign (=) +in the syntax, while the first form does not. Like command line parsing, +quotes and backslashes can be used to include spaces within values. + +For example: + + ENV myName="John Doe" myDog=Rex\ The\ Dog \ + myCat=fluffy + +and + + ENV myName John Doe + ENV myDog Rex The Dog + ENV myCat fluffy + +will yield the same net results in the final container, but the first form +does it all in one layer. + The environment variables set using `ENV` will persist when a container is run from the resulting image. You can view the values using `docker inspect`, and change them using `docker run --env =`. @@ -313,9 +383,8 @@ change them using `docker run --env =`. ADD ... -The `ADD` instruction copies new files,directories or remote file URLs to -the filesystem of the container from `` and add them to the at -path ``. +The `ADD` instruction copies new files, directories or remote file URLs from `` +and adds them to the filesystem of the container at the path ``. Multiple `` resource may be specified but if they are files or directories then they must be relative to the source directory that is @@ -334,7 +403,11 @@ destination container. All new files and directories are created with a UID and GID of 0. 
In the case where `` is a remote file URL, the destination will -have permissions of 600. +have permissions of 600. If the remote file being retrieved has an HTTP +`Last-Modified` header, the timestamp from that header will be used +to set the `mtime` on the destination file. Then, like any other file +processed during an `ADD`, `mtime` will be included in the determination +of whether or not the file has changed and the cache should be updated. > **Note**: > If you build by passing a `Dockerfile` through STDIN (`docker @@ -375,8 +448,10 @@ The copy obeys the following rules: appropriate filename can be discovered in this case (`http://example.com` will not work). -- If `` is a directory, the entire directory is copied, including - filesystem metadata. +- If `` is a directory, the entire contents of the directory are copied, + including filesystem metadata. +> **Note**: +> The directory itself is not copied, just its contents. - If `` is a *local* tar archive in a recognized compression format (identity, gzip, bzip2 or xz) then it is unpacked as a directory. Resources @@ -406,13 +481,11 @@ The copy obeys the following rules: COPY ... -The `COPY` instruction copies new files,directories or remote file URLs to -the filesystem of the container from `` and add them to the at -path ``. +The `COPY` instruction copies new files or directories from `` +and adds them to the filesystem of the container at the path ``. -Multiple `` resource may be specified but if they are files or -directories then they must be relative to the source directory that is being -built (the context of the build). +Multiple `` resource may be specified but they must be relative +to the source directory that is being built (the context of the build). Each `` may contain wildcards and matching will be done using Go's [filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. 
@@ -437,8 +510,10 @@ The copy obeys the following rules: `docker build` is to send the context directory (and subdirectories) to the docker daemon. -- If `` is a directory, the entire directory is copied, including - filesystem metadata. +- If `` is a directory, the entire contents of the directory are copied, + including filesystem metadata. +> **Note**: +> The directory itself is not copied, just its contents. - If `` is any other kind of file, it is copied individually along with its metadata. In this case, if `` ends with a trailing slash `/`, it @@ -460,43 +535,151 @@ The copy obeys the following rules: ENTRYPOINT has two forms: - `ENTRYPOINT ["executable", "param1", "param2"]` - (*exec* form, the preferred form) + (the preferred *exec* form) - `ENTRYPOINT command param1 param2` (*shell* form) -There can only be one `ENTRYPOINT` in a `Dockerfile`. If you have more -than one `ENTRYPOINT`, then only the last one in the `Dockerfile` will -have an effect. +An `ENTRYPOINT` allows you to configure a container that will run as an executable. -An `ENTRYPOINT` helps you to configure a container that you can run as -an executable. That is, when you specify an `ENTRYPOINT`, then the whole -container runs as if it was just that executable. +For example, the following will start nginx with its default content, listening +on port 80: -Unlike the behavior of the `CMD` instruction, The `ENTRYPOINT` -instruction adds an entry command that will **not** be overwritten when -arguments are passed to `docker run`. This allows arguments to be passed -to the entry point, i.e. `docker run -d` will pass the `-d` -argument to the entry point. + docker run -i -t --rm -p 80:80 nginx -You can specify parameters either in the `ENTRYPOINT` JSON array (as in -"like an exec" above), or by using a `CMD` instruction. 
Parameters in -the `ENTRYPOINT` instruction will not be overridden by the `docker run` -arguments, but parameters specified via a `CMD` instruction will be -overridden by `docker run` arguments. +Command line arguments to `docker run ` will be appended after all +elements in an *exec* form `ENTRYPOINT`, and will override all elements specified +using `CMD`. +This allows arguments to be passed to the entry point, i.e., `docker run -d` +will pass the `-d` argument to the entry point. +You can override the `ENTRYPOINT` instruction using the `docker run --entrypoint` +flag. -Like a `CMD`, you can specify a plain string for the `ENTRYPOINT` and it -will execute in `/bin/sh -c`: +The *shell* form prevents any `CMD` or `run` command line arguments from being +used, but has the disadvantage that your `ENTRYPOINT` will be started as a +subcommand of `/bin/sh -c`, which does not pass signals. +This means that the executable will not be the container's `PID 1` - and +will _not_ receive Unix signals - so your executable will not receive a +`SIGTERM` from `docker stop `. + +Only the last `ENTRYPOINT` instruction in the `Dockerfile` will have an effect. + +### Exec form ENTRYPOINT example + +You can use the *exec* form of `ENTRYPOINT` to set fairly stable default commands +and arguments and then use either form of `CMD` to set additional defaults that +are more likely to be changed. FROM ubuntu - ENTRYPOINT ls -l + ENTRYPOINT ["top", "-b"] + CMD ["-c"] -For example, that `Dockerfile`'s image will *always* take a directory as -an input and return a directory listing. 
If you wanted to make this -optional but default, you could use a `CMD` instruction: +When you run the container, you can see that `top` is the only process: - FROM ubuntu - CMD ["-l"] - ENTRYPOINT ["ls"] + $ docker run -it --rm --name test top -H + top - 08:25:00 up 7:27, 0 users, load average: 0.00, 0.01, 0.05 + Threads: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + %Cpu(s): 0.1 us, 0.1 sy, 0.0 ni, 99.7 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st + KiB Mem: 2056668 total, 1616832 used, 439836 free, 99352 buffers + KiB Swap: 1441840 total, 0 used, 1441840 free. 1324440 cached Mem + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 19744 2336 2080 R 0.0 0.1 0:00.04 top + +To examine the result further, you can use `docker exec`: + + $ docker exec -it test ps aux + USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND + root 1 2.6 0.1 19752 2352 ? Ss+ 08:24 0:00 top -b -H + root 7 0.0 0.1 15572 2164 ? R+ 08:25 0:00 ps aux + +And you can gracefully request `top` to shut down using `docker stop test`. 
+ +The following `Dockerfile` shows using the `ENTRYPOINT` to run Apache in the +foreground (i.e., as `PID 1`): + +``` +FROM debian:stable +RUN apt-get update && apt-get install -y --force-yes apache2 +EXPOSE 80 443 +VOLUME ["/var/www", "/var/log/apache2", "/etc/apache2"] +ENTRYPOINT ["/usr/sbin/apache2ctl", "-D", "FOREGROUND"] +``` + +If you need to write a starter script for a single executable, you can ensure that +the final executable receives the Unix signals by using `exec` and `gosu` +(see [the Dockerfile best practices](/articles/dockerfile_best-practices/#entrypoint) +for more details): + +```bash +#!/bin/bash +set -e + +if [ "$1" = 'postgres' ]; then + chown -R postgres "$PGDATA" + + if [ -z "$(ls -A "$PGDATA")" ]; then + gosu postgres initdb + fi + + exec gosu postgres "$@" +fi + +exec "$@" +``` + +Lastly, if you need to do some extra cleanup (or communicate with other containers) +on shutdown, or are co-ordinating more than one executable, you may need to ensure +that the `ENTRYPOINT` script receives the Unix signals, passes them on, and then +does some more work: + +``` +#!/bin/sh +# Note: I've written this using sh so it works in the busybox container too + +# USE the trap if you need to also do manual cleanup after the service is stopped, +# or need to start multiple services in the one container +trap "echo TRAPed signal" HUP INT QUIT KILL TERM + +# start service in background here +/usr/sbin/apachectl start + +echo "[hit enter key to exit] or run 'docker stop '" +read + +# stop service and clean up here +echo "stopping apache" +/usr/sbin/apachectl stop + +echo "exited $0" +``` + +If you run this image with `docker run -it --rm -p 80:80 --name test apache`, +you can then examine the container's processes with `docker exec`, or `docker top`, +and then ask the script to stop Apache: + +```bash +$ docker exec -it test ps aux +USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND +root 1 0.1 0.0 4448 692 ? 
Ss+ 00:42 0:00 /bin/sh /run.sh 123 cmd cmd2 +root 19 0.0 0.2 71304 4440 ? Ss 00:42 0:00 /usr/sbin/apache2 -k start +www-data 20 0.2 0.2 360468 6004 ? Sl 00:42 0:00 /usr/sbin/apache2 -k start +www-data 21 0.2 0.2 360468 6000 ? Sl 00:42 0:00 /usr/sbin/apache2 -k start +root 81 0.0 0.1 15572 2140 ? R+ 00:44 0:00 ps aux +$ docker top test +PID USER COMMAND +10035 root {run.sh} /bin/sh /run.sh 123 cmd cmd2 +10054 root /usr/sbin/apache2 -k start +10055 33 /usr/sbin/apache2 -k start +10056 33 /usr/sbin/apache2 -k start +$ /usr/bin/time docker stop test +test +real 0m 0.27s +user 0m 0.03s +sys 0m 0.03s +``` + +> **Note:** you can over ride the `ENTRYPOINT` setting using `--entrypoint`, +> but this can only set the binary to *exec* (no `sh -c` will be used). > **Note**: > The *exec* form is parsed as a JSON array, which means that @@ -505,13 +688,71 @@ optional but default, you could use a `CMD` instruction: > **Note**: > Unlike the *shell* form, the *exec* form does not invoke a command shell. > This means that normal shell processing does not happen. For example, -> `CMD [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. +> `ENTRYPOINT [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. > If you want shell processing then either use the *shell* form or execute -> a shell directly, for example: `CMD [ "sh", "-c", "echo", "$HOME" ]`. +> a shell directly, for example: `ENTRYPOINT [ "sh", "-c", "echo", "$HOME" ]`. +> Variables that are defined in the `Dockerfile`using `ENV`, will be substituted by +> the `Dockerfile` parser. -> **Note**: -> It is preferable to use the JSON array format for specifying -> `ENTRYPOINT` instructions. +### Shell form ENTRYPOINT example + +You can specify a plain string for the `ENTRYPOINT` and it will execute in `/bin/sh -c`. +This form will use shell processing to substitute shell environment variables, +and will ignore any `CMD` or `docker run` command line arguments. 
+To ensure that `docker stop` will signal any long running `ENTRYPOINT` executable +correctly, you need to remember to start it with `exec`: + + FROM ubuntu + ENTRYPOINT exec top -b + +When you run this image, you'll see the single `PID 1` process: + + $ docker run -it --rm --name test top + Mem: 1704520K used, 352148K free, 0K shrd, 0K buff, 140368121167873K cached + CPU: 5% usr 0% sys 0% nic 94% idle 0% io 0% irq 0% sirq + Load average: 0.08 0.03 0.05 2/98 6 + PID PPID USER STAT VSZ %VSZ %CPU COMMAND + 1 0 root R 3164 0% 0% top -b + +Which will exit cleanly on `docker stop`: + + $ /usr/bin/time docker stop test + test + real 0m 0.20s + user 0m 0.02s + sys 0m 0.04s + +If you forget to add `exec` to the beginning of your `ENTRYPOINT`: + + FROM ubuntu + ENTRYPOINT top -b + CMD --ignored-param1 + +You can then run it (giving it a name for the next step): + + $ docker run -it --name test top --ignored-param2 + Mem: 1704184K used, 352484K free, 0K shrd, 0K buff, 140621524238337K cached + CPU: 9% usr 2% sys 0% nic 88% idle 0% io 0% irq 0% sirq + Load average: 0.01 0.02 0.05 2/101 7 + PID PPID USER STAT VSZ %VSZ %CPU COMMAND + 1 0 root S 3168 0% 0% /bin/sh -c top -b cmd cmd2 + 7 1 root R 3164 0% 0% top -b + +You can see from the output of `top` that the specified `ENTRYPOINT` is not `PID 1`. + +If you then run `docker stop test`, the container will not exit cleanly - the +`stop` command will be forced to send a `SIGKILL` after the timeout: + + $ docker exec -it test ps aux + PID USER COMMAND + 1 root /bin/sh -c top -b cmd cmd2 + 7 root top -b + 8 root ps aux + $ /usr/bin/time docker stop test + test + real 0m 10.19s + user 0m 0.04s + sys 0m 0.03s ## VOLUME @@ -526,7 +767,7 @@ Docker client, refer to [*Share Directories via Volumes*](/userguide/dockervolum documentation. > **Note**: -> The list is parsed a JSON array, which means that +> The list is parsed as a JSON array, which means that > you must use double-quotes (") around words not single-quotes ('). 
## USER @@ -534,7 +775,8 @@ documentation. USER daemon The `USER` instruction sets the user name or UID to use when running the image -and for any following `RUN` directives. +and for any `RUN`, `CMD` and `ENTRYPOINT` instructions that follow it in the +`Dockerfile`. ## WORKDIR @@ -642,7 +884,7 @@ For example you might add something like this: # Install vnc, xvfb in order to create a 'fake' display and firefox RUN apt-get update && apt-get install -y x11vnc xvfb firefox - RUN mkdir /.vnc + RUN mkdir ~/.vnc # Setup a password RUN x11vnc -storepasswd 1234 ~/.vnc/passwd # Autostart firefox (might not be the best way, but it does the trick) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 6e6ccd69aa..577a4c68c0 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -11,7 +11,7 @@ or execute `docker help`: Usage: docker [OPTIONS] COMMAND [arg...] -H, --host=[]: The socket(s) to bind to in daemon mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. - A self-sufficient runtime for linux containers. + A self-sufficient runtime for Linux containers. ... @@ -69,12 +69,14 @@ expect an integer, and they can only be specified once. use '' (the empty string) to disable setting of a group -g, --graph="/var/lib/docker" Path to use as the root of the Docker runtime -H, --host=[] The socket(s) to bind to in daemon mode or connect to in client mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. 
- --icc=true Enable inter-container communication + --icc=true Allow unrestricted inter-container and Docker daemon host communication --insecure-registry=[] Enable insecure communication with specified registries (disables certificate verification for HTTPS and enables HTTP fallback) (e.g., localhost:5000 or 10.20.0.0/16) --ip=0.0.0.0 Default IP address to use when binding container ports --ip-forward=true Enable net.ipv4.ip_forward --ip-masq=true Enable IP masquerading for bridge's IP range --iptables=true Enable Docker's addition of iptables rules + -l, --log-level="info" Set the logging level + --label=[] Set key=value labels to the daemon (displayed in `docker info`) --mtu=0 Set the containers network MTU if no value is provided: default to the default route MTU or 1500 if no default route is available -p, --pidfile="/var/run/docker.pid" Path to use for daemon PID file @@ -82,7 +84,7 @@ expect an integer, and they can only be specified once. -s, --storage-driver="" Force the Docker runtime to use a specific storage driver --selinux-enabled=false Enable selinux support. SELinux does not presently support the BTRFS storage driver --storage-opt=[] Set storage driver options - --tls=false Use TLS; implied by tls-verify flags + --tls=false Use TLS; implied by --tlsverify flag --tlscacert="/home/sven/.docker/ca.pem" Trust only remotes providing a certificate signed by the CA given here --tlscert="/home/sven/.docker/cert.pem" Path to TLS certificate file --tlskey="/home/sven/.docker/key.pem" Path to TLS key file @@ -100,7 +102,7 @@ To run the daemon with debug output, use `docker -d -D`. ### Daemon socket option -The Docker daemon can listen for [Docker Remote API](reference/api/docker_remote_api/) +The Docker daemon can listen for [Docker Remote API](/reference/api/docker_remote_api/) requests via three different types of Socket: `unix`, `tcp`, and `fd`. 
By default, a `unix` domain socket (or IPC socket) is created at `/var/run/docker.sock`, @@ -109,7 +111,7 @@ requiring either `root` permission, or `docker` group membership. If you need to access the Docker daemon remotely, you need to enable the `tcp` Socket. Beware that the default setup provides un-encrypted and un-authenticated direct access to the Docker daemon - and should be secured either using the -[built in https encrypted socket](/articles/https/), or by putting a secure web +[built in HTTPS encrypted socket](/articles/https/), or by putting a secure web proxy in front of it. You can listen on port `2375` on all network interfaces with `-H tcp://0.0.0.0:2375`, or on a particular network interface using its IP address: `-H tcp://192.168.59.103:2375`. It is conventional to use port `2375` @@ -119,7 +121,7 @@ for un-encrypted, and port `2376` for encrypted communication with the daemon. > and greater are supported. Protocols SSLv3 and under are not supported anymore > for security reasons. -On Systemd based systems, you can communicate with the daemon via +On Systemd based systems, you can communicate with the daemon via [systemd socket activation](http://0pointer.de/blog/projects/socket-activation.html), use `docker -d -H fd://`. Using `fd://` will work perfectly for most setups but you can also specify individual sockets: `docker -d -H fd://3`. If the @@ -153,8 +155,8 @@ string is equivalent to setting the `--tlsverify` flag. The following are equiva ### Daemon storage-driver option -The Docker daemon has support for three different image layer storage drivers: `aufs`, -`devicemapper`, and `btrfs`. +The Docker daemon has support for several different image layer storage drivers: `aufs`, +`devicemapper`, `btrfs` and `overlay`. The `aufs` driver is the oldest, but is based on a Linux kernel patch-set that is unlikely to be merged into the main kernel. These are also known to cause some @@ -162,17 +164,155 @@ serious kernel crashes. 
However, `aufs` is also the only storage driver that all containers to share executable and shared library memory, so is a useful choice when running thousands of containers with the same program or libraries. -The `devicemapper` driver uses thin provisioning and Copy on Write (CoW) snapshots. -This driver will create a 100GB sparse file containing all your images and -containers. Each container will be limited to a 10 GB thin volume, and either of -these will require tuning - see [~jpetazzo/Resizing Docker containers with the -Device Mapper plugin]( http://jpetazzo.github.io/2014/01/29/docker-device-mapper-resize/) -To tell the Docker daemon to use `devicemapper`, use -`docker -d -s devicemapper`. +The `devicemapper` driver uses thin provisioning and Copy on Write (CoW) +snapshots. For each devicemapper graph location – typically +`/var/lib/docker/devicemapper` – a thin pool is created based on two block +devices, one for data and one for metadata. By default, these block devices +are created automatically by using loopback mounts of automatically created +sparse files. Refer to [Storage driver options](#storage-driver-options) below +for a way to customize this setup. +[~jpetazzo/Resizing Docker containers with the Device Mapper plugin]( +http://jpetazzo.github.io/2014/01/29/docker-device-mapper-resize/) article +explains how to tune your existing setup without the use of options. The `btrfs` driver is very fast for `docker build` - but like `devicemapper` does not share executable memory between devices. Use `docker -d -s btrfs -g /mnt/btrfs_partition`. +The `overlay` is a very fast union filesystem. It is now merged in the main +Linux kernel as of [3.18.0](https://lkml.org/lkml/2014/10/26/137). +Call `docker -d -s overlay` to use it. +> **Note:** +> It is currently unsupported on `btrfs` or any Copy on Write filesystem +> and should only be used over `ext4` partitions.
+ +#### Storage driver options + +A particular storage driver can be configured with options specified with +`--storage-opt` flags. The only driver accepting options is `devicemapper` as +of now. All its options are prefixed with `dm`. + +Currently supported options are: + + * `dm.basesize` + + Specifies the size to use when creating the base device, which limits the + size of images and containers. The default value is 10G. Note, thin devices + are inherently "sparse", so a 10G device which is mostly empty doesn't use + 10 GB of space on the pool. However, the filesystem will use more space for + the empty case the larger the device is. + + **Warning**: This value affects the system-wide "base" empty filesystem + that may already be initialized and inherited by pulled images. Typically, + a change to this value will require additional steps to take effect: + + $ sudo service docker stop + $ sudo rm -rf /var/lib/docker + $ sudo service docker start + + Example use: + + $ sudo docker -d --storage-opt dm.basesize=20G + + * `dm.loopdatasize` + + Specifies the size to use when creating the loopback file for the "data" + device which is used for the thin pool. The default size is 100G. Note that + the file is sparse, so it will not initially take up this much space. + + Example use: + + $ sudo docker -d --storage-opt dm.loopdatasize=200G + + * `dm.loopmetadatasize` + + Specifies the size to use when creating the loopback file for the + "metadata" device which is used for the thin pool. The default size is 2G. + Note that the file is sparse, so it will not initially take up this much + space. + + Example use: + + $ sudo docker -d --storage-opt dm.loopmetadatasize=4G + + * `dm.fs` + + Specifies the filesystem type to use for the base device. The supported + options are "ext4" and "xfs". The default is "ext4" + + Example use: + + $ sudo docker -d --storage-opt dm.fs=xfs + + * `dm.mkfsarg` + + Specifies extra mkfs arguments to be used when creating the base device.
+ + Example use: + + $ sudo docker -d --storage-opt "dm.mkfsarg=-O ^has_journal" + + * `dm.mountopt` + + Specifies extra mount options used when mounting the thin devices. + + Example use: + + $ sudo docker -d --storage-opt dm.mountopt=nodiscard + + * `dm.datadev` + + Specifies a custom blockdevice to use for data for the thin pool. + + If using a block device for device mapper storage, ideally both datadev and + metadatadev should be specified to completely avoid using the loopback + device. + + Example use: + + $ sudo docker -d \ + --storage-opt dm.datadev=/dev/sdb1 \ + --storage-opt dm.metadatadev=/dev/sdc1 + + * `dm.metadatadev` + + Specifies a custom blockdevice to use for metadata for the thin pool. + + For best performance the metadata should be on a different spindle than the + data, or even better on an SSD. + + If setting up a new metadata pool it is required to be valid. This can be + achieved by zeroing the first 4k to indicate empty metadata, like this: + + $ dd if=/dev/zero of=$metadata_dev bs=4096 count=1 + + Example use: + + $ sudo docker -d \ + --storage-opt dm.datadev=/dev/sdb1 \ + --storage-opt dm.metadatadev=/dev/sdc1 + + * `dm.blocksize` + + Specifies a custom blocksize to use for the thin pool. The default + blocksize is 64K. + + Example use: + + $ sudo docker -d --storage-opt dm.blocksize=512K + + * `dm.blkdiscard` + + Enables or disables the use of blkdiscard when removing devicemapper + devices. This is enabled by default (only) if using loopback devices and is + required to re-sparsify the loopback file on image/container removal. + + Disabling this on loopback can lead to *much* faster container removal + times, but will make the space used in `/var/lib/docker` directory not be + returned to the system for other use when containers are removed.
+ + Example use: + + $ sudo docker -d --storage-opt dm.blkdiscard=false ### Docker exec-driver option @@ -247,7 +387,7 @@ Docker supports softlinks for the Docker data directory Attach to a running container --no-stdin=false Do not attach STDIN - --sig-proxy=true Proxy all received signals to the process (even in non-TTY mode). SIGCHLD, SIGKILL, and SIGSTOP are not proxied. + --sig-proxy=true Proxy all received signals to the process (non-TTY mode only). SIGCHLD, SIGKILL, and SIGSTOP are not proxied. The `attach` command lets you view or interact with any running container's primary process (`pid 1`). @@ -456,7 +596,7 @@ Supported formats are: bzip2, gzip and xz. This will clone the GitHub repository and use the cloned repository as context. The Dockerfile at the root of the repository is used as Dockerfile. Note that you -can specify an arbitrary Git repository by using the `git://` +can specify an arbitrary Git repository by using the `git://` or `git@` schema. > **Note:** `docker build` will return a `no such file or directory` error @@ -523,18 +663,22 @@ Creates a new container. --cap-drop=[] Drop Linux capabilities --cidfile="" Write the container ID to the file --cpuset="" CPUs in which to allow execution (0-3, 0,1) - --device=[] Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc) + --device=[] Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm) --dns=[] Set custom DNS servers - --dns-search=[] Set custom DNS search domains + --dns-search=[] Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain) -e, --env=[] Set environment variables --entrypoint="" Overwrite the default ENTRYPOINT of the image --env-file=[] Read in a line delimited file of environment variables - --expose=[] Expose a port from the container without publishing it to your host + --expose=[] Expose a port or a range of ports (e.g. 
--expose=3300-3310) from the container without publishing it to your host -h, --hostname="" Container host name -i, --interactive=false Keep STDIN open even if not attached + --ipc="" Default is to create a private IPC namespace (POSIX SysV IPC) for the container + 'container:': reuses another container's shared memory, semaphores and message queues + 'host': use the host shared memory, semaphores and message queues inside the container. Note: the host mode gives the container full access to local shared memory and is therefore considered insecure. --link=[] Add link to another container in the form of name:alias --lxc-conf=[] (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" -m, --memory="" Memory limit (format: , where unit = b, k, m or g) + --mac-address="" Container MAC address (e.g. 92:d0:c6:0a:29:33) --name="" Assign a name to the container --net="bridge" Set the Network mode for the container 'bridge': creates a new network stack for the container on the docker bridge @@ -547,6 +691,7 @@ Creates a new container. (use 'docker port' to see the actual mapping) --privileged=false Give extended privileges to this container --restart="" Restart policy to apply when a container exits (no, on-failure[:max-retry], always) + --security-opt=[] Security Options -t, --tty=false Allocate a pseudo-TTY -u, --user="" Username or UID -v, --volume=[] Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container) @@ -563,6 +708,8 @@ container at any point. This is useful when you want to set up a container configuration ahead of time so that it is ready to start when you need it. +Please see the [run command](#run) section for more details. + #### Example $ sudo docker create -t -i fedora bash @@ -617,6 +764,24 @@ and Docker images will report: untag, delete +#### Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value".
If you would like to use +multiple filters, pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + +Using the same filter multiple times will be handled as an *OR*; for example +`--filter container=588a23dac085 --filter container=a8f7720b8c22` will display events for +container 588a23dac085 *OR* container a8f7720b8c22 + +Using multiple filters will be handled as an *AND*; for example +`--filter container=588a23dac085 --filter event=start` will display events for +container 588a23dac085 *AND* the event type is *start* + +Current filters: + * event + * image + * container + #### Examples You'll need two shells for this example. @@ -625,37 +790,70 @@ You'll need two shells for this example. $ sudo docker events -**Shell 2: Start and Stop a Container:** +**Shell 2: Start and Stop containers:** $ sudo docker start 4386fb97867d $ sudo docker stop 4386fb97867d + $ sudo docker stop 7805c1d35632 **Shell 1: (Again .. now showing events):** - 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) start - 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) die - 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) stop + 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) start + 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die + 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop + 2014-05-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) die + 2014-05-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) stop **Show events in the past from a specified time:** $ sudo docker events --since 1378216169 - 2014-03-10T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) die - 2014-03-10T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) stop + 2014-03-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die + 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop + 
2014-05-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) die + 2014-03-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) stop $ sudo docker events --since '2013-09-03' - 2014-09-03T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) start - 2014-09-03T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) die - 2014-09-03T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) stop + 2014-09-03T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) start + 2014-09-03T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die + 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop + 2014-05-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) die + 2014-09-03T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) stop $ sudo docker events --since '2013-09-03 15:49:29 +0200 CEST' - 2014-09-03T15:49:29.999999999Z07:00 4386fb97867d: (from 12de384bfb10) die - 2014-09-03T15:49:29.999999999Z07:00 4386fb97867d: (from 12de384bfb10) stop + 2014-09-03T15:49:29.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die + 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop + 2014-05-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) die + 2014-09-03T15:49:29.999999999Z07:00 7805c1d35632: (from redis:2.8) stop + +**Filter events:** + + $ sudo docker events --filter 'event=stop' + 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop + 2014-09-03T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) stop + + $ sudo docker events --filter 'image=ubuntu-1:14.04' + 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) start + 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die + 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop + + $ sudo docker events --filter 'container=7805c1d35632' + 2014-05-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) die + 
2014-09-03T15:49:29.999999999Z07:00 7805c1d35632: (from redis:2.8) stop + + $ sudo docker events --filter 'container=7805c1d35632' --filter 'container=4386fb97867d' + 2014-09-03T15:49:29.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die + 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop + 2014-05-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) die + 2014-09-03T15:49:29.999999999Z07:00 7805c1d35632: (from redis:2.8) stop + + $ sudo docker events --filter 'container=7805c1d35632' --filter 'event=stop' + 2014-09-03T15:49:29.999999999Z07:00 7805c1d35632: (from redis:2.8) stop ## exec Usage: docker exec [OPTIONS] CONTAINER COMMAND [ARG...] - Run a command in an existing container + Run a command in a running container -d, --detach=false Detached mode: run command in the background -i, --interactive=false Keep STDIN open even if not attached @@ -663,7 +861,11 @@ You'll need two shells for this example. The `docker exec` command runs a new command in a running container. -The `docker exec` command will typically be used after `docker run` or `docker start`. +The command started using `docker exec` will only run while the container's primary +process (`PID 1`) is running, and will not be restarted if the container is restarted. + +If the container is paused, then the `docker exec` command will wait until the +container is unpaused, and then run. #### Examples @@ -676,7 +878,7 @@ This will create a container named `ubuntu_bash` and start a Bash session. This will create a new file `/tmp/execWorks` inside the running container `ubuntu_bash`, in the background. - $ sudo docker exec ubuntu_bash -it bash + $ sudo docker exec -it ubuntu_bash bash This will create a new Bash session in the container `ubuntu_bash`. 
@@ -712,7 +914,7 @@ To see how the `docker:latest` image was built: ## images - Usage: docker images [OPTIONS] [NAME] + Usage: docker images [OPTIONS] [REPOSITORY] List images @@ -729,19 +931,28 @@ decrease disk usage, and speed up `docker build` by allowing each step to be cached. These intermediate layers are not shown by default. +The `VIRTUAL SIZE` is the cumulative space taken up by the image and all +its parent images. This is also the disk space used by the contents of the +Tar file created when you `docker save` an image. + +An image will be listed more than once if it has multiple repository names +or tags. This single image (identifiable by its matching `IMAGE ID`) +uses up the `VIRTUAL SIZE` listed only once. + #### Listing the most recently created images $ sudo docker images | head - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE - 77af4d6b9913 19 hours ago 1.089 GB - committest latest b6fa739cedf5 19 hours ago 1.089 GB - 78a85c484f71 19 hours ago 1.089 GB - docker latest 30557a29d5ab 20 hours ago 1.089 GB - 0124422dd9f9 20 hours ago 1.089 GB - 18ad6fad3402 22 hours ago 1.082 GB - f9f1e26352f0 23 hours ago 1.089 GB - tryout latest 2629d1fa0b81 23 hours ago 131.5 MB - 5ed6274db6ce 24 hours ago 1.089 GB + REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + 77af4d6b9913 19 hours ago 1.089 GB + committ latest b6fa739cedf5 19 hours ago 1.089 GB + 78a85c484f71 19 hours ago 1.089 GB + docker latest 30557a29d5ab 20 hours ago 1.089 GB + 5ed6274db6ce 24 hours ago 1.089 GB + postgres 9 746b819f315e 4 days ago 213.4 MB + postgres 9.3 746b819f315e 4 days ago 213.4 MB + postgres 9.3.5 746b819f315e 4 days ago 213.4 MB + postgres latest 746b819f315e 4 days ago 213.4 MB + #### Listing the full length image IDs @@ -759,7 +970,7 @@ by default. #### Filtering -The filtering flag (`-f` or `--filter`) format is of "key=value". If there are more +The filtering flag (`-f` or `--filter`) format is of "key=value". 
If there is more than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) Current filters: @@ -842,18 +1053,27 @@ For example: $ sudo docker -D info Containers: 14 Images: 52 - Storage Driver: btrfs + Storage Driver: aufs + Root Dir: /var/lib/docker/aufs + Dirs: 545 Execution Driver: native-0.2 Kernel Version: 3.13.0-24-generic Operating System: Ubuntu 14.04 LTS + CPUs: 1 + Name: prod-server-42 + ID: 7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS + Total Memory: 2 GiB Debug mode (server): false Debug mode (client): true Fds: 10 Goroutines: 9 EventsListeners: 0 Init Path: /usr/bin/docker + Docker Root Dir: /var/lib/docker Username: svendowideit Registry: [https://index.docker.io/v1/] + Labels: + storage=ssd The global `-D` option tells all `docker` commands to output debug information. @@ -883,6 +1103,13 @@ straightforward manner. $ sudo docker inspect --format='{{.NetworkSettings.IPAddress}}' $INSTANCE_ID +**Get an instance's MAC Address:** + +For the most part, you can pick out any field from the JSON in a fairly +straightforward manner. + + $ sudo docker inspect --format='{{.NetworkSettings.MacAddress}}' $INSTANCE_ID + **List All Port Bindings:** One can loop over arrays and maps in the results to produce simple text @@ -1031,7 +1258,7 @@ used, which is observable by the process being suspended. With the cgroups freez the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. -See the +See the [cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroups/freezer-subsystem.txt) for further details. @@ -1050,7 +1277,7 @@ for further details. -n=-1 Show n last created containers, include non-running ones. --no-trunc=false Don't truncate output -q, --quiet=false Only display numeric IDs - -s, --size=false Display sizes + -s, --size=false Display total file sizes --since="" Show only containers created since Id or Name, include non-running ones. 
Running `docker ps` showing 2 linked containers. @@ -1070,6 +1297,7 @@ than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bi Current filters: * exited (int - the code of exited containers. Only useful with '--all') + * status (restarting|running|paused|exited) ##### Successfully exited containers @@ -1107,9 +1335,8 @@ use `docker pull`: # will pull the debian:latest image, its intermediate layers # and any aliases of the same id $ sudo docker pull debian:testing - # will pull the image named ubuntu:trusty, ubuntu:14.04 - # which is an alias of the same image - # and any intermediate layers it is based on. + # will pull the image named debian:testing and any intermediate + # layers it is based on. # (Typically the empty `scratch` image, a MAINTAINER layer, # and the un-tarred base). $ sudo docker pull --all-tags centos @@ -1221,18 +1448,22 @@ removed before the image is removed. --cidfile="" Write the container ID to the file --cpuset="" CPUs in which to allow execution (0-3, 0,1) -d, --detach=false Detached mode: run the container in the background and print the new container ID - --device=[] Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc) + --device=[] Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm) --dns=[] Set custom DNS servers - --dns-search=[] Set custom DNS search domains + --dns-search=[] Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain) -e, --env=[] Set environment variables --entrypoint="" Overwrite the default ENTRYPOINT of the image --env-file=[] Read in a line delimited file of environment variables - --expose=[] Expose a port from the container without publishing it to your host + --expose=[] Expose a port or a range of ports (e.g. 
--expose=3300-3310) from the container without publishing it to your host -h, --hostname="" Container host name -i, --interactive=false Keep STDIN open even if not attached + --ipc="" Default is to create a private IPC namespace (POSIX SysV IPC) for the container + 'container:': reuses another container's shared memory, semaphores and message queues + 'host': use the host shared memory, semaphores and message queues inside the container. Note: the host mode gives the container full access to local shared memory and is therefore considered insecure. --link=[] Add link to another container in the form of name:alias --lxc-conf=[] (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" -m, --memory="" Memory limit (format: , where unit = b, k, m or g) + --mac-address="" Container MAC address (e.g. 92:d0:c6:0a:29:33) --name="" Assign a name to the container --net="bridge" Set the Network mode for the container 'bridge': creates a new network stack for the container on the docker bridge @@ -1246,7 +1477,8 @@ removed before the image is removed. --privileged=false Give extended privileges to this container --restart="" Restart policy to apply when a container exits (no, on-failure[:max-retry], always) --rm=false Automatically remove the container when it exits (incompatible with -d) - --sig-proxy=true Proxy received signals to the process (even in non-TTY mode). SIGCHLD, SIGSTOP, and SIGKILL are not proxied. + --security-opt=[] Security Options + --sig-proxy=true Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied. -t, --tty=false Allocate a pseudo-TTY -u, --user="" Username or UID -v, --volume=[] Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container) @@ -1260,6 +1492,9 @@ specified image, and then `starts` it using the specified command. That is, previous changes intact using `docker start`. See `docker ps -a` to view a list of all containers. 
+There is detailed information about `docker run` in the [Docker run reference]( +/reference/run/). + The `docker run` command can be used in combination with `docker commit` to [*change the command that a container runs*](#commit-an-existing-container). @@ -1430,8 +1665,31 @@ option enables that. For example, a specific block storage device or loop device or audio device can be added to an otherwise unprivileged container (without the `--privileged` flag) and have the application directly access it. +By default, the container will be able to `read`, `write` and `mknod` these devices. +This can be overridden using a third `:rwm` set of options to each `--device` +flag: + + +``` + $ sudo docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk /dev/xvdc + + Command (m for help): q + $ sudo docker run --device=/dev/sda:/dev/xvdc:r --rm -it ubuntu fdisk /dev/xvdc + You will not be able to write the partition table. + + Command (m for help): q + + $ sudo docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk /dev/xvdc + + Command (m for help): q + + $ sudo docker run --device=/dev/sda:/dev/xvdc:m --rm -it ubuntu fdisk /dev/xvdc + fdisk: unable to open /dev/xvdc: Operation not permitted +``` + **Note:** -> `--device` cannot be safely used with ephemeral devices. Block devices that may be removed should not be added to untrusted containers with `--device`. +> `--device` cannot be safely used with ephemeral devices. Block devices that +> may be removed should not be added to untrusted containers with `--device`. **A complete example:** @@ -1492,6 +1750,30 @@ container exits with a non-zero exit status more than 10 times in a row Docker will abort trying to restart the container. Providing a maximum restart limit is only valid for the ** on-failure ** policy. +### Adding entries to a container hosts file + +You can add other hosts into a container's `/etc/hosts` file by using one or more +`--add-host` flags. 
This example adds a static address for a host named `docker`: + +``` + $ docker run --add-host=docker:10.180.0.1 --rm -it debian + $$ ping docker + PING docker (10.180.0.1): 48 data bytes + 56 bytes from 10.180.0.1: icmp_seq=0 ttl=254 time=7.600 ms + 56 bytes from 10.180.0.1: icmp_seq=1 ttl=254 time=30.705 ms + ^C--- docker ping statistics --- + 2 packets transmitted, 2 packets received, 0% packet loss + round-trip min/avg/max/stddev = 7.600/19.152/30.705/11.553 ms +``` + +> **Note:** +> Sometimes you need to connect to the Docker host, which means getting the IP +> address of the host. You can use the following shell commands to simplify this +> process: +> +> $ alias hostip="ip route show 0.0.0.0/0 | grep -Eo 'via \S+' | awk '{ print \$2 }'" +> $ docker run --add-host=docker:$(hostip) --rm -it debian + ## save Usage: docker save [OPTIONS] IMAGE [IMAGE...] @@ -1541,8 +1823,8 @@ more details on finding shared images from the command line. Restart a stopped container - -a, --attach=false Attach container's `STDOUT` and `STDERR` and forward all signals to the process - -i, --interactive=false Attach container's `STDIN` + -a, --attach=false Attach container's STDOUT and STDERR and forward all signals to the process + -i, --interactive=false Attach container's STDIN When run on a container that has already been started, takes no action and succeeds unconditionally. @@ -1551,7 +1833,7 @@ takes no action and succeeds unconditionally. Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...] - Stop a running container by sending `SIGTERM` and then `SIGKILL` after a grace period + Stop a running container by sending SIGTERM and then SIGKILL after a grace period -t, --time=10 Number of seconds to wait for the container to stop before killing it. Default is 10 seconds. @@ -1585,7 +1867,7 @@ them to [*Share Images via Repositories*]( The `docker unpause` command uses the cgroups freezer to un-suspend all processes in a container. 
-See the +See the [cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroups/freezer-subsystem.txt) for further details. diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md index 67007ccff7..e9ecfff442 100644 --- a/docs/sources/reference/run.md +++ b/docs/sources/reference/run.md @@ -1,8 +1,8 @@ -page_title: Docker Run Reference +page_title: Docker run reference page_description: Configure containers at runtime page_keywords: docker, run, configure, runtime -# Docker Run Reference +# Docker run reference **Docker runs processes in isolated containers**. When an operator executes `docker run`, she starts a process with its own file system, @@ -14,7 +14,7 @@ the container from the image. That's the main reason [*run*](/reference/commandline/cli/#run) has more options than any other `docker` command. -## General Form +## General form The basic `docker run` command takes this form: @@ -39,7 +39,7 @@ behavior, allowing them to override all defaults set by the developer during `docker build` and nearly all the defaults set by the Docker runtime itself. -## Operator Exclusive Options +## Operator exclusive options Only the operator (the person executing `docker run`) can set the following options. @@ -50,12 +50,13 @@ following options. - [Container Identification](#container-identification) - [Name (--name)](#name-name) - [PID Equivalent](#pid-equivalent) + - [IPC Settings](#ipc-settings) - [Network Settings](#network-settings) - [Clean Up (--rm)](#clean-up-rm) - [Runtime Constraints on CPU and Memory](#runtime-constraints-on-cpu-and-memory) - [Runtime Privilege, Linux Capabilities, and LXC Configuration](#runtime-privilege-linux-capabilities-and-lxc-configuration) -## Detached vs Foreground +## Detached vs foreground When starting a Docker container, you must first decide if you want to run the container in the background in a "detached" mode or in the @@ -82,7 +83,7 @@ and pass along signals. 
All of that is configurable: -a=[] : Attach to `STDIN`, `STDOUT` and/or `STDERR` -t=false : Allocate a pseudo-tty - --sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) + --sig-proxy=true: Proxify all received signal to the process (non-TTY mode only) -i=false : Keep STDIN open even if not attached If you do not specify `-a` then Docker will [attach all standard @@ -93,13 +94,14 @@ specify to which of the three standard streams (`STDIN`, `STDOUT`, $ sudo docker run -a stdin -a stdout -i -t ubuntu /bin/bash -For interactive processes (like a shell) you will typically want a tty -as well as persistent standard input (`STDIN`), so you'll use `-i -t` -together in most interactive cases. +For interactive processes (like a shell), you must use `-i -t` together in +order to allocate a tty for the container process. Specifying `-t` is however +forbidden when the client standard output is redirected or pipe, such as in: +`echo test | docker run -i busybox cat`. -## Container Identification +## Container identification -### Name (–-name) +### Name (--name) The operator can identify a container in three ways: @@ -116,7 +118,7 @@ add meaning to a container since you can use this name when defining other place you need to identify a container). This works for both background and foreground Docker containers. -### PID Equivalent +### PID equivalent Finally, to help with automation, you can have Docker write the container ID out to a file of your choosing. This is similar to how some @@ -131,15 +133,32 @@ While not strictly a means of identifying a container, you can specify a version image you'd like to run the container with by adding `image[:tag]` to the command. For example, `docker run ubuntu:14.04`. 
-## Network Settings +## IPC Settings + --ipc="" : Set the IPC mode for the container, + 'container:': reuses another container's IPC namespace + 'host': use the host's IPC namespace inside the container +By default, all containers have the IPC namespace enabled - --dns=[] : Set custom dns servers for the container - --net="bridge" : Set the Network mode for the container - 'bridge': creates a new network stack for the container on the docker bridge - 'none': no networking for this container - 'container:': reuses another container network stack - 'host': use the host network stack inside the container - --add-host="" : Add a line to /etc/hosts (host:IP) +IPC (POSIX/SysV IPC) namespace provides separation of named shared memory segments, semaphores and message queues. + +Shared memory segments are used to accelerate inter-process communication at +memory speed, rather than through pipes or through the network stack. Shared +memory is commonly used by databases and custom-built (typically C/OpenMPI, +C++/using boost libraries) high performance applications for scientific +computing and financial services industries. If these types of applications +are broken into multiple containers, you might need to share the IPC mechanisms +of the containers. + +## Network settings + + --dns=[] : Set custom dns servers for the container + --net="bridge" : Set the Network mode for the container + 'bridge': creates a new network stack for the container on the docker bridge + 'none': no networking for this container + 'container:': reuses another container network stack + 'host': use the host network stack inside the container + --add-host="" : Add a line to /etc/hosts (host:IP) + --mac-address="" : Sets the container's Ethernet device's MAC address By default, all containers have networking enabled and they can make any outgoing connections. The operator can completely disable networking @@ -150,6 +169,10 @@ networking. 
In cases like this, you would perform I/O through files or Your container will use the same DNS servers as the host by default, but you can override this with `--dns`. +By default a random MAC is generated. You can set the container's MAC address +explicitly by providing a MAC via the `--mac-address` parameter (format: +`12:34:56:78:9a:bc`). + Supported networking modes are: * none - no networking in the container @@ -213,7 +236,7 @@ container itself as well as `localhost` and a few other common things. The ::1 localhost ip6-localhost ip6-loopback 86.75.30.9 db-static -## Clean Up (–-rm) +## Clean up (--rm) By default a container's file system persists even after the container exits. This makes debugging a lot easier (since you can inspect the @@ -225,7 +248,7 @@ the container exits**, you can add the `--rm` flag: --rm=false: Automatically remove the container when it exits (incompatible with -d) -## Security Configuration +## Security configuration --security-opt="label:user:USER" : Set the label user for the container --security-opt="label:role:ROLE" : Set the label role for the container --security-opt="label:type:TYPE" : Set the label type for the container @@ -261,7 +284,7 @@ Note: You would have to write policy defining a `svirt_apache_t` type. -## Runtime Constraints on CPU and Memory +## Runtime constraints on CPU and memory The operator can also adjust the performance parameters of the container: @@ -279,7 +302,20 @@ get the same proportion of CPU cycles, but you can tell the kernel to give more shares of CPU time to one or more containers when you start them via Docker. -## Runtime Privilege, Linux Capabilities, and LXC Configuration +The flag `-c` or `--cpu-shares` with value 0 indicates that the running +container has access to all 1024 (default) CPU shares. However, this value +can be modified to run a container with a different priority or different +proportion of CPU cycles. 
+ +E.g., If we start three {C0, C1, C2} containers with default values +(`-c` OR `--cpu-shares` = 0) and one {C3} with (`-c` or `--cpu-shares`=512) +then C0, C1, and C2 would have access to 100% CPU shares (1024) and C3 would +only have access to 50% CPU shares (512). In the context of a time-sliced OS +with time quantum set as 100 milliseconds, containers C0, C1, and C2 will run +for full-time quantum, and container C3 will run for half-time quantum i.e 50 +milliseconds. + +## Runtime privilege, Linux capabilities, and LXC configuration --cap-add: Add Linux capabilities --cap-drop: Drop Linux capabilities @@ -308,6 +344,26 @@ will be accessible within the container. $ sudo docker run --device=/dev/snd:/dev/snd ... +By default, the container will be able to `read`, `write`, and `mknod` these devices. +This can be overridden using a third `:rwm` set of options to each `--device` flag: + + +``` + $ sudo docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk /dev/xvdc + + Command (m for help): q + $ sudo docker run --device=/dev/sda:/dev/xvdc:r --rm -it ubuntu fdisk /dev/xvdc + You will not be able to write the partition table. + + Command (m for help): q + + $ sudo docker run --device=/dev/sda:/dev/xvdc:w --rm -it ubuntu fdisk /dev/xvdc + crash.... + + $ sudo docker run --device=/dev/sda:/dev/xvdc:m --rm -it ubuntu fdisk /dev/xvdc + fdisk: unable to open /dev/xvdc: Operation not permitted +``` + In addition to `--privileged`, the operator can have fine grain control over the capabilities using `--cap-add` and `--cap-drop`. By default, Docker has a default list of capabilities that are kept. Both flags support the value `all`, so if the @@ -318,6 +374,34 @@ operator wants to have all capabilities but `MKNOD` they could use: For interacting with the network stack, instead of using `--privileged` they should use `--cap-add=NET_ADMIN` to modify the network interfaces. 
+ $ docker run -t -i --rm ubuntu:14.04 ip link add dummy0 type dummy + RTNETLINK answers: Operation not permitted + $ docker run -t -i --rm --cap-add=NET_ADMIN ubuntu:14.04 ip link add dummy0 type dummy + +To mount a FUSE based filesystem, you need to combine both `--cap-add` and +`--device`: + + $ docker run --rm -it --cap-add SYS_ADMIN sshfs sshfs sven@10.10.10.20:/home/sven /mnt + fuse: failed to open /dev/fuse: Operation not permitted + $ docker run --rm -it --device /dev/fuse sshfs sshfs sven@10.10.10.20:/home/sven /mnt + fusermount: mount failed: Operation not permitted + $ docker run --rm -it --cap-add SYS_ADMIN --device /dev/fuse sshfs + # sshfs sven@10.10.10.20:/home/sven /mnt + The authenticity of host '10.10.10.20 (10.10.10.20)' can't be established. + ECDSA key fingerprint is 25:34:85:75:25:b0:17:46:05:19:04:93:b5:dd:5f:c6. + Are you sure you want to continue connecting (yes/no)? yes + sven@10.10.10.20's password: + root@30aa0cfaf1b5:/# ls -la /mnt/src/docker + total 1516 + drwxrwxr-x 1 1000 1000 4096 Dec 4 06:08 . + drwxrwxr-x 1 1000 1000 4096 Dec 4 11:46 .. + -rw-rw-r-- 1 1000 1000 16 Oct 8 00:09 .dockerignore + -rwxrwxr-x 1 1000 1000 464 Oct 8 00:09 .drone.yml + drwxrwxr-x 1 1000 1000 4096 Dec 4 06:11 .git + -rw-rw-r-- 1 1000 1000 461 Dec 4 06:08 .gitignore + .... + + If the Docker daemon was started using the `lxc` exec-driver (`docker -d --exec-driver=lxc`) then the operator can also specify LXC options using one or more `--lxc-conf` parameters. These can be new parameters or @@ -327,7 +411,14 @@ Note that in the future, a given host's docker daemon may not use LXC, so this is an implementation-specific configuration meant for operators already familiar with using LXC directly. 
-## Overriding Dockerfile Image Defaults +> **Note:** +> If you use `--lxc-conf` to modify a container's configuration which is also +> managed by the Docker daemon, then the Docker daemon will not know about this +> modification, and you will need to manage any conflicts yourself. For example, +> you can use `--lxc-conf` to set a container's IP address, but this will not be +> reflected in the `/etc/hosts` file. + +## Overriding Dockerfile image defaults When a developer builds an image from a [*Dockerfile*](/reference/builder/#dockerbuilder) or when she commits it, the developer can set a number of default parameters @@ -347,7 +438,7 @@ Dockerfile instruction and how the operator can override that setting. - [USER](#user) - [WORKDIR](#workdir) -## CMD (Default Command or Options) +## CMD (default command or options) Recall the optional `COMMAND` in the Docker commandline: @@ -363,7 +454,7 @@ image), you can override that `CMD` instruction just by specifying a new If the image also specifies an `ENTRYPOINT` then the `CMD` or `COMMAND` get appended as arguments to the `ENTRYPOINT`. -## ENTRYPOINT (Default Command to Execute at Runtime) +## ENTRYPOINT (default command to execute at runtime) --entrypoint="": Overwrite the default entrypoint set by the image @@ -386,14 +477,14 @@ or two examples of how to pass more parameters to that ENTRYPOINT: $ sudo docker run -i -t --entrypoint /bin/bash example/redis -c ls -l $ sudo docker run -i -t --entrypoint /usr/bin/redis-cli example/redis --help -## EXPOSE (Incoming Ports) +## EXPOSE (incoming ports) The Dockerfile doesn't give much control over networking, only providing the `EXPOSE` instruction to give a hint to the operator about what incoming ports might provide services. 
The following options work with or override the Dockerfile's exposed defaults: - --expose=[]: Expose a port from the container + --expose=[]: Expose a port or a range of ports from the container without publishing it to your host -P=false : Publish all exposed ports to the host interfaces -p=[] : Publish a container᾿s port to the host (format: @@ -402,7 +493,7 @@ or override the Dockerfile's exposed defaults: (use 'docker port' to see the actual mapping) --link="" : Add link to another container (name:alias) -As mentioned previously, `EXPOSE` (and `--expose`) make a port available +As mentioned previously, `EXPOSE` (and `--expose`) makes ports available **in** a container for incoming connections. The port number on the inside of the container (where the service listens) does not need to be the same number as the port exposed on the outside of the container @@ -426,7 +517,7 @@ then the client container can access the exposed port via a private networking interface. Docker will set some environment variables in the client container to help indicate which interface and port to use. -## ENV (Environment Variables) +## ENV (environment variables) When a new container is created, Docker will set the following environment variables automatically: @@ -535,7 +626,7 @@ mechanism to communicate with a linked container by its alias: If you restart the source container (`servicename` in this case), the recipient container's `/etc/hosts` entry will be automatically updated. -## VOLUME (Shared Filesystems) +## VOLUME (shared filesystems) -v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. If "container-dir" is missing, then docker creates a new volume. diff --git a/docs/sources/release-notes.md b/docs/sources/release-notes.md index 1e744e32a5..7ec08b1a84 100644 --- a/docs/sources/release-notes.md +++ b/docs/sources/release-notes.md @@ -1,6 +1,6 @@ -page_title: Docker 1.x Series Release Notes page_description: Release Notes for -Docker 1.x. 
page_keywords: docker, documentation, about, technology, -understanding, release +page_title: Docker 1.x Series Release Notes +page_description: Release Notes for Docker 1.x. +page_keywords: docker, documentation, about, technology, understanding, release #Release Notes @@ -74,25 +74,28 @@ This release fixes some bugs and addresses some security issues. *Security fixes* -Patches and changes were made to address CVE-2014-5277 and CVE-2014-3566. Specifically, changes were made to: +Patches and changes were made to address [CVE-2014-5277 and CVE-2014-3566](https://groups.google.com/forum/#!topic/docker-user/oYm0i3xShJU). +Specifically, changes were made to: + * Prevent fallback to SSL protocols < TLS 1.0 for client, daemon and registry -* Secure HTTPS connection to registries with certificate verification and without HTTP fallback unless `--insecure-registry` is specified. +* Secure HTTPS connection to registries with certificate verification and without HTTP fallback unless [`--insecure-registry`](/reference/commandline/cli/#run) is specified. *Runtime fixes* -* Fixed issue where volumes would not be shared +* Fixed issue where volumes would not be shared. *Client fixes* * Fixed issue with `--iptables=false` not automatically setting -`--ip-masq=false` -* Fixed docker run output to non-TTY stdout +`--ip-masq=false`. +* Fixed docker run output to non-TTY stdout. *Builder fixes* -* Fixed escaping `$` for environment variables -* Fixed issue with lowercase `onbuild` Dockerfile instruction - +* Fixed escaping `$` for environment variables. +* Fixed issue with lowercase `onbuild` instruction in a `Dockerfile`. +* Restricted environment variable expansion to `ENV`, `ADD`, `COPY`, `WORKDIR`, +`EXPOSE`, `VOLUME`, and `USER` ##Version 1.3.0 @@ -174,7 +177,7 @@ accept an optional maximum restart count (e.g. `on-failure:5`). * `always` – Always restart the container no matter what exit code is returned. This deprecates the `--restart` flag on the Docker daemon. 
-*New flags for `docker run`: `--cap-add` and `–-cap-drop`* +*New flags for `docker run`: `--cap-add` and `--cap-drop`* In previous releases, Docker containers could either be given complete capabilities or they could all follow a whitelist of allowed capabilities while @@ -187,7 +190,7 @@ This release introduces two new flags for `docker run`, `--cap-add` and `--cap-drop`, that give you fine-grain control over the specific capabilities you want grant to a particular container. -*New `-–device` flag for `docker run`* +*New `--device` flag for `docker run`* Previously, you could only use devices inside your containers by bind mounting them (with `-v`) in a `--privileged` container. With this release, we introduce diff --git a/docs/sources/userguide/dockerimages.md b/docs/sources/userguide/dockerimages.md index a0a30408c6..ead6d82db7 100644 --- a/docs/sources/userguide/dockerimages.md +++ b/docs/sources/userguide/dockerimages.md @@ -13,14 +13,14 @@ image and the `training/webapp` image. We've also discovered that Docker stores downloaded images on the Docker host. If an image isn't already present on the host then it'll be downloaded from a registry: by default the -[Docker Hub](https://hub.docker.com) public registry. +[Docker Hub Registry](https://registry.hub.docker.com). In this section we're going to explore Docker images a bit more including: * Managing and working with images locally on your Docker host; * Creating basic images; -* Uploading images to [Docker Hub](https://hub.docker.com). +* Uploading images to [Docker Hub Registry](https://registry.hub.docker.com). 
## Listing images on the host @@ -65,7 +65,7 @@ So when we run a container we refer to a tagged image like so: $ sudo docker run -t -i ubuntu:14.04 /bin/bash -If instead we wanted to build an Ubuntu 12.04 image we'd use: +If instead we wanted to run an Ubuntu 12.04 image we'd use: $ sudo docker run -t -i ubuntu:12.04 /bin/bash @@ -154,7 +154,7 @@ We've identified a suitable image, `training/sinatra`, and now we can download i $ sudo docker pull training/sinatra -The team can now use this image by run their own containers. +The team can now use this image by running their own containers. $ sudo docker run -t -i training/sinatra /bin/bash root@a8cb6ce02d85:/# @@ -168,7 +168,6 @@ update and create images. 1. We can update a container created from an image and commit the results to an image. 2. We can use a `Dockerfile` to specify instructions to create an image. -To learn more, check out the [Dockerfile tutorial](/userguide/level1). ### Updating and committing an image @@ -457,7 +456,7 @@ Next we can see each instruction in the `Dockerfile` being executed step-by-step. We can see that each step creates a new container, runs the instruction inside that container and then commits that change - just like the `docker commit` work flow we saw earlier. When all the -instructions have executed we're left with the `324104cde6ad` image +instructions have executed we're left with the `97feabe5d2ed` image (also helpfully tagged as `ouruser/sinatra:v2`) and all intermediate containers will get removed to clean things up. @@ -539,6 +538,9 @@ Until now we've seen how to build individual applications inside Docker containers. Now learn how to build whole application stacks with Docker by linking together multiple Docker containers. +Test your Dockerfile knowledge with the +[Dockerfile tutorial](/userguide/level1). + Go to [Linking Containers Together](/userguide/dockerlinks). 
diff --git a/docs/sources/userguide/dockerizing.md b/docs/sources/userguide/dockerizing.md index 9da4890bfa..2383160986 100644 --- a/docs/sources/userguide/dockerizing.md +++ b/docs/sources/userguide/dockerizing.md @@ -126,8 +126,7 @@ identifies a container so we can work with it. > on we'll see a shorter ID and some ways to name our containers to make > working with them easier. -We can use this container ID to see what's happening with our `hello -world` daemon. +We can use this container ID to see what's happening with our `hello world` daemon. Firstly let's make sure our container is running. We can do that with the `docker ps` command. The `docker ps` command queries diff --git a/docs/sources/userguide/dockerlinks.md b/docs/sources/userguide/dockerlinks.md index ce14bfa12a..e2228cef00 100644 --- a/docs/sources/userguide/dockerlinks.md +++ b/docs/sources/userguide/dockerlinks.md @@ -151,21 +151,16 @@ earlier. The `--link` flag takes the form: Where `name` is the name of the container we're linking to and `alias` is an alias for the link name. You'll see how that alias gets used shortly. -Next, look at the names of your linked containers by filtering the full output of -`docker ps` to the last column (NAMES) using `docker ps --no-trunc | awk '{print $NF}'`. +Next, inspect your linked containers with `docker inspect`: - $ sudo docker ps --no-trunc | awk '{print $NF}' - NAMES - db, web/db - web + $ sudo docker inspect -f "{{ .HostConfig.Links }}" web + [/db:/web/db] -You can see your named containers, `db` and `web`, and you can see that the `db` -container also shows `web/db` in the `NAMES` column. This tells you that the -`web` container is linked to the `db` container, which allows it to access information -about the `db` container. +You can see that the `web` container is now linked to the `db` container +`web/db`. Which allows it to access information about the `db` container. -So what does linking the containers actually do? 
You've learned that a link creates a -source container that can provide information about itself to a recipient container. In +So what does linking the containers actually do? You've learned that a link allows a +source container to provide information about itself to a recipient container. In our example, the recipient, `web`, can access information about the source `db`. To do this, Docker creates a secure tunnel between the containers that doesn't need to expose any ports externally on the container; you'll note when we started the @@ -200,7 +195,7 @@ port. Where `` is the alias name specified in the `--link` parameter is either `TCP` or `UDP`. The format of the URL will be: `://:` (e.g. `tcp://172.17.0.82:8080`). This URL will then be -split into the following 3 environment variables for convinience: +split into the following 3 environment variables for convenience: * `_PORT___ADDR` will contain just the IP address from the URL (e.g. `WEBDB_PORT_8080_TCP_ADDR=172.17.0.82`). * `_PORT___PORT` will contain just the port number diff --git a/docs/sources/userguide/dockerrepos.md b/docs/sources/userguide/dockerrepos.md index 967ed0d8cf..9b5f9783e1 100644 --- a/docs/sources/userguide/dockerrepos.md +++ b/docs/sources/userguide/dockerrepos.md @@ -24,12 +24,12 @@ Docker itself provides access to Docker Hub services via the `docker search`, ### Account creation and login Typically, you'll want to start by creating an account on Docker Hub (if you haven't -already) and logging in. You can create your account directly on +already) and logging in. You can create your account directly on [Docker Hub](https://hub.docker.com/account/signup/), or by running: $ sudo docker login -This will prompt you for a user name, which will become the public namespace for your +This will prompt you for a user name, which will become the public namespace for your public repositories. If your user name is available, Docker will prompt you to enter a password and your e-mail address. 
It will then automatically log you in. You can now commit and @@ -162,6 +162,9 @@ event when an image or updated image is pushed to the repository. With a webhook you can specify a target URL and a JSON payload that will be delivered when the image is pushed. +See the Docker Hub documentation for [more information on +webhooks](http://docs.docker.com/docker-hub/repos/#webhooks) + ## Next steps Go and use Docker! diff --git a/docs/sources/userguide/dockervolumes.md b/docs/sources/userguide/dockervolumes.md index 58412611c5..6f94b6dbd0 100644 --- a/docs/sources/userguide/dockervolumes.md +++ b/docs/sources/userguide/dockervolumes.md @@ -51,8 +51,15 @@ directory from your own host into a container. $ sudo docker run -d -P --name web -v /src/webapp:/opt/webapp training/webapp python app.py -This will mount the local directory, `/src/webapp`, into the container as the -`/opt/webapp` directory. This is very useful for testing, for example we can +This will mount the host directory, `/src/webapp`, into the container at +`/opt/webapp`. + +> **Note:** +> If the path `/opt/webapp` already exists inside the container's image, it's +> contents will be replaced by the contents of `/src/webapp` on the host to stay +> consistent with the expected behavior of `mount` + +This is very useful for testing, for example we can mount our source code inside the container and see our application at work as we change the source code. The directory on the host must be specified as an absolute path and if the directory doesn't exist Docker will automatically diff --git a/docs/sources/userguide/index.md b/docs/sources/userguide/index.md index 08d6be0731..64bd3d16f3 100644 --- a/docs/sources/userguide/index.md +++ b/docs/sources/userguide/index.md @@ -10,7 +10,7 @@ using Docker and integrating it into your environment. We’ll teach you how to use Docker to: -* Dockerizing your applications. +* Dockerize your applications. * Run your own containers. * Build Docker images. 
* Share your Docker images with others. @@ -25,7 +25,7 @@ the Docker life cycle: Docker Hub is the central hub for Docker. It hosts public Docker images and provides services to help you build and manage your Docker -environment. To learn more; +environment. To learn more: Go to [Using Docker Hub](/userguide/dockerhub). @@ -34,7 +34,7 @@ Go to [Using Docker Hub](/userguide/dockerhub). *How do I run applications inside containers?* Docker offers a *container-based* virtualization platform to power your -applications. To learn how to Dockerize applications and run them. +applications. To learn how to Dockerize applications and run them: Go to [Dockerizing Applications](/userguide/dockerizing). @@ -55,7 +55,7 @@ Go to [Working With Containers](/userguide/usingdocker). Once you've learnt how to use Docker it's time to take the next step and learn how to build your own application images with Docker. -Go to [Working with Docker Images](/userguide/dockerimages) +Go to [Working with Docker Images](/userguide/dockerimages). ## Linking Containers Together diff --git a/docs/sources/userguide/level1.md b/docs/sources/userguide/level1.md index eca816250a..56048bfccf 100644 --- a/docs/sources/userguide/level1.md +++ b/docs/sources/userguide/level1.md @@ -2,7 +2,7 @@ page_title: Docker Images Test page_description: How to work with Docker images. page_keywords: documentation, docs, the docker guide, docker guide, docker, docker platform, virtualization framework, docker.io, Docker images, Docker image, image management, Docker repos, Docker repositories, docker, docker tag, docker tags, Docker Hub, collaboration -Back +Back # Dockerfile Tutorial diff --git a/docs/sources/userguide/level2.md b/docs/sources/userguide/level2.md index c4f2a2802c..4ff76be074 100644 --- a/docs/sources/userguide/level2.md +++ b/docs/sources/userguide/level2.md @@ -2,7 +2,7 @@ page_title: Docker Images Test page_description: How to work with Docker images. 
page_keywords: documentation, docs, the docker guide, docker guide, docker, docker platform, virtualization framework, docker.io, Docker images, Docker image, image management, Docker repos, Docker repositories, docker, docker tag, docker tags, Docker Hub, collaboration -Back +Back #Dockerfile Tutorial @@ -89,9 +89,8 @@ RUN apt-get install -y
    ## What's next?

    -Thanks for going through our tutorial! We will be posting Level 3 shortly. Follow us on twitter
    - - -

    -

    In the meantime, check out this blog post by Michael Crosby that describes Dockerfile Best Practices.

    +Thanks for going through our tutorial! We will be posting Level 3 in the future. + +To improve your Dockerfile writing skills even further, visit the Dockerfile best practices page. + Back to the Docs! \ No newline at end of file diff --git a/docs/sources/userguide/usingdocker.md b/docs/sources/userguide/usingdocker.md index e64db0bc2e..865f446bd0 100644 --- a/docs/sources/userguide/usingdocker.md +++ b/docs/sources/userguide/usingdocker.md @@ -25,7 +25,7 @@ The `docker` client is pretty simple. Each action you can take with Docker is a command and each command can take a series of flags and arguments. - # Usage: [sudo] docker [flags] [command] [arguments] .. + # Usage: [sudo] docker [command] [flags] [arguments] .. # Example: $ sudo docker run -i -t ubuntu /bin/bash @@ -85,7 +85,7 @@ This will display the help text and all available flags: Attach to a running container --no-stdin=false: Do not attach stdin - --sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) + --sig-proxy=true: Proxify all received signal to the process (non-TTY mode only) > **Note:** > You can see a full list of Docker's commands diff --git a/docs/theme/mkdocs/css/dockerfile_tutorial.css b/docs/theme/mkdocs/css/dockerfile_tutorial.css index 79d0e9cfdf..ac3f538f3e 100644 --- a/docs/theme/mkdocs/css/dockerfile_tutorial.css +++ b/docs/theme/mkdocs/css/dockerfile_tutorial.css @@ -56,4 +56,8 @@ div.level_error { width: 90px; margin-right: 0; padding: 0 0 2px 0; +} +.dockerfile.back { + display: block; + margin-top: 5px; } \ No newline at end of file diff --git a/docs/theme/mkdocs/css/docs.css b/docs/theme/mkdocs/css/docs.css index 9b6d5028e8..068a0003ef 100644 --- a/docs/theme/mkdocs/css/docs.css +++ b/docs/theme/mkdocs/css/docs.css @@ -29,13 +29,17 @@ font-weight: 700; color: #394d54; line-height: 1; - margin: 0px 0 10px 0; + margin: 10px 0 10px 0; padding-left: 20px; white-space: nowrap; overflow: hidden; text-overflow: ellipsis; } +#leftnav li.active { + 
margin-bottom: 10px; +} + .content-body { padding: 0px 0px 0px 20px; } diff --git a/docs/theme/mkdocs/css/main.css b/docs/theme/mkdocs/css/main.css index 3375f797da..ed7c189a09 100644 --- a/docs/theme/mkdocs/css/main.css +++ b/docs/theme/mkdocs/css/main.css @@ -366,9 +366,20 @@ body { text-decoration: none; color: #eeeeee; } +#footer .social { + width: 100px; + float: left; +} #footer .social li a { padding-left: 28px; } +#footer .social li span { + float: left; + width: 24px; + height: 25px; + position: absolute; + margin: 1px 0px 2px -28px; +} #footer .social .blog { background: url(../img/footer/docker-blog-24.png) no-repeat; background-position: 0px -3px; @@ -393,6 +404,23 @@ body { background: url(../img/footer/slideshare-24.png) no-repeat; background-position: 0px -3px; } +#footer .social .linkedin { + background: url(../img/footer/sprites-small_360.png) no-repeat; + background-position: -168px -3px; +} +#footer .social .github { + background: url(../img/footer/sprites-small_360.png) no-repeat; + background-position: -48px -3px; +} +#footer .social .reddit { + background: url(../img/footer/sprites-small_360.png) no-repeat; + background-position: -192px -3px; +} +#footer .social .angellist { + background: url(../img/footer/angellist-white.svg) no-repeat; + background-position: 5px; + height: 20px; +} /* Social Links */ @media only screen and (-webkit-min-device-pixel-ratio: 2), only screen and (min--moz-device-pixel-ratio: 2), only screen and (-o-min-device-pixel-ratio: 2/1), only screen and (min-device-pixel-ratio: 2), only screen and (min-resolution: 192dpi), only screen and (min-resolution: 2dppx) { #footer .social .blog { diff --git a/docs/theme/mkdocs/footer.html b/docs/theme/mkdocs/footer.html index 05316b4f38..69a4e6367f 100644 --- a/docs/theme/mkdocs/footer.html +++ b/docs/theme/mkdocs/footer.html @@ -69,7 +69,33 @@
  • Google+
  • YouTube
  • -
  • Slideshare
  • + + diff --git a/docs/theme/mkdocs/img/footer/angellist-white.svg b/docs/theme/mkdocs/img/footer/angellist-white.svg new file mode 100644 index 0000000000..5c52f3a832 --- /dev/null +++ b/docs/theme/mkdocs/img/footer/angellist-white.svg @@ -0,0 +1,35 @@ + + + + + + + + diff --git a/docs/theme/mkdocs/img/footer/sprites-small_360.png b/docs/theme/mkdocs/img/footer/sprites-small_360.png new file mode 100644 index 0000000000..c28863e3f5 Binary files /dev/null and b/docs/theme/mkdocs/img/footer/sprites-small_360.png differ diff --git a/engine/engine.go b/engine/engine.go index 5c708d405f..26f9953d66 100644 --- a/engine/engine.go +++ b/engine/engine.go @@ -250,11 +250,3 @@ func (eng *Engine) ParseJob(input string) (*Job, error) { job.Env().Init(&env) return job, nil } - -func (eng *Engine) Logf(format string, args ...interface{}) (n int, err error) { - if !eng.Logging { - return 0, nil - } - prefixedFormat := fmt.Sprintf("[%s] %s\n", eng, strings.TrimRight(format, "\n")) - return fmt.Fprintf(eng.Stderr, prefixedFormat, args...) -} diff --git a/engine/engine_test.go b/engine/engine_test.go index 92f3757251..7ab2f8fc0d 100644 --- a/engine/engine_test.go +++ b/engine/engine_test.go @@ -99,16 +99,6 @@ func TestEngineString(t *testing.T) { } } -func TestEngineLogf(t *testing.T) { - eng := New() - input := "Test log line" - if n, err := eng.Logf("%s\n", input); err != nil { - t.Fatal(err) - } else if n < len(input) { - t.Fatalf("Test: Logf() should print at least as much as the input\ninput=%d\nprinted=%d", len(input), n) - } -} - func TestParseJob(t *testing.T) { eng := New() // Verify that the resulting job calls to the right place diff --git a/engine/job.go b/engine/job.go index d032ff0215..6c11b13446 100644 --- a/engine/job.go +++ b/engine/job.go @@ -6,6 +6,8 @@ import ( "io" "strings" "time" + + log "github.com/Sirupsen/logrus" ) // A job is the fundamental unit of work in the docker engine. 
@@ -46,7 +48,7 @@ const ( // If the job returns a failure status, an error is returned // which includes the status. func (job *Job) Run() error { - if job.Eng.IsShutdown() { + if job.Eng.IsShutdown() && !job.GetenvBool("overrideShutdown") { return fmt.Errorf("engine is shutdown") } // FIXME: this is a temporary workaround to avoid Engine.Shutdown @@ -66,10 +68,12 @@ func (job *Job) Run() error { return fmt.Errorf("%s: job has already completed", job.Name) } // Log beginning and end of the job - job.Eng.Logf("+job %s", job.CallString()) - defer func() { - job.Eng.Logf("-job %s%s", job.CallString(), job.StatusString()) - }() + if job.Eng.Logging { + log.Infof("+job %s", job.CallString()) + defer func() { + log.Infof("-job %s%s", job.CallString(), job.StatusString()) + }() + } var errorMessage = bytes.NewBuffer(nil) job.Stderr.Add(errorMessage) if job.handler == nil { diff --git a/events/events.go b/events/events.go index 57a82cada0..0951f7099d 100644 --- a/events/events.go +++ b/events/events.go @@ -6,6 +6,7 @@ import ( "time" "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/parsers/filters" "github.com/docker/docker/utils" ) @@ -48,6 +49,11 @@ func (e *Events) Get(job *engine.Job) engine.Status { timeout = time.NewTimer(time.Unix(until, 0).Sub(time.Now())) ) + eventFilters, err := filters.FromParam(job.Getenv("filters")) + if err != nil { + return job.Error(err) + } + // If no until, disable timeout if until == 0 { timeout.Stop() @@ -61,7 +67,7 @@ func (e *Events) Get(job *engine.Job) engine.Status { // Resend every event in the [since, until] time interval. 
if since != 0 { - if err := e.writeCurrent(job, since, until); err != nil { + if err := e.writeCurrent(job, since, until, eventFilters); err != nil { return job.Error(err) } } @@ -72,7 +78,7 @@ func (e *Events) Get(job *engine.Job) engine.Status { if !ok { return engine.StatusOK } - if err := writeEvent(job, event); err != nil { + if err := writeEvent(job, event, eventFilters); err != nil { return job.Error(err) } case <-timeout.C: @@ -97,7 +103,23 @@ func (e *Events) SubscribersCount(job *engine.Job) engine.Status { return engine.StatusOK } -func writeEvent(job *engine.Job, event *utils.JSONMessage) error { +func writeEvent(job *engine.Job, event *utils.JSONMessage, eventFilters filters.Args) error { + isFiltered := func(field string, filter []string) bool { + if len(filter) == 0 { + return false + } + for _, v := range filter { + if v == field { + return false + } + } + return true + } + + if isFiltered(event.Status, eventFilters["event"]) || isFiltered(event.From, eventFilters["image"]) || isFiltered(event.ID, eventFilters["container"]) { + return nil + } + // When sending an event JSON serialization errors are ignored, but all // other errors lead to the eviction of the listener. 
if b, err := json.Marshal(event); err == nil { @@ -108,11 +130,11 @@ func writeEvent(job *engine.Job, event *utils.JSONMessage) error { return nil } -func (e *Events) writeCurrent(job *engine.Job, since, until int64) error { +func (e *Events) writeCurrent(job *engine.Job, since, until int64, eventFilters filters.Args) error { e.mu.RLock() for _, event := range e.events { if event.Time >= since && (event.Time <= until || until == 0) { - if err := writeEvent(job, event); err != nil { + if err := writeEvent(job, event, eventFilters); err != nil { e.mu.RUnlock() return err } diff --git a/graph/export.go b/graph/export.go index 86dc5a342a..7a8054010e 100644 --- a/graph/export.go +++ b/graph/export.go @@ -7,9 +7,9 @@ import ( "os" "path" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/pkg/parsers" ) @@ -30,24 +30,21 @@ func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status { defer os.RemoveAll(tempdir) rootRepoMap := map[string]Repository{} + addKey := func(name string, tag string, id string) { + log.Debugf("add key [%s:%s]", name, tag) + if repo, ok := rootRepoMap[name]; !ok { + rootRepoMap[name] = Repository{tag: id} + } else { + repo[tag] = id + } + } for _, name := range job.Args { log.Debugf("Serializing %s", name) rootRepo := s.Repositories[name] if rootRepo != nil { // this is a base repo name, like 'busybox' - for _, id := range rootRepo { - if _, ok := rootRepoMap[name]; !ok { - rootRepoMap[name] = rootRepo - } else { - log.Debugf("Duplicate key [%s]", name) - if rootRepoMap[name].Contains(rootRepo) { - log.Debugf("skipping, because it is present [%s:%q]", name, rootRepo) - continue - } - log.Debugf("updating [%s]: [%q] with [%q]", name, rootRepoMap[name], rootRepo) - rootRepoMap[name].Update(rootRepo) - } - + for tag, id := range rootRepo { + addKey(name, tag, id) if err := s.exportImage(job.Eng, id, tempdir); err != nil { return 
job.Error(err) } @@ -65,18 +62,7 @@ func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status { // check this length, because a lookup of a truncated has will not have a tag // and will not need to be added to this map if len(repoTag) > 0 { - if _, ok := rootRepoMap[repoName]; !ok { - rootRepoMap[repoName] = Repository{repoTag: img.ID} - } else { - log.Debugf("Duplicate key [%s]", repoName) - newRepo := Repository{repoTag: img.ID} - if rootRepoMap[repoName].Contains(newRepo) { - log.Debugf("skipping, because it is present [%s:%q]", repoName, newRepo) - continue - } - log.Debugf("updating [%s]: [%q] with [%q]", repoName, rootRepoMap[repoName], newRepo) - rootRepoMap[repoName].Update(newRepo) - } + addKey(repoName, repoTag, img.ID) } if err := s.exportImage(job.Eng, img.ID, tempdir); err != nil { return job.Error(err) diff --git a/graph/graph.go b/graph/graph.go index 00c0324ea8..720f6e6963 100644 --- a/graph/graph.go +++ b/graph/graph.go @@ -12,11 +12,11 @@ import ( "syscall" "time" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/dockerversion" "github.com/docker/docker/image" "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/pkg/truncindex" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" @@ -72,7 +72,7 @@ func (graph *Graph) restore() error { // FIXME: Implement error subclass instead of looking at the error text // Note: This is the way golang implements os.IsNotExists on Plan9 func (graph *Graph) IsNotExist(err error) bool { - return err != nil && (strings.Contains(err.Error(), "does not exist") || strings.Contains(err.Error(), "No such")) + return err != nil && (strings.Contains(strings.ToLower(err.Error()), "does not exist") || strings.Contains(strings.ToLower(err.Error()), "no such")) } // Exists returns true if an image is registered at the given id. 
@@ -132,14 +132,14 @@ func (graph *Graph) Create(layerData archive.ArchiveReader, containerID, contain img.ContainerConfig = *containerConfig } - if err := graph.Register(img, nil, layerData); err != nil { + if err := graph.Register(img, layerData); err != nil { return nil, err } return img, nil } // Register imports a pre-existing image into the graph. -func (graph *Graph) Register(img *image.Image, jsonData []byte, layerData archive.ArchiveReader) (err error) { +func (graph *Graph) Register(img *image.Image, layerData archive.ArchiveReader) (err error) { defer func() { // If any error occurs, remove the new dir from the driver. // Don't check for errors since the dir might not have been created. @@ -181,7 +181,7 @@ func (graph *Graph) Register(img *image.Image, jsonData []byte, layerData archiv } // Apply the diff/layer img.SetGraph(graph) - if err := image.StoreImage(img, jsonData, layerData, tmp); err != nil { + if err := image.StoreImage(img, layerData, tmp); err != nil { return err } // Commit diff --git a/graph/import.go b/graph/import.go index 36d0d3fe10..a8e8e04b5b 100644 --- a/graph/import.go +++ b/graph/import.go @@ -4,6 +4,7 @@ import ( "net/http" "net/url" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/utils" @@ -57,5 +58,12 @@ func (s *TagStore) CmdImport(job *engine.Job) engine.Status { } } job.Stdout.Write(sf.FormatStatus("", img.ID)) + logID := img.ID + if tag != "" { + logID += ":" + tag + } + if err = job.Eng.Job("log", "import", logID, "").Run(); err != nil { + log.Errorf("Error logging event 'import' for %s: %s", logID, err) + } return engine.StatusOK } diff --git a/graph/load.go b/graph/load.go index f27aca4a67..6ef219c077 100644 --- a/graph/load.go +++ b/graph/load.go @@ -1,3 +1,5 @@ +// +build linux + package graph import ( @@ -7,11 +9,11 @@ import ( "os" "path" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" 
"github.com/docker/docker/image" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/utils" ) @@ -124,7 +126,7 @@ func (s *TagStore) recursiveLoad(eng *engine.Engine, address, tmpImageDir string } } } - if err := s.graph.Register(img, imageJson, layer); err != nil { + if err := s.graph.Register(img, layer); err != nil { return err } } diff --git a/graph/load_unsupported.go b/graph/load_unsupported.go new file mode 100644 index 0000000000..164e9176a1 --- /dev/null +++ b/graph/load_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux + +package graph + +import ( + "github.com/docker/docker/engine" +) + +func (s *TagStore) CmdLoad(job *engine.Job) engine.Status { + return job.Errorf("CmdLoad is not supported on this platform") +} diff --git a/graph/pull.go b/graph/pull.go index 05d5ec7654..716a27c909 100644 --- a/graph/pull.go +++ b/graph/pull.go @@ -12,9 +12,9 @@ import ( "strings" "time" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" "github.com/docker/docker/image" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/registry" "github.com/docker/docker/utils" "github.com/docker/libtrust" @@ -137,6 +137,11 @@ func (s *TagStore) CmdPull(job *engine.Job) engine.Status { mirrors = s.mirrors } + logName := localName + if tag != "" { + logName += ":" + tag + } + if len(mirrors) == 0 && (isOfficial || endpoint.Version == registry.APIVersion2) { j := job.Eng.Job("trust_update_base") if err = j.Run(); err != nil { @@ -144,6 +149,9 @@ func (s *TagStore) CmdPull(job *engine.Job) engine.Status { } if err := s.pullV2Repository(job.Eng, r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel")); err == nil { + if err = job.Eng.Job("log", "pull", logName, "").Run(); err != nil { + log.Errorf("Error logging event 'pull' for %s: %s", logName, err) + } return engine.StatusOK } else if err != registry.ErrDoesNotExist { log.Errorf("Error from V2 
registry: %s", err) @@ -154,6 +162,10 @@ func (s *TagStore) CmdPull(job *engine.Job) engine.Status { return job.Error(err) } + if err = job.Eng.Job("log", "pull", logName, "").Run(); err != nil { + log.Errorf("Error logging event 'pull' for %s: %s", logName, err) + } + return engine.StatusOK } @@ -163,7 +175,7 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, localName, repoData, err := r.GetRepositoryData(remoteName) if err != nil { if strings.Contains(err.Error(), "HTTP code: 404") { - return fmt.Errorf("Error: image %s not found", remoteName) + return fmt.Errorf("Error: image %s:%s not found", remoteName, askedTag) } // Unexpected HTTP error return err @@ -392,7 +404,7 @@ func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint layers_downloaded = true defer layer.Close() - err = s.graph.Register(img, imgJSON, + err = s.graph.Register(img, utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading")) if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries { time.Sleep(time.Duration(j) * 500 * time.Millisecond) @@ -577,7 +589,7 @@ func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Wri defer d.tmpFile.Close() d.tmpFile.Seek(0, 0) if d.tmpFile != nil { - err = s.graph.Register(d.img, d.imgJSON, + err = s.graph.Register(d.img, utils.ProgressReader(d.tmpFile, int(d.length), out, sf, false, utils.TruncateID(d.img.ID), "Extracting")) if err != nil { return false, err diff --git a/graph/push.go b/graph/push.go index 165b580f2c..29fc4a066d 100644 --- a/graph/push.go +++ b/graph/push.go @@ -7,9 +7,9 @@ import ( "os" "path" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/registry" "github.com/docker/docker/utils" ) diff --git a/graph/service.go b/graph/service.go index 1be986f8d5..a27c9a8e38 100644 --- a/graph/service.go +++ 
b/graph/service.go @@ -4,9 +4,9 @@ import ( "fmt" "io" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" "github.com/docker/docker/image" - "github.com/docker/docker/pkg/log" ) func (s *TagStore) Install(eng *engine.Engine) error { @@ -74,7 +74,7 @@ func (s *TagStore) CmdSet(job *engine.Job) engine.Status { if err != nil { return job.Error(err) } - if err := s.graph.Register(img, imgJSON, layer); err != nil { + if err := s.graph.Register(img, layer); err != nil { return job.Error(err) } return engine.StatusOK @@ -150,6 +150,7 @@ func (s *TagStore) CmdLookup(job *engine.Job) engine.Status { out.Set("Os", image.OS) out.SetInt64("Size", image.Size) out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size) + out.Set("Checksum", image.Checksum) if _, err = out.WriteTo(job.Stdout); err != nil { return job.Error(err) } diff --git a/graph/tags.go b/graph/tags.go index 622d620941..5c3e533b2a 100644 --- a/graph/tags.go +++ b/graph/tags.go @@ -221,11 +221,11 @@ func (store *TagStore) Set(repoName, tag, imageName string, force bool) error { var repo Repository if r, exists := store.Repositories[repoName]; exists { repo = r + if old, exists := store.Repositories[repoName][tag]; exists && !force { + return fmt.Errorf("Conflict: Tag %s is already set to image %s, if you want to replace it, please use -f option", tag, old) + } } else { repo = make(map[string]string) - if old, exists := store.Repositories[repoName]; exists && !force { - return fmt.Errorf("Conflict: Tag %s:%s is already set to %s", repoName, tag, old) - } store.Repositories[repoName] = repo } repo[tag] = img.ID diff --git a/graph/tags_unit_test.go b/graph/tags_unit_test.go index bf94deb445..339fb51fc9 100644 --- a/graph/tags_unit_test.go +++ b/graph/tags_unit_test.go @@ -62,7 +62,7 @@ func mkTestTagStore(root string, t *testing.T) *TagStore { t.Fatal(err) } img := &image.Image{ID: testImageID} - if err := graph.Register(img, nil, archive); err != nil { + if err := graph.Register(img, 
archive); err != nil { t.Fatal(err) } if err := store.Set(testImageName, "", testImageID, false); err != nil { diff --git a/hack b/hack new file mode 120000 index 0000000000..e3f094ee63 --- /dev/null +++ b/hack @@ -0,0 +1 @@ +project \ No newline at end of file diff --git a/hack/ROADMAP.md b/hack/ROADMAP.md deleted file mode 100644 index d49664b7b3..0000000000 --- a/hack/ROADMAP.md +++ /dev/null @@ -1,41 +0,0 @@ -# Docker: what's next? - -This document is a high-level overview of where we want to take Docker next. -It is a curated selection of planned improvements which are either important, difficult, or both. - -For a more complete view of planned and requested improvements, see [the Github issues](https://github.com/docker/docker/issues). - -To suggest changes to the roadmap, including additions, please write the change as if it were already in effect, and make a pull request. - - -## Container wiring and service discovery - -In its current version, docker doesn’t make it very easy to manipulate multiple containers as a cohesive group (ie. orchestration), and it doesn’t make it seamless for containers to connect to each other as network services (ie. wiring). - -To achieve wiring and orchestration with docker today, you need to write glue scripts yourself, or use one several companion tools available, like Orchestra, Shipper, Deis, Pipeworks, etc. - -We want the Docker API to support orchestration and wiring natively, so that these tools can cleanly and seamlessly integrate into the Docker user experience, and remain interoperable with each other. - - -## Better integration with process supervisors - -For docker to be fully usable in production, it needs to cleanly integrate with the host machine’s process supervisor of choice. Whether it’s sysV-init, upstart, systemd, runit or supervisord, we want to make sure docker plays nice with your existing system. This will be a major focus of the 0.7 release. 
- - -## Plugin API - -We want Docker to run everywhere, and to integrate with every devops tool. Those are ambitious goals, and the only way to reach them is with the Docker community. For the community to participate fully, we need an API which allows Docker to be deeply and easily customized. - -We are working on a plugin API which will make Docker very, very customization-friendly. We believe it will facilitate the integrations listed above – and many more we didn’t even think about. - - -## Broader kernel support - -Our goal is to make Docker run everywhere, but currently Docker requires Linux version 3.8 or higher with cgroups support. If you’re deploying new machines for the purpose of running Docker, this is a fairly easy requirement to meet. However, if you’re adding Docker to an existing deployment, you may not have the flexibility to update and patch the kernel. - -Expanding Docker’s kernel support is a priority. This includes running on older kernel versions, specifically focusing on versions already popular in server deployments such as those used by RHEL and the OpenVZ stack. - - -## Cross-architecture support - -Our goal is to make Docker run everywhere. However currently Docker only runs on x86_64 systems. We plan on expanding architecture support, so that Docker containers can be created and used on more architectures. 
diff --git a/hack/make/binary b/hack/make/binary deleted file mode 100755 index b97069a856..0000000000 --- a/hack/make/binary +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -set -e - -DEST=$1 - -go build \ - -o "$DEST/docker-$VERSION" \ - "${BUILDFLAGS[@]}" \ - -ldflags " - $LDFLAGS - $LDFLAGS_STATIC_DOCKER - " \ - ./docker -echo "Created binary: $DEST/docker-$VERSION" -ln -sf "docker-$VERSION" "$DEST/docker" - -hash_files "$DEST/docker-$VERSION" diff --git a/image/image.go b/image/image.go index fabd897d29..8cd9aa3755 100644 --- a/image/image.go +++ b/image/image.go @@ -9,8 +9,9 @@ import ( "strconv" "time" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/tarsum" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" ) @@ -32,20 +33,25 @@ type Image struct { Config *runconfig.Config `json:"config,omitempty"` Architecture string `json:"architecture,omitempty"` OS string `json:"os,omitempty"` + Checksum string `json:"checksum"` Size int64 graph Graph } func LoadImage(root string) (*Image, error) { - // Load the json data - jsonData, err := ioutil.ReadFile(jsonPath(root)) + // Open the JSON file to decode by streaming + jsonSource, err := os.Open(jsonPath(root)) if err != nil { return nil, err } - img := &Image{} + defer jsonSource.Close() - if err := json.Unmarshal(jsonData, img); err != nil { + img := &Image{} + dec := json.NewDecoder(jsonSource) + + // Decode the JSON data + if err := dec.Decode(img); err != nil { return nil, err } if err := utils.ValidateID(img.ID); err != nil { @@ -60,7 +66,10 @@ func LoadImage(root string) (*Image, error) { // because a layer size of 0 (zero) is valid img.Size = -1 } else { - size, err := strconv.Atoi(string(buf)) + // Using Atoi here instead would temporarily convert the size to a machine + // dependent integer type, which causes images larger than 2^31 bytes to + // display negative sizes on 32-bit machines: + size, err 
:= strconv.ParseInt(string(buf), 10, 64) if err != nil { return nil, err } @@ -70,19 +79,43 @@ func LoadImage(root string) (*Image, error) { return img, nil } -func StoreImage(img *Image, jsonData []byte, layerData archive.ArchiveReader, root string) error { +// StoreImage stores file system layer data for the given image to the +// image's registered storage driver. Image metadata is stored in a file +// at the specified root directory. This function also computes the TarSum +// of `layerData` (currently using tarsum.dev). +func StoreImage(img *Image, layerData archive.ArchiveReader, root string) error { // Store the layer var ( - size int64 - err error - driver = img.graph.Driver() + size int64 + err error + driver = img.graph.Driver() + layerTarSum tarsum.TarSum ) // If layerData is not nil, unpack it into the new layer if layerData != nil { - if size, err = driver.ApplyDiff(img.ID, img.Parent, layerData); err != nil { + layerDataDecompressed, err := archive.DecompressStream(layerData) + if err != nil { return err } + + defer layerDataDecompressed.Close() + + if layerTarSum, err = tarsum.NewTarSum(layerDataDecompressed, true, tarsum.VersionDev); err != nil { + return err + } + + if size, err = driver.ApplyDiff(img.ID, img.Parent, layerTarSum); err != nil { + return err + } + + checksum := layerTarSum.Sum(nil) + + if img.Checksum != "" && img.Checksum != checksum { + log.Warnf("image layer checksum mismatch: computed %q, expected %q", checksum, img.Checksum) + } + + img.Checksum = checksum } img.Size = size @@ -90,20 +123,14 @@ func StoreImage(img *Image, jsonData []byte, layerData archive.ArchiveReader, ro return err } - // If raw json is provided, then use it - if jsonData != nil { - if err := ioutil.WriteFile(jsonPath(root), jsonData, 0600); err != nil { - return err - } - } else { - if jsonData, err = json.Marshal(img); err != nil { - return err - } - if err := ioutil.WriteFile(jsonPath(root), jsonData, 0600); err != nil { - return err - } + f, err := 
os.OpenFile(jsonPath(root), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600)) + if err != nil { + return err } - return nil + + defer f.Close() + + return json.NewEncoder(f).Encode(img) } func (img *Image) SetGraph(graph Graph) { diff --git a/integration-cli/build_tests/TestCopy/DirContentToExistDir/Dockerfile b/integration-cli/build_tests/TestCopy/DirContentToExistDir/Dockerfile deleted file mode 100644 index d63e8538bb..0000000000 --- a/integration-cli/build_tests/TestCopy/DirContentToExistDir/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM busybox -RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd -RUN echo 'dockerio:x:1001:' >> /etc/group -RUN mkdir /exists -RUN touch /exists/exists_file -RUN chown -R dockerio.dockerio /exists -COPY test_dir/ /exists/ -RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] -RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] -RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] diff --git a/integration-cli/build_tests/TestCopy/DirContentToExistDir/test_dir/test_file b/integration-cli/build_tests/TestCopy/DirContentToExistDir/test_dir/test_file deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/integration-cli/build_tests/TestCopy/DirContentToRoot/Dockerfile b/integration-cli/build_tests/TestCopy/DirContentToRoot/Dockerfile deleted file mode 100644 index 45df77e563..0000000000 --- a/integration-cli/build_tests/TestCopy/DirContentToRoot/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -FROM busybox -RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd -RUN echo 'dockerio:x:1001:' >> /etc/group -RUN touch /exists -RUN chown dockerio.dockerio exists -COPY test_dir / -RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/build_tests/TestCopy/DirContentToRoot/test_dir/test_file 
b/integration-cli/build_tests/TestCopy/DirContentToRoot/test_dir/test_file deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/integration-cli/build_tests/TestCopy/DisallowRemote/Dockerfile b/integration-cli/build_tests/TestCopy/DisallowRemote/Dockerfile deleted file mode 100644 index e6bc0c0dd2..0000000000 --- a/integration-cli/build_tests/TestCopy/DisallowRemote/Dockerfile +++ /dev/null @@ -1,2 +0,0 @@ -FROM busybox -COPY https://index.docker.io/robots.txt / diff --git a/integration-cli/build_tests/TestCopy/EtcToRoot/Dockerfile b/integration-cli/build_tests/TestCopy/EtcToRoot/Dockerfile deleted file mode 100644 index b4f319f80f..0000000000 --- a/integration-cli/build_tests/TestCopy/EtcToRoot/Dockerfile +++ /dev/null @@ -1,2 +0,0 @@ -FROM scratch -COPY . / diff --git a/integration-cli/build_tests/TestCopy/MultipleFiles/Dockerfile b/integration-cli/build_tests/TestCopy/MultipleFiles/Dockerfile deleted file mode 100644 index 4143e65962..0000000000 --- a/integration-cli/build_tests/TestCopy/MultipleFiles/Dockerfile +++ /dev/null @@ -1,17 +0,0 @@ -FROM busybox -RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd -RUN echo 'dockerio:x:1001:' >> /etc/group -RUN mkdir /exists -RUN touch /exists/exists_file -RUN chown -R dockerio.dockerio /exists -COPY test_file1 test_file2 /exists/ -ADD test_file3 test_file4 https://docker.com/robots.txt /exists/ -RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] -RUN [ $(ls -l /exists/test_file1 | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /exists/test_file2 | awk '{print $3":"$4}') = 'root:root' ] - -RUN [ $(ls -l /exists/test_file3 | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /exists/test_file4 | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /exists/robots.txt | awk '{print $3":"$4}') = 'root:root' ] - -RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git 
a/integration-cli/build_tests/TestCopy/MultipleFiles/test_file1 b/integration-cli/build_tests/TestCopy/MultipleFiles/test_file1 deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/integration-cli/build_tests/TestCopy/MultipleFiles/test_file2 b/integration-cli/build_tests/TestCopy/MultipleFiles/test_file2 deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/integration-cli/build_tests/TestCopy/MultipleFiles/test_file3 b/integration-cli/build_tests/TestCopy/MultipleFiles/test_file3 deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/integration-cli/build_tests/TestCopy/MultipleFiles/test_file4 b/integration-cli/build_tests/TestCopy/MultipleFiles/test_file4 deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/integration-cli/build_tests/TestCopy/MultipleFilesToFile/Dockerfile b/integration-cli/build_tests/TestCopy/MultipleFilesToFile/Dockerfile deleted file mode 100644 index 520d356c72..0000000000 --- a/integration-cli/build_tests/TestCopy/MultipleFilesToFile/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM busybox -RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd -RUN echo 'dockerio:x:1001:' >> /etc/group -RUN mkdir /exists -RUN chown -R dockerio.dockerio /exists -COPY test_file1 /exists/ -ADD test_file2 test_file3 /exists/test_file1 diff --git a/integration-cli/build_tests/TestCopy/MultipleFilesToFile/test_file1 b/integration-cli/build_tests/TestCopy/MultipleFilesToFile/test_file1 deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/integration-cli/build_tests/TestCopy/MultipleFilesToFile/test_file2 b/integration-cli/build_tests/TestCopy/MultipleFilesToFile/test_file2 deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/integration-cli/build_tests/TestCopy/MultipleFilesToFile/test_file3 b/integration-cli/build_tests/TestCopy/MultipleFilesToFile/test_file3 deleted file mode 100644 index e69de29bb2..0000000000 diff --git 
a/integration-cli/build_tests/TestCopy/SingleFileToExistDir/Dockerfile b/integration-cli/build_tests/TestCopy/SingleFileToExistDir/Dockerfile deleted file mode 100644 index 3edfe661d4..0000000000 --- a/integration-cli/build_tests/TestCopy/SingleFileToExistDir/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM busybox -RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd -RUN echo 'dockerio:x:1001:' >> /etc/group -RUN mkdir /exists -RUN touch /exists/exists_file -RUN chown -R dockerio.dockerio /exists -COPY test_file /exists/ -RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] -RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/build_tests/TestCopy/SingleFileToExistDir/test_file b/integration-cli/build_tests/TestCopy/SingleFileToExistDir/test_file deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/integration-cli/build_tests/TestCopy/SingleFileToNonExistDir/Dockerfile b/integration-cli/build_tests/TestCopy/SingleFileToNonExistDir/Dockerfile deleted file mode 100644 index 33b65a62c7..0000000000 --- a/integration-cli/build_tests/TestCopy/SingleFileToNonExistDir/Dockerfile +++ /dev/null @@ -1,9 +0,0 @@ -FROM busybox -RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd -RUN echo 'dockerio:x:1001:' >> /etc/group -RUN touch /exists -RUN chown dockerio.dockerio /exists -COPY test_file /test_dir/ -RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/build_tests/TestCopy/SingleFileToNonExistDir/test_file b/integration-cli/build_tests/TestCopy/SingleFileToNonExistDir/test_file deleted file mode 100644 index e69de29bb2..0000000000 diff --git 
a/integration-cli/build_tests/TestCopy/SingleFileToRoot/Dockerfile b/integration-cli/build_tests/TestCopy/SingleFileToRoot/Dockerfile deleted file mode 100644 index 38fd09026d..0000000000 --- a/integration-cli/build_tests/TestCopy/SingleFileToRoot/Dockerfile +++ /dev/null @@ -1,9 +0,0 @@ -FROM busybox -RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd -RUN echo 'dockerio:x:1001:' >> /etc/group -RUN touch /exists -RUN chown dockerio.dockerio /exists -COPY test_file / -RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /test_file | awk '{print $1}') = '-rw-r--r--' ] -RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/build_tests/TestCopy/SingleFileToWorkdir/Dockerfile b/integration-cli/build_tests/TestCopy/SingleFileToWorkdir/Dockerfile deleted file mode 100644 index ba2d797e35..0000000000 --- a/integration-cli/build_tests/TestCopy/SingleFileToWorkdir/Dockerfile +++ /dev/null @@ -1,2 +0,0 @@ -FROM busybox -COPY test_file . 
diff --git a/integration-cli/build_tests/TestCopy/WholeDirToRoot/Dockerfile b/integration-cli/build_tests/TestCopy/WholeDirToRoot/Dockerfile deleted file mode 100644 index 91be29fe7a..0000000000 --- a/integration-cli/build_tests/TestCopy/WholeDirToRoot/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM busybox -RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd -RUN echo 'dockerio:x:1001:' >> /etc/group -RUN touch /exists -RUN chown dockerio.dockerio exists -COPY test_dir /test_dir -RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] -RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '-rw-r--r--' ] -RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/docker_api_containers_test.go b/integration-cli/docker_api_containers_test.go new file mode 100644 index 0000000000..f02f619c44 --- /dev/null +++ b/integration-cli/docker_api_containers_test.go @@ -0,0 +1,122 @@ +package main + +import ( + "bytes" + "encoding/json" + "io" + "os/exec" + "testing" + + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" +) + +func TestContainerApiGetAll(t *testing.T) { + startCount, err := getContainerCount() + if err != nil { + t.Fatalf("Cannot query container count: %v", err) + } + + name := "getall" + runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "true") + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatalf("Error on container creation: %v, output: %q", err, out) + } + + body, err := sockRequest("GET", "/containers/json?all=1", nil) + if err != nil { + t.Fatalf("GET all containers sockRequest failed: %v", err) + } + + var inspectJSON []struct { + Names []string + } + if err = json.Unmarshal(body, &inspectJSON); err != nil { + t.Fatalf("unable to unmarshal response body: %v", err) + } + + 
if len(inspectJSON) != startCount+1 { + t.Fatalf("Expected %d container(s), %d found (started with: %d)", startCount+1, len(inspectJSON), startCount) + } + + if actual := inspectJSON[0].Names[0]; actual != "/"+name { + t.Fatalf("Container Name mismatch. Expected: %q, received: %q\n", "/"+name, actual) + } + + deleteAllContainers() + + logDone("container REST API - check GET json/all=1") +} + +func TestContainerApiGetExport(t *testing.T) { + name := "exportcontainer" + runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "touch", "/test") + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatalf("Error on container creation: %v, output: %q", err, out) + } + + body, err := sockRequest("GET", "/containers/"+name+"/export", nil) + if err != nil { + t.Fatalf("GET containers/export sockRequest failed: %v", err) + } + + found := false + for tarReader := tar.NewReader(bytes.NewReader(body)); ; { + h, err := tarReader.Next() + if err != nil { + if err == io.EOF { + break + } + t.Fatal(err) + } + if h.Name == "test" { + found = true + break + } + } + + if !found { + t.Fatalf("The created test file has not been found in the exported image") + } + deleteAllContainers() + + logDone("container REST API - check GET containers/export") +} + +func TestContainerApiGetChanges(t *testing.T) { + name := "changescontainer" + runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "rm", "/etc/passwd") + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatalf("Error on container creation: %v, output: %q", err, out) + } + + body, err := sockRequest("GET", "/containers/"+name+"/changes", nil) + if err != nil { + t.Fatalf("GET containers/changes sockRequest failed: %v", err) + } + + changes := []struct { + Kind int + Path string + }{} + if err = json.Unmarshal(body, &changes); err != nil { + t.Fatalf("unable to unmarshal response body: %v", err) + } + + // Check the changelog for removal of /etc/passwd + success := false + 
for _, elem := range changes { + if elem.Path == "/etc/passwd" && elem.Kind == 2 { + success = true + } + } + if !success { + t.Fatalf("/etc/passwd has been removed but is not present in the diff") + } + + deleteAllContainers() + + logDone("container REST API - check GET containers/changes") +} diff --git a/integration-cli/docker_api_exec_test.go b/integration-cli/docker_api_exec_test.go new file mode 100644 index 0000000000..df7122dd75 --- /dev/null +++ b/integration-cli/docker_api_exec_test.go @@ -0,0 +1,25 @@ +package main + +import ( + "bytes" + "fmt" + "os/exec" + "testing" +) + +// Regression test for #9414 +func TestExecApiCreateNoCmd(t *testing.T) { + defer deleteAllContainers() + name := "exec_test" + runCmd := exec.Command(dockerBinary, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") + if out, _, err := runCommandWithOutput(runCmd); err != nil { + t.Fatal(out, err) + } + + body, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": nil}) + if err == nil || !bytes.Contains(body, []byte("No exec command specified")) { + t.Fatalf("Expected error when creating exec command with no Cmd specified: %q", err) + } + + logDone("exec create API - returns error when missing Cmd") +} diff --git a/integration-cli/docker_api_inspect_test.go b/integration-cli/docker_api_inspect_test.go index 42258d7aae..1ff0312581 100644 --- a/integration-cli/docker_api_inspect_test.go +++ b/integration-cli/docker_api_inspect_test.go @@ -2,7 +2,6 @@ package main import ( "encoding/json" - "fmt" "os/exec" "testing" ) @@ -10,7 +9,9 @@ import ( func TestInspectApiContainerResponse(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, fmt.Sprintf("failed to create a container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to create a container: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) @@ -23,7 +24,7 @@ 
func TestInspectApiContainerResponse(t *testing.T) { if testVersion != "latest" { endpoint = "/" + testVersion + endpoint } - body, err := sockRequest("GET", endpoint) + body, err := sockRequest("GET", endpoint, nil) if err != nil { t.Fatalf("sockRequest failed for %s version: %v", testVersion, err) } diff --git a/integration-cli/docker_api_resize_test.go b/integration-cli/docker_api_resize_test.go new file mode 100644 index 0000000000..6ba95c3052 --- /dev/null +++ b/integration-cli/docker_api_resize_test.go @@ -0,0 +1,53 @@ +package main + +import ( + "os/exec" + "strings" + "testing" +) + +func TestResizeApiResponse(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatalf(out, err) + } + defer deleteAllContainers() + cleanedContainerID := stripTrailingCharacters(out) + + endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40" + _, err = sockRequest("POST", endpoint, nil) + if err != nil { + t.Fatalf("resize Request failed %v", err) + } + + logDone("container resize - when started") +} + +func TestResizeApiResponseWhenContainerNotStarted(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatalf(out, err) + } + defer deleteAllContainers() + cleanedContainerID := stripTrailingCharacters(out) + + // make sure the exited cintainer is not running + runCmd = exec.Command(dockerBinary, "wait", cleanedContainerID) + out, _, err = runCommandWithOutput(runCmd) + if err != nil { + t.Fatalf(out, err) + } + + endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40" + body, err := sockRequest("POST", endpoint, nil) + if err == nil { + t.Fatalf("resize should fail when container is not started") + } + if !strings.Contains(string(body), "Cannot resize container") && !strings.Contains(string(body), cleanedContainerID) { + t.Fatalf("resize should fail 
with message 'Cannot resize container' but instead received %s", string(body)) + } + + logDone("container resize - when not started should not resize") +} diff --git a/integration-cli/docker_cli_attach_test.go b/integration-cli/docker_cli_attach_test.go index 510f02ab18..0530d3896e 100644 --- a/integration-cli/docker_cli_attach_test.go +++ b/integration-cli/docker_cli_attach_test.go @@ -50,7 +50,7 @@ func TestAttachMultipleAndRestart(t *testing.T) { t.Fatal(err) } - if _, err := startCommand(c); err != nil { + if err := c.Start(); err != nil { t.Fatal(err) } @@ -87,3 +87,50 @@ func TestAttachMultipleAndRestart(t *testing.T) { logDone("attach - multiple attach") } + +func TestAttachTtyWithoutStdin(t *testing.T) { + defer deleteAllContainers() + + cmd := exec.Command(dockerBinary, "run", "-d", "-ti", "busybox") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatalf("failed to start container: %v (%v)", out, err) + } + + id := strings.TrimSpace(out) + if err := waitRun(id); err != nil { + t.Fatal(err) + } + + defer func() { + cmd := exec.Command(dockerBinary, "kill", id) + if out, _, err := runCommandWithOutput(cmd); err != nil { + t.Fatalf("failed to kill container: %v (%v)", out, err) + } + }() + + done := make(chan struct{}) + go func() { + defer close(done) + + cmd := exec.Command(dockerBinary, "attach", id) + if _, err := cmd.StdinPipe(); err != nil { + t.Fatal(err) + } + + expected := "cannot enable tty mode" + if out, _, err := runCommandWithOutput(cmd); err == nil { + t.Fatal("attach should have failed") + } else if !strings.Contains(out, expected) { + t.Fatal("attach failed with error %q: expected %q", out, expected) + } + }() + + select { + case <-done: + case <-time.After(attachWait): + t.Fatal("attach is running but should have failed") + } + + logDone("attach - forbid piped stdin to tty enabled container") +} diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index 408b801615..0fd5b1363d 
100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -7,9 +7,12 @@ import ( "io/ioutil" "os" "os/exec" + "path" "path/filepath" + "reflect" "regexp" "strings" + "syscall" "testing" "time" @@ -37,6 +40,7 @@ func TestBuildShCmdJSONEntrypoint(t *testing.T) { exec.Command( dockerBinary, "run", + "--rm", name)) if err != nil { @@ -176,6 +180,7 @@ func TestBuildEnvironmentReplacementAddCopy(t *testing.T) { if err != nil { t.Fatal(err) } + defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) @@ -218,7 +223,7 @@ func TestBuildEnvironmentReplacementEnv(t *testing.T) { if parts[0] == "bar" { found = true if parts[1] != "foo" { - t.Fatal("Could not find replaced var for env `bar`: got %q instead of `foo`", parts[1]) + t.Fatalf("Could not find replaced var for env `bar`: got %q instead of `foo`", parts[1]) } } } @@ -261,6 +266,8 @@ func TestBuildHandleEscapes(t *testing.T) { t.Fatal("Could not find volume bar set from env foo in volumes table") } + deleteImages(name) + _, err = buildImage(name, ` FROM scratch @@ -285,6 +292,8 @@ func TestBuildHandleEscapes(t *testing.T) { t.Fatal("Could not find volume ${FOO} set from env foo in volumes table") } + deleteImages(name) + // this test in particular provides *7* backslashes and expects 6 to come back. // Like above, the first escape is swallowed and the rest are treated as // literals, this one is just less obvious because of all the character noise. 
@@ -353,8 +362,8 @@ func TestBuildOnBuildLowercase(t *testing.T) { func TestBuildEnvEscapes(t *testing.T) { name := "testbuildenvescapes" - defer deleteAllContainers() defer deleteImages(name) + defer deleteAllContainers() _, err := buildImage(name, ` FROM busybox @@ -378,8 +387,8 @@ func TestBuildEnvEscapes(t *testing.T) { func TestBuildEnvOverwrite(t *testing.T) { name := "testbuildenvoverwrite" - defer deleteAllContainers() defer deleteImages(name) + defer deleteAllContainers() _, err := buildImage(name, ` @@ -408,10 +417,15 @@ func TestBuildEnvOverwrite(t *testing.T) { func TestBuildOnBuildForbiddenMaintainerInSourceImage(t *testing.T) { name := "testbuildonbuildforbiddenmaintainerinsourceimage" + defer deleteImages("onbuild") defer deleteImages(name) + defer deleteAllContainers() + createCmd := exec.Command(dockerBinary, "create", "busybox", "true") out, _, _, err := runCommandWithStdoutStderr(createCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } cleanedContainerID := stripTrailingCharacters(out) @@ -437,10 +451,15 @@ func TestBuildOnBuildForbiddenMaintainerInSourceImage(t *testing.T) { func TestBuildOnBuildForbiddenFromInSourceImage(t *testing.T) { name := "testbuildonbuildforbiddenfrominsourceimage" + defer deleteImages("onbuild") defer deleteImages(name) + defer deleteAllContainers() + createCmd := exec.Command(dockerBinary, "create", "busybox", "true") out, _, _, err := runCommandWithStdoutStderr(createCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } cleanedContainerID := stripTrailingCharacters(out) @@ -466,10 +485,15 @@ func TestBuildOnBuildForbiddenFromInSourceImage(t *testing.T) { func TestBuildOnBuildForbiddenChainedInSourceImage(t *testing.T) { name := "testbuildonbuildforbiddenchainedinsourceimage" + defer deleteImages("onbuild") defer deleteImages(name) + defer deleteAllContainers() + createCmd := exec.Command(dockerBinary, "create", "busybox", "true") out, _, _, err := 
runCommandWithStdoutStderr(createCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } cleanedContainerID := stripTrailingCharacters(out) @@ -497,9 +521,9 @@ func TestBuildOnBuildCmdEntrypointJSON(t *testing.T) { name1 := "onbuildcmd" name2 := "onbuildgenerated" - defer deleteAllContainers() defer deleteImages(name2) defer deleteImages(name1) + defer deleteAllContainers() _, err := buildImage(name1, ` FROM busybox @@ -534,9 +558,9 @@ func TestBuildOnBuildEntrypointJSON(t *testing.T) { name1 := "onbuildcmd" name2 := "onbuildgenerated" - defer deleteAllContainers() defer deleteImages(name2) defer deleteImages(name1) + defer deleteAllContainers() _, err := buildImage(name1, ` FROM busybox @@ -582,6 +606,10 @@ func TestBuildCacheADD(t *testing.T) { true); err != nil { t.Fatal(err) } + if err != nil { + t.Fatal(err) + } + deleteImages(name) _, out, err := buildImageWithOut(name, fmt.Sprintf(`FROM scratch ADD %s/index.html /`, server.URL), @@ -606,6 +634,8 @@ func TestBuildSixtySteps(t *testing.T) { if err != nil { t.Fatal(err) } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } @@ -630,6 +660,8 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, if err != nil { t.Fatal(err) } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } @@ -648,6 +680,8 @@ ADD test_file .`, if err != nil { t.Fatal(err) } + defer ctx.Close() + done := make(chan struct{}) go func() { if _, err := buildImageFromContext(name, ctx, true); err != nil { @@ -682,6 +716,8 @@ RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' if err != nil { t.Fatal(err) } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } @@ -689,16 +725,40 @@ RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' } func TestBuildCopyAddMultipleFiles(t *testing.T) { - buildDirectory := 
filepath.Join(workingDirectory, "build_tests", "TestCopy") - out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testaddimg", "MultipleFiles") - errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + name := "testcopymultiplefilestofile" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_file1 test_file2 /exists/ +ADD test_file3 test_file4 https://docker.com/robots.txt /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file1 | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/test_file2 | awk '{print $3":"$4}') = 'root:root' ] - if err != nil || exitCode != 0 { - t.Fatal("failed to build the image") +RUN [ $(ls -l /exists/test_file3 | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/test_file4 | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/robots.txt | awk '{print $3":"$4}') = 'root:root' ] + +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +`, + map[string]string{ + "test_file1": "test1", + "test_file2": "test2", + "test_file3": "test3", + "test_file4": "test4", + }) + defer ctx.Close() + if err != nil { + t.Fatal(err) } - deleteImages("testaddimg") - + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } logDone("build - mulitple file copy/add tests") } @@ -879,8 +939,8 @@ func TestBuildCopyWildcardCache(t *testing.T) { logDone("build - copy wild card cache") } -func TestBuildAddSingleFileToNonExistDir(t *testing.T) { - name := "testaddsinglefiletononexistdir" +func TestBuildAddSingleFileToNonExistingDir(t *testing.T) { + name := "testaddsinglefiletononexistingdir" defer deleteImages(name) ctx, err := fakeContext(`FROM busybox 
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd @@ -897,9 +957,13 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, if err != nil { t.Fatal(err) } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } + + logDone("build - add single file to non-existing dir") } func TestBuildAddDirContentToRoot(t *testing.T) { @@ -919,14 +983,16 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, if err != nil { t.Fatal(err) } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } logDone("build - add directory contents to root") } -func TestBuildAddDirContentToExistDir(t *testing.T) { - name := "testadddircontenttoexistdir" +func TestBuildAddDirContentToExistingDir(t *testing.T) { + name := "testadddircontenttoexistingdir" defer deleteImages(name) ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd @@ -944,6 +1010,8 @@ RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`, if err != nil { t.Fatal(err) } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } @@ -970,6 +1038,8 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, if err != nil { t.Fatal(err) } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } @@ -988,182 +1058,255 @@ ADD . 
/`, if err != nil { t.Fatal(err) } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } logDone("build - add etc directory to root") } -func TestBuildCopySingleFileToRoot(t *testing.T) { - testDirName := "SingleFileToRoot" - sourceDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy", testDirName) - buildDirectory, err := ioutil.TempDir("", "test-build-add") - defer os.RemoveAll(buildDirectory) - - err = copyWithCP(sourceDirectory, buildDirectory) - if err != nil { - t.Fatalf("failed to copy files to temporary directory: %s", err) - } - - buildDirectory = filepath.Join(buildDirectory, testDirName) - f, err := os.OpenFile(filepath.Join(buildDirectory, "test_file"), os.O_CREATE, 0644) +// Testing #9401 +func TestBuildAddPreservesFilesSpecialBits(t *testing.T) { + name := "testaddpreservesfilesspecialbits" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +ADD suidbin /usr/bin/suidbin +RUN chmod 4755 /usr/bin/suidbin +RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ] +ADD ./data/ / +RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ]`, + map[string]string{ + "suidbin": "suidbin", + "/data/usr/test_file": "test1", + }) if err != nil { t.Fatal(err) } - f.Close() - out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", ".") - errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + defer ctx.Close() - if err != nil || exitCode != 0 { - t.Fatal("failed to build the image") + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) } + logDone("build - add preserves files special bits") +} - deleteImages("testcopyimg") +func TestBuildCopySingleFileToRoot(t *testing.T) { + name := "testcopysinglefiletoroot" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN 
touch /exists +RUN chown dockerio.dockerio /exists +COPY test_file / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_file | awk '{print $1}') = '-rw-r--r--' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + t.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } logDone("build - copy single file to root") } // Issue #3960: "ADD src ." hangs - adapted for COPY func TestBuildCopySingleFileToWorkdir(t *testing.T) { - testDirName := "SingleFileToWorkdir" - sourceDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy", testDirName) - buildDirectory, err := ioutil.TempDir("", "test-build-add") - defer os.RemoveAll(buildDirectory) - - err = copyWithCP(sourceDirectory, buildDirectory) - if err != nil { - t.Fatalf("failed to copy files to temporary directory: %s", err) - } - - buildDirectory = filepath.Join(buildDirectory, testDirName) - f, err := os.OpenFile(filepath.Join(buildDirectory, "test_file"), os.O_CREATE, 0644) + name := "testcopysinglefiletoworkdir" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +COPY test_file .`, + map[string]string{ + "test_file": "test1", + }) if err != nil { t.Fatal(err) } - f.Close() - _, exitCode, err := dockerCmdInDirWithTimeout(5*time.Second, buildDirectory, "build", "-t", "testcopyimg", ".") - if err != nil || exitCode != 0 { - t.Fatalf("build failed: %s", err) + defer ctx.Close() + + done := make(chan struct{}) + go func() { + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + close(done) + }() + select { + case <-time.After(5 * time.Second): + t.Fatal("Build with adding to workdir timed out") + case <-done: } - - deleteImages("testcopyimg") - logDone("build - copy single file to workdir") } func TestBuildCopySingleFileToExistDir(t *testing.T) { - buildDirectory := 
filepath.Join(workingDirectory, "build_tests", "TestCopy") - out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "SingleFileToExistDir") - errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) - - if err != nil || exitCode != 0 { - t.Fatal("failed to build the image") + name := "testcopysinglefiletoexistdir" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_file /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + t.Fatal(err) } + defer ctx.Close() - deleteImages("testcopyimg") - + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } logDone("build - copy single file to existing dir") } func TestBuildCopySingleFileToNonExistDir(t *testing.T) { - buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") - out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "SingleFileToNonExistDir") - errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) - - if err != nil || exitCode != 0 { - t.Fatal("failed to build the image") + name := "testcopysinglefiletononexistdir" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +COPY test_file /test_dir/ +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk 
'{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + t.Fatal(err) } + defer ctx.Close() - deleteImages("testcopyimg") - + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } logDone("build - copy single file to non-existing dir") } func TestBuildCopyDirContentToRoot(t *testing.T) { - buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") - out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "DirContentToRoot") - errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) - - if err != nil || exitCode != 0 { - t.Fatal("failed to build the image") + name := "testcopydircontenttoroot" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +COPY test_dir / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + t.Fatal(err) } + defer ctx.Close() - deleteImages("testcopyimg") - + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } logDone("build - copy directory contents to root") } func TestBuildCopyDirContentToExistDir(t *testing.T) { - buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") - out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "DirContentToExistDir") - errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) - - if err != nil || exitCode != 0 { - t.Fatal("failed to build the image") + name := "testcopydircontenttoexistdir" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox 
+RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_dir/ /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`, + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + t.Fatal(err) } + defer ctx.Close() - deleteImages("testcopyimg") - + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } logDone("build - copy directory contents to existing dir") } func TestBuildCopyWholeDirToRoot(t *testing.T) { - testDirName := "WholeDirToRoot" - sourceDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy", testDirName) - buildDirectory, err := ioutil.TempDir("", "test-build-add") - defer os.RemoveAll(buildDirectory) - - err = copyWithCP(sourceDirectory, buildDirectory) - if err != nil { - t.Fatalf("failed to copy files to temporary directory: %s", err) - } - - buildDirectory = filepath.Join(buildDirectory, testDirName) - testDir := filepath.Join(buildDirectory, "test_dir") - if err := os.MkdirAll(testDir, 0755); err != nil { - t.Fatal(err) - } - f, err := os.OpenFile(filepath.Join(testDir, "test_file"), os.O_CREATE, 0644) + name := "testcopywholedirtoroot" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +COPY test_dir /test_dir +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk 
'{print $1}') = '-rw-r--r--' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_dir/test_file": "test1", + }) if err != nil { t.Fatal(err) } - f.Close() - out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", ".") - errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + defer ctx.Close() - if err != nil || exitCode != 0 { - t.Fatal("failed to build the image") + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) } - - deleteImages("testcopyimg") - logDone("build - copy whole directory to root") } func TestBuildCopyEtcToRoot(t *testing.T) { - buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") - out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "EtcToRoot") - errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) - - if err != nil || exitCode != 0 { - t.Fatal("failed to build the image") + name := "testcopyetctoroot" + defer deleteImages(name) + ctx, err := fakeContext(`FROM scratch +COPY . 
/`, + map[string]string{ + "etc/test_file": "test1", + }) + if err != nil { + t.Fatal(err) } + defer ctx.Close() - deleteImages("testcopyimg") + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } logDone("build - copy etc directory to root") } func TestBuildCopyDisallowRemote(t *testing.T) { - buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") - buildCmd := exec.Command(dockerBinary, "build", "-t", "testcopyimg", "DisallowRemote") - buildCmd.Dir = buildDirectory - out, exitCode, err := runCommandWithOutput(buildCmd) - - if err == nil || exitCode == 0 { - t.Fatalf("building the image should've failed; output: %s", out) + name := "testcopydisallowremote" + defer deleteImages(name) + _, out, err := buildImageWithOut(name, `FROM scratch +COPY https://index.docker.io/robots.txt /`, + true) + if err == nil || !strings.Contains(out, "Source can't be a URL for COPY") { + t.Fatalf("Error should be about disallowed remote source, got err: %s, out: %q", err, out) } - - deleteImages("testcopyimg") logDone("build - copy - disallow copy from remote") } @@ -1308,14 +1451,16 @@ func TestBuildWithInaccessibleFilesInContext(t *testing.T) { // This is used to ensure we detect inaccessible files early during build in the cli client pathToFileWithoutReadAccess := filepath.Join(ctx.Dir, "fileWithoutReadAccess") - err = os.Chown(pathToFileWithoutReadAccess, 0, 0) - errorOut(err, t, fmt.Sprintf("failed to chown file to root: %s", err)) - err = os.Chmod(pathToFileWithoutReadAccess, 0700) - errorOut(err, t, fmt.Sprintf("failed to chmod file to 700: %s", err)) + if err = os.Chown(pathToFileWithoutReadAccess, 0, 0); err != nil { + t.Fatalf("failed to chown file to root: %s", err) + } + if err = os.Chmod(pathToFileWithoutReadAccess, 0700); err != nil { + t.Fatalf("failed to chmod file to 700: %s", err) + } buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) buildCmd.Dir = 
ctx.Dir - out, exitCode, err := runCommandWithOutput(buildCmd) - if err == nil || exitCode == 0 { + out, _, err := runCommandWithOutput(buildCmd) + if err == nil { t.Fatalf("build should have failed: %s %s", err, out) } @@ -1340,17 +1485,20 @@ func TestBuildWithInaccessibleFilesInContext(t *testing.T) { pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") - err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0) - errorOut(err, t, fmt.Sprintf("failed to chown directory to root: %s", err)) - err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444) - errorOut(err, t, fmt.Sprintf("failed to chmod directory to 755: %s", err)) - err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700) - errorOut(err, t, fmt.Sprintf("failed to chmod file to 444: %s", err)) + if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { + t.Fatalf("failed to chown directory to root: %s", err) + } + if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { + t.Fatalf("failed to chmod directory to 755: %s", err) + } + if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { + t.Fatalf("failed to chmod file to 444: %s", err) + } buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) buildCmd.Dir = ctx.Dir - out, exitCode, err := runCommandWithOutput(buildCmd) - if err == nil || exitCode == 0 { + out, _, err := runCommandWithOutput(buildCmd) + if err == nil { t.Fatalf("build should have failed: %s %s", err, out) } @@ -1372,9 +1520,12 @@ func TestBuildWithInaccessibleFilesInContext(t *testing.T) { t.Fatal(err) } defer ctx.Close() - if err := os.Symlink(filepath.Join(ctx.Dir, "g"), "../../../../../../../../../../../../../../../../../../../azA"); err != nil { + + target := "../../../../../../../../../../../../../../../../../../../azA" + if err := 
os.Symlink(filepath.Join(ctx.Dir, "g"), target); err != nil { t.Fatal(err) } + defer os.Remove(target) // This is used to ensure we don't follow links when checking if everything in the context is accessible // This test doesn't require that we run commands as an unprivileged user if _, err := buildImageFromContext(name, ctx, true); err != nil { @@ -1396,17 +1547,19 @@ func TestBuildWithInaccessibleFilesInContext(t *testing.T) { // This is used to ensure we don't try to add inaccessible files when they are ignored by a .dockerignore pattern pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") - err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0) - errorOut(err, t, fmt.Sprintf("failed to chown directory to root: %s", err)) - err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444) - errorOut(err, t, fmt.Sprintf("failed to chmod directory to 755: %s", err)) - err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700) - errorOut(err, t, fmt.Sprintf("failed to chmod file to 444: %s", err)) + if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { + t.Fatalf("failed to chown directory to root: %s", err) + } + if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { + t.Fatalf("failed to chmod directory to 755: %s", err) + } + if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { + t.Fatalf("failed to chmod file to 444: %s", err) + } buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) buildCmd.Dir = ctx.Dir - out, exitCode, err := runCommandWithOutput(buildCmd) - if err != nil || exitCode != 0 { + if out, _, err := runCommandWithOutput(buildCmd); err != nil { t.Fatalf("build should have worked: %s %s", err, out) } @@ -1431,10 +1584,8 @@ func TestBuildForceRm(t *testing.T) { buildCmd := exec.Command(dockerBinary, "build", "-t", name, 
"--force-rm", ".") buildCmd.Dir = ctx.Dir - _, exitCode, err := runCommandWithOutput(buildCmd) - - if err == nil || exitCode == 0 { - t.Fatal("failed to build the image") + if out, _, err := runCommandWithOutput(buildCmd); err == nil { + t.Fatalf("failed to build the image: %s, %v", out, err) } containerCountAfter, err := getContainerCount() @@ -1463,9 +1614,9 @@ func TestBuildRm(t *testing.T) { t.Fatalf("failed to get the container count: %s", err) } - out, exitCode, err := dockerCmdInDir(t, ctx.Dir, "build", "--rm", "-t", name, ".") + out, _, err := dockerCmdInDir(t, ctx.Dir, "build", "--rm", "-t", name, ".") - if err != nil || exitCode != 0 { + if err != nil { t.Fatal("failed to build the image", out) } @@ -1486,9 +1637,9 @@ func TestBuildRm(t *testing.T) { t.Fatalf("failed to get the container count: %s", err) } - out, exitCode, err := dockerCmdInDir(t, ctx.Dir, "build", "-t", name, ".") + out, _, err := dockerCmdInDir(t, ctx.Dir, "build", "-t", name, ".") - if err != nil || exitCode != 0 { + if err != nil { t.Fatal("failed to build the image", out) } @@ -1509,9 +1660,9 @@ func TestBuildRm(t *testing.T) { t.Fatalf("failed to get the container count: %s", err) } - out, exitCode, err := dockerCmdInDir(t, ctx.Dir, "build", "--rm=false", "-t", name, ".") + out, _, err := dockerCmdInDir(t, ctx.Dir, "build", "--rm=false", "-t", name, ".") - if err != nil || exitCode != 0 { + if err != nil { t.Fatal("failed to build the image", out) } @@ -1571,7 +1722,7 @@ func TestBuildWithVolumes(t *testing.T) { t.Fatal(err) } - equal := deepEqual(&expected, &result) + equal := reflect.DeepEqual(&result, &expected) if !equal { t.Fatalf("Volumes %s, expected %s", result, expected) @@ -1726,6 +1877,7 @@ func TestBuildContextCleanup(t *testing.T) { func TestBuildContextCleanupFailedBuild(t *testing.T) { name := "testbuildcontextcleanup" defer deleteImages(name) + defer deleteAllContainers() entries, err := ioutil.ReadDir("/var/lib/docker/tmp") if err != nil { t.Fatalf("failed to list 
contents of tmp dir: %s", err) @@ -1894,9 +2046,12 @@ func TestBuildOnBuildLimitedInheritence(t *testing.T) { if err != nil { t.Fatal(err) } + defer ctx.Close() out1, _, err := dockerCmdInDir(t, ctx.Dir, "build", "-t", name1, ".") - errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out1, err)) + if err != nil { + t.Fatalf("build failed to complete: %s, %v", out1, err) + } defer deleteImages(name1) } { @@ -1908,9 +2063,12 @@ func TestBuildOnBuildLimitedInheritence(t *testing.T) { if err != nil { t.Fatal(err) } + defer ctx.Close() out2, _, err = dockerCmdInDir(t, ctx.Dir, "build", "-t", name2, ".") - errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out2, err)) + if err != nil { + t.Fatalf("build failed to complete: %s, %v", out2, err) + } defer deleteImages(name2) } { @@ -1922,9 +2080,13 @@ func TestBuildOnBuildLimitedInheritence(t *testing.T) { if err != nil { t.Fatal(err) } + defer ctx.Close() out3, _, err = dockerCmdInDir(t, ctx.Dir, "build", "-t", name3, ".") - errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out3, err)) + if err != nil { + t.Fatalf("build failed to complete: %s, %v", out3, err) + } + defer deleteImages(name3) } @@ -1970,7 +2132,8 @@ func TestBuildWithCache(t *testing.T) { func TestBuildWithoutCache(t *testing.T) { name := "testbuildwithoutcache" - defer deleteImages(name) + name2 := "testbuildwithoutcache2" + defer deleteImages(name, name2) id1, err := buildImage(name, `FROM scratch MAINTAINER dockerio @@ -1980,7 +2143,8 @@ func TestBuildWithoutCache(t *testing.T) { if err != nil { t.Fatal(err) } - id2, err := buildImage(name, + + id2, err := buildImage(name2, `FROM scratch MAINTAINER dockerio EXPOSE 5432 @@ -1997,7 +2161,8 @@ func TestBuildWithoutCache(t *testing.T) { func TestBuildADDLocalFileWithCache(t *testing.T) { name := "testbuildaddlocalfilewithcache" - defer deleteImages(name) + name2 := "testbuildaddlocalfilewithcache2" + defer deleteImages(name, name2) dockerfile := ` FROM busybox 
MAINTAINER dockerio @@ -2014,7 +2179,7 @@ func TestBuildADDLocalFileWithCache(t *testing.T) { if err != nil { t.Fatal(err) } - id2, err := buildImageFromContext(name, ctx, true) + id2, err := buildImageFromContext(name2, ctx, true) if err != nil { t.Fatal(err) } @@ -2026,7 +2191,8 @@ func TestBuildADDLocalFileWithCache(t *testing.T) { func TestBuildADDMultipleLocalFileWithCache(t *testing.T) { name := "testbuildaddmultiplelocalfilewithcache" - defer deleteImages(name) + name2 := "testbuildaddmultiplelocalfilewithcache2" + defer deleteImages(name, name2) dockerfile := ` FROM busybox MAINTAINER dockerio @@ -2043,7 +2209,7 @@ func TestBuildADDMultipleLocalFileWithCache(t *testing.T) { if err != nil { t.Fatal(err) } - id2, err := buildImageFromContext(name, ctx, true) + id2, err := buildImageFromContext(name2, ctx, true) if err != nil { t.Fatal(err) } @@ -2055,7 +2221,8 @@ func TestBuildADDMultipleLocalFileWithCache(t *testing.T) { func TestBuildADDLocalFileWithoutCache(t *testing.T) { name := "testbuildaddlocalfilewithoutcache" - defer deleteImages(name) + name2 := "testbuildaddlocalfilewithoutcache2" + defer deleteImages(name, name2) dockerfile := ` FROM busybox MAINTAINER dockerio @@ -2072,7 +2239,7 @@ func TestBuildADDLocalFileWithoutCache(t *testing.T) { if err != nil { t.Fatal(err) } - id2, err := buildImageFromContext(name, ctx, false) + id2, err := buildImageFromContext(name2, ctx, false) if err != nil { t.Fatal(err) } @@ -2084,7 +2251,8 @@ func TestBuildADDLocalFileWithoutCache(t *testing.T) { func TestBuildCopyDirButNotFile(t *testing.T) { name := "testbuildcopydirbutnotfile" - defer deleteImages(name) + name2 := "testbuildcopydirbutnotfile2" + defer deleteImages(name, name2) dockerfile := ` FROM scratch COPY dir /tmp/` @@ -2103,7 +2271,7 @@ func TestBuildCopyDirButNotFile(t *testing.T) { if err := ctx.Add("dir_file", "hello2"); err != nil { t.Fatal(err) } - id2, err := buildImageFromContext(name, ctx, true) + id2, err := buildImageFromContext(name2, ctx, 
true) if err != nil { t.Fatal(err) } @@ -2115,7 +2283,11 @@ func TestBuildCopyDirButNotFile(t *testing.T) { func TestBuildADDCurrentDirWithCache(t *testing.T) { name := "testbuildaddcurrentdirwithcache" - defer deleteImages(name) + name2 := name + "2" + name3 := name + "3" + name4 := name + "4" + name5 := name + "5" + defer deleteImages(name, name2, name3, name4, name5) dockerfile := ` FROM scratch MAINTAINER dockerio @@ -2135,7 +2307,7 @@ func TestBuildADDCurrentDirWithCache(t *testing.T) { if err := ctx.Add("bar", "hello2"); err != nil { t.Fatal(err) } - id2, err := buildImageFromContext(name, ctx, true) + id2, err := buildImageFromContext(name2, ctx, true) if err != nil { t.Fatal(err) } @@ -2146,7 +2318,7 @@ func TestBuildADDCurrentDirWithCache(t *testing.T) { if err := ctx.Add("foo", "hello1"); err != nil { t.Fatal(err) } - id3, err := buildImageFromContext(name, ctx, true) + id3, err := buildImageFromContext(name3, ctx, true) if err != nil { t.Fatal(err) } @@ -2158,14 +2330,14 @@ func TestBuildADDCurrentDirWithCache(t *testing.T) { if err := ctx.Add("foo", "hello1"); err != nil { t.Fatal(err) } - id4, err := buildImageFromContext(name, ctx, true) + id4, err := buildImageFromContext(name4, ctx, true) if err != nil { t.Fatal(err) } if id3 == id4 { t.Fatal("The cache should have been invalided but hasn't.") } - id5, err := buildImageFromContext(name, ctx, true) + id5, err := buildImageFromContext(name5, ctx, true) if err != nil { t.Fatal(err) } @@ -2177,7 +2349,8 @@ func TestBuildADDCurrentDirWithCache(t *testing.T) { func TestBuildADDCurrentDirWithoutCache(t *testing.T) { name := "testbuildaddcurrentdirwithoutcache" - defer deleteImages(name) + name2 := "testbuildaddcurrentdirwithoutcache2" + defer deleteImages(name, name2) dockerfile := ` FROM scratch MAINTAINER dockerio @@ -2193,7 +2366,7 @@ func TestBuildADDCurrentDirWithoutCache(t *testing.T) { if err != nil { t.Fatal(err) } - id2, err := buildImageFromContext(name, ctx, false) + id2, err := 
buildImageFromContext(name2, ctx, false) if err != nil { t.Fatal(err) } @@ -2237,7 +2410,8 @@ func TestBuildADDRemoteFileWithCache(t *testing.T) { func TestBuildADDRemoteFileWithoutCache(t *testing.T) { name := "testbuildaddremotefilewithoutcache" - defer deleteImages(name) + name2 := "testbuildaddremotefilewithoutcache2" + defer deleteImages(name, name2) server, err := fakeStorage(map[string]string{ "baz": "hello", }) @@ -2253,7 +2427,7 @@ func TestBuildADDRemoteFileWithoutCache(t *testing.T) { if err != nil { t.Fatal(err) } - id2, err := buildImage(name, + id2, err := buildImage(name2, fmt.Sprintf(`FROM scratch MAINTAINER dockerio ADD %s/baz /usr/lib/baz/quux`, server.URL), @@ -2267,6 +2441,68 @@ func TestBuildADDRemoteFileWithoutCache(t *testing.T) { logDone("build - add remote file without cache") } +func TestBuildADDRemoteFileMTime(t *testing.T) { + name := "testbuildaddremotefilemtime" + name2 := name + "2" + name3 := name + "3" + name4 := name + "4" + + defer deleteImages(name, name2, name3, name4) + + server, err := fakeStorage(map[string]string{"baz": "hello"}) + if err != nil { + t.Fatal(err) + } + defer server.Close() + + ctx, err := fakeContext(fmt.Sprintf(`FROM scratch + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL), nil) + if err != nil { + t.Fatal(err) + } + defer ctx.Close() + + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + + id2, err := buildImageFromContext(name2, ctx, true) + if err != nil { + t.Fatal(err) + } + if id1 != id2 { + t.Fatal("The cache should have been used but wasn't - #1") + } + + // Now set baz's times to anything else and redo the build + // This time the cache should not be used + bazPath := path.Join(server.FakeContext.Dir, "baz") + err = syscall.UtimesNano(bazPath, make([]syscall.Timespec, 2)) + if err != nil { + t.Fatalf("Error setting mtime on %q: %v", bazPath, err) + } + + id3, err := buildImageFromContext(name3, ctx, true) + if err != nil { + t.Fatal(err) + } + 
if id1 == id3 { + t.Fatal("The cache should not have been used but was") + } + + // And for good measure do it again and make sure cache is used this time + id4, err := buildImageFromContext(name4, ctx, true) + if err != nil { + t.Fatal(err) + } + if id3 != id4 { + t.Fatal("The cache should have been used but wasn't - #2") + } + logDone("build - add remote file testing mtime") +} + func TestBuildADDLocalAndRemoteFilesWithCache(t *testing.T) { name := "testbuildaddlocalandremotefilewithcache" defer deleteImages(name) @@ -2324,8 +2560,7 @@ CMD ["cat", "/foo"]`, defer deleteImages(name) buildCmd.Stdin = context - out, exitCode, err := runCommandWithOutput(buildCmd) - if err != nil || exitCode != 0 { + if out, _, err := runCommandWithOutput(buildCmd); err != nil { t.Fatalf("build failed to complete: %v %v", out, err) } logDone(fmt.Sprintf("build - build an image with a context tar, compression: %v", compression)) @@ -2343,13 +2578,11 @@ func TestBuildNoContext(t *testing.T) { buildCmd := exec.Command(dockerBinary, "build", "-t", "nocontext", "-") buildCmd.Stdin = strings.NewReader("FROM busybox\nCMD echo ok\n") - out, exitCode, err := runCommandWithOutput(buildCmd) - if err != nil || exitCode != 0 { + if out, _, err := runCommandWithOutput(buildCmd); err != nil { t.Fatalf("build failed to complete: %v %v", out, err) } - out, exitCode, err = cmd(t, "run", "nocontext") - if out != "ok\n" { + if out, _, err := dockerCmd(t, "run", "--rm", "nocontext"); out != "ok\n" || err != nil { t.Fatalf("run produced invalid output: %q, expected %q", out, "ok") } @@ -2360,7 +2593,8 @@ func TestBuildNoContext(t *testing.T) { // TODO: TestCaching func TestBuildADDLocalAndRemoteFilesWithoutCache(t *testing.T) { name := "testbuildaddlocalandremotefilewithoutcache" - defer deleteImages(name) + name2 := "testbuildaddlocalandremotefilewithoutcache2" + defer deleteImages(name, name2) server, err := fakeStorage(map[string]string{ "baz": "hello", }) @@ -2383,7 +2617,7 @@ func 
TestBuildADDLocalAndRemoteFilesWithoutCache(t *testing.T) { if err != nil { t.Fatal(err) } - id2, err := buildImageFromContext(name, ctx, false) + id2, err := buildImageFromContext(name2, ctx, false) if err != nil { t.Fatal(err) } @@ -2548,6 +2782,7 @@ func TestBuildInheritance(t *testing.T) { func TestBuildFails(t *testing.T) { name := "testbuildfails" defer deleteImages(name) + defer deleteAllContainers() _, err := buildImage(name, `FROM busybox RUN sh -c "exit 23"`, @@ -2725,6 +2960,29 @@ func TestBuildDockerignore(t *testing.T) { logDone("build - test .dockerignore") } +func TestBuildDockerignoreCleanPaths(t *testing.T) { + name := "testbuilddockerignorecleanpaths" + defer deleteImages(name) + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN (! ls /tmp/foo) && (! ls /tmp/foo2) && (! ls /tmp/dir1/foo)` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "foo", + "foo2": "foo2", + "dir1/foo": "foo in dir1", + ".dockerignore": "./foo\ndir1//foo\n./dir1/../foo2", + }) + if err != nil { + t.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + logDone("build - test .dockerignore with clean paths") +} + func TestBuildDockerignoringDockerfile(t *testing.T) { name := "testbuilddockerignoredockerfile" defer deleteImages(name) @@ -2734,13 +2992,20 @@ func TestBuildDockerignoringDockerfile(t *testing.T) { "Dockerfile": "FROM scratch", ".dockerignore": "Dockerfile\n", }) - defer ctx.Close() if err != nil { t.Fatal(err) } + defer ctx.Close() if _, err = buildImageFromContext(name, ctx, true); err == nil { t.Fatalf("Didn't get expected error from ignoring Dockerfile") } + + // now try it with ./Dockerfile + ctx.Add(".dockerignore", "./Dockerfile\n") + if _, err = buildImageFromContext(name, ctx, true); err == nil { + t.Fatalf("Didn't get expected error from ignoring ./Dockerfile") + } + logDone("build - test .dockerignore of Dockerfile") } @@ -2910,6 +3175,8 @@ RUN [ "$(cat $TO)" = "hello" 
] if err != nil { t.Fatal(err) } + defer ctx.Close() + _, err = buildImageFromContext(name, ctx, true) if err != nil { t.Fatal(err) @@ -2917,6 +3184,46 @@ RUN [ "$(cat $TO)" = "hello" ] logDone("build - environment variables usage") } +func TestBuildEnvUsage2(t *testing.T) { + name := "testbuildenvusage2" + defer deleteImages(name) + dockerfile := `FROM busybox +ENV abc=def +RUN [ "$abc" = "def" ] +ENV def="hello world" +RUN [ "$def" = "hello world" ] +ENV def=hello\ world +RUN [ "$def" = "hello world" ] +ENV v1=abc v2="hi there" +RUN [ "$v1" = "abc" ] +RUN [ "$v2" = "hi there" ] +ENV v3='boogie nights' v4="with'quotes too" +RUN [ "$v3" = "boogie nights" ] +RUN [ "$v4" = "with'quotes too" ] +ENV abc=zzz FROM=hello/docker/world +ENV abc=zzz TO=/docker/world/hello +ADD $FROM $TO +RUN [ "$(cat $TO)" = "hello" ] +ENV abc "zzz" +RUN [ $abc = \"zzz\" ] +ENV abc 'yyy' +RUN [ $abc = \'yyy\' ] +ENV abc= +RUN [ "$abc" = "" ] +` + ctx, err := fakeContext(dockerfile, map[string]string{ + "hello/docker/world": "hello", + }) + if err != nil { + t.Fatal(err) + } + _, err = buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + logDone("build - environment variables usage2") +} + func TestBuildAddScript(t *testing.T) { name := "testbuildaddscript" defer deleteImages(name) @@ -2932,6 +3239,8 @@ RUN [ "$(cat /testfile)" = 'test!' 
]` if err != nil { t.Fatal(err) } + defer ctx.Close() + _, err = buildImageFromContext(name, ctx, true) if err != nil { t.Fatal(err) @@ -2986,6 +3295,7 @@ RUN cat /existing-directory-trailing-slash/test/foo | grep Hi` } return &FakeContext{Dir: tmpDir} }() + defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatalf("build failed to complete for TestBuildAddTar: %v", err) @@ -3295,6 +3605,7 @@ func TestBuildIgnoreInvalidInstruction(t *testing.T) { func TestBuildEntrypointInheritance(t *testing.T) { defer deleteImages("parent", "child") + defer deleteAllContainers() if _, err := buildImage("parent", ` FROM busybox @@ -3319,7 +3630,7 @@ func TestBuildEntrypointInheritance(t *testing.T) { status, _ = runCommand(exec.Command(dockerBinary, "run", "child")) if status != 5 { - t.Fatal("expected exit code 5 but received %d", status) + t.Fatalf("expected exit code 5 but received %d", status) } logDone("build - clear entrypoint") @@ -3333,6 +3644,7 @@ func TestBuildEntrypointInheritanceInspect(t *testing.T) { ) defer deleteImages(name, name2) + defer deleteAllContainers() if _, err := buildImage(name, "FROM busybox\nENTRYPOINT /foo/bar", true); err != nil { t.Fatal(err) @@ -3376,7 +3688,7 @@ func TestBuildRunShEntrypoint(t *testing.T) { t.Fatal(err) } - out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", name)) + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--rm", name)) if err != nil { t.Fatal(err, out) @@ -3391,7 +3703,7 @@ func TestBuildExoticShellInterpolation(t *testing.T) { _, err := buildImage(name, ` FROM busybox - + ENV SOME_VAR a.b.c RUN [ "$SOME_VAR" = 'a.b.c' ] @@ -3415,6 +3727,111 @@ func TestBuildExoticShellInterpolation(t *testing.T) { logDone("build - exotic shell interpolation") } +func TestBuildVerifySingleQuoteFails(t *testing.T) { + // This testcase is supposed to generate an error because the + // JSON array we're passing in on the CMD uses single quotes instead + // of double 
quotes (per the JSON spec). This means we interpret it + // as a "string" insead of "JSON array" and pass it on to "sh -c" and + // it should barf on it. + name := "testbuildsinglequotefails" + defer deleteImages(name) + defer deleteAllContainers() + + _, err := buildImage(name, + `FROM busybox + CMD [ '/bin/sh', '-c', 'echo hi' ]`, + true) + _, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "--rm", name)) + + if err == nil { + t.Fatal("The image was not supposed to be able to run") + } + + logDone("build - verify single quotes fail") +} + +func TestBuildVerboseOut(t *testing.T) { + name := "testbuildverboseout" + defer deleteImages(name) + + _, out, err := buildImageWithOut(name, + `FROM busybox +RUN echo 123`, + false) + + if err != nil { + t.Fatal(err) + } + if !strings.Contains(out, "\n123\n") { + t.Fatalf("Output should contain %q: %q", "123", out) + } + + logDone("build - verbose output from commands") +} + +func TestBuildWithTabs(t *testing.T) { + name := "testbuildwithtabs" + defer deleteImages(name) + _, err := buildImage(name, + "FROM busybox\nRUN echo\tone\t\ttwo", true) + if err != nil { + t.Fatal(err) + } + res, err := inspectFieldJSON(name, "ContainerConfig.Cmd") + if err != nil { + t.Fatal(err) + } + expected := "[\"/bin/sh\",\"-c\",\"echo\\u0009one\\u0009\\u0009two\"]" + if res != expected { + t.Fatalf("Missing tabs.\nGot:%s\nExp:%s", res, expected) + } + logDone("build - with tabs") +} + +func TestBuildStderr(t *testing.T) { + // This test just makes sure that no non-error output goes + // to stderr + name := "testbuildstderr" + defer deleteImages(name) + _, _, stderr, err := buildImageWithStdoutStderr(name, + "FROM busybox\nRUN echo one", true) + if err != nil { + t.Fatal(err) + } + if stderr != "" { + t.Fatal("Stderr should have been empty, instead its: %q", stderr) + } + logDone("build - testing stderr") +} + +func TestBuildChownSingleFile(t *testing.T) { + name := "testbuildchownsinglefile" + defer deleteImages(name) + + ctx, 
err := fakeContext(` +FROM busybox +COPY test / +RUN ls -l /test +RUN [ $(ls -l /test | awk '{print $3":"$4}') = 'root:root' ] +`, map[string]string{ + "test": "test", + }) + if err != nil { + t.Fatal(err) + } + defer ctx.Close() + + if err := os.Chown(filepath.Join(ctx.Dir, "test"), 4242, 4242); err != nil { + t.Fatal(err) + } + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + + logDone("build - change permission on single file") +} + func TestBuildSymlinkBreakout(t *testing.T) { name := "testbuildsymlinkbreakout" tmpdir, err := ioutil.TempDir("", name) diff --git a/integration-cli/docker_cli_commit_test.go b/integration-cli/docker_cli_commit_test.go index 46b998693d..f41361ece4 100644 --- a/integration-cli/docker_cli_commit_test.go +++ b/integration-cli/docker_cli_commit_test.go @@ -1,7 +1,6 @@ package main import ( - "fmt" "os/exec" "strings" "testing" @@ -10,23 +9,29 @@ import ( func TestCommitAfterContainerIsDone(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "echo", "foo") out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, fmt.Sprintf("failed to run container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to run container: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) waitCmd := exec.Command(dockerBinary, "wait", cleanedContainerID) - _, _, err = runCommandWithOutput(waitCmd) - errorOut(err, t, fmt.Sprintf("error thrown while waiting for container: %s", out)) + if _, _, err = runCommandWithOutput(waitCmd); err != nil { + t.Fatalf("error thrown while waiting for container: %s, %v", out, err) + } commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID) out, _, err = runCommandWithOutput(commitCmd) - errorOut(err, t, fmt.Sprintf("failed to commit container to image: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to commit container to image: %s, %v", out, err) + } cleanedImageID := 
stripTrailingCharacters(out) inspectCmd := exec.Command(dockerBinary, "inspect", cleanedImageID) - out, _, err = runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("failed to inspect image: %v %v", out, err)) + if out, _, err = runCommandWithOutput(inspectCmd); err != nil { + t.Fatalf("failed to inspect image: %s, %v", out, err) + } deleteContainer(cleanedContainerID) deleteImages(cleanedImageID) @@ -37,23 +42,29 @@ func TestCommitAfterContainerIsDone(t *testing.T) { func TestCommitWithoutPause(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "echo", "foo") out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, fmt.Sprintf("failed to run container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to run container: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) waitCmd := exec.Command(dockerBinary, "wait", cleanedContainerID) - _, _, err = runCommandWithOutput(waitCmd) - errorOut(err, t, fmt.Sprintf("error thrown while waiting for container: %s", out)) + if _, _, err = runCommandWithOutput(waitCmd); err != nil { + t.Fatalf("error thrown while waiting for container: %s, %v", out, err) + } commitCmd := exec.Command(dockerBinary, "commit", "-p=false", cleanedContainerID) out, _, err = runCommandWithOutput(commitCmd) - errorOut(err, t, fmt.Sprintf("failed to commit container to image: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to commit container to image: %s, %v", out, err) + } cleanedImageID := stripTrailingCharacters(out) inspectCmd := exec.Command(dockerBinary, "inspect", cleanedImageID) - out, _, err = runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("failed to inspect image: %v %v", out, err)) + if out, _, err = runCommandWithOutput(inspectCmd); err != nil { + t.Fatalf("failed to inspect image: %s, %v", out, err) + } deleteContainer(cleanedContainerID) deleteImages(cleanedImageID) @@ -81,7 +92,7 @@ func TestCommitNewFile(t 
*testing.T) { t.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual != "koye" { - t.Fatalf("expected output koye received %s", actual) + t.Fatalf("expected output koye received %q", actual) } deleteAllContainers() @@ -90,9 +101,63 @@ func TestCommitNewFile(t *testing.T) { logDone("commit - commit file and read") } -func TestCommitTTY(t *testing.T) { - cmd := exec.Command(dockerBinary, "run", "-t", "--name", "tty", "busybox", "/bin/ls") +func TestCommitHardlink(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-t", "--name", "hardlinks", "busybox", "sh", "-c", "touch file1 && ln file1 file2 && ls -di file1 file2") + firstOuput, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + chunks := strings.Split(strings.TrimSpace(firstOuput), " ") + inode := chunks[0] + found := false + for _, chunk := range chunks[1:] { + if chunk == inode { + found = true + break + } + } + if !found { + t.Fatalf("Failed to create hardlink in a container. Expected to find %q in %q", inode, chunks[1:]) + } + + cmd = exec.Command(dockerBinary, "commit", "hardlinks", "hardlinks") + imageID, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(imageID, err) + } + imageID = strings.Trim(imageID, "\r\n") + + cmd = exec.Command(dockerBinary, "run", "-t", "hardlinks", "ls", "-di", "file1", "file2") + secondOuput, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + + chunks = strings.Split(strings.TrimSpace(secondOuput), " ") + inode = chunks[0] + found = false + for _, chunk := range chunks[1:] { + if chunk == inode { + found = true + break + } + } + if !found { + t.Fatalf("Failed to create hardlink in a container. 
Expected to find %q in %q", inode, chunks[1:]) + } + + deleteAllContainers() + deleteImages(imageID) + + logDone("commit - commit hardlinks") +} + +func TestCommitTTY(t *testing.T) { + defer deleteImages("ttytest") + defer deleteAllContainers() + + cmd := exec.Command(dockerBinary, "run", "-t", "--name", "tty", "busybox", "/bin/ls") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } @@ -105,7 +170,6 @@ func TestCommitTTY(t *testing.T) { imageID = strings.Trim(imageID, "\r\n") cmd = exec.Command(dockerBinary, "run", "ttytest", "/bin/ls") - if _, err := runCommand(cmd); err != nil { t.Fatal(err) } @@ -124,6 +188,7 @@ func TestCommitWithHostBindMount(t *testing.T) { if err != nil { t.Fatal(imageID, err) } + imageID = strings.Trim(imageID, "\r\n") cmd = exec.Command(dockerBinary, "run", "bindtest", "true") diff --git a/integration-cli/docker_cli_cp_test.go b/integration-cli/docker_cli_cp_test.go index a5e16bb214..7002e1a34a 100644 --- a/integration-cli/docker_cli_cp_test.go +++ b/integration-cli/docker_cli_cp_test.go @@ -1,6 +1,7 @@ package main import ( + "bytes" "fmt" "io/ioutil" "os" @@ -22,7 +23,7 @@ const ( // Test for #5656 // Check that garbage paths don't escape the container's rootfs func TestCpGarbagePath(t *testing.T) { - out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) if err != nil || exitCode != 0 { t.Fatal("failed to create a container", out, err) } @@ -30,7 +31,7 @@ func TestCpGarbagePath(t *testing.T) { cleanedContainerID := stripTrailingCharacters(out) defer deleteContainer(cleanedContainerID) - out, _, err = cmd(t, "wait", cleanedContainerID) + out, _, err = dockerCmd(t, "wait", cleanedContainerID) if err != nil || stripTrailingCharacters(out) != "0" { t.Fatal("failed to set up container", 
out, err) } @@ -58,7 +59,7 @@ func TestCpGarbagePath(t *testing.T) { path := filepath.Join("../../../../../../../../../../../../", cpFullPath) - _, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) + _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir) if err != nil { t.Fatalf("couldn't copy from garbage path: %s:%s %s", cleanedContainerID, path, err) } @@ -84,7 +85,7 @@ func TestCpGarbagePath(t *testing.T) { // Check that relative paths are relative to the container's rootfs func TestCpRelativePath(t *testing.T) { - out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) if err != nil || exitCode != 0 { t.Fatal("failed to create a container", out, err) } @@ -92,7 +93,7 @@ func TestCpRelativePath(t *testing.T) { cleanedContainerID := stripTrailingCharacters(out) defer deleteContainer(cleanedContainerID) - out, _, err = cmd(t, "wait", cleanedContainerID) + out, _, err = dockerCmd(t, "wait", cleanedContainerID) if err != nil || stripTrailingCharacters(out) != "0" { t.Fatal("failed to set up container", out, err) } @@ -121,7 +122,7 @@ func TestCpRelativePath(t *testing.T) { path, _ := filepath.Rel("/", cpFullPath) - _, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) + _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir) if err != nil { t.Fatalf("couldn't copy from relative path: %s:%s %s", cleanedContainerID, path, err) } @@ -147,7 +148,7 @@ func TestCpRelativePath(t *testing.T) { // Check that absolute paths are relative to the container's rootfs func TestCpAbsolutePath(t *testing.T) { - out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + out, exitCode, err := dockerCmd(t, 
"run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) if err != nil || exitCode != 0 { t.Fatal("failed to create a container", out, err) } @@ -155,7 +156,7 @@ func TestCpAbsolutePath(t *testing.T) { cleanedContainerID := stripTrailingCharacters(out) defer deleteContainer(cleanedContainerID) - out, _, err = cmd(t, "wait", cleanedContainerID) + out, _, err = dockerCmd(t, "wait", cleanedContainerID) if err != nil || stripTrailingCharacters(out) != "0" { t.Fatal("failed to set up container", out, err) } @@ -184,7 +185,7 @@ func TestCpAbsolutePath(t *testing.T) { path := cpFullPath - _, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) + _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir) if err != nil { t.Fatalf("couldn't copy from absolute path: %s:%s %s", cleanedContainerID, path, err) } @@ -211,7 +212,7 @@ func TestCpAbsolutePath(t *testing.T) { // Test for #5619 // Check that absolute symlinks are still relative to the container's rootfs func TestCpAbsoluteSymlink(t *testing.T) { - out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" container_path") + out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" container_path") if err != nil || exitCode != 0 { t.Fatal("failed to create a container", out, err) } @@ -219,7 +220,7 @@ func TestCpAbsoluteSymlink(t *testing.T) { cleanedContainerID := stripTrailingCharacters(out) defer deleteContainer(cleanedContainerID) - out, _, err = cmd(t, "wait", cleanedContainerID) + out, _, err = dockerCmd(t, "wait", cleanedContainerID) if err != nil || stripTrailingCharacters(out) != "0" { t.Fatal("failed to set up container", out, err) } @@ -248,7 +249,7 @@ func TestCpAbsoluteSymlink(t *testing.T) { 
path := filepath.Join("/", "container_path") - _, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) + _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir) if err != nil { t.Fatalf("couldn't copy from absolute path: %s:%s %s", cleanedContainerID, path, err) } @@ -275,7 +276,7 @@ func TestCpAbsoluteSymlink(t *testing.T) { // Test for #5619 // Check that symlinks which are part of the resource path are still relative to the container's rootfs func TestCpSymlinkComponent(t *testing.T) { - out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPath+" container_path") + out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPath+" container_path") if err != nil || exitCode != 0 { t.Fatal("failed to create a container", out, err) } @@ -283,7 +284,7 @@ func TestCpSymlinkComponent(t *testing.T) { cleanedContainerID := stripTrailingCharacters(out) defer deleteContainer(cleanedContainerID) - out, _, err = cmd(t, "wait", cleanedContainerID) + out, _, err = dockerCmd(t, "wait", cleanedContainerID) if err != nil || stripTrailingCharacters(out) != "0" { t.Fatal("failed to set up container", out, err) } @@ -312,7 +313,7 @@ func TestCpSymlinkComponent(t *testing.T) { path := filepath.Join("/", "container_path", cpTestName) - _, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) + _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir) if err != nil { t.Fatalf("couldn't copy from symlink path component: %s:%s %s", cleanedContainerID, path, err) } @@ -338,7 +339,7 @@ func TestCpSymlinkComponent(t *testing.T) { // Check that cp with unprivileged user doesn't return any error func TestCpUnprivilegedUser(t *testing.T) { - out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "touch "+cpTestName) + 
out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "touch "+cpTestName) if err != nil || exitCode != 0 { t.Fatal("failed to create a container", out, err) } @@ -346,7 +347,7 @@ func TestCpUnprivilegedUser(t *testing.T) { cleanedContainerID := stripTrailingCharacters(out) defer deleteContainer(cleanedContainerID) - out, _, err = cmd(t, "wait", cleanedContainerID) + out, _, err = dockerCmd(t, "wait", cleanedContainerID) if err != nil || stripTrailingCharacters(out) != "0" { t.Fatal("failed to set up container", out, err) } @@ -372,6 +373,112 @@ func TestCpUnprivilegedUser(t *testing.T) { logDone("cp - unprivileged user") } +func TestCpVolumePath(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "cp-test-volumepath") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + outDir, err := ioutil.TempDir("", "cp-test-volumepath-out") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(outDir) + _, err = os.Create(tmpDir + "/test") + if err != nil { + t.Fatal(err) + } + + out, exitCode, err := dockerCmd(t, "run", "-d", "-v", "/foo", "-v", tmpDir+"/test:/test", "-v", tmpDir+":/baz", "busybox", "/bin/sh", "-c", "touch /foo/bar") + if err != nil || exitCode != 0 { + t.Fatal("failed to create a container", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + defer deleteContainer(cleanedContainerID) + + out, _, err = dockerCmd(t, "wait", cleanedContainerID) + if err != nil || stripTrailingCharacters(out) != "0" { + t.Fatal("failed to set up container", out, err) + } + + // Copy actual volume path + _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/foo", outDir) + if err != nil { + t.Fatalf("couldn't copy from volume path: %s:%s %v", cleanedContainerID, "/foo", err) + } + stat, err := os.Stat(outDir + "/foo") + if err != nil { + t.Fatal(err) + } + if !stat.IsDir() { + t.Fatal("expected copied content to be dir") + } + stat, err = os.Stat(outDir + "/foo/bar") + if err != nil { + t.Fatal(err) + } + if stat.IsDir() 
{ + t.Fatal("Expected file `bar` to be a file") + } + + // Copy file nested in volume + _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/foo/bar", outDir) + if err != nil { + t.Fatalf("couldn't copy from volume path: %s:%s %v", cleanedContainerID, "/foo", err) + } + stat, err = os.Stat(outDir + "/bar") + if err != nil { + t.Fatal(err) + } + if stat.IsDir() { + t.Fatal("Expected file `bar` to be a file") + } + + // Copy Bind-mounted dir + _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/baz", outDir) + if err != nil { + t.Fatalf("couldn't copy from bind-mounted volume path: %s:%s %v", cleanedContainerID, "/baz", err) + } + stat, err = os.Stat(outDir + "/baz") + if err != nil { + t.Fatal(err) + } + if !stat.IsDir() { + t.Fatal("Expected `baz` to be a dir") + } + + // Copy file nested in bind-mounted dir + _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/baz/test", outDir) + fb, err := ioutil.ReadFile(outDir + "/baz/test") + if err != nil { + t.Fatal(err) + } + fb2, err := ioutil.ReadFile(tmpDir + "/test") + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(fb, fb2) { + t.Fatalf("Expected copied file to be duplicate of bind-mounted file") + } + + // Copy bind-mounted file + _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/test", outDir) + fb, err = ioutil.ReadFile(outDir + "/test") + if err != nil { + t.Fatal(err) + } + fb2, err = ioutil.ReadFile(tmpDir + "/test") + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(fb, fb2) { + t.Fatalf("Expected copied file to be duplicate of bind-mounted file") + } + + logDone("cp - volume path") +} + func TestCpToDot(t *testing.T) { out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test") if err != nil || exitCode != 0 { @@ -405,7 +512,7 @@ func TestCpToDot(t *testing.T) { } content, err := ioutil.ReadFile("./test") if string(content) != "lololol\n" { - t.Fatal("Wrong content in copied file %q, should be %q", content, "lololol\n") + t.Fatalf("Wrong content in copied 
file %q, should be %q", content, "lololol\n") } logDone("cp - to dot path") } diff --git a/integration-cli/docker_cli_create_test.go b/integration-cli/docker_cli_create_test.go index 226a3f5a75..0dc7993798 100644 --- a/integration-cli/docker_cli_create_test.go +++ b/integration-cli/docker_cli_create_test.go @@ -2,7 +2,7 @@ package main import ( "encoding/json" - "fmt" + "os" "os/exec" "testing" "time" @@ -12,13 +12,17 @@ import ( func TestCreateArgs(t *testing.T) { runCmd := exec.Command(dockerBinary, "create", "busybox", "command", "arg1", "arg2", "arg with space") out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } cleanedContainerID := stripTrailingCharacters(out) inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) - inspectOut, _, err := runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("out should've been a container id: %v %v", inspectOut, err)) + out, _, err = runCommandWithOutput(inspectCmd) + if err != nil { + t.Fatalf("out should've been a container id: %s, %v", out, err) + } containers := []struct { ID string @@ -27,7 +31,7 @@ func TestCreateArgs(t *testing.T) { Args []string Image string }{} - if err := json.Unmarshal([]byte(inspectOut), &containers); err != nil { + if err := json.Unmarshal([]byte(out), &containers); err != nil { t.Fatalf("Error inspecting the container: %s", err) } if len(containers) != 1 { @@ -60,20 +64,24 @@ func TestCreateArgs(t *testing.T) { func TestCreateHostConfig(t *testing.T) { runCmd := exec.Command(dockerBinary, "create", "-P", "busybox", "echo") out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } cleanedContainerID := stripTrailingCharacters(out) inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) - inspectOut, _, err := runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("out should've been a container id: %v %v", inspectOut, 
err)) + out, _, err = runCommandWithOutput(inspectCmd) + if err != nil { + t.Fatalf("out should've been a container id: %s, %v", out, err) + } containers := []struct { HostConfig *struct { PublishAllPorts bool } }{} - if err := json.Unmarshal([]byte(inspectOut), &containers); err != nil { + if err := json.Unmarshal([]byte(out), &containers); err != nil { t.Fatalf("Error inspecting the container: %s", err) } if len(containers) != 1 { @@ -98,19 +106,43 @@ func TestCreateHostConfig(t *testing.T) { func TestCreateEchoStdout(t *testing.T) { runCmd := exec.Command(dockerBinary, "create", "busybox", "echo", "test123") out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } cleanedContainerID := stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "start", "-ai", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if out != "test123\n" { - t.Errorf("container should've printed 'test123', got '%s'", out) + t.Errorf("container should've printed 'test123', got %q", out) } deleteAllContainers() logDone("create - echo test123") } + +func TestCreateVolumesCreated(t *testing.T) { + name := "test_create_volume" + if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "create", "--name", name, "-v", "/foo", "busybox")); err != nil { + t.Fatal(out, err) + } + dir, err := inspectFieldMap(name, "Volumes", "/foo") + if err != nil { + t.Fatalf("Error getting volume host path: %q", err) + } + + if _, err := os.Stat(dir); err != nil && os.IsNotExist(err) { + t.Fatalf("Volume was not created") + } + if err != nil { + t.Fatalf("Error statting volume host path: %q", err) + } + + logDone("create - volumes are created") +} diff --git a/integration-cli/docker_cli_daemon_test.go b/integration-cli/docker_cli_daemon_test.go index 6160e57e94..31bfac3f67 100644 --- a/integration-cli/docker_cli_daemon_test.go +++ 
b/integration-cli/docker_cli_daemon_test.go @@ -2,7 +2,9 @@ package main import ( "encoding/json" + "io/ioutil" "os" + "os/exec" "strings" "testing" ) @@ -92,3 +94,193 @@ func TestDaemonStartIptablesFalse(t *testing.T) { logDone("daemon - started daemon with iptables=false") } + +// Issue #8444: If docker0 bridge is modified (intentionally or unintentionally) and +// no longer has an IP associated, we should gracefully handle that case and associate +// an IP with it rather than fail daemon start +func TestDaemonStartBridgeWithoutIPAssociation(t *testing.T) { + d := NewDaemon(t) + // rather than depending on brctl commands to verify docker0 is created and up + // let's start the daemon and stop it, and then make a modification to run the + // actual test + if err := d.Start(); err != nil { + t.Fatalf("Could not start daemon: %v", err) + } + if err := d.Stop(); err != nil { + t.Fatalf("Could not stop daemon: %v", err) + } + + // now we will remove the ip from docker0 and then try starting the daemon + ipCmd := exec.Command("ip", "addr", "flush", "dev", "docker0") + stdout, stderr, _, err := runCommandWithStdoutStderr(ipCmd) + if err != nil { + t.Fatalf("failed to remove docker0 IP association: %v, stdout: %q, stderr: %q", err, stdout, stderr) + } + + if err := d.Start(); err != nil { + warning := "**WARNING: Docker bridge network in bad state--delete docker0 bridge interface to fix" + t.Fatalf("Could not start daemon when docker0 has no IP address: %v\n%s", err, warning) + } + + // cleanup - stop the daemon if test passed + if err := d.Stop(); err != nil { + t.Fatalf("Could not stop daemon: %v", err) + } + + logDone("daemon - successful daemon start when bridge has no IP association") +} + +func TestDaemonIptablesClean(t *testing.T) { + d := NewDaemon(t) + if err := d.StartWithBusybox(); err != nil { + t.Fatalf("Could not start daemon with busybox: %v", err) + } + defer d.Stop() + + if out, err := d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", 
"top"); err != nil { + t.Fatalf("Could not run top: %s, %v", out, err) + } + + // get output from iptables with container running + ipTablesSearchString := "tcp dpt:80" + ipTablesCmd := exec.Command("iptables", "-nvL") + out, _, err := runCommandWithOutput(ipTablesCmd) + if err != nil { + t.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + } + + if !strings.Contains(out, ipTablesSearchString) { + t.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out) + } + + if err := d.Stop(); err != nil { + t.Fatalf("Could not stop daemon: %v", err) + } + + // get output from iptables after restart + ipTablesCmd = exec.Command("iptables", "-nvL") + out, _, err = runCommandWithOutput(ipTablesCmd) + if err != nil { + t.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + } + + if strings.Contains(out, ipTablesSearchString) { + t.Fatalf("iptables output should not have contained %q, but was %q", ipTablesSearchString, out) + } + + deleteAllContainers() + + logDone("daemon - run,iptables - iptables rules cleaned after daemon restart") +} + +func TestDaemonIptablesCreate(t *testing.T) { + d := NewDaemon(t) + if err := d.StartWithBusybox(); err != nil { + t.Fatalf("Could not start daemon with busybox: %v", err) + } + defer d.Stop() + + if out, err := d.Cmd("run", "-d", "--name", "top", "--restart=always", "-p", "80", "busybox:latest", "top"); err != nil { + t.Fatalf("Could not run top: %s, %v", out, err) + } + + // get output from iptables with container running + ipTablesSearchString := "tcp dpt:80" + ipTablesCmd := exec.Command("iptables", "-nvL") + out, _, err := runCommandWithOutput(ipTablesCmd) + if err != nil { + t.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + } + + if !strings.Contains(out, ipTablesSearchString) { + t.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out) + } + + if err := d.Restart(); err != nil { + t.Fatalf("Could not restart daemon: %v", err) + } + + // make sure 
the container is not running
+	runningOut, err := d.Cmd("inspect", "--format='{{.State.Running}}'", "top")
+	if err != nil {
+		t.Fatalf("Could not inspect on container: %s, %v", out, err)
+	}
+	if strings.TrimSpace(runningOut) != "true" {
+		t.Fatalf("Container should have been restarted after daemon restart. Status running should have been true but was: %q", strings.TrimSpace(runningOut))
+	}
+
+	// get output from iptables after restart
+	ipTablesCmd = exec.Command("iptables", "-nvL")
+	out, _, err = runCommandWithOutput(ipTablesCmd)
+	if err != nil {
+		t.Fatalf("Could not run iptables -nvL: %s, %v", out, err)
+	}
+
+	if !strings.Contains(out, ipTablesSearchString) {
+		t.Fatalf("iptables output after restart should have contained %q, but was %q", ipTablesSearchString, out)
+	}
+
+	deleteAllContainers()
+
+	logDone("daemon - run,iptables - iptables rules for always restarted container created after daemon restart")
+}
+
+func TestDaemonLoggingLevel(t *testing.T) {
+	d := NewDaemon(t)
+
+	if err := d.Start("--log-level=bogus"); err == nil {
+		t.Fatal("Daemon should not have been able to start")
+	}
+
+	d = NewDaemon(t)
+	if err := d.Start("--log-level=debug"); err != nil {
+		t.Fatal(err)
+	}
+	d.Stop()
+	content, _ := ioutil.ReadFile(d.logFile.Name())
+	if !strings.Contains(string(content), `level="debug"`) {
+		t.Fatalf("Missing level=\"debug\" in log file:\n%s", string(content))
+	}
+
+	d = NewDaemon(t)
+	if err := d.Start("--log-level=fatal"); err != nil {
+		t.Fatal(err)
+	}
+	d.Stop()
+	content, _ = ioutil.ReadFile(d.logFile.Name())
+	if strings.Contains(string(content), `level="debug"`) {
+		t.Fatalf("Should not have level=\"debug\" in log file:\n%s", string(content))
+	}
+
+	d = NewDaemon(t)
+	if err := d.Start("-D"); err != nil {
+		t.Fatal(err)
+	}
+	d.Stop()
+	content, _ = ioutil.ReadFile(d.logFile.Name())
+	if !strings.Contains(string(content), `level="debug"`) {
+		t.Fatalf("Missing level=\"debug\" in log file using -D:\n%s", string(content))
+	}
+
+	d = 
NewDaemon(t)
+	if err := d.Start("--debug"); err != nil {
+		t.Fatal(err)
+	}
+	d.Stop()
+	content, _ = ioutil.ReadFile(d.logFile.Name())
+	if !strings.Contains(string(content), `level="debug"`) {
+		t.Fatalf("Missing level=\"debug\" in log file using --debug:\n%s", string(content))
+	}
+
+	d = NewDaemon(t)
+	if err := d.Start("--debug", "--log-level=fatal"); err != nil {
+		t.Fatal(err)
+	}
+	d.Stop()
+	content, _ = ioutil.ReadFile(d.logFile.Name())
+	if !strings.Contains(string(content), `level="debug"`) {
+		t.Fatalf("Missing level=\"debug\" in log file when using both --debug and --log-level=fatal:\n%s", string(content))
+	}
+
+	logDone("daemon - Logging Level")
+}
diff --git a/integration-cli/docker_cli_diff_test.go b/integration-cli/docker_cli_diff_test.go
index 726f23491c..4068140ce2 100644
--- a/integration-cli/docker_cli_diff_test.go
+++ b/integration-cli/docker_cli_diff_test.go
@@ -1,7 +1,6 @@
 package main
 
 import (
-	"fmt"
 	"os/exec"
 	"strings"
 	"testing"
@@ -11,14 +10,18 @@ import (
 func TestDiffFilenameShownInOutput(t *testing.T) {
 	containerCmd := `echo foo > /root/bar`
 	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", containerCmd)
-	cid, _, err := runCommandWithOutput(runCmd)
-	errorOut(err, t, fmt.Sprintf("failed to start the container: %v", err))
+	out, _, err := runCommandWithOutput(runCmd)
+	if err != nil {
+		t.Fatalf("failed to start the container: %s, %v", out, err)
+	}
 
-	cleanCID := stripTrailingCharacters(cid)
+	cleanCID := stripTrailingCharacters(out)
 
 	diffCmd := exec.Command(dockerBinary, "diff", cleanCID)
-	out, _, err := runCommandWithOutput(diffCmd)
-	errorOut(err, t, fmt.Sprintf("failed to run diff: %v %v", out, err))
+	out, _, err = runCommandWithOutput(diffCmd)
+	if err != nil {
+		t.Fatalf("failed to run diff: %s %v", out, err)
+	}
 
 	found := false
 	for _, line := range strings.Split(out, "\n") {
@@ -44,14 +47,18 @@ func TestDiffEnsureDockerinitFilesAreIgnored(t *testing.T) {
 	for i := 0; i < 20; i++ {
 		containerCmd := `echo foo 
> /root/bar` runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", containerCmd) - cid, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, fmt.Sprintf("%s", err)) + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatal(out, err) + } - cleanCID := stripTrailingCharacters(cid) + cleanCID := stripTrailingCharacters(out) diffCmd := exec.Command(dockerBinary, "diff", cleanCID) - out, _, err := runCommandWithOutput(diffCmd) - errorOut(err, t, fmt.Sprintf("failed to run diff: %v %v", out, err)) + out, _, err = runCommandWithOutput(diffCmd) + if err != nil { + t.Fatalf("failed to run diff: %s, %v", out, err) + } deleteContainer(cleanCID) @@ -67,13 +74,18 @@ func TestDiffEnsureDockerinitFilesAreIgnored(t *testing.T) { func TestDiffEnsureOnlyKmsgAndPtmx(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sleep", "0") - cid, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, fmt.Sprintf("%s", err)) - cleanCID := stripTrailingCharacters(cid) + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatal(out, err) + } + + cleanCID := stripTrailingCharacters(out) diffCmd := exec.Command(dockerBinary, "diff", cleanCID) - out, _, err := runCommandWithOutput(diffCmd) - errorOut(err, t, fmt.Sprintf("failed to run diff: %v %v", out, err)) + out, _, err = runCommandWithOutput(diffCmd) + if err != nil { + t.Fatalf("failed to run diff: %s, %v", out, err) + } deleteContainer(cleanCID) expected := map[string]bool{ @@ -85,7 +97,7 @@ func TestDiffEnsureOnlyKmsgAndPtmx(t *testing.T) { for _, line := range strings.Split(out, "\n") { if line != "" && !expected[line] { - t.Errorf("'%s' is shown in the diff but shouldn't", line) + t.Errorf("%q is shown in the diff but shouldn't", line) } } diff --git a/integration-cli/docker_cli_events_test.go b/integration-cli/docker_cli_events_test.go index b7f410b175..a56788e219 100644 --- a/integration-cli/docker_cli_events_test.go +++ 
b/integration-cli/docker_cli_events_test.go @@ -16,12 +16,12 @@ import ( ) func TestEventsUntag(t *testing.T) { - out, _, _ := cmd(t, "images", "-q") + out, _, _ := dockerCmd(t, "images", "-q") image := strings.Split(out, "\n")[0] - cmd(t, "tag", image, "utest:tag1") - cmd(t, "tag", image, "utest:tag2") - cmd(t, "rmi", "utest:tag1") - cmd(t, "rmi", "utest:tag2") + dockerCmd(t, "tag", image, "utest:tag1") + dockerCmd(t, "tag", image, "utest:tag2") + dockerCmd(t, "rmi", "utest:tag1") + dockerCmd(t, "rmi", "utest:tag2") eventsCmd := exec.Command("timeout", "0.2", dockerBinary, "events", "--since=1") out, _, _ = runCommandWithOutput(eventsCmd) events := strings.Split(out, "\n") @@ -38,11 +38,15 @@ func TestEventsUntag(t *testing.T) { } func TestEventsPause(t *testing.T) { - out, _, _ := cmd(t, "images", "-q") + name := "testeventpause" + out, _, _ := dockerCmd(t, "images", "-q") image := strings.Split(out, "\n")[0] - cmd(t, "run", "-d", "--name", "testeventpause", image, "sleep", "2") - cmd(t, "pause", "testeventpause") - cmd(t, "unpause", "testeventpause") + dockerCmd(t, "run", "-d", "--name", name, image, "sleep", "2") + dockerCmd(t, "pause", name) + dockerCmd(t, "unpause", name) + + defer deleteAllContainers() + eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", time.Now().Unix())) out, _, _ = runCommandWithOutput(eventsCmd) events := strings.Split(out, "\n") @@ -57,14 +61,21 @@ func TestEventsPause(t *testing.T) { t.Fatalf("event should be pause, not %#v", pauseEvent) } if unpauseEvent[len(unpauseEvent)-1] != "unpause" { - t.Fatalf("event should be pause, not %#v", unpauseEvent) + t.Fatalf("event should be unpause, not %#v", unpauseEvent) + } + + waitCmd := exec.Command(dockerBinary, "wait", name) + if waitOut, _, err := runCommandWithOutput(waitCmd); err != nil { + t.Fatalf("error thrown while waiting for container: %s, %v", waitOut, err) } logDone("events - pause/unpause is logged") } func TestEventsContainerFailStartDie(t 
*testing.T) { - out, _, _ := cmd(t, "images", "-q") + defer deleteAllContainers() + + out, _, _ := dockerCmd(t, "images", "-q") image := strings.Split(out, "\n")[0] eventsCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testeventdie", image, "blerg") _, _, err := runCommandWithOutput(eventsCmd) @@ -93,8 +104,9 @@ func TestEventsContainerFailStartDie(t *testing.T) { } func TestEventsLimit(t *testing.T) { + defer deleteAllContainers() for i := 0; i < 30; i++ { - cmd(t, "run", "busybox", "echo", strconv.Itoa(i)) + dockerCmd(t, "run", "busybox", "echo", strconv.Itoa(i)) } eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", time.Now().Unix())) out, _, _ := runCommandWithOutput(eventsCmd) @@ -107,7 +119,7 @@ func TestEventsLimit(t *testing.T) { } func TestEventsContainerEvents(t *testing.T) { - cmd(t, "run", "--rm", "busybox", "true") + dockerCmd(t, "run", "--rm", "busybox", "true") eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", time.Now().Unix())) out, exitCode, err := runCommandWithOutput(eventsCmd) if exitCode != 0 || err != nil { @@ -126,13 +138,13 @@ func TestEventsContainerEvents(t *testing.T) { t.Fatalf("event should be create, not %#v", createEvent) } if startEvent[len(startEvent)-1] != "start" { - t.Fatalf("event should be pause, not %#v", startEvent) + t.Fatalf("event should be start, not %#v", startEvent) } if dieEvent[len(dieEvent)-1] != "die" { - t.Fatalf("event should be pause, not %#v", dieEvent) + t.Fatalf("event should be die, not %#v", dieEvent) } if destroyEvent[len(destroyEvent)-1] != "destroy" { - t.Fatalf("event should be pause, not %#v", destroyEvent) + t.Fatalf("event should be destroy, not %#v", destroyEvent) } logDone("events - container create, start, die, destroy is logged") @@ -178,7 +190,7 @@ func TestEventsRedirectStdout(t *testing.T) { since := time.Now().Unix() - cmd(t, "run", "busybox", "true") + dockerCmd(t, "run", "busybox", "true") defer 
deleteAllContainers()
@@ -215,3 +227,119 @@
 
 	logDone("events - redirect stdout")
 }
+
+func TestEventsImagePull(t *testing.T) {
+	since := time.Now().Unix()
+	pullCmd := exec.Command(dockerBinary, "pull", "scratch")
+	if out, _, err := runCommandWithOutput(pullCmd); err != nil {
+		t.Fatalf("pulling the scratch image from has failed: %s, %v", out, err)
+	}
+
+	eventsCmd := exec.Command(dockerBinary, "events",
+		fmt.Sprintf("--since=%d", since),
+		fmt.Sprintf("--until=%d", time.Now().Unix()))
+	out, _, _ := runCommandWithOutput(eventsCmd)
+
+	events := strings.Split(strings.TrimSpace(out), "\n")
+	event := strings.TrimSpace(events[len(events)-1])
+
+	if !strings.HasSuffix(event, "scratch:latest: pull") {
+		t.Fatalf("Missing pull event - got:%q", event)
+	}
+
+	logDone("events - image pull is logged")
+}
+
+func TestEventsImageImport(t *testing.T) {
+	since := time.Now().Unix()
+
+	defer deleteImages("cirros")
+
+	server, err := fileServer(map[string]string{
+		"/cirros.tar.gz": "/cirros.tar.gz",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer server.Close()
+	fileURL := fmt.Sprintf("%s/cirros.tar.gz", server.URL)
+	importCmd := exec.Command(dockerBinary, "import", fileURL, "cirros")
+	out, _, err := runCommandWithOutput(importCmd)
+	if err != nil {
+		t.Errorf("import failed with errors: %v, output: %q", err, out)
+	}
+
+	eventsCmd := exec.Command(dockerBinary, "events",
+		fmt.Sprintf("--since=%d", since),
+		fmt.Sprintf("--until=%d", time.Now().Unix()))
+	out, _, _ = runCommandWithOutput(eventsCmd)
+
+	events := strings.Split(strings.TrimSpace(out), "\n")
+	event := strings.TrimSpace(events[len(events)-1])
+
+	if !strings.HasSuffix(event, ": import") {
+		t.Fatalf("Missing import event - got:%q", event)
+	}
+
+	logDone("events - image import is logged")
+}
+
+func TestEventsFilters(t *testing.T) {
+	since := time.Now().Unix()
+	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--rm", "busybox", "true"))
+	
if err != nil { + t.Fatal(out, err) + } + out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "--rm", "busybox", "true")) + if err != nil { + t.Fatal(out, err) + } + eventsCmd := exec.Command(dockerBinary, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", time.Now().Unix()), "--filter", "event=die") + out, exitCode, err := runCommandWithOutput(eventsCmd) + if exitCode != 0 || err != nil { + t.Fatalf("Failed to get events with exit code %d: %s", exitCode, err) + } + events := strings.Split(out, "\n") + events = events[:len(events)-1] + if len(events) != 2 { + t.Fatalf("Expected 2 events, got %d: %v", len(events), events) + } + dieEvent := strings.Fields(events[len(events)-1]) + if dieEvent[len(dieEvent)-1] != "die" { + t.Fatalf("event should be die, not %#v", dieEvent) + } + + dieEvent = strings.Fields(events[len(events)-2]) + if dieEvent[len(dieEvent)-1] != "die" { + t.Fatalf("event should be die, not %#v", dieEvent) + } + + eventsCmd = exec.Command(dockerBinary, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", time.Now().Unix()), "--filter", "event=die", "--filter", "event=start") + out, exitCode, err = runCommandWithOutput(eventsCmd) + if exitCode != 0 || err != nil { + t.Fatalf("Failed to get events with exit code %d: %s", exitCode, err) + } + events = strings.Split(out, "\n") + events = events[:len(events)-1] + if len(events) != 4 { + t.Fatalf("Expected 4 events, got %d: %v", len(events), events) + } + startEvent := strings.Fields(events[len(events)-4]) + if startEvent[len(startEvent)-1] != "start" { + t.Fatalf("event should be start, not %#v", startEvent) + } + dieEvent = strings.Fields(events[len(events)-3]) + if dieEvent[len(dieEvent)-1] != "die" { + t.Fatalf("event should be die, not %#v", dieEvent) + } + startEvent = strings.Fields(events[len(events)-2]) + if startEvent[len(startEvent)-1] != "start" { + t.Fatalf("event should be start, not %#v", startEvent) + } + dieEvent = 
strings.Fields(events[len(events)-1]) + if dieEvent[len(dieEvent)-1] != "die" { + t.Fatalf("event should be die, not %#v", dieEvent) + } + + logDone("events - filters") +} diff --git a/integration-cli/docker_cli_exec_test.go b/integration-cli/docker_cli_exec_test.go index 0e012aa4c0..b07f215a36 100644 --- a/integration-cli/docker_cli_exec_test.go +++ b/integration-cli/docker_cli_exec_test.go @@ -2,6 +2,7 @@ package main import ( "bufio" + "os" "os/exec" "strings" "testing" @@ -10,13 +11,15 @@ import ( func TestExec(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && sleep 100") - out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, out) + if out, _, _, err := runCommandWithStdoutStderr(runCmd); err != nil { + t.Fatal(out, err) + } execCmd := exec.Command(dockerBinary, "exec", "testing", "cat", "/tmp/file") - - out, _, err = runCommandWithOutput(execCmd) - errorOut(err, t, out) + out, _, err := runCommandWithOutput(execCmd) + if err != nil { + t.Fatal(out, err) + } out = strings.Trim(out, "\r\n") @@ -29,10 +32,51 @@ func TestExec(t *testing.T) { logDone("exec - basic test") } +func TestExecInteractiveStdinClose(t *testing.T) { + defer deleteAllContainers() + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-itd", "busybox", "/bin/cat")) + if err != nil { + t.Fatal(err) + } + + contId := strings.TrimSpace(out) + + returnchan := make(chan struct{}) + + go func() { + var err error + cmd := exec.Command(dockerBinary, "exec", "-i", contId, "/bin/ls", "/") + cmd.Stdin = os.Stdin + if err != nil { + t.Fatal(err) + } + + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatal(err, out) + } + + if string(out) == "" { + t.Fatalf("Output was empty, likely blocked by standard input") + } + + returnchan <- struct{}{} + }() + + select { + case <-returnchan: + case <-time.After(10 * time.Second): + t.Fatal("timed out running docker exec") + } + + 
logDone("exec - interactive mode closes stdin after execution") +} + func TestExecInteractive(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && sleep 100") - out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, out) + if out, _, _, err := runCommandWithStdoutStderr(runCmd); err != nil { + t.Fatal(out, err) + } execCmd := exec.Command(dockerBinary, "exec", "-i", "testing", "sh") stdin, err := execCmd.StdinPipe() @@ -84,17 +128,22 @@ func TestExecInteractive(t *testing.T) { func TestExecAfterContainerRestart(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } cleanedContainerID := stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "restart", cleanedContainerID) - out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if out, _, err = runCommandWithOutput(runCmd); err != nil { + t.Fatal(out, err) + } runCmd = exec.Command(dockerBinary, "exec", cleanedContainerID, "echo", "hello") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } outStr := strings.TrimSpace(out) if outStr != "hello" { @@ -137,3 +186,168 @@ func TestExecAfterDaemonRestart(t *testing.T) { logDone("exec - exec running container after daemon restart") } + +// Regresssion test for #9155, #9044 +func TestExecEnv(t *testing.T) { + defer deleteAllContainers() + + runCmd := exec.Command(dockerBinary, "run", + "-e", "LALA=value1", + "-e", "LALA=value2", + "-d", "--name", "testing", "busybox", "top") + if out, _, _, err := runCommandWithStdoutStderr(runCmd); err != nil { + t.Fatal(out, err) + } + + execCmd := exec.Command(dockerBinary, "exec", "testing", "env") + out, _, err := runCommandWithOutput(execCmd) + if err != nil { + t.Fatal(out, err) + } + + if strings.Contains(out, 
"LALA=value1") || + !strings.Contains(out, "LALA=value2") || + !strings.Contains(out, "HOME=/root") { + t.Errorf("exec env(%q), expect %q, %q", out, "LALA=value2", "HOME=/root") + } + + logDone("exec - exec inherits correct env") +} + +func TestExecExitStatus(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "top", "busybox", "top") + if out, _, _, err := runCommandWithStdoutStderr(runCmd); err != nil { + t.Fatal(out, err) + } + + // Test normal (non-detached) case first + cmd := exec.Command(dockerBinary, "exec", "top", "sh", "-c", "exit 23") + ec, _ := runCommand(cmd) + + if ec != 23 { + t.Fatalf("Should have had an ExitCode of 23, not: %d", ec) + } + + logDone("exec - exec non-zero ExitStatus") +} + +func TestExecPausedContainer(t *testing.T) { + + defer deleteAllContainers() + defer unpauseAllContainers() + + runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "top") + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatal(out, err) + } + + ContainerID := stripTrailingCharacters(out) + + pausedCmd := exec.Command(dockerBinary, "pause", "testing") + out, _, _, err = runCommandWithStdoutStderr(pausedCmd) + if err != nil { + t.Fatal(out, err) + } + + execCmd := exec.Command(dockerBinary, "exec", "-i", "-t", ContainerID, "echo", "hello") + out, _, err = runCommandWithOutput(execCmd) + if err == nil { + t.Fatal("container should fail to exec new command if it is paused") + } + + expected := ContainerID + " is paused, unpause the container before exec" + if !strings.Contains(out, expected) { + t.Fatal("container should not exec new command if it is paused") + } + + logDone("exec - exec should not exec a pause container") +} + +// regression test for #9476 +func TestExecTtyCloseStdin(t *testing.T) { + defer deleteAllContainers() + + cmd := exec.Command(dockerBinary, "run", "-d", "-it", "--name", "exec_tty_stdin", "busybox") + if out, _, err := runCommandWithOutput(cmd); err != nil { + 
t.Fatal(out, err)
+	}
+
+	cmd = exec.Command(dockerBinary, "exec", "-i", "exec_tty_stdin", "cat")
+	stdinRw, err := cmd.StdinPipe()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	stdinRw.Write([]byte("test"))
+	stdinRw.Close()
+
+	if out, _, err := runCommandWithOutput(cmd); err != nil {
+		t.Fatal(out, err)
+	}
+
+	cmd = exec.Command(dockerBinary, "top", "exec_tty_stdin")
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(out, err)
+	}
+
+	outArr := strings.Split(out, "\n")
+	if len(outArr) > 3 || strings.Contains(out, "nsenter-exec") {
+		// This is the really bad part
+		if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rm", "-f", "exec_tty_stdin")); err != nil {
+			t.Fatal(out, err)
+		}
+
+		t.Fatalf("exec process left running\n\t %s", out)
+	}
+
+	logDone("exec - stdin is closed properly with tty enabled")
+}
+
+func TestExecTtyWithoutStdin(t *testing.T) {
+	defer deleteAllContainers()
+
+	cmd := exec.Command(dockerBinary, "run", "-d", "-ti", "busybox")
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatalf("failed to start container: %v (%v)", out, err)
+	}
+
+	id := strings.TrimSpace(out)
+	if err := waitRun(id); err != nil {
+		t.Fatal(err)
+	}
+
+	defer func() {
+		cmd := exec.Command(dockerBinary, "kill", id)
+		if out, _, err := runCommandWithOutput(cmd); err != nil {
+			t.Fatalf("failed to kill container: %v (%v)", out, err)
+		}
+	}()
+
+	done := make(chan struct{})
+	go func() {
+		defer close(done)
+
+		cmd := exec.Command(dockerBinary, "exec", "-ti", id, "true")
+		if _, err := cmd.StdinPipe(); err != nil {
+			t.Fatal(err)
+		}
+
+		expected := "cannot enable tty mode"
+		if out, _, err := runCommandWithOutput(cmd); err == nil {
+			t.Fatal("exec should have failed")
+		} else if !strings.Contains(out, expected) {
+			t.Fatalf("exec failed with error %q: expected %q", out, expected)
+		}
+	}()
+
+	select {
+	case <-done:
+	case <-time.After(3 * time.Second):
+		t.Fatal("exec is running but should have failed")
+	}
+
+	
logDone("exec - forbid piped stdin to tty enabled container") +} diff --git a/integration-cli/docker_cli_export_import_test.go b/integration-cli/docker_cli_export_import_test.go index b044cd8366..e1e95e436e 100644 --- a/integration-cli/docker_cli_export_import_test.go +++ b/integration-cli/docker_cli_export_import_test.go @@ -26,19 +26,23 @@ func TestExportContainerAndImportImage(t *testing.T) { exportCmdTemplate := `%v export %v > /tmp/testexp.tar` exportCmdFinal := fmt.Sprintf(exportCmdTemplate, dockerBinary, cleanedContainerID) exportCmd := exec.Command("bash", "-c", exportCmdFinal) - out, _, err = runCommandWithOutput(exportCmd) - errorOut(err, t, fmt.Sprintf("failed to export container: %v %v", out, err)) + if out, _, err = runCommandWithOutput(exportCmd); err != nil { + t.Fatalf("failed to export container: %s, %v", out, err) + } importCmdFinal := `cat /tmp/testexp.tar | docker import - repo/testexp:v1` importCmd := exec.Command("bash", "-c", importCmdFinal) out, _, err = runCommandWithOutput(importCmd) - errorOut(err, t, fmt.Sprintf("failed to import image: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to import image: %s, %v", out, err) + } cleanedImageID := stripTrailingCharacters(out) inspectCmd = exec.Command(dockerBinary, "inspect", cleanedImageID) - out, _, err = runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("output should've been an image id: %v %v", out, err)) + if out, _, err = runCommandWithOutput(inspectCmd); err != nil { + t.Fatalf("output should've been an image id: %s, %v", out, err) + } deleteContainer(cleanedContainerID) deleteImages("repo/testexp:v1") diff --git a/integration-cli/docker_cli_history_test.go b/integration-cli/docker_cli_history_test.go index 8b1a73b59c..3ae9ffb45d 100644 --- a/integration-cli/docker_cli_history_test.go +++ b/integration-cli/docker_cli_history_test.go @@ -46,9 +46,8 @@ RUN echo "Z"`, } out, exitCode, err := runCommandWithOutput(exec.Command(dockerBinary, "history", 
"testbuildhistory")) - errorOut(err, t, fmt.Sprintf("image history failed: %v %v", out, err)) if err != nil || exitCode != 0 { - t.Fatal("failed to get image history") + t.Fatalf("failed to get image history: %s, %v", out, err) } actualValues := strings.Split(out, "\n")[1:27] diff --git a/integration-cli/docker_cli_images_test.go b/integration-cli/docker_cli_images_test.go index 5a7207cec5..a91f1c0e22 100644 --- a/integration-cli/docker_cli_images_test.go +++ b/integration-cli/docker_cli_images_test.go @@ -3,6 +3,8 @@ package main import ( "fmt" "os/exec" + "reflect" + "sort" "strings" "testing" "time" @@ -11,7 +13,9 @@ import ( func TestImagesEnsureImageIsListed(t *testing.T) { imagesCmd := exec.Command(dockerBinary, "images") out, _, err := runCommandWithOutput(imagesCmd) - errorOut(err, t, fmt.Sprintf("listing images failed with errors: %v", err)) + if err != nil { + t.Fatalf("listing images failed with errors: %s, %v", out, err) + } if !strings.Contains(out, "busybox") { t.Fatal("images should've listed busybox") @@ -46,7 +50,9 @@ func TestImagesOrderedByCreationDate(t *testing.T) { } out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "images", "-q", "--no-trunc")) - errorOut(err, t, fmt.Sprintf("listing images failed with errors: %v", err)) + if err != nil { + t.Fatalf("listing images failed with errors: %s, %v", out, err) + } imgs := strings.Split(out, "\n") if imgs[0] != id3 { t.Fatalf("First image must be %s, got %s", id3, imgs[0]) @@ -60,3 +66,59 @@ func TestImagesOrderedByCreationDate(t *testing.T) { logDone("images - ordering by creation date") } + +func TestImagesErrorWithInvalidFilterNameTest(t *testing.T) { + imagesCmd := exec.Command(dockerBinary, "images", "-f", "FOO=123") + out, _, err := runCommandWithOutput(imagesCmd) + if !strings.Contains(out, "Invalid filter") { + t.Fatalf("error should occur when listing images with invalid filter name FOO, %s, %v", out, err) + } + + logDone("images - invalid filter name check working") +} + +func 
TestImagesFilterWhiteSpaceTrimmingAndLowerCasingWorking(t *testing.T) { + imageName := "images_filter_test" + defer deleteAllContainers() + defer deleteImages(imageName) + buildImage(imageName, + `FROM scratch + RUN touch /test/foo + RUN touch /test/bar + RUN touch /test/baz`, true) + + filters := []string{ + "dangling=true", + "Dangling=true", + " dangling=true", + "dangling=true ", + "dangling = true", + } + + imageListings := make([][]string, 5, 5) + for idx, filter := range filters { + cmd := exec.Command(dockerBinary, "images", "-f", filter) + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + listing := strings.Split(out, "\n") + sort.Strings(listing) + imageListings[idx] = listing + } + + for idx, listing := range imageListings { + if idx < 4 && !reflect.DeepEqual(listing, imageListings[idx+1]) { + for idx, errListing := range imageListings { + fmt.Printf("out %d", idx) + for _, image := range errListing { + fmt.Print(image) + } + fmt.Print("") + } + t.Fatalf("All output must be the same") + } + } + + logDone("images - white space trimming and lower casing") +} diff --git a/integration-cli/docker_cli_import_test.go b/integration-cli/docker_cli_import_test.go index ea001fd456..94aadc5831 100644 --- a/integration-cli/docker_cli_import_test.go +++ b/integration-cli/docker_cli_import_test.go @@ -16,7 +16,7 @@ func TestImportDisplay(t *testing.T) { } defer server.Close() fileURL := fmt.Sprintf("%s/cirros.tar.gz", server.URL) - importCmd := exec.Command(dockerBinary, "import", fileURL) + importCmd := exec.Command(dockerBinary, "import", fileURL, "cirros") out, _, err := runCommandWithOutput(importCmd) if err != nil { t.Errorf("import failed with errors: %v, output: %q", err, out) @@ -26,5 +26,7 @@ func TestImportDisplay(t *testing.T) { t.Fatalf("display is messed up: %d '\\n' instead of 2", n) } + deleteImages("cirros") + logDone("import - cirros was imported and display is fine") } diff --git a/integration-cli/docker_cli_info_test.go 
b/integration-cli/docker_cli_info_test.go index 32aa3a2125..2e8239a4b3 100644 --- a/integration-cli/docker_cli_info_test.go +++ b/integration-cli/docker_cli_info_test.go @@ -1,7 +1,6 @@ package main import ( - "fmt" "os/exec" "strings" "testing" @@ -11,10 +10,8 @@ import ( func TestInfoEnsureSucceeds(t *testing.T) { versionCmd := exec.Command(dockerBinary, "info") out, exitCode, err := runCommandWithOutput(versionCmd) - errorOut(err, t, fmt.Sprintf("encountered error while running docker info: %v", err)) - if err != nil || exitCode != 0 { - t.Fatal("failed to execute docker info") + t.Fatalf("failed to execute docker info: %s, %v", out, err) } stringsToCheck := []string{"Containers:", "Execution Driver:", "Kernel Version:"} diff --git a/integration-cli/docker_cli_inspect_test.go b/integration-cli/docker_cli_inspect_test.go index 30a722047a..bb99818bf9 100644 --- a/integration-cli/docker_cli_inspect_test.go +++ b/integration-cli/docker_cli_inspect_test.go @@ -10,13 +10,14 @@ func TestInspectImage(t *testing.T) { imageTest := "scratch" imageTestID := "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" imagesCmd := exec.Command(dockerBinary, "inspect", "--format='{{.Id}}'", imageTest) - out, exitCode, err := runCommandWithOutput(imagesCmd) if exitCode != 0 || err != nil { - t.Fatalf("failed to inspect image") + t.Fatalf("failed to inspect image: %s, %v", out, err) } + if id := strings.TrimSuffix(out, "\n"); id != imageTestID { t.Fatalf("Expected id: %s for image: %s but received id: %s", imageTestID, imageTest, id) } + logDone("inspect - inspect an image") } diff --git a/integration-cli/docker_cli_kill_test.go b/integration-cli/docker_cli_kill_test.go index 6ee246f5ff..33135a3be7 100644 --- a/integration-cli/docker_cli_kill_test.go +++ b/integration-cli/docker_cli_kill_test.go @@ -1,7 +1,6 @@ package main import ( - "fmt" "os/exec" "strings" "testing" @@ -10,21 +9,27 @@ import ( func TestKillContainer(t *testing.T) { runCmd := exec.Command(dockerBinary, 
"run", "-d", "busybox", "sh", "-c", "sleep 10") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + if err != nil { + t.Fatal(out, err) + } cleanedContainerID := stripTrailingCharacters(out) inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) - inspectOut, _, err := runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("out should've been a container id: %v %v", inspectOut, err)) + if out, _, err = runCommandWithOutput(inspectCmd); err != nil { + t.Fatalf("out should've been a container id: %s, %v", out, err) + } killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID) - out, _, err = runCommandWithOutput(killCmd) - errorOut(err, t, fmt.Sprintf("failed to kill container: %v %v", out, err)) + if out, _, err = runCommandWithOutput(killCmd); err != nil { + t.Fatalf("failed to kill container: %s, %v", out, err) + } listRunningContainersCmd := exec.Command(dockerBinary, "ps", "-q") out, _, err = runCommandWithOutput(listRunningContainersCmd) - errorOut(err, t, fmt.Sprintf("failed to list running containers: %v", err)) + if err != nil { + t.Fatalf("failed to list running containers: %s, %v", out, err) + } if strings.Contains(out, cleanedContainerID) { t.Fatal("killed container is still running") @@ -38,21 +43,27 @@ func TestKillContainer(t *testing.T) { func TestKillDifferentUserContainer(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-u", "daemon", "-d", "busybox", "sh", "-c", "sleep 10") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + if err != nil { + t.Fatal(out, err) + } cleanedContainerID := stripTrailingCharacters(out) inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) - inspectOut, _, err := runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("out should've been a container id: %v %v", inspectOut, err)) + if out, _, err = runCommandWithOutput(inspectCmd); 
err != nil { + t.Fatalf("out should've been a container id: %s, %v", out, err) + } killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID) - out, _, err = runCommandWithOutput(killCmd) - errorOut(err, t, fmt.Sprintf("failed to kill container: %v %v", out, err)) + if out, _, err = runCommandWithOutput(killCmd); err != nil { + t.Fatalf("failed to kill container: %s, %v", out, err) + } listRunningContainersCmd := exec.Command(dockerBinary, "ps", "-q") out, _, err = runCommandWithOutput(listRunningContainersCmd) - errorOut(err, t, fmt.Sprintf("failed to list running containers: %v", err)) + if err != nil { + t.Fatalf("failed to list running containers: %s, %v", out, err) + } if strings.Contains(out, cleanedContainerID) { t.Fatal("killed container is still running") diff --git a/integration-cli/docker_cli_links_test.go b/integration-cli/docker_cli_links_test.go index da6f5ac220..5b81b7fec3 100644 --- a/integration-cli/docker_cli_links_test.go +++ b/integration-cli/docker_cli_links_test.go @@ -1,12 +1,13 @@ package main import ( - "fmt" "io/ioutil" "os" "os/exec" + "reflect" "strings" "testing" + "time" "github.com/docker/docker/pkg/iptables" ) @@ -14,7 +15,9 @@ import ( func TestLinksEtcHostsRegularFile(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "--net=host", "busybox", "ls", "-la", "/etc/hosts") out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !strings.HasPrefix(out, "-") { t.Errorf("/etc/hosts should be a regular file") @@ -28,7 +31,9 @@ func TestLinksEtcHostsRegularFile(t *testing.T) { func TestLinksEtcHostsContentMatch(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "--net=host", "busybox", "cat", "/etc/hosts") out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } hosts, err := ioutil.ReadFile("/etc/hosts") if os.IsNotExist(err) { @@ -51,7 +56,7 @@ func TestLinksPingUnlinkedContainers(t 
*testing.T) { if exitCode == 0 { t.Fatal("run ping did not fail") } else if exitCode != 1 { - errorOut(err, t, fmt.Sprintf("run ping failed with errors: %v", err)) + t.Fatalf("run ping failed with errors: %v", err) } logDone("links - ping unlinked container") @@ -59,21 +64,21 @@ func TestLinksPingUnlinkedContainers(t *testing.T) { func TestLinksPingLinkedContainers(t *testing.T) { var out string - out, _, _ = cmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10") + out, _, _ = dockerCmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10") idA := stripTrailingCharacters(out) - out, _, _ = cmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10") + out, _, _ = dockerCmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10") idB := stripTrailingCharacters(out) - cmd(t, "run", "--rm", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") - cmd(t, "kill", idA) - cmd(t, "kill", idB) + dockerCmd(t, "run", "--rm", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") + dockerCmd(t, "kill", idA) + dockerCmd(t, "kill", idB) deleteAllContainers() logDone("links - ping linked container") } func TestLinksIpTablesRulesWhenLinkAndUnlink(t *testing.T) { - cmd(t, "run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "sleep", "10") - cmd(t, "run", "-d", "--name", "parent", "--link", "child:http", "busybox", "sleep", "10") + dockerCmd(t, "run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "sleep", "10") + dockerCmd(t, "run", "-d", "--name", "parent", "--link", "child:http", "busybox", "sleep", "10") childIP := findContainerIP(t, "child") parentIP := findContainerIP(t, "parent") @@ -84,13 +89,13 @@ func TestLinksIpTablesRulesWhenLinkAndUnlink(t *testing.T) { t.Fatal("Iptables rules not found") } - cmd(t, "rm", "--link", "parent/http") + 
dockerCmd(t, "rm", "--link", "parent/http") if iptables.Exists(sourceRule...) || iptables.Exists(destinationRule...) { t.Fatal("Iptables rules should be removed when unlink") } - cmd(t, "kill", "child") - cmd(t, "kill", "parent") + dockerCmd(t, "kill", "child") + dockerCmd(t, "kill", "parent") deleteAllContainers() logDone("link - verify iptables when link and unlink") @@ -102,9 +107,9 @@ func TestLinksInspectLinksStarted(t *testing.T) { result []string ) defer deleteAllContainers() - cmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10") - cmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10") - cmd(t, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sleep", "10") + dockerCmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10") + dockerCmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10") + dockerCmd(t, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sleep", "10") links, err := inspectFieldJSON("testinspectlink", "HostConfig.Links") if err != nil { t.Fatal(err) @@ -117,7 +122,7 @@ func TestLinksInspectLinksStarted(t *testing.T) { output := convertSliceOfStringsToMap(result) - equal := deepEqual(expected, output) + equal := reflect.DeepEqual(output, expected) if !equal { t.Fatalf("Links %s, expected %s", result, expected) @@ -131,9 +136,9 @@ func TestLinksInspectLinksStopped(t *testing.T) { result []string ) defer deleteAllContainers() - cmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10") - cmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10") - cmd(t, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "true") + dockerCmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10") + dockerCmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10") + dockerCmd(t, 
"run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "true") links, err := inspectFieldJSON("testinspectlink", "HostConfig.Links") if err != nil { t.Fatal(err) @@ -146,7 +151,7 @@ func TestLinksInspectLinksStopped(t *testing.T) { output := convertSliceOfStringsToMap(result) - equal := deepEqual(expected, output) + equal := reflect.DeepEqual(output, expected) if !equal { t.Fatalf("Links %s, but expected %s", result, expected) @@ -154,3 +159,75 @@ func TestLinksInspectLinksStopped(t *testing.T) { logDone("link - links in stopped container inspect") } + +func TestLinksNotStartedParentNotFail(t *testing.T) { + defer deleteAllContainers() + runCmd := exec.Command(dockerBinary, "create", "--name=first", "busybox", "top") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatal(out, err) + } + runCmd = exec.Command(dockerBinary, "create", "--name=second", "--link=first:first", "busybox", "top") + out, _, _, err = runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatal(out, err) + } + runCmd = exec.Command(dockerBinary, "start", "first") + out, _, _, err = runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatal(out, err) + } + logDone("link - container start not failing on updating stopped parent links") +} + +func TestLinksHostsFilesInject(t *testing.T) { + defer deleteAllContainers() + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-itd", "--name", "one", "busybox", "top")) + if err != nil { + t.Fatal(err, out) + } + + idOne := strings.TrimSpace(out) + + out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "-itd", "--name", "two", "--link", "one:onetwo", "busybox", "top")) + if err != nil { + t.Fatal(err, out) + } + + idTwo := strings.TrimSpace(out) + + time.Sleep(1 * time.Second) + + contentOne, err := readContainerFile(idOne, "hosts") + if err != nil { + t.Fatal(err, string(contentOne)) + } + + contentTwo, err := 
readContainerFile(idTwo, "hosts") + if err != nil { + t.Fatal(err, string(contentTwo)) + } + + if !strings.Contains(string(contentTwo), "onetwo") { + t.Fatal("Host is not present in updated hosts file", string(contentTwo)) + } + + logDone("link - ensure containers hosts files are updated with the link alias.") +} + +func TestLinksNetworkHostContainer(t *testing.T) { + defer deleteAllContainers() + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--net", "host", "--name", "host_container", "busybox", "top")) + if err != nil { + t.Fatal(err, out) + } + + out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", "should_fail", "--link", "host_container:tester", "busybox", "true")) + if err == nil || !strings.Contains(out, "--net=host can't be used with links. This would result in undefined behavior.") { + t.Fatalf("Running container linking to a container with --net host should have failed: %s", out) + } + + logDone("link - error thrown when linking to container with --net host") +} diff --git a/integration-cli/docker_cli_login_test.go b/integration-cli/docker_cli_login_test.go new file mode 100644 index 0000000000..cf134e4c9b --- /dev/null +++ b/integration-cli/docker_cli_login_test.go @@ -0,0 +1,35 @@ +package main + +import ( + "bytes" + "io" + "os" + "os/exec" + "testing" +) + +func TestLoginWithoutTTY(t *testing.T) { + cmd := exec.Command(dockerBinary, "login") + // setup STDOUT and STDERR so that we see any output and errors in our console + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + // create a buffer with text then a new line as a return + buf := bytes.NewBuffer([]byte("buffer test string \n")) + + // use a pipe for stdin and manually copy the data so that + // the process does not get the TTY + in, err := cmd.StdinPipe() + if err != nil { + t.Fatal(err) + } + // copy the bytes into the commands stdin along with a new line + go io.Copy(in, buf) + + // run the command and block until it's done + if err 
:= cmd.Run(); err == nil { + t.Fatal("Expected non nil err when loginning in & TTY not available") + } + + logDone("login - login without TTY") +} diff --git a/integration-cli/docker_cli_logs_test.go b/integration-cli/docker_cli_logs_test.go index 2407291cdb..b86a50480d 100644 --- a/integration-cli/docker_cli_logs_test.go +++ b/integration-cli/docker_cli_logs_test.go @@ -16,14 +16,18 @@ func TestLogsContainerSmallerThanPage(t *testing.T) { testLen := 32767 runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen)) out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + if err != nil { + t.Fatalf("run failed with errors: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) exec.Command(dockerBinary, "wait", cleanedContainerID).Run() logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(logsCmd) - errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to log container: %s, %v", out, err) + } if len(out) != testLen+1 { t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out)) @@ -39,14 +43,18 @@ func TestLogsContainerBiggerThanPage(t *testing.T) { testLen := 32768 runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen)) out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + if err != nil { + t.Fatalf("run failed with errors: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) exec.Command(dockerBinary, "wait", cleanedContainerID).Run() logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(logsCmd) - errorOut(err, t, fmt.Sprintf("failed to log 
container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to log container: %s, %v", out, err) + } if len(out) != testLen+1 { t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out)) @@ -62,14 +70,18 @@ func TestLogsContainerMuchBiggerThanPage(t *testing.T) { testLen := 33000 runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen)) out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + if err != nil { + t.Fatalf("run failed with errors: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) exec.Command(dockerBinary, "wait", cleanedContainerID).Run() logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(logsCmd) - errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to log container: %s, %v", out, err) + } if len(out) != testLen+1 { t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out)) @@ -85,14 +97,18 @@ func TestLogsTimestamps(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo =; done;", testLen)) out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + if err != nil { + t.Fatalf("run failed with errors: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) exec.Command(dockerBinary, "wait", cleanedContainerID).Run() logsCmd := exec.Command(dockerBinary, "logs", "-t", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(logsCmd) - errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to log container: %s, %v", out, err) + } lines := strings.Split(out, "\n") @@ -124,14 +140,18 @@ func 
TestLogsSeparateStderr(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)) out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + if err != nil { + t.Fatalf("run failed with errors: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) exec.Command(dockerBinary, "wait", cleanedContainerID).Run() logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) stdout, stderr, _, err := runCommandWithStdoutStderr(logsCmd) - errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to log container: %s, %v", out, err) + } if stdout != "" { t.Fatalf("Expected empty stdout stream, got %v", stdout) @@ -152,14 +172,18 @@ func TestLogsStderrInStdout(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "-t", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)) out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + if err != nil { + t.Fatalf("run failed with errors: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) exec.Command(dockerBinary, "wait", cleanedContainerID).Run() logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) stdout, stderr, _, err := runCommandWithStdoutStderr(logsCmd) - errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to log container: %s, %v", out, err) + } if stderr != "" { t.Fatalf("Expected empty stderr stream, got %v", stdout) @@ -180,14 +204,18 @@ func TestLogsTail(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo =; done;", testLen)) out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + if err != nil { + 
t.Fatalf("run failed with errors: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) exec.Command(dockerBinary, "wait", cleanedContainerID).Run() logsCmd := exec.Command(dockerBinary, "logs", "--tail", "5", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(logsCmd) - errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to log container: %s, %v", out, err) + } lines := strings.Split(out, "\n") @@ -197,7 +225,9 @@ func TestLogsTail(t *testing.T) { logsCmd = exec.Command(dockerBinary, "logs", "--tail", "all", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(logsCmd) - errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to log container: %s, %v", out, err) + } lines = strings.Split(out, "\n") @@ -207,7 +237,9 @@ func TestLogsTail(t *testing.T) { logsCmd = exec.Command(dockerBinary, "logs", "--tail", "random", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(logsCmd) - errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to log container: %s, %v", out, err) + } lines = strings.Split(out, "\n") @@ -223,7 +255,9 @@ func TestLogsFollowStopped(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "echo", "hello") out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + if err != nil { + t.Fatalf("run failed with errors: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) exec.Command(dockerBinary, "wait", cleanedContainerID).Run() @@ -250,3 +284,54 @@ func TestLogsFollowStopped(t *testing.T) { deleteContainer(cleanedContainerID) logDone("logs - logs follow stopped container") } + +// Regression test for #8832 +func TestLogsFollowSlowStdoutConsumer(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", 
"/bin/sh", "-c", `usleep 200000;yes X | head -c 200000`) + + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatalf("run failed with errors: %s, %v", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + defer deleteContainer(cleanedContainerID) + + stopSlowRead := make(chan bool) + + go func() { + exec.Command(dockerBinary, "wait", cleanedContainerID).Run() + stopSlowRead <- true + }() + + logCmd := exec.Command(dockerBinary, "logs", "-f", cleanedContainerID) + + stdout, err := logCmd.StdoutPipe() + if err != nil { + t.Fatal(err) + } + + if err := logCmd.Start(); err != nil { + t.Fatal(err) + } + + // First read slowly + bytes1, err := consumeWithSpeed(stdout, 10, 50*time.Millisecond, stopSlowRead) + if err != nil { + t.Fatal(err) + } + + // After the container has finished we can continue reading fast + bytes2, err := consumeWithSpeed(stdout, 32*1024, 0, nil) + if err != nil { + t.Fatal(err) + } + + actual := bytes1 + bytes2 + expected := 200000 + if actual != expected { + t.Fatalf("Invalid bytes read: %d, expected %d", actual, expected) + } + + logDone("logs - follow slow consumer") +} diff --git a/integration-cli/docker_cli_nat_test.go b/integration-cli/docker_cli_nat_test.go index 3f0fa2b272..7e3b595a80 100644 --- a/integration-cli/docker_cli_nat_test.go +++ b/integration-cli/docker_cli_nat_test.go @@ -11,7 +11,7 @@ import ( func TestNetworkNat(t *testing.T) { iface, err := net.InterfaceByName("eth0") if err != nil { - t.Skip("Test not running with `make test`. Interface eth0 not found: %s", err) + t.Skipf("Test not running with `make test`. 
Interface eth0 not found: %s", err) } ifaceAddrs, err := iface.Addrs() @@ -26,17 +26,24 @@ func TestNetworkNat(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-dt", "-p", "8080:8080", "busybox", "nc", "-lp", "8080") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, fmt.Sprintf("run1 failed with errors: %v (%s)", err, out)) + if err != nil { + t.Fatal(out, err) + } cleanedContainerID := stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "run", "busybox", "sh", "-c", fmt.Sprintf("echo hello world | nc -w 30 %s 8080", ifaceIP)) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, fmt.Sprintf("run2 failed with errors: %v (%s)", err, out)) + if err != nil { + t.Fatal(out, err) + } runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, fmt.Sprintf("failed to retrieve logs for container: %v %v", cleanedContainerID, err)) + if err != nil { + t.Fatalf("failed to retrieve logs for container: %s, %v", out, err) + } + out = strings.Trim(out, "\r\n") if expected := "hello world"; out != expected { @@ -44,8 +51,9 @@ func TestNetworkNat(t *testing.T) { } killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID) - out, _, err = runCommandWithOutput(killCmd) - errorOut(err, t, fmt.Sprintf("failed to kill container: %v %v", out, err)) + if out, _, err = runCommandWithOutput(killCmd); err != nil { + t.Fatalf("failed to kill container: %s, %v", out, err) + } deleteAllContainers() logDone("network - make sure nat works through the host") diff --git a/integration-cli/docker_cli_port_test.go b/integration-cli/docker_cli_port_test.go index ba986b9ac6..1ea7374e85 100644 --- a/integration-cli/docker_cli_port_test.go +++ b/integration-cli/docker_cli_port_test.go @@ -11,12 +11,16 @@ func TestPortList(t *testing.T) { // one port runCmd := exec.Command(dockerBinary, "run", "-d", "-p", "9876:80", "busybox", "top") out, _, err := runCommandWithOutput(runCmd) - 
errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } firstID := stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "port", firstID, "80") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertPortList(t, out, []string{"0.0.0.0:9876"}) { t.Error("Port list is not correct") @@ -24,14 +28,17 @@ func TestPortList(t *testing.T) { runCmd = exec.Command(dockerBinary, "port", firstID) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertPortList(t, out, []string{"80/tcp -> 0.0.0.0:9876"}) { t.Error("Port list is not correct") } runCmd = exec.Command(dockerBinary, "rm", "-f", firstID) - out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if out, _, err = runCommandWithOutput(runCmd); err != nil { + t.Fatal(out, err) + } // three port runCmd = exec.Command(dockerBinary, "run", "-d", @@ -40,12 +47,16 @@ func TestPortList(t *testing.T) { "-p", "9878:82", "busybox", "top") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } ID := stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "port", ID, "80") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertPortList(t, out, []string{"0.0.0.0:9876"}) { t.Error("Port list is not correct") @@ -53,7 +64,9 @@ func TestPortList(t *testing.T) { runCmd = exec.Command(dockerBinary, "port", ID) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertPortList(t, out, []string{ "80/tcp -> 0.0.0.0:9876", @@ -63,7 +76,9 @@ func TestPortList(t *testing.T) { } runCmd = exec.Command(dockerBinary, "rm", "-f", ID) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } // more and one port mapped to the same container port runCmd = 
exec.Command(dockerBinary, "run", "-d", @@ -73,12 +88,16 @@ func TestPortList(t *testing.T) { "-p", "9878:82", "busybox", "top") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } ID = stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "port", ID, "80") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertPortList(t, out, []string{"0.0.0.0:9876", "0.0.0.0:9999"}) { t.Error("Port list is not correct") @@ -86,7 +105,9 @@ func TestPortList(t *testing.T) { runCmd = exec.Command(dockerBinary, "port", ID) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertPortList(t, out, []string{ "80/tcp -> 0.0.0.0:9876", @@ -96,8 +117,9 @@ func TestPortList(t *testing.T) { t.Error("Port list is not correct\n", out) } runCmd = exec.Command(dockerBinary, "rm", "-f", ID) - out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if out, _, err = runCommandWithOutput(runCmd); err != nil { + t.Fatal(out, err) + } deleteAllContainers() diff --git a/integration-cli/docker_cli_ps_test.go b/integration-cli/docker_cli_ps_test.go index f2a7b2ab48..09207826bb 100644 --- a/integration-cli/docker_cli_ps_test.go +++ b/integration-cli/docker_cli_ps_test.go @@ -10,34 +10,45 @@ import ( func TestPsListContainers(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } firstID := stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "top") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } secondID := stripTrailingCharacters(out) // not long running runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err = runCommandWithOutput(runCmd) - 
errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } thirdID := stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "top") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } fourthID := stripTrailingCharacters(out) // make sure third one is not running runCmd = exec.Command(dockerBinary, "wait", thirdID) - out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if out, _, err = runCommandWithOutput(runCmd); err != nil { + t.Fatal(out, err) + } // all runCmd = exec.Command(dockerBinary, "ps", "-a") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertContainerList(out, []string{fourthID, thirdID, secondID, firstID}) { t.Error("Container list is not in the correct order") @@ -46,7 +57,9 @@ func TestPsListContainers(t *testing.T) { // running runCmd = exec.Command(dockerBinary, "ps") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertContainerList(out, []string{fourthID, secondID, firstID}) { t.Error("Container list is not in the correct order") @@ -57,7 +70,9 @@ func TestPsListContainers(t *testing.T) { // limit runCmd = exec.Command(dockerBinary, "ps", "-n=2", "-a") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } expected := []string{fourthID, thirdID} if !assertContainerList(out, expected) { @@ -66,7 +81,9 @@ func TestPsListContainers(t *testing.T) { runCmd = exec.Command(dockerBinary, "ps", "-n=2") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertContainerList(out, expected) { t.Error("Container list is not in the correct order") @@ -75,7 +92,9 @@ func TestPsListContainers(t *testing.T) { // since runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "-a") out, _, err = 
runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } expected = []string{fourthID, thirdID, secondID} if !assertContainerList(out, expected) { @@ -84,7 +103,9 @@ func TestPsListContainers(t *testing.T) { runCmd = exec.Command(dockerBinary, "ps", "--since", firstID) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertContainerList(out, expected) { t.Error("Container list is not in the correct order") @@ -93,7 +114,9 @@ func TestPsListContainers(t *testing.T) { // before runCmd = exec.Command(dockerBinary, "ps", "--before", thirdID, "-a") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } expected = []string{secondID, firstID} if !assertContainerList(out, expected) { @@ -102,7 +125,9 @@ func TestPsListContainers(t *testing.T) { runCmd = exec.Command(dockerBinary, "ps", "--before", thirdID) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertContainerList(out, expected) { t.Error("Container list is not in the correct order") @@ -111,7 +136,9 @@ func TestPsListContainers(t *testing.T) { // since & before runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID, "-a") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } expected = []string{thirdID, secondID} if !assertContainerList(out, expected) { @@ -120,7 +147,9 @@ func TestPsListContainers(t *testing.T) { runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertContainerList(out, expected) { t.Error("Container list is not in the correct order") } @@ -128,7 +157,9 @@ func TestPsListContainers(t *testing.T) { // since & limit runCmd = exec.Command(dockerBinary, 
"ps", "--since", firstID, "-n=2", "-a") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } expected = []string{fourthID, thirdID} if !assertContainerList(out, expected) { @@ -137,7 +168,9 @@ func TestPsListContainers(t *testing.T) { runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "-n=2") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertContainerList(out, expected) { t.Error("Container list is not in the correct order") @@ -146,7 +179,9 @@ func TestPsListContainers(t *testing.T) { // before & limit runCmd = exec.Command(dockerBinary, "ps", "--before", fourthID, "-n=1", "-a") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } expected = []string{thirdID} if !assertContainerList(out, expected) { @@ -155,7 +190,9 @@ func TestPsListContainers(t *testing.T) { runCmd = exec.Command(dockerBinary, "ps", "--before", fourthID, "-n=1") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertContainerList(out, expected) { t.Error("Container list is not in the correct order") @@ -164,7 +201,9 @@ func TestPsListContainers(t *testing.T) { // since & before & limit runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID, "-n=1", "-a") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } expected = []string{thirdID} if !assertContainerList(out, expected) { @@ -173,7 +212,9 @@ func TestPsListContainers(t *testing.T) { runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID, "-n=1") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertContainerList(out, expected) { t.Error("Container list is not in the correct order") @@ -205,7 +246,9 @@ func 
TestPsListContainersSize(t *testing.T) { name := "test_size" runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo 1 > test") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } id, err := getIDByName(name) if err != nil { t.Fatal(err) @@ -222,7 +265,9 @@ func TestPsListContainersSize(t *testing.T) { case <-time.After(3 * time.Second): t.Fatalf("Calling \"docker ps -s\" timed out!") } - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } lines := strings.Split(strings.Trim(out, "\n "), "\n") sizeIndex := strings.Index(lines[0], "SIZE") idIndex := strings.Index(lines[0], "CONTAINER ID") @@ -247,24 +292,31 @@ func TestPsListContainersFilterStatus(t *testing.T) { // start exited container runCmd := exec.Command(dockerBinary, "run", "-d", "busybox") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } firstID := stripTrailingCharacters(out) // make sure the exited cintainer is not running runCmd = exec.Command(dockerBinary, "wait", firstID) - out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if out, _, err = runCommandWithOutput(runCmd); err != nil { + t.Fatal(out, err) + } // start running container runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "sleep 360") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } secondID := stripTrailingCharacters(out) // filter containers by exited runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--filter=status=exited") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } containerOut := strings.TrimSpace(out) if containerOut != firstID[:12] { t.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out) @@ -272,7 +324,9 @@ func TestPsListContainersFilterStatus(t *testing.T) { 
runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--filter=status=running") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } containerOut = strings.TrimSpace(out) if containerOut != secondID[:12] { t.Fatalf("Expected id %s, got %s for running filter, output: %q", secondID[:12], containerOut, out) @@ -283,6 +337,66 @@ func TestPsListContainersFilterStatus(t *testing.T) { logDone("ps - test ps filter status") } +func TestPsListContainersFilterID(t *testing.T) { + // start container + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox") + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatal(out, err) + } + firstID := stripTrailingCharacters(out) + + // start another container + runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "sleep 360") + if out, _, err = runCommandWithOutput(runCmd); err != nil { + t.Fatal(out, err) + } + + // filter containers by id + runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--filter=id="+firstID) + if out, _, err = runCommandWithOutput(runCmd); err != nil { + t.Fatal(out, err) + } + containerOut := strings.TrimSpace(out) + if containerOut != firstID[:12] { + t.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out) + } + + deleteAllContainers() + + logDone("ps - test ps filter id") +} + +func TestPsListContainersFilterName(t *testing.T) { + // start container + runCmd := exec.Command(dockerBinary, "run", "-d", "--name=a_name_to_match", "busybox") + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatal(out, err) + } + firstID := stripTrailingCharacters(out) + + // start another container + runCmd = exec.Command(dockerBinary, "run", "-d", "--name=b_name_to_match", "busybox", "sh", "-c", "sleep 360") + if out, _, err = runCommandWithOutput(runCmd); err != nil { + t.Fatal(out, err) + } + + // filter containers by name + runCmd = exec.Command(dockerBinary, "ps", "-a", 
"-q", "--filter=name=a_name_to_match") + if out, _, err = runCommandWithOutput(runCmd); err != nil { + t.Fatal(out, err) + } + containerOut := strings.TrimSpace(out) + if containerOut != firstID[:12] { + t.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out) + } + + deleteAllContainers() + + logDone("ps - test ps filter name") +} + func TestPsListContainersFilterExited(t *testing.T) { deleteAllContainers() defer deleteAllContainers() diff --git a/integration-cli/docker_cli_pull_test.go b/integration-cli/docker_cli_pull_test.go index cadabde815..b67b1caca5 100644 --- a/integration-cli/docker_cli_pull_test.go +++ b/integration-cli/docker_cli_pull_test.go @@ -1,7 +1,6 @@ package main import ( - "fmt" "os/exec" "testing" ) @@ -11,11 +10,8 @@ import ( // pulling an image from the central registry should work func TestPullImageFromCentralRegistry(t *testing.T) { pullCmd := exec.Command(dockerBinary, "pull", "scratch") - out, exitCode, err := runCommandWithOutput(pullCmd) - errorOut(err, t, fmt.Sprintf("%s %s", out, err)) - - if err != nil || exitCode != 0 { - t.Fatal("pulling the scratch image from the registry has failed") + if out, _, err := runCommandWithOutput(pullCmd); err != nil { + t.Fatalf("pulling the scratch image from the registry has failed: %s, %v", out, err) } logDone("pull - pull scratch") } @@ -23,10 +19,8 @@ func TestPullImageFromCentralRegistry(t *testing.T) { // pulling a non-existing image from the central registry should return a non-zero exit code func TestPullNonExistingImage(t *testing.T) { pullCmd := exec.Command(dockerBinary, "pull", "fooblahblah1234") - _, exitCode, err := runCommandWithOutput(pullCmd) - - if err == nil || exitCode == 0 { - t.Fatal("expected non-zero exit status when pulling non-existing image") + if out, _, err := runCommandWithOutput(pullCmd); err == nil { + t.Fatalf("expected non-zero exit status when pulling non-existing image: %s", out) } logDone("pull - pull fooblahblah1234 
(non-existing image)") } diff --git a/integration-cli/docker_cli_push_test.go b/integration-cli/docker_cli_push_test.go index 160bb9e286..0dfd85a9d4 100644 --- a/integration-cli/docker_cli_push_test.go +++ b/integration-cli/docker_cli_push_test.go @@ -15,22 +15,17 @@ func TestPushBusyboxImage(t *testing.T) { // tag the image to upload it tot he private registry repoName := fmt.Sprintf("%v/busybox", privateRegistryURL) tagCmd := exec.Command(dockerBinary, "tag", "busybox", repoName) - out, exitCode, err := runCommandWithOutput(tagCmd) - errorOut(err, t, fmt.Sprintf("%v %v", out, err)) - - if err != nil || exitCode != 0 { - t.Fatal("image tagging failed") + if out, _, err := runCommandWithOutput(tagCmd); err != nil { + t.Fatalf("image tagging failed: %s, %v", out, err) } pushCmd := exec.Command(dockerBinary, "push", repoName) - out, exitCode, err = runCommandWithOutput(pushCmd) - errorOut(err, t, fmt.Sprintf("%v %v", out, err)) + if out, _, err := runCommandWithOutput(pushCmd); err != nil { + t.Fatalf("pushing the image to the private registry has failed: %s, %v", out, err) + } deleteImages(repoName) - if err != nil || exitCode != 0 { - t.Fatal("pushing the image to the private registry has failed") - } logDone("push - push busybox to private registry") } @@ -39,10 +34,8 @@ func TestPushUnprefixedRepo(t *testing.T) { // skip this test until we're able to use a registry t.Skip() pushCmd := exec.Command(dockerBinary, "push", "busybox") - _, exitCode, err := runCommandWithOutput(pushCmd) - - if err == nil || exitCode == 0 { - t.Fatal("pushing an unprefixed repo didn't result in a non-zero exit status") + if out, _, err := runCommandWithOutput(pushCmd); err == nil { + t.Fatalf("pushing an unprefixed repo didn't result in a non-zero exit status: %s", out) } logDone("push - push unprefixed busybox repo --> must fail") } diff --git a/integration-cli/docker_cli_restart_test.go b/integration-cli/docker_cli_restart_test.go index 7dc1819fe3..3a390ef2c3 100644 --- 
a/integration-cli/docker_cli_restart_test.go +++ b/integration-cli/docker_cli_restart_test.go @@ -10,29 +10,37 @@ import ( func TestRestartStoppedContainer(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "echo", "foobar") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } cleanedContainerID := stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "wait", cleanedContainerID) - out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if out, _, err = runCommandWithOutput(runCmd); err != nil { + t.Fatal(out, err) + } runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if out != "foobar\n" { t.Errorf("container should've printed 'foobar'") } runCmd = exec.Command(dockerBinary, "restart", cleanedContainerID) - out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if out, _, err = runCommandWithOutput(runCmd); err != nil { + t.Fatal(out, err) + } runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if out != "foobar\nfoobar\n" { t.Errorf("container should've printed 'foobar' twice") @@ -46,7 +54,9 @@ func TestRestartStoppedContainer(t *testing.T) { func TestRestartRunningContainer(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "echo foobar && sleep 30 && echo 'should not print this'") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } cleanedContainerID := stripTrailingCharacters(out) @@ -54,19 +64,24 @@ func TestRestartRunningContainer(t *testing.T) { runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + 
t.Fatal(out, err) + } if out != "foobar\n" { t.Errorf("container should've printed 'foobar'") } runCmd = exec.Command(dockerBinary, "restart", "-t", "1", cleanedContainerID) - out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if out, _, err = runCommandWithOutput(runCmd); err != nil { + t.Fatal(out, err) + } runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } time.Sleep(1 * time.Second) @@ -83,13 +98,17 @@ func TestRestartRunningContainer(t *testing.T) { func TestRestartWithVolumes(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "-v", "/test", "busybox", "top") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } cleanedContainerID := stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ len .Volumes }}", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if out = strings.Trim(out, " \n\r"); out != "1" { t.Errorf("expect 1 volume received %s", out) @@ -97,15 +116,20 @@ func TestRestartWithVolumes(t *testing.T) { runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ .Volumes }}", cleanedContainerID) volumes, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, volumes) + if err != nil { + t.Fatal(volumes, err) + } runCmd = exec.Command(dockerBinary, "restart", cleanedContainerID) - out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if out, _, err = runCommandWithOutput(runCmd); err != nil { + t.Fatal(out, err) + } runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ len .Volumes }}", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if out = strings.Trim(out, " \n\r"); out != "1" { t.Errorf("expect 1 volume after restart 
received %s", out) @@ -113,7 +137,9 @@ func TestRestartWithVolumes(t *testing.T) { runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ .Volumes }}", cleanedContainerID) volumesAfterRestart, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, volumesAfterRestart) + if err != nil { + t.Fatal(volumesAfterRestart, err) + } if volumes != volumesAfterRestart { volumes = strings.Trim(volumes, " \n\r") diff --git a/integration-cli/docker_cli_rm_test.go b/integration-cli/docker_cli_rm_test.go index 6c8dc38089..6681840ecd 100644 --- a/integration-cli/docker_cli_rm_test.go +++ b/integration-cli/docker_cli_rm_test.go @@ -102,10 +102,11 @@ func TestRmContainerOrphaning(t *testing.T) { t.Fatalf("%v: %s", err, out) } if !strings.Contains(out, img1) { - t.Fatalf("Orphaned container (could not find '%s' in docker images): %s", img1, out) + t.Fatalf("Orphaned container (could not find %q in docker images): %s", img1, out) } deleteAllContainers() + deleteImages(img1) logDone("rm - container orphaning") } @@ -114,7 +115,7 @@ func TestRmInvalidContainer(t *testing.T) { if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rm", "unknown")); err == nil { t.Fatal("Expected error on rm unknown container, got none") } else if !strings.Contains(out, "failed to remove one or more containers") { - t.Fatal("Expected output to contain 'failed to remove one or more containers', got %q", out) + t.Fatalf("Expected output to contain 'failed to remove one or more containers', got %q", out) } logDone("rm - delete unknown container") diff --git a/integration-cli/docker_cli_rmi_test.go b/integration-cli/docker_cli_rmi_test.go index 4fb150bab8..63d9f92983 100644 --- a/integration-cli/docker_cli_rmi_test.go +++ b/integration-cli/docker_cli_rmi_test.go @@ -1,7 +1,6 @@ package main import ( - "fmt" "os/exec" "strings" "testing" @@ -13,7 +12,9 @@ func TestRmiWithContainerFails(t *testing.T) { // create a container runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", 
"true") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, fmt.Sprintf("failed to create a container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to create a container: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) @@ -28,7 +29,7 @@ func TestRmiWithContainerFails(t *testing.T) { } // make sure it didn't delete the busybox name - images, _, _ := cmd(t, "images") + images, _, _ := dockerCmd(t, "images") if !strings.Contains(images, "busybox") { t.Fatalf("The name 'busybox' should not have been removed from images: %q", images) } @@ -39,41 +40,41 @@ func TestRmiWithContainerFails(t *testing.T) { } func TestRmiTag(t *testing.T) { - imagesBefore, _, _ := cmd(t, "images", "-a") - cmd(t, "tag", "busybox", "utest:tag1") - cmd(t, "tag", "busybox", "utest/docker:tag2") - cmd(t, "tag", "busybox", "utest:5000/docker:tag3") + imagesBefore, _, _ := dockerCmd(t, "images", "-a") + dockerCmd(t, "tag", "busybox", "utest:tag1") + dockerCmd(t, "tag", "busybox", "utest/docker:tag2") + dockerCmd(t, "tag", "busybox", "utest:5000/docker:tag3") { - imagesAfter, _, _ := cmd(t, "images", "-a") - if nLines(imagesAfter) != nLines(imagesBefore)+3 { + imagesAfter, _, _ := dockerCmd(t, "images", "-a") + if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+3 { t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) } } - cmd(t, "rmi", "utest/docker:tag2") + dockerCmd(t, "rmi", "utest/docker:tag2") { - imagesAfter, _, _ := cmd(t, "images", "-a") - if nLines(imagesAfter) != nLines(imagesBefore)+2 { + imagesAfter, _, _ := dockerCmd(t, "images", "-a") + if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+2 { t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) } } - cmd(t, "rmi", "utest:5000/docker:tag3") + dockerCmd(t, "rmi", "utest:5000/docker:tag3") { - imagesAfter, _, _ := cmd(t, "images", "-a") - if nLines(imagesAfter) != nLines(imagesBefore)+1 { + imagesAfter, _, _ := dockerCmd(t, 
"images", "-a") + if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+1 { t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) } } - cmd(t, "rmi", "utest:tag1") + dockerCmd(t, "rmi", "utest:tag1") { - imagesAfter, _, _ := cmd(t, "images", "-a") - if nLines(imagesAfter) != nLines(imagesBefore)+0 { + imagesAfter, _, _ := dockerCmd(t, "images", "-a") + if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+0 { t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) } } - logDone("tag,rmi- tagging the same images multiple times then removing tags") + logDone("rmi - tag,rmi- tagging the same images multiple times then removing tags") } func TestRmiTagWithExistingContainers(t *testing.T) { @@ -98,3 +99,23 @@ func TestRmiTagWithExistingContainers(t *testing.T) { logDone("rmi - delete tag with existing containers") } + +func TestRmiForceWithExistingContainers(t *testing.T) { + image := "busybox-clone" + if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "build", "--no-cache", "-t", image, "/docker-busybox")); err != nil { + t.Fatalf("Could not build %s: %s, %v", image, out, err) + } + + if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", "test-force-rmi", image, "/bin/true")); err != nil { + t.Fatalf("Could not run container: %s, %v", out, err) + } + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rmi", "-f", image)) + if err != nil { + t.Fatalf("Could not remove image %s: %s, %v", image, out, err) + } + + deleteAllContainers() + + logDone("rmi - force delete with existing containers") +} diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index 417368f4f0..0b56f235fe 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -13,11 +13,13 @@ import ( "reflect" "regexp" "sort" + "strconv" "strings" "sync" "testing" "time" + "github.com/docker/docker/nat" 
"github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/networkfs/resolvconf" "github.com/kr/pty" @@ -42,7 +44,7 @@ func TestRunEchoStdout(t *testing.T) { // "test" should be printed func TestRunEchoStdoutWithMemoryLimit(t *testing.T) { - runCmd := exec.Command(dockerBinary, "run", "-m", "4m", "busybox", "echo", "test") + runCmd := exec.Command(dockerBinary, "run", "-m", "16m", "busybox", "echo", "test") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatalf("failed to run container: %v, output: %q", err, out) @@ -79,7 +81,7 @@ func TestRunEchoStdoutWitCPULimit(t *testing.T) { // "test" should be printed func TestRunEchoStdoutWithCPUAndMemoryLimit(t *testing.T) { - runCmd := exec.Command(dockerBinary, "run", "-c", "1000", "-m", "4m", "busybox", "echo", "test") + runCmd := exec.Command(dockerBinary, "run", "-c", "1000", "-m", "16m", "busybox", "echo", "test") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatalf("failed to run container: %v, output: %q", err, out) @@ -142,8 +144,6 @@ func TestRunPingGoogle(t *testing.T) { t.Fatalf("failed to run container: %v, output: %q", err, out) } - errorOut(err, t, "container should've been able to ping 8.8.8.8") - deleteAllContainers() logDone("run - ping 8.8.8.8") @@ -153,11 +153,8 @@ func TestRunPingGoogle(t *testing.T) { // some versions of lxc might make this test fail func TestRunExitCodeZero(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "busybox", "true") - exitCode, err := runCommand(runCmd) - errorOut(err, t, fmt.Sprintf("%s", err)) - - if exitCode != 0 { - t.Errorf("container should've exited with exit code 0") + if out, _, err := runCommandWithOutput(runCmd); err != nil { + t.Errorf("container should've exited with exit code 0: %s, %v", out, err) } deleteAllContainers() @@ -194,26 +191,31 @@ func TestRunStdinPipe(t *testing.T) { out = stripTrailingCharacters(out) inspectCmd := exec.Command(dockerBinary, "inspect", out) - inspectOut, _, err := 
runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("out should've been a container id: %s %s", out, inspectOut)) + if out, _, err := runCommandWithOutput(inspectCmd); err != nil { + t.Fatalf("out should've been a container id: %s %v", out, err) + } waitCmd := exec.Command(dockerBinary, "wait", out) - _, _, err = runCommandWithOutput(waitCmd) - errorOut(err, t, fmt.Sprintf("error thrown while waiting for container: %s", out)) + if waitOut, _, err := runCommandWithOutput(waitCmd); err != nil { + t.Fatalf("error thrown while waiting for container: %s, %v", waitOut, err) + } logsCmd := exec.Command(dockerBinary, "logs", out) - containerLogs, _, err := runCommandWithOutput(logsCmd) - errorOut(err, t, fmt.Sprintf("error thrown while trying to get container logs: %s", err)) + logsOut, _, err := runCommandWithOutput(logsCmd) + if err != nil { + t.Fatalf("error thrown while trying to get container logs: %s, %v", logsOut, err) + } - containerLogs = stripTrailingCharacters(containerLogs) + containerLogs := stripTrailingCharacters(logsOut) if containerLogs != "blahblah" { t.Errorf("logs didn't print the container's logs %s", containerLogs) } rmCmd := exec.Command(dockerBinary, "rm", out) - _, _, err = runCommandWithOutput(rmCmd) - errorOut(err, t, fmt.Sprintf("rm failed to remove container %s", err)) + if out, _, err = runCommandWithOutput(rmCmd); err != nil { + t.Fatalf("rm failed to remove container: %s, %v", out, err) + } deleteAllContainers() @@ -231,16 +233,20 @@ func TestRunDetachedContainerIDPrinting(t *testing.T) { out = stripTrailingCharacters(out) inspectCmd := exec.Command(dockerBinary, "inspect", out) - inspectOut, _, err := runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("out should've been a container id: %s %s", out, inspectOut)) + if inspectOut, _, err := runCommandWithOutput(inspectCmd); err != nil { + t.Fatalf("out should've been a container id: %s %v", inspectOut, err) + } waitCmd := exec.Command(dockerBinary, "wait", out) - _, 
_, err = runCommandWithOutput(waitCmd) - errorOut(err, t, fmt.Sprintf("error thrown while waiting for container: %s", out)) + if waitOut, _, err := runCommandWithOutput(waitCmd); err != nil { + t.Fatalf("error thrown while waiting for container: %s, %v", waitOut, err) + } rmCmd := exec.Command(dockerBinary, "rm", out) rmOut, _, err := runCommandWithOutput(rmCmd) - errorOut(err, t, "rm failed to remove container") + if err != nil { + t.Fatalf("rm failed to remove container: %s, %v", rmOut, err) + } rmOut = stripTrailingCharacters(rmOut) if rmOut != out { @@ -268,7 +274,9 @@ func TestRunWorkingDirectory(t *testing.T) { runCmd = exec.Command(dockerBinary, "run", "--workdir", "/root", "busybox", "pwd") out, _, _, err = runCommandWithStdoutStderr(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } out = stripTrailingCharacters(out) @@ -555,7 +563,8 @@ func TestRunCreateVolumeWithSymlink(t *testing.T) { // Tests that a volume path that has a symlink exists in a container mounting it with `--volumes-from`. 
func TestRunVolumesFromSymlinkPath(t *testing.T) { - buildCmd := exec.Command(dockerBinary, "build", "-t", "docker-test-volumesfromsymlinkpath", "-") + name := "docker-test-volumesfromsymlinkpath" + buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-") buildCmd.Stdin = strings.NewReader(`FROM busybox RUN mkdir /baz && ln -s /baz /foo VOLUME ["/foo/bar"]`) @@ -565,7 +574,7 @@ func TestRunVolumesFromSymlinkPath(t *testing.T) { t.Fatalf("could not build 'docker-test-volumesfromsymlinkpath': %v", err) } - cmd := exec.Command(dockerBinary, "run", "--name", "test-volumesfromsymlinkpath", "docker-test-volumesfromsymlinkpath") + cmd := exec.Command(dockerBinary, "run", "--name", "test-volumesfromsymlinkpath", name) exitCode, err := runCommand(cmd) if err != nil || exitCode != 0 { t.Fatalf("[run] (volume) err: %v, exitcode: %d", err, exitCode) @@ -577,8 +586,8 @@ func TestRunVolumesFromSymlinkPath(t *testing.T) { t.Fatalf("[run] err: %v, exitcode: %d", err, exitCode) } - deleteImages("docker-test-volumesfromsymlinkpath") deleteAllContainers() + deleteImages(name) logDone("run - volumes-from symlink path") } @@ -789,7 +798,7 @@ func TestRunLoopbackWhenNetworkDisabled(t *testing.T) { } func TestRunNetHostNotAllowedWithLinks(t *testing.T) { - _, _, err := cmd(t, "run", "--name", "linked", "busybox", "true") + _, _, err := dockerCmd(t, "run", "--name", "linked", "busybox", "true") cmd := exec.Command(dockerBinary, "run", "--net=host", "--link", "linked:linked", "busybox", "true") _, _, err = runCommandWithOutput(cmd) @@ -884,6 +893,7 @@ func TestRunUnPrivilegedCanMknod(t *testing.T) { } func TestRunCapDropInvalid(t *testing.T) { + defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--cap-drop=CHPASS", "busybox", "ls") out, _, err := runCommandWithOutput(cmd) if err == nil { @@ -954,6 +964,8 @@ func TestRunCapDropALLAddMknodCannotMknod(t *testing.T) { } func TestRunCapAddInvalid(t *testing.T) { + defer deleteAllContainers() + cmd := 
exec.Command(dockerBinary, "run", "--cap-add=CHPASS", "busybox", "ls") out, _, err := runCommandWithOutput(cmd) if err == nil { @@ -1183,7 +1195,7 @@ func TestRunModeHostname(t *testing.T) { t.Fatal(err) } if actual := strings.Trim(out, "\r\n"); actual != hostname { - t.Fatalf("expected %q, but says: '%s'", hostname, actual) + t.Fatalf("expected %q, but says: %q", hostname, actual) } deleteAllContainers() @@ -1192,7 +1204,7 @@ func TestRunModeHostname(t *testing.T) { } func TestRunRootWorkdir(t *testing.T) { - s, _, err := cmd(t, "run", "--workdir", "/", "busybox", "pwd") + s, _, err := dockerCmd(t, "run", "--workdir", "/", "busybox", "pwd") if err != nil { t.Fatal(s, err) } @@ -1206,7 +1218,7 @@ func TestRunRootWorkdir(t *testing.T) { } func TestRunAllowBindMountingRoot(t *testing.T) { - s, _, err := cmd(t, "run", "-v", "/:/host", "busybox", "ls", "/host") + s, _, err := dockerCmd(t, "run", "-v", "/:/host", "busybox", "ls", "/host") if err != nil { t.Fatal(s, err) } @@ -1245,6 +1257,7 @@ func TestRunWithVolumesIsRecursive(t *testing.T) { if err := mount.Mount("tmpfs", tmpfsDir, "tmpfs", ""); err != nil { t.Fatalf("failed to create a tmpfs mount at %s - %s", tmpfsDir, err) } + defer mount.Unmount(tmpfsDir) f, err := ioutil.TempFile(tmpfsDir, "touch-me") if err != nil { @@ -1358,11 +1371,11 @@ func TestRunDnsOptionsBasedOnHostResolvConf(t *testing.T) { actualSearch := resolvconf.GetSearchDomains([]byte(out)) if len(actualSearch) != len(hostSearch) { - t.Fatalf("expected %q search domain(s), but it has: '%s'", len(hostSearch), len(actualSearch)) + t.Fatalf("expected %q search domain(s), but it has: %q", len(hostSearch), len(actualSearch)) } for i := range actualSearch { if actualSearch[i] != hostSearch[i] { - t.Fatalf("expected %q domain, but says: '%s'", actualSearch[i], hostSearch[i]) + t.Fatalf("expected %q domain, but says: %q", actualSearch[i], hostSearch[i]) } } @@ -1374,11 +1387,11 @@ func TestRunDnsOptionsBasedOnHostResolvConf(t *testing.T) { 
actualNameservers := resolvconf.GetNameservers([]byte(out)) if len(actualNameservers) != len(hostNamservers) { - t.Fatalf("expected %q nameserver(s), but it has: '%s'", len(hostNamservers), len(actualNameservers)) + t.Fatalf("expected %q nameserver(s), but it has: %q", len(hostNamservers), len(actualNameservers)) } for i := range actualNameservers { if actualNameservers[i] != hostNamservers[i] { - t.Fatalf("expected %q nameserver, but says: '%s'", actualNameservers[i], hostNamservers[i]) + t.Fatalf("expected %q nameserver, but says: %q", actualNameservers[i], hostNamservers[i]) } } @@ -1422,7 +1435,7 @@ func TestRunDnsOptionsBasedOnHostResolvConf(t *testing.T) { } for i := range actualSearch { if actualSearch[i] != hostSearch[i] { - t.Fatalf("expected %q domain, but says: '%s'", actualSearch[i], hostSearch[i]) + t.Fatalf("expected %q domain, but says: %q", actualSearch[i], hostSearch[i]) } } @@ -1605,7 +1618,7 @@ func TestRunCopyVolumeContent(t *testing.T) { } // Test that the content is copied from the image to the volume - cmd := exec.Command(dockerBinary, "run", "--rm", "-v", "/hello", name, "sh", "-c", "find", "/hello") + cmd := exec.Command(dockerBinary, "run", "--rm", "-v", "/hello", name, "find", "/hello") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) @@ -1710,6 +1723,8 @@ func TestRunExitOnStdinClose(t *testing.T) { // Test for #2267 func TestRunWriteHostsFileAndNotCommit(t *testing.T) { + defer deleteAllContainers() + name := "writehosts" cmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hosts && cat /etc/hosts") out, _, err := runCommandWithOutput(cmd) @@ -1737,6 +1752,8 @@ func TestRunWriteHostsFileAndNotCommit(t *testing.T) { // Test for #2267 func TestRunWriteHostnameFileAndNotCommit(t *testing.T) { + defer deleteAllContainers() + name := "writehostname" cmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hostname && cat 
/etc/hostname") out, _, err := runCommandWithOutput(cmd) @@ -1764,6 +1781,8 @@ func TestRunWriteHostnameFileAndNotCommit(t *testing.T) { // Test for #2267 func TestRunWriteResolvFileAndNotCommit(t *testing.T) { + defer deleteAllContainers() + name := "writeresolv" cmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/resolv.conf && cat /etc/resolv.conf") out, _, err := runCommandWithOutput(cmd) @@ -1790,13 +1809,15 @@ func TestRunWriteResolvFileAndNotCommit(t *testing.T) { } func TestRunWithBadDevice(t *testing.T) { + defer deleteAllContainers() + name := "baddevice" cmd := exec.Command(dockerBinary, "run", "--name", name, "--device", "/etc", "busybox", "true") out, _, err := runCommandWithOutput(cmd) if err == nil { t.Fatal("Run should fail with bad device") } - expected := `"/etc": not a device node` + expected := `\"/etc\": not a device node` if !strings.Contains(out, expected) { t.Fatalf("Output should contain %q, actual out: %q", expected, out) } @@ -1804,6 +1825,8 @@ func TestRunWithBadDevice(t *testing.T) { } func TestRunEntrypoint(t *testing.T) { + defer deleteAllContainers() + name := "entrypoint" cmd := exec.Command(dockerBinary, "run", "--name", name, "--entrypoint", "/bin/echo", "busybox", "-n", "foobar") out, _, err := runCommandWithOutput(cmd) @@ -1818,6 +1841,8 @@ func TestRunEntrypoint(t *testing.T) { } func TestRunBindMounts(t *testing.T) { + defer deleteAllContainers() + tmpDir, err := ioutil.TempDir("", "docker-test-container") if err != nil { t.Fatal(err) @@ -1872,37 +1897,25 @@ func TestRunMutableNetworkFiles(t *testing.T) { for _, fn := range []string{"resolv.conf", "hosts"} { deleteAllContainers() - out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "c1", "busybox", "sh", "-c", fmt.Sprintf("echo success >/etc/%s; while true; do sleep 1; done", fn))) - if err != nil { - t.Fatal(err, out) - } - - time.Sleep(1 * time.Second) - - contID := strings.TrimSpace(out) - 
- f, err := os.Open(filepath.Join("/var/lib/docker/containers", contID, fn)) + content, err := runCommandAndReadContainerFile(fn, exec.Command(dockerBinary, "run", "-d", "--name", "c1", "busybox", "sh", "-c", fmt.Sprintf("echo success >/etc/%s; while true; do sleep 1; done", fn))) if err != nil { t.Fatal(err) } - content, err := ioutil.ReadAll(f) - f.Close() - if strings.TrimSpace(string(content)) != "success" { t.Fatal("Content was not what was modified in the container", string(content)) } - out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "c2", "busybox", "sh", "-c", fmt.Sprintf("while true; do cat /etc/%s; sleep 1; done", fn))) + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "c2", "busybox", "sh", "-c", fmt.Sprintf("while true; do cat /etc/%s; sleep 1; done", fn))) if err != nil { t.Fatal(err) } - contID = strings.TrimSpace(out) + contID := strings.TrimSpace(out) - resolvConfPath := filepath.Join("/var/lib/docker/containers", contID, fn) + resolvConfPath := containerStorageFile(contID, fn) - f, err = os.OpenFile(resolvConfPath, os.O_WRONLY|os.O_SYNC|os.O_APPEND, 0644) + f, err := os.OpenFile(resolvConfPath, os.O_WRONLY|os.O_SYNC|os.O_APPEND, 0644) if err != nil { t.Fatal(err) } @@ -2010,6 +2023,41 @@ func TestRunNetworkNotInitializedNoneMode(t *testing.T) { logDone("run - network must not be initialized in 'none' mode") } +func TestRunSetMacAddress(t *testing.T) { + mac := "12:34:56:78:9a:bc" + cmd := exec.Command("/bin/bash", "-c", dockerBinary+` run -i --rm --mac-address=`+mac+` busybox /bin/sh -c "ip link show eth0 | tail -1 | awk '{ print \$2 }'"`) + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + actualMac := strings.TrimSpace(out) + if actualMac != mac { + t.Fatalf("Set MAC address with --mac-address failed. 
The container has an incorrect MAC address: %q, expected: %q", actualMac, mac) + } + + deleteAllContainers() + logDone("run - setting MAC address with --mac-address") +} + +func TestRunInspectMacAddress(t *testing.T) { + mac := "12:34:56:78:9a:bc" + cmd := exec.Command(dockerBinary, "run", "-d", "--mac-address="+mac, "busybox", "top") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + id := strings.TrimSpace(out) + inspectedMac, err := inspectField(id, "NetworkSettings.MacAddress") + if err != nil { + t.Fatal(err) + } + if inspectedMac != mac { + t.Fatalf("docker inspect outputs wrong MAC address: %q, should be: %q", inspectedMac, mac) + } + deleteAllContainers() + logDone("run - inspecting MAC address") +} + func TestRunDeallocatePortOnMissingIptablesRule(t *testing.T) { cmd := exec.Command(dockerBinary, "run", "-d", "-p", "23:23", "busybox", "top") out, _, err := runCommandWithOutput(cmd) @@ -2260,7 +2308,7 @@ func TestRunRedirectStdout(t *testing.T) { }() select { - case <-time.After(time.Second): + case <-time.After(10 * time.Second): t.Fatal("command timeout") case <-ch: } @@ -2392,8 +2440,6 @@ func TestRunNoOutputFromPullInStdout(t *testing.T) { } func TestRunVolumesCleanPaths(t *testing.T) { - defer deleteAllContainers() - if _, err := buildImage("run_volumes_clean_paths", `FROM busybox VOLUME /foo/`, @@ -2401,6 +2447,7 @@ func TestRunVolumesCleanPaths(t *testing.T) { t.Fatal(err) } defer deleteImages("run_volumes_clean_paths") + defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-v", "/foo", "-v", "/bar/", "--name", "dark_helmet", "run_volumes_clean_paths") if out, _, err := runCommandWithOutput(cmd); err != nil { @@ -2440,3 +2487,287 @@ func TestRunVolumesCleanPaths(t *testing.T) { logDone("run - volume paths are cleaned") } + +// Regression test for #3631 +func TestRunSlowStdoutConsumer(t *testing.T) { + defer deleteAllContainers() + + c := exec.Command("/bin/bash", "-c", dockerBinary+` run --rm -i busybox 
/bin/sh -c "dd if=/dev/zero of=/foo bs=1024 count=2000 &>/dev/null; catv /foo"`) + + stdout, err := c.StdoutPipe() + if err != nil { + t.Fatal(err) + } + + if err := c.Start(); err != nil { + t.Fatal(err) + } + n, err := consumeWithSpeed(stdout, 10000, 5*time.Millisecond, nil) + if err != nil { + t.Fatal(err) + } + + expected := 2 * 1024 * 2000 + if n != expected { + t.Fatalf("Expected %d, got %d", expected, n) + } + + logDone("run - slow consumer") +} + +func TestRunAllowPortRangeThroughExpose(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-d", "--expose", "3000-3003", "-P", "busybox", "top") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + id := strings.TrimSpace(out) + portstr, err := inspectFieldJSON(id, "NetworkSettings.Ports") + if err != nil { + t.Fatal(err) + } + var ports nat.PortMap + err = unmarshalJSON([]byte(portstr), &ports) + for port, binding := range ports { + portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0]) + if portnum < 3000 || portnum > 3003 { + t.Fatalf("Port %d is out of range: binding=%v, out=%q", portnum, binding, out) + } + if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 { + t.Fatal("Port is not mapped for the port "+port, out) + } + } + if err := deleteContainer(id); err != nil { + t.Fatal(err) + } + logDone("run - allow port range through --expose flag") +} + +func TestRunUnknownCommand(t *testing.T) { + defer deleteAllContainers() + runCmd := exec.Command(dockerBinary, "create", "busybox", "/bin/nada") + cID, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatalf("Failed to create container: %v, output: %q", err, cID) + } + cID = strings.TrimSpace(cID) + + runCmd = exec.Command(dockerBinary, "start", cID) + _, _, _, err = runCommandWithStdoutStderr(runCmd) + if err == nil { + t.Fatalf("Container should not have been able to start!") + } + + runCmd = exec.Command(dockerBinary, "inspect", "--format={{.State.ExitCode}}", cID) + rc, _, _, err2 := 
runCommandWithStdoutStderr(runCmd) + rc = strings.TrimSpace(rc) + + if err2 != nil { + t.Fatalf("Error getting status of container: %v", err2) + } + + if rc != "-1" { + t.Fatalf("ExitCode(%v) was supposed to be -1", rc) + } + + logDone("run - Unknown Command") +} + +func TestRunModeIpcHost(t *testing.T) { + hostIpc, err := os.Readlink("/proc/1/ns/ipc") + if err != nil { + t.Fatal(err) + } + + cmd := exec.Command(dockerBinary, "run", "--ipc=host", "busybox", "readlink", "/proc/self/ns/ipc") + out2, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out2) + } + + out2 = strings.Trim(out2, "\n") + if hostIpc != out2 { + t.Fatalf("IPC different with --ipc=host %s != %s\n", hostIpc, out2) + } + + cmd = exec.Command(dockerBinary, "run", "busybox", "readlink", "/proc/self/ns/ipc") + out2, _, err = runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out2) + } + + out2 = strings.Trim(out2, "\n") + if hostIpc == out2 { + t.Fatalf("IPC should be different without --ipc=host %s != %s\n", hostIpc, out2) + } + deleteAllContainers() + + logDone("run - ipc host mode") +} + +func TestRunModeIpcContainer(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + id := strings.TrimSpace(out) + state, err := inspectField(id, "State.Running") + if err != nil { + t.Fatal(err) + } + if state != "true" { + t.Fatal("Container state is 'not running'") + } + pid1, err := inspectField(id, "State.Pid") + if err != nil { + t.Fatal(err) + } + + parentContainerIpc, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/ipc", pid1)) + if err != nil { + t.Fatal(err) + } + cmd = exec.Command(dockerBinary, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "readlink", "/proc/self/ns/ipc") + out2, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out2) + } + + out2 = strings.Trim(out2, "\n") + if parentContainerIpc != out2 { + 
t.Fatalf("IPC different with --ipc=container:%s %s != %s\n", id, parentContainerIpc, out2) + } + deleteAllContainers() + + logDone("run - ipc container mode") +} + +func TestContainerNetworkMode(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + id := strings.TrimSpace(out) + if err := waitRun(id); err != nil { + t.Fatal(err) + } + pid1, err := inspectField(id, "State.Pid") + if err != nil { + t.Fatal(err) + } + + parentContainerNet, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/net", pid1)) + if err != nil { + t.Fatal(err) + } + cmd = exec.Command(dockerBinary, "run", fmt.Sprintf("--net=container:%s", id), "busybox", "readlink", "/proc/self/ns/net") + out2, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out2) + } + + out2 = strings.Trim(out2, "\n") + if parentContainerNet != out2 { + t.Fatalf("NET different with --net=container:%s %s != %s\n", id, parentContainerNet, out2) + } + deleteAllContainers() + + logDone("run - container shared network namespace") +} + +func TestRunTLSverify(t *testing.T) { + cmd := exec.Command(dockerBinary, "ps") + out, ec, err := runCommandWithOutput(cmd) + if err != nil || ec != 0 { + t.Fatalf("Should have worked: %v:\n%v", err, out) + } + + // Regardless of whether we specify true or false we need to + // test to make sure tls is turned on if --tlsverify is specified at all + + cmd = exec.Command(dockerBinary, "--tlsverify=false", "ps") + out, ec, err = runCommandWithOutput(cmd) + if err == nil || ec == 0 || !strings.Contains(out, "trying to connect") { + t.Fatalf("Should have failed: \nec:%v\nout:%v\nerr:%v", ec, out, err) + } + + cmd = exec.Command(dockerBinary, "--tlsverify=true", "ps") + out, ec, err = runCommandWithOutput(cmd) + if err == nil || ec == 0 || !strings.Contains(out, "cert") { + t.Fatalf("Should have failed: \nec:%v\nout:%v\nerr:%v", ec, out, err) + } + + 
logDone("run - verify tls is set for --tlsverify") +} + +func TestRunPortFromDockerRangeInUse(t *testing.T) { + defer deleteAllContainers() + // first find allocator current position + cmd := exec.Command(dockerBinary, "run", "-d", "-p", ":80", "busybox", "top") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(out, err) + } + id := strings.TrimSpace(out) + cmd = exec.Command(dockerBinary, "port", id) + out, _, err = runCommandWithOutput(cmd) + if err != nil { + t.Fatal(out, err) + } + out = strings.TrimSpace(out) + out = strings.Split(out, ":")[1] + lastPort, err := strconv.Atoi(out) + if err != nil { + t.Fatal(err) + } + port := lastPort + 1 + l, err := net.Listen("tcp", ":"+strconv.Itoa(port)) + if err != nil { + t.Fatal(err) + } + defer l.Close() + cmd = exec.Command(dockerBinary, "run", "-d", "-p", ":80", "busybox", "top") + out, _, err = runCommandWithOutput(cmd) + if err != nil { + t.Fatal(out, err) + } + id = strings.TrimSpace(out) + cmd = exec.Command(dockerBinary, "port", id) + out, _, err = runCommandWithOutput(cmd) + if err != nil { + t.Fatal(out, err) + } + + logDone("run - find another port if port from autorange already bound") +} + +func TestRunTtyWithPipe(t *testing.T) { + defer deleteAllContainers() + + done := make(chan struct{}) + go func() { + defer close(done) + + cmd := exec.Command(dockerBinary, "run", "-ti", "busybox", "true") + if _, err := cmd.StdinPipe(); err != nil { + t.Fatal(err) + } + + expected := "cannot enable tty mode" + if out, _, err := runCommandWithOutput(cmd); err == nil { + t.Fatal("run should have failed") + } else if !strings.Contains(out, expected) { + t.Fatalf("run failed with error %q: expected %q", out, expected) + } + }() + + select { + case <-done: + case <-time.After(3 * time.Second): + t.Fatal("container is running but should have failed") + } + + logDone("run - forbid piped stdin with tty") +} diff --git a/integration-cli/docker_cli_save_load_test.go 
b/integration-cli/docker_cli_save_load_test.go index f15e6b51dd..6de5a79636 100644 --- a/integration-cli/docker_cli_save_load_test.go +++ b/integration-cli/docker_cli_save_load_test.go @@ -1,53 +1,68 @@ package main import ( + "bytes" "fmt" "io/ioutil" "os" "os/exec" "path/filepath" "reflect" + "sort" + "strings" "testing" + + "github.com/docker/docker/vendor/src/github.com/kr/pty" ) // save a repo and try to load it using stdout func TestSaveAndLoadRepoStdout(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, fmt.Sprintf("failed to create a container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to create a container: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) repoName := "foobar-save-load-test" inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) - out, _, err = runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("output should've been a container id: %v %v", cleanedContainerID, err)) + if out, _, err = runCommandWithOutput(inspectCmd); err != nil { + t.Fatalf("output should've been a container id: %s, %v", out, err) + } commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName) - out, _, err = runCommandWithOutput(commitCmd) - errorOut(err, t, fmt.Sprintf("failed to commit container: %v %v", out, err)) + if out, _, err = runCommandWithOutput(commitCmd); err != nil { + t.Fatalf("failed to commit container: %s, %v", out, err) + } inspectCmd = exec.Command(dockerBinary, "inspect", repoName) before, _, err := runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("the repo should exist before saving it: %v %v", before, err)) + if err != nil { + t.Fatalf("the repo should exist before saving it: %s, %v", before, err) + } saveCmdTemplate := `%v save %v > /tmp/foobar-save-load-test.tar` saveCmdFinal := fmt.Sprintf(saveCmdTemplate, dockerBinary, repoName) saveCmd := 
exec.Command("bash", "-c", saveCmdFinal) - out, _, err = runCommandWithOutput(saveCmd) - errorOut(err, t, fmt.Sprintf("failed to save repo: %v %v", out, err)) + if out, _, err = runCommandWithOutput(saveCmd); err != nil { + t.Fatalf("failed to save repo: %s, %v", out, err) + } deleteImages(repoName) loadCmdFinal := `cat /tmp/foobar-save-load-test.tar | docker load` loadCmd := exec.Command("bash", "-c", loadCmdFinal) - out, _, err = runCommandWithOutput(loadCmd) - errorOut(err, t, fmt.Sprintf("failed to load repo: %v %v", out, err)) + if out, _, err = runCommandWithOutput(loadCmd); err != nil { + t.Fatalf("failed to load repo: %s, %v", out, err) + } inspectCmd = exec.Command(dockerBinary, "inspect", repoName) after, _, err := runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("the repo should exist after loading it: %v %v", after, err)) + if err != nil { + t.Fatalf("the repo should exist after loading it: %s %v", after, err) + } if before != after { t.Fatalf("inspect is not the same after a save / load") @@ -58,8 +73,35 @@ func TestSaveAndLoadRepoStdout(t *testing.T) { os.Remove("/tmp/foobar-save-load-test.tar") - logDone("save - save a repo using stdout") - logDone("load - load a repo using stdout") + logDone("save - save/load a repo using stdout") + + pty, tty, err := pty.Open() + if err != nil { + t.Fatalf("Could not open pty: %v", err) + } + cmd := exec.Command(dockerBinary, "save", repoName) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + if err := cmd.Start(); err != nil { + t.Fatalf("start err: %v", err) + } + if err := cmd.Wait(); err == nil { + t.Fatal("did not break writing to a TTY") + } + + buf := make([]byte, 1024) + + n, err := pty.Read(buf) + if err != nil { + t.Fatal("could not read tty output") + } + + if !bytes.Contains(buf[:n], []byte("Cowardly refusing")) { + t.Fatal("help output is not being yielded", out) + } + + logDone("save - do not save to a tty") } // save a repo using gz compression and try to load it using stdout 
@@ -129,92 +171,29 @@ func TestSaveXzAndLoadRepoStdout(t *testing.T) { logDone("load - save a repo with xz compression & load it using stdout") } -// save a repo using xz+gz compression and try to load it using stdout -func TestSaveXzGzAndLoadRepoStdout(t *testing.T) { - tempDir, err := ioutil.TempDir("", "test-save-xz-gz-load-repo-stdout") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - tarballPath := filepath.Join(tempDir, "foobar-save-load-test.tar.xz.gz") - - runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") - out, _, err := runCommandWithOutput(runCmd) - if err != nil { - t.Fatalf("failed to create a container: %v %v", out, err) - } - - cleanedContainerID := stripTrailingCharacters(out) - - repoName := "foobar-save-load-test-xz-gz" - - inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) - out, _, err = runCommandWithOutput(inspectCmd) - if err != nil { - t.Fatalf("output should've been a container id: %v %v", cleanedContainerID, err) - } - - commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName) - out, _, err = runCommandWithOutput(commitCmd) - if err != nil { - t.Fatalf("failed to commit container: %v %v", out, err) - } - - inspectCmd = exec.Command(dockerBinary, "inspect", repoName) - before, _, err := runCommandWithOutput(inspectCmd) - if err != nil { - t.Fatalf("the repo should exist before saving it: %v %v", before, err) - } - - saveCmdTemplate := `%v save %v | xz -c | gzip -c > %s` - saveCmdFinal := fmt.Sprintf(saveCmdTemplate, dockerBinary, repoName, tarballPath) - saveCmd := exec.Command("bash", "-c", saveCmdFinal) - out, _, err = runCommandWithOutput(saveCmd) - if err != nil { - t.Fatalf("failed to save repo: %v %v", out, err) - } - - deleteImages(repoName) - - loadCmdFinal := fmt.Sprintf(`cat %s | docker load`, tarballPath) - loadCmd := exec.Command("bash", "-c", loadCmdFinal) - out, _, err = runCommandWithOutput(loadCmd) - if err == nil { - t.Fatalf("expected 
error, but succeeded with no error and output: %v", out) - } - - inspectCmd = exec.Command(dockerBinary, "inspect", repoName) - after, _, err := runCommandWithOutput(inspectCmd) - if err == nil { - t.Fatalf("the repo should not exist: %v", after) - } - - deleteContainer(cleanedContainerID) - deleteImages(repoName) - - logDone("load - save a repo with xz+gz compression & load it using stdout") -} - func TestSaveSingleTag(t *testing.T) { repoName := "foobar-save-single-tag-test" tagCmdFinal := fmt.Sprintf("%v tag busybox:latest %v:latest", dockerBinary, repoName) tagCmd := exec.Command("bash", "-c", tagCmdFinal) - out, _, err := runCommandWithOutput(tagCmd) - errorOut(err, t, fmt.Sprintf("failed to tag repo: %v %v", out, err)) + if out, _, err := runCommandWithOutput(tagCmd); err != nil { + t.Fatalf("failed to tag repo: %s, %v", out, err) + } idCmdFinal := fmt.Sprintf("%v images -q --no-trunc %v", dockerBinary, repoName) idCmd := exec.Command("bash", "-c", idCmdFinal) - out, _, err = runCommandWithOutput(idCmd) - errorOut(err, t, fmt.Sprintf("failed to get repo ID: %v %v", out, err)) + out, _, err := runCommandWithOutput(idCmd) + if err != nil { + t.Fatalf("failed to get repo ID: %s, %v", out, err) + } cleanedImageID := stripTrailingCharacters(out) saveCmdFinal := fmt.Sprintf("%v save %v:latest | tar t | grep -E '(^repositories$|%v)'", dockerBinary, repoName, cleanedImageID) saveCmd := exec.Command("bash", "-c", saveCmdFinal) - out, _, err = runCommandWithOutput(saveCmd) - errorOut(err, t, fmt.Sprintf("failed to save repo with image ID and 'repositories' file: %v %v", out, err)) + if out, _, err = runCommandWithOutput(saveCmd); err != nil { + t.Fatalf("failed to save repo with image ID and 'repositories' file: %s, %v", out, err) + } deleteImages(repoName) @@ -226,27 +205,33 @@ func TestSaveImageId(t *testing.T) { tagCmdFinal := fmt.Sprintf("%v tag scratch:latest %v:latest", dockerBinary, repoName) tagCmd := exec.Command("bash", "-c", tagCmdFinal) - out, _, err := 
runCommandWithOutput(tagCmd) - errorOut(err, t, fmt.Sprintf("failed to tag repo: %v %v", out, err)) + if out, _, err := runCommandWithOutput(tagCmd); err != nil { + t.Fatalf("failed to tag repo: %s, %v", out, err) + } idLongCmdFinal := fmt.Sprintf("%v images -q --no-trunc %v", dockerBinary, repoName) idLongCmd := exec.Command("bash", "-c", idLongCmdFinal) - out, _, err = runCommandWithOutput(idLongCmd) - errorOut(err, t, fmt.Sprintf("failed to get repo ID: %v %v", out, err)) + out, _, err := runCommandWithOutput(idLongCmd) + if err != nil { + t.Fatalf("failed to get repo ID: %s, %v", out, err) + } cleanedLongImageID := stripTrailingCharacters(out) idShortCmdFinal := fmt.Sprintf("%v images -q %v", dockerBinary, repoName) idShortCmd := exec.Command("bash", "-c", idShortCmdFinal) out, _, err = runCommandWithOutput(idShortCmd) - errorOut(err, t, fmt.Sprintf("failed to get repo short ID: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to get repo short ID: %s, %v", out, err) + } cleanedShortImageID := stripTrailingCharacters(out) saveCmdFinal := fmt.Sprintf("%v save %v | tar t | grep %v", dockerBinary, cleanedShortImageID, cleanedLongImageID) saveCmd := exec.Command("bash", "-c", saveCmdFinal) - out, _, err = runCommandWithOutput(saveCmd) - errorOut(err, t, fmt.Sprintf("failed to save repo with image ID: %v %v", out, err)) + if out, _, err = runCommandWithOutput(saveCmd); err != nil { + t.Fatalf("failed to save repo with image ID: %s, %v", out, err) + } deleteImages(repoName) @@ -257,40 +242,50 @@ func TestSaveImageId(t *testing.T) { func TestSaveAndLoadRepoFlags(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, fmt.Sprintf("failed to create a container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to create a container: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) repoName := "foobar-save-load-test" inspectCmd := 
exec.Command(dockerBinary, "inspect", cleanedContainerID) - out, _, err = runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("output should've been a container id: %v %v", cleanedContainerID, err)) + if out, _, err = runCommandWithOutput(inspectCmd); err != nil { + t.Fatalf("output should've been a container id: %s, %v", out, err) + } commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName) - out, _, err = runCommandWithOutput(commitCmd) - errorOut(err, t, fmt.Sprintf("failed to commit container: %v %v", out, err)) + if out, _, err = runCommandWithOutput(commitCmd); err != nil { + t.Fatalf("failed to commit container: %s, %v", out, err) + } inspectCmd = exec.Command(dockerBinary, "inspect", repoName) before, _, err := runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("the repo should exist before saving it: %v %v", before, err)) + if err != nil { + t.Fatalf("the repo should exist before saving it: %s, %v", before, err) + } saveCmdTemplate := `%v save -o /tmp/foobar-save-load-test.tar %v` saveCmdFinal := fmt.Sprintf(saveCmdTemplate, dockerBinary, repoName) saveCmd := exec.Command("bash", "-c", saveCmdFinal) - out, _, err = runCommandWithOutput(saveCmd) - errorOut(err, t, fmt.Sprintf("failed to save repo: %v %v", out, err)) + if out, _, err = runCommandWithOutput(saveCmd); err != nil { + t.Fatalf("failed to save repo: %s, %v", out, err) + } deleteImages(repoName) loadCmdFinal := `docker load -i /tmp/foobar-save-load-test.tar` loadCmd := exec.Command("bash", "-c", loadCmdFinal) - out, _, err = runCommandWithOutput(loadCmd) - errorOut(err, t, fmt.Sprintf("failed to load repo: %v %v", out, err)) + if out, _, err = runCommandWithOutput(loadCmd); err != nil { + t.Fatalf("failed to load repo: %s, %v", out, err) + } inspectCmd = exec.Command(dockerBinary, "inspect", repoName) after, _, err := runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("the repo should exist after loading it: %v %v", after, err)) + if err 
!= nil { + t.Fatalf("the repo should exist after loading it: %s, %v", after, err) + } if before != after { t.Fatalf("inspect is not the same after a save / load") @@ -301,8 +296,7 @@ func TestSaveAndLoadRepoFlags(t *testing.T) { os.Remove("/tmp/foobar-save-load-test.tar") - logDone("save - save a repo using -o") - logDone("load - load a repo using -i") + logDone("save - save a repo using -o && load a repo using -i") } func TestSaveMultipleNames(t *testing.T) { @@ -311,24 +305,90 @@ func TestSaveMultipleNames(t *testing.T) { // Make one image tagCmdFinal := fmt.Sprintf("%v tag scratch:latest %v-one:latest", dockerBinary, repoName) tagCmd := exec.Command("bash", "-c", tagCmdFinal) - out, _, err := runCommandWithOutput(tagCmd) - errorOut(err, t, fmt.Sprintf("failed to tag repo: %v %v", out, err)) + if out, _, err := runCommandWithOutput(tagCmd); err != nil { + t.Fatalf("failed to tag repo: %s, %v", out, err) + } + defer deleteImages(repoName + "-one") + // Make two images tagCmdFinal = fmt.Sprintf("%v tag scratch:latest %v-two:latest", dockerBinary, repoName) tagCmd = exec.Command("bash", "-c", tagCmdFinal) - out, _, err = runCommandWithOutput(tagCmd) - errorOut(err, t, fmt.Sprintf("failed to tag repo: %v %v", out, err)) + if out, _, err := runCommandWithOutput(tagCmd); err != nil { + t.Fatalf("failed to tag repo: %s, %v", out, err) + } + defer deleteImages(repoName + "-two") saveCmdFinal := fmt.Sprintf("%v save %v-one %v-two:latest | tar xO repositories | grep -q -E '(-one|-two)'", dockerBinary, repoName, repoName) saveCmd := exec.Command("bash", "-c", saveCmdFinal) - out, _, err = runCommandWithOutput(saveCmd) - errorOut(err, t, fmt.Sprintf("failed to save multiple repos: %v %v", out, err)) + if out, _, err := runCommandWithOutput(saveCmd); err != nil { + t.Fatalf("failed to save multiple repos: %s, %v", out, err) + } deleteImages(repoName) logDone("save - save by multiple names") } +func TestSaveRepoWithMultipleImages(t *testing.T) { + + makeImage := func(from 
string, tag string) string { + runCmd := exec.Command(dockerBinary, "run", "-d", from, "true") + var ( + out string + err error + ) + if out, _, err = runCommandWithOutput(runCmd); err != nil { + t.Fatalf("failed to create a container: %v %v", out, err) + } + cleanedContainerID := stripTrailingCharacters(out) + + commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, tag) + if out, _, err = runCommandWithOutput(commitCmd); err != nil { + t.Fatalf("failed to commit container: %v %v", out, err) + } + imageID := stripTrailingCharacters(out) + + deleteContainer(cleanedContainerID) + return imageID + } + + repoName := "foobar-save-multi-images-test" + tagFoo := repoName + ":foo" + tagBar := repoName + ":bar" + + idFoo := makeImage("busybox:latest", tagFoo) + idBar := makeImage("busybox:latest", tagBar) + + deleteImages(repoName) + + // create the archive + saveCmdFinal := fmt.Sprintf("%v save %v | tar t | grep 'VERSION' |cut -d / -f1", dockerBinary, repoName) + saveCmd := exec.Command("bash", "-c", saveCmdFinal) + out, _, err := runCommandWithOutput(saveCmd) + if err != nil { + t.Fatalf("failed to save multiple images: %s, %v", out, err) + } + actual := strings.Split(stripTrailingCharacters(out), "\n") + + // make the list of expected layers + historyCmdFinal := fmt.Sprintf("%v history -q --no-trunc %v", dockerBinary, "busybox:latest") + historyCmd := exec.Command("bash", "-c", historyCmdFinal) + out, _, err = runCommandWithOutput(historyCmd) + if err != nil { + t.Fatalf("failed to get history: %s, %v", out, err) + } + + expected := append(strings.Split(stripTrailingCharacters(out), "\n"), idFoo, idBar) + + sort.Strings(actual) + sort.Strings(expected) + if !reflect.DeepEqual(expected, actual) { + t.Fatalf("achive does not contains the right layers: got %v, expected %v", actual, expected) + } + + logDone("save - save repository with multiple images") +} + // Issue #6722 #5892 ensure directories are included in changes func TestSaveDirectoryPermissions(t 
*testing.T) { layerEntries := []string{"opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"} @@ -336,12 +396,12 @@ func TestSaveDirectoryPermissions(t *testing.T) { name := "save-directory-permissions" tmpDir, err := ioutil.TempDir("", "save-layers-with-directories") - extractionDirectory := filepath.Join(tmpDir, "image-extraction-dir") - os.Mkdir(extractionDirectory, 0777) - if err != nil { t.Errorf("failed to create temporary directory: %s", err) } + extractionDirectory := filepath.Join(tmpDir, "image-extraction-dir") + os.Mkdir(extractionDirectory, 0777) + defer os.RemoveAll(tmpDir) defer deleteImages(name) _, err = buildImage(name, @@ -355,8 +415,7 @@ func TestSaveDirectoryPermissions(t *testing.T) { saveCmdFinal := fmt.Sprintf("%s save %s | tar -xf - -C %s", dockerBinary, name, extractionDirectory) saveCmd := exec.Command("bash", "-c", saveCmdFinal) - out, _, err := runCommandWithOutput(saveCmd) - if err != nil { + if out, _, err := runCommandWithOutput(saveCmd); err != nil { t.Errorf("failed to save and extract image: %s", out) } diff --git a/integration-cli/docker_cli_search_test.go b/integration-cli/docker_cli_search_test.go index e8b9efdc19..fafb5df750 100644 --- a/integration-cli/docker_cli_search_test.go +++ b/integration-cli/docker_cli_search_test.go @@ -1,7 +1,6 @@ package main import ( - "fmt" "os/exec" "strings" "testing" @@ -11,10 +10,8 @@ import ( func TestSearchOnCentralRegistry(t *testing.T) { searchCmd := exec.Command(dockerBinary, "search", "busybox") out, exitCode, err := runCommandWithOutput(searchCmd) - errorOut(err, t, fmt.Sprintf("encountered error while searching: %v", err)) - if err != nil || exitCode != 0 { - t.Fatal("failed to search on the central registry") + t.Fatalf("failed to search on the central registry: %s, %v", out, err) } if !strings.Contains(out, "Busybox base image.") { diff --git a/integration-cli/docker_cli_start_test.go b/integration-cli/docker_cli_start_test.go index addc781ca9..8041c01c68 100644 --- 
a/integration-cli/docker_cli_start_test.go +++ b/integration-cli/docker_cli_start_test.go @@ -1,6 +1,7 @@ package main import ( + "fmt" "os/exec" "strings" "testing" @@ -11,8 +12,8 @@ import ( func TestStartAttachReturnsOnError(t *testing.T) { defer deleteAllContainers() - cmd(t, "run", "-d", "--name", "test", "busybox") - cmd(t, "stop", "test") + dockerCmd(t, "run", "-d", "--name", "test", "busybox") + dockerCmd(t, "wait", "test") // Expect this to fail because the above container is stopped, this is what we want if _, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "test2", "--link", "test:test", "busybox")); err == nil { @@ -38,12 +39,83 @@ func TestStartAttachReturnsOnError(t *testing.T) { logDone("start - error on start with attach exits") } +// gh#8555: Exit code should be passed through when using start -a +func TestStartAttachCorrectExitCode(t *testing.T) { + defer deleteAllContainers() + + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "sleep 2; exit 1") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatalf("failed to run container: %v, output: %q", err, out) + } + + out = stripTrailingCharacters(out) + + // make sure the container has exited before trying the "start -a" + waitCmd := exec.Command(dockerBinary, "wait", out) + if out, _, err = runCommandWithOutput(waitCmd); err != nil { + t.Fatal(out, err) + } + + startCmd := exec.Command(dockerBinary, "start", "-a", out) + startOut, exitCode, err := runCommandWithOutput(startCmd) + if err != nil && !strings.Contains("exit status 1", fmt.Sprintf("%s", err)) { + t.Fatalf("start command failed unexpectedly with error: %v, output: %q", err, startOut) + } + if exitCode != 1 { + t.Fatalf("start -a did not respond with proper exit code: expected 1, got %d", exitCode) + } + + logDone("start - correct exit code returned with -a") +} + +func TestStartRecordError(t *testing.T) { + defer deleteAllContainers() + + // when container runs 
successfully, we should not have state.Error + dockerCmd(t, "run", "-d", "-p", "9999:9999", "--name", "test", "busybox", "top") + stateErr, err := inspectField("test", "State.Error") + if err != nil { + t.Fatalf("Failed to inspect %q state's error, got error %q", "test", err) + } + if stateErr != "" { + t.Fatalf("Expected to not have state error but got state.Error(%q)", stateErr) + } + + // Expect this to fail and records error because of ports conflict + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "test2", "-p", "9999:9999", "busybox", "top")) + if err == nil { + t.Fatalf("Expected error but got none, output %q", out) + } + stateErr, err = inspectField("test2", "State.Error") + if err != nil { + t.Fatalf("Failed to inspect %q state's error, got error %q", "test2", err) + } + expected := "port is already allocated" + if stateErr == "" || !strings.Contains(stateErr, expected) { + t.Fatalf("State.Error(%q) does not include %q", stateErr, expected) + } + + // Expect the conflict to be resolved when we stop the initial container + dockerCmd(t, "stop", "test") + dockerCmd(t, "start", "test2") + stateErr, err = inspectField("test2", "State.Error") + if err != nil { + t.Fatalf("Failed to inspect %q state's error, got error %q", "test", err) + } + if stateErr != "" { + t.Fatalf("Expected to not have state error but got state.Error(%q)", stateErr) + } + + logDone("start - set state error when start fails") +} + // gh#8726: a failed Start() breaks --volumes-from on subsequent Start()'s func TestStartVolumesFromFailsCleanly(t *testing.T) { defer deleteAllContainers() // Create the first data volume - cmd(t, "run", "-d", "--name", "data_before", "-v", "/foo", "busybox") + dockerCmd(t, "run", "-d", "--name", "data_before", "-v", "/foo", "busybox") // Expect this to fail because the data test after contaienr doesn't exist yet if _, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "consumer", "--volumes-from", 
"data_before", "--volumes-from", "data_after", "busybox")); err == nil { @@ -51,13 +123,13 @@ func TestStartVolumesFromFailsCleanly(t *testing.T) { } // Create the second data volume - cmd(t, "run", "-d", "--name", "data_after", "-v", "/bar", "busybox") + dockerCmd(t, "run", "-d", "--name", "data_after", "-v", "/bar", "busybox") // Now, all the volumes should be there - cmd(t, "start", "consumer") + dockerCmd(t, "start", "consumer") // Check that we have the volumes we want - out, _, _ := cmd(t, "inspect", "--format='{{ len .Volumes }}'", "consumer") + out, _, _ := dockerCmd(t, "inspect", "--format='{{ len .Volumes }}'", "consumer") n_volumes := strings.Trim(out, " \r\n'") if n_volumes != "2" { t.Fatalf("Missing volumes: expected 2, got %s", n_volumes) diff --git a/integration-cli/docker_cli_tag_test.go b/integration-cli/docker_cli_tag_test.go index 815416f208..bfab851115 100644 --- a/integration-cli/docker_cli_tag_test.go +++ b/integration-cli/docker_cli_tag_test.go @@ -3,6 +3,7 @@ package main import ( "fmt" "os/exec" + "strings" "testing" ) @@ -13,8 +14,9 @@ func TestTagUnprefixedRepoByName(t *testing.T) { } tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", "testfoobarbaz") - out, _, err := runCommandWithOutput(tagCmd) - errorOut(err, t, fmt.Sprintf("%v %v", out, err)) + if out, _, err := runCommandWithOutput(tagCmd); err != nil { + t.Fatal(out, err) + } deleteImages("testfoobarbaz") @@ -25,12 +27,15 @@ func TestTagUnprefixedRepoByName(t *testing.T) { func TestTagUnprefixedRepoByID(t *testing.T) { getIDCmd := exec.Command(dockerBinary, "inspect", "-f", "{{.Id}}", "busybox") out, _, err := runCommandWithOutput(getIDCmd) - errorOut(err, t, fmt.Sprintf("failed to get the image ID of busybox: %v", err)) + if err != nil { + t.Fatalf("failed to get the image ID of busybox: %s, %v", out, err) + } cleanedImageID := stripTrailingCharacters(out) tagCmd := exec.Command(dockerBinary, "tag", cleanedImageID, "testfoobarbaz") - out, _, err = 
runCommandWithOutput(tagCmd) - errorOut(err, t, fmt.Sprintf("%s %s", out, err)) + if out, _, err = runCommandWithOutput(tagCmd); err != nil { + t.Fatal(out, err) + } deleteImages("testfoobarbaz") @@ -88,3 +93,42 @@ func TestTagValidPrefixedRepo(t *testing.T) { logDone(logMessage) } } + +// tag an image with an existed tag name without -f option should fail +func TestTagExistedNameWithoutForce(t *testing.T) { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + t.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + + tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", "busybox:test") + if out, _, err := runCommandWithOutput(tagCmd); err != nil { + t.Fatal(out, err) + } + tagCmd = exec.Command(dockerBinary, "tag", "busybox:latest", "busybox:test") + out, _, err := runCommandWithOutput(tagCmd) + if err == nil || !strings.Contains(out, "Conflict: Tag test is already set to image") { + t.Fatal("tag busybox busybox:test should have failed,because busybox:test is existed") + } + deleteImages("busybox:test") + + logDone("tag - busybox with an existed tag name without -f option --> must fail") +} + +// tag an image with an existed tag name with -f option should work +func TestTagExistedNameWithForce(t *testing.T) { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + t.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + + tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", "busybox:test") + if out, _, err := runCommandWithOutput(tagCmd); err != nil { + t.Fatal(out, err) + } + tagCmd = exec.Command(dockerBinary, "tag", "-f", "busybox:latest", "busybox:test") + if out, _, err := runCommandWithOutput(tagCmd); err != nil { + t.Fatal(out, err) + } + deleteImages("busybox:test") + + logDone("tag - busybox with an existed tag name with -f option work") +} diff --git a/integration-cli/docker_cli_top_test.go b/integration-cli/docker_cli_top_test.go index 
f3ff15bceb..de0d3d2e89 100644 --- a/integration-cli/docker_cli_top_test.go +++ b/integration-cli/docker_cli_top_test.go @@ -1,7 +1,6 @@ package main import ( - "fmt" "os/exec" "strings" "testing" @@ -10,17 +9,21 @@ import ( func TestTopMultipleArgs(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-i", "-d", "busybox", "sleep", "20") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, fmt.Sprintf("failed to start the container: %v", err)) + if err != nil { + t.Fatalf("failed to start the container: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) defer deleteContainer(cleanedContainerID) topCmd := exec.Command(dockerBinary, "top", cleanedContainerID, "-o", "pid") out, _, err = runCommandWithOutput(topCmd) - errorOut(err, t, fmt.Sprintf("failed to run top: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to run top: %s, %v", out, err) + } if !strings.Contains(out, "PID") { - errorOut(nil, t, fmt.Sprintf("did not see PID after top -o pid")) + t.Fatalf("did not see PID after top -o pid: %s", out) } logDone("top - multiple arguments") @@ -29,27 +32,34 @@ func TestTopMultipleArgs(t *testing.T) { func TestTopNonPrivileged(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-i", "-d", "busybox", "sleep", "20") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, fmt.Sprintf("failed to start the container: %v", err)) + if err != nil { + t.Fatalf("failed to start the container: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) topCmd := exec.Command(dockerBinary, "top", cleanedContainerID) - out, _, err = runCommandWithOutput(topCmd) - errorOut(err, t, fmt.Sprintf("failed to run top: %v %v", out, err)) + out1, _, err := runCommandWithOutput(topCmd) + if err != nil { + t.Fatalf("failed to run top: %s, %v", out1, err) + } topCmd = exec.Command(dockerBinary, "top", cleanedContainerID) - out2, _, err2 := runCommandWithOutput(topCmd) - errorOut(err2, t, fmt.Sprintf("failed to run 
top: %v %v", out2, err2)) + out2, _, err := runCommandWithOutput(topCmd) + if err != nil { + t.Fatalf("failed to run top: %s, %v", out2, err) + } killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID) - _, err = runCommand(killCmd) - errorOut(err, t, fmt.Sprintf("failed to kill container: %v", err)) + if out, _, err = runCommandWithOutput(killCmd); err != nil { + t.Fatalf("failed to kill container: %s, %v", out, err) + } deleteContainer(cleanedContainerID) - if !strings.Contains(out, "sleep 20") && !strings.Contains(out2, "sleep 20") { + if !strings.Contains(out1, "sleep 20") && !strings.Contains(out2, "sleep 20") { t.Fatal("top should've listed `sleep 20` in the process list, but failed twice") - } else if !strings.Contains(out, "sleep 20") { + } else if !strings.Contains(out1, "sleep 20") { t.Fatal("top should've listed `sleep 20` in the process list, but failed the first time") } else if !strings.Contains(out2, "sleep 20") { t.Fatal("top should've listed `sleep 20` in the process list, but failed the second itime") @@ -61,27 +71,34 @@ func TestTopNonPrivileged(t *testing.T) { func TestTopPrivileged(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "--privileged", "-i", "-d", "busybox", "sleep", "20") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, fmt.Sprintf("failed to start the container: %v", err)) + if err != nil { + t.Fatalf("failed to start the container: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) topCmd := exec.Command(dockerBinary, "top", cleanedContainerID) - out, _, err = runCommandWithOutput(topCmd) - errorOut(err, t, fmt.Sprintf("failed to run top: %v %v", out, err)) + out1, _, err := runCommandWithOutput(topCmd) + if err != nil { + t.Fatalf("failed to run top: %s, %v", out1, err) + } topCmd = exec.Command(dockerBinary, "top", cleanedContainerID) - out2, _, err2 := runCommandWithOutput(topCmd) - errorOut(err2, t, fmt.Sprintf("failed to run top: %v %v", out2, err2)) + out2, _, err := 
runCommandWithOutput(topCmd) + if err != nil { + t.Fatalf("failed to run top: %s, %v", out2, err) + } killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID) - _, err = runCommand(killCmd) - errorOut(err, t, fmt.Sprintf("failed to kill container: %v", err)) + if out, _, err = runCommandWithOutput(killCmd); err != nil { + t.Fatalf("failed to kill container: %s, %v", out, err) + } deleteContainer(cleanedContainerID) - if !strings.Contains(out, "sleep 20") && !strings.Contains(out2, "sleep 20") { + if !strings.Contains(out1, "sleep 20") && !strings.Contains(out2, "sleep 20") { t.Fatal("top should've listed `sleep 20` in the process list, but failed twice") - } else if !strings.Contains(out, "sleep 20") { + } else if !strings.Contains(out1, "sleep 20") { t.Fatal("top should've listed `sleep 20` in the process list, but failed the first time") } else if !strings.Contains(out2, "sleep 20") { t.Fatal("top should've listed `sleep 20` in the process list, but failed the second itime") diff --git a/integration-cli/docker_cli_version_test.go b/integration-cli/docker_cli_version_test.go index 7f1838e5d9..0759ba6767 100644 --- a/integration-cli/docker_cli_version_test.go +++ b/integration-cli/docker_cli_version_test.go @@ -1,7 +1,6 @@ package main import ( - "fmt" "os/exec" "strings" "testing" @@ -10,11 +9,9 @@ import ( // ensure docker version works func TestVersionEnsureSucceeds(t *testing.T) { versionCmd := exec.Command(dockerBinary, "version") - out, exitCode, err := runCommandWithOutput(versionCmd) - errorOut(err, t, fmt.Sprintf("encountered error while running docker version: %v", err)) - - if err != nil || exitCode != 0 { - t.Fatal("failed to execute docker version") + out, _, err := runCommandWithOutput(versionCmd) + if err != nil { + t.Fatalf("failed to execute docker version: %s, %v", out, err) } stringsToCheck := []string{ diff --git a/integration-cli/docker_test_vars.go b/integration-cli/docker_test_vars.go index 23903a39a9..78c481bd23 100644 --- 
a/integration-cli/docker_test_vars.go +++ b/integration-cli/docker_test_vars.go @@ -16,10 +16,11 @@ var ( // the private registry to use for tests privateRegistryURL = "127.0.0.1:5000" - dockerBasePath = "/var/lib/docker" - execDriverPath = dockerBasePath + "/execdriver/native" - volumesConfigPath = dockerBasePath + "/volumes" - volumesStoragePath = dockerBasePath + "/vfs/dir" + dockerBasePath = "/var/lib/docker" + execDriverPath = dockerBasePath + "/execdriver/native" + volumesConfigPath = dockerBasePath + "/volumes" + volumesStoragePath = dockerBasePath + "/vfs/dir" + containerStoragePath = dockerBasePath + "/containers" workingDirectory string ) diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go index 3bdf36ec19..93cb4a6b3d 100644 --- a/integration-cli/docker_utils.go +++ b/integration-cli/docker_utils.go @@ -1,6 +1,8 @@ package main import ( + "bytes" + "encoding/json" "errors" "fmt" "io" @@ -41,10 +43,10 @@ func NewDaemon(t *testing.T) *Daemon { t.Fatal("Please set the DEST environment variable") } - dir := filepath.Join(dest, fmt.Sprintf("daemon%d", time.Now().Unix())) + dir := filepath.Join(dest, fmt.Sprintf("daemon%d", time.Now().UnixNano()%100000000)) daemonFolder, err := filepath.Abs(dir) if err != nil { - t.Fatalf("Could not make '%s' an absolute path: %v", dir, err) + t.Fatalf("Could not make %q an absolute path: %v", dir, err) } if err := os.MkdirAll(filepath.Join(daemonFolder, "graph"), 0600); err != nil { @@ -69,10 +71,23 @@ func (d *Daemon) Start(arg ...string) error { args := []string{ "--host", d.sock(), - "--daemon", "--debug", + "--daemon", "--graph", fmt.Sprintf("%s/graph", d.folder), "--pidfile", fmt.Sprintf("%s/docker.pid", d.folder), } + + // If we don't explicitly set the log-level or debug flag(-D) then + // turn on debug mode + foundIt := false + for _, a := range arg { + if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") { + foundIt = true + } + } + if !foundIt { + args = append(args, 
"--debug") + } + if d.storageDriver != "" { args = append(args, "--storage-driver", d.storageDriver) } @@ -83,7 +98,7 @@ func (d *Daemon) Start(arg ...string) error { args = append(args, arg...) d.cmd = exec.Command(dockerBinary, args...) - d.logFile, err = os.OpenFile(filepath.Join(d.folder, "docker.log"), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0600) + d.logFile, err = os.OpenFile(filepath.Join(d.folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) if err != nil { d.t.Fatalf("Could not create %s/docker.log: %v", d.folder, err) } @@ -107,8 +122,13 @@ func (d *Daemon) Start(arg ...string) error { tick := time.Tick(500 * time.Millisecond) // make sure daemon is ready to receive requests + startTime := time.Now().Unix() for { d.t.Log("waiting for daemon to start") + if time.Now().Unix()-startTime > 5 { + // After 5 seconds, give up + return errors.New("Daemon exited and never started") + } select { case <-time.After(2 * time.Second): return errors.New("timeout: daemon does not respond") @@ -231,7 +251,7 @@ func (d *Daemon) Cmd(name string, arg ...string) (string, error) { return string(b), err } -func sockRequest(method, endpoint string) ([]byte, error) { +func sockRequest(method, endpoint string, data interface{}) ([]byte, error) { // FIX: the path to sock should not be hardcoded sock := filepath.Join("/", "var", "run", "docker.sock") c, err := net.DialTimeout("unix", sock, time.Duration(10*time.Second)) @@ -242,7 +262,12 @@ func sockRequest(method, endpoint string) ([]byte, error) { client := httputil.NewClientConn(c, nil) defer client.Close() - req, err := http.NewRequest(method, endpoint, nil) + jsonData := bytes.NewBuffer(nil) + if err := json.NewEncoder(jsonData).Encode(data); err != nil { + return nil, err + } + + req, err := http.NewRequest(method, endpoint, jsonData) req.Header.Set("Content-Type", "application/json") if err != nil { return nil, fmt.Errorf("could not create new request: %v", err) @@ -254,7 +279,8 @@ func sockRequest(method, endpoint 
string) ([]byte, error) { } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("received status != 200 OK: %s", resp.Status) + body, _ := ioutil.ReadAll(resp.Body) + return body, fmt.Errorf("received status != 200 OK: %s", resp.Status) } return ioutil.ReadAll(resp.Body) @@ -302,8 +328,51 @@ func deleteAllContainers() error { return nil } +func getPausedContainers() (string, error) { + getPausedContainersCmd := exec.Command(dockerBinary, "ps", "-f", "status=paused", "-q", "-a") + out, exitCode, err := runCommandWithOutput(getPausedContainersCmd) + if exitCode != 0 && err == nil { + err = fmt.Errorf("failed to get a list of paused containers: %v\n", out) + } + + return out, err +} + +func unpauseContainer(container string) error { + unpauseCmd := exec.Command(dockerBinary, "unpause", container) + exitCode, err := runCommand(unpauseCmd) + if exitCode != 0 && err == nil { + err = fmt.Errorf("failed to unpause container") + } + + return nil +} + +func unpauseAllContainers() error { + containers, err := getPausedContainers() + if err != nil { + fmt.Println(containers) + return err + } + + containers = strings.Replace(containers, "\n", " ", -1) + containers = strings.Trim(containers, " ") + containerList := strings.Split(containers, " ") + + for _, value := range containerList { + if err = unpauseContainer(value); err != nil { + return err + } + } + + return nil +} + func deleteImages(images ...string) error { - rmiCmd := exec.Command(dockerBinary, "rmi", strings.Join(images, " ")) + args := make([]string, 1, 2) + args[0] = "rmi" + args = append(args, images...) + rmiCmd := exec.Command(dockerBinary, args...) 
exitCode, err := runCommand(rmiCmd) // set error manually if not set if exitCode != 0 && err == nil { @@ -317,7 +386,7 @@ func imageExists(image string) error { inspectCmd := exec.Command(dockerBinary, "inspect", image) exitCode, err := runCommand(inspectCmd) if exitCode != 0 && err == nil { - err = fmt.Errorf("couldn't find image '%s'", image) + err = fmt.Errorf("couldn't find image %q", image) } return err } @@ -328,20 +397,17 @@ func pullImageIfNotExist(image string) (err error) { _, exitCode, err := runCommandWithOutput(pullCmd) if err != nil || exitCode != 0 { - err = fmt.Errorf("image '%s' wasn't found locally and it couldn't be pulled: %s", image, err) + err = fmt.Errorf("image %q wasn't found locally and it couldn't be pulled: %s", image, err) } } return } -// deprecated, use dockerCmd instead -func cmd(t *testing.T, args ...string) (string, int, error) { - return dockerCmd(t, args...) -} - func dockerCmd(t *testing.T, args ...string) (string, int, error) { out, status, err := runCommandWithOutput(exec.Command(dockerBinary, args...)) - errorOut(err, t, fmt.Sprintf("'%s' failed with errors: %v (%v)", strings.Join(args, " "), err, out)) + if err != nil { + t.Fatalf("%q failed with errors: %s, %v", strings.Join(args, " "), out, err) + } return out, status, err } @@ -349,7 +415,7 @@ func dockerCmd(t *testing.T, args ...string) (string, int, error) { func dockerCmdWithTimeout(timeout time.Duration, args ...string) (string, int, error) { out, status, err := runCommandWithOutputAndTimeout(exec.Command(dockerBinary, args...), timeout) if err != nil { - return out, status, fmt.Errorf("'%s' failed with errors: %v : %q)", strings.Join(args, " "), err, out) + return out, status, fmt.Errorf("%q failed with errors: %v : %q)", strings.Join(args, " "), err, out) } return out, status, err } @@ -360,7 +426,7 @@ func dockerCmdInDir(t *testing.T, path string, args ...string) (string, int, err dockerCommand.Dir = path out, status, err := runCommandWithOutput(dockerCommand) if 
err != nil { - return out, status, fmt.Errorf("'%s' failed with errors: %v : %q)", strings.Join(args, " "), err, out) + return out, status, fmt.Errorf("%q failed with errors: %v : %q)", strings.Join(args, " "), err, out) } return out, status, err } @@ -371,7 +437,7 @@ func dockerCmdInDirWithTimeout(timeout time.Duration, path string, args ...strin dockerCommand.Dir = path out, status, err := runCommandWithOutputAndTimeout(dockerCommand, timeout) if err != nil { - return out, status, fmt.Errorf("'%s' failed with errors: %v : %q)", strings.Join(args, " "), err, out) + return out, status, fmt.Errorf("%q failed with errors: %v : %q)", strings.Join(args, " "), err, out) } return out, status, err } @@ -531,7 +597,7 @@ func getContainerState(t *testing.T, id string) (int, bool, error) { ) out, exitCode, err := dockerCmd(t, "inspect", "--format={{.State.Running}} {{.State.ExitCode}}", id) if err != nil || exitCode != 0 { - return 0, false, fmt.Errorf("'%s' doesn't exist: %s", id, err) + return 0, false, fmt.Errorf("%q doesn't exist: %s", id, err) } out = strings.Trim(out, "\n") @@ -570,6 +636,25 @@ func buildImageWithOut(name, dockerfile string, useCache bool) (string, string, return id, out, nil } +func buildImageWithStdoutStderr(name, dockerfile string, useCache bool) (string, string, string, error) { + args := []string{"build", "-t", name} + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, "-") + buildCmd := exec.Command(dockerBinary, args...) 
+ buildCmd.Stdin = strings.NewReader(dockerfile) + stdout, stderr, exitCode, err := runCommandWithStdoutStderr(buildCmd) + if err != nil || exitCode != 0 { + return "", stdout, stderr, fmt.Errorf("failed to build the image: %s", stdout) + } + id, err := getIDByName(name) + if err != nil { + return "", stdout, stderr, err + } + return id, stdout, stderr, nil +} + func buildImage(name, dockerfile string, useCache bool) (string, error) { id, _, err := buildImageWithOut(name, dockerfile, useCache) return id, err @@ -712,3 +797,36 @@ func readFile(src string, t *testing.T) (content string) { } return string(data) } + +func containerStorageFile(containerId, basename string) string { + return filepath.Join("/var/lib/docker/containers", containerId, basename) +} + +// docker commands that use this function must be run with the '-d' switch. +func runCommandAndReadContainerFile(filename string, cmd *exec.Cmd) ([]byte, error) { + out, _, err := runCommandWithOutput(cmd) + if err != nil { + return nil, fmt.Errorf("%v: %q", err, out) + } + + time.Sleep(1 * time.Second) + + contID := strings.TrimSpace(out) + + return readContainerFile(contID, filename) +} + +func readContainerFile(containerId, filename string) ([]byte, error) { + f, err := os.Open(containerStorageFile(containerId, filename)) + if err != nil { + return nil, err + } + defer f.Close() + + content, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + return content, nil +} diff --git a/integration-cli/utils.go b/integration-cli/utils.go index f3f128e329..2de432549c 100644 --- a/integration-cli/utils.go +++ b/integration-cli/utils.go @@ -13,7 +13,6 @@ import ( "reflect" "strings" "syscall" - "testing" "time" "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" @@ -96,13 +95,6 @@ func runCommand(cmd *exec.Cmd) (exitCode int, err error) { return } -func startCommand(cmd *exec.Cmd) (exitCode int, err error) { - exitCode = 0 - err = cmd.Start() - exitCode = processExitCode(err) - 
return -} - func logDone(message string) { fmt.Printf("[PASSED]: %s\n", message) } @@ -113,22 +105,6 @@ func stripTrailingCharacters(target string) string { return target } -func errorOut(err error, t *testing.T, message string) { - if err != nil { - t.Fatal(message) - } -} - -func errorOutOnNonNilError(err error, t *testing.T, message string) { - if err == nil { - t.Fatalf(message) - } -} - -func nLines(s string) int { - return strings.Count(s, "\n") -} - func unmarshalJSON(data []byte, result interface{}) error { err := json.Unmarshal(data, result) if err != nil { @@ -138,10 +114,6 @@ func unmarshalJSON(data []byte, result interface{}) error { return nil } -func deepEqual(expected interface{}, result interface{}) bool { - return reflect.DeepEqual(result, expected) -} - func convertSliceOfStringsToMap(input []string) map[string]struct{} { output := make(map[string]struct{}) for _, v := range input { @@ -266,3 +238,26 @@ func makeRandomString(n int) string { } return string(b) } + +// Reads chunkSize bytes from reader after every interval. +// Returns total read bytes. 
+func consumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, stop chan bool) (n int, err error) { + buffer := make([]byte, chunkSize) + for { + select { + case <-stop: + return + default: + var readBytes int + readBytes, err = reader.Read(buffer) + n += readBytes + if err != nil { + if err == io.EOF { + err = nil + } + return + } + time.Sleep(interval) + } + } +} diff --git a/integration/api_test.go b/integration/api_test.go index 8fa295e7b1..8e45f89282 100644 --- a/integration/api_test.go +++ b/integration/api_test.go @@ -21,100 +21,6 @@ import ( "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) -func TestGetContainersJSON(t *testing.T) { - eng := NewTestEngine(t) - defer mkDaemonFromEngine(eng, t).Nuke() - - job := eng.Job("containers") - job.SetenvBool("all", true) - outs, err := job.Stdout.AddTable() - if err != nil { - t.Fatal(err) - } - if err := job.Run(); err != nil { - t.Fatal(err) - } - beginLen := len(outs.Data) - - containerID := createTestContainer(eng, &runconfig.Config{ - Image: unitTestImageID, - Cmd: []string{"echo", "test"}, - }, t) - - if containerID == "" { - t.Fatalf("Received empty container ID") - } - - req, err := http.NewRequest("GET", "/containers/json?all=1", nil) - if err != nil { - t.Fatal(err) - } - - r := httptest.NewRecorder() - if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { - t.Fatal(err) - } - assertHttpNotError(r, t) - containers := engine.NewTable("", 0) - if _, err := containers.ReadListFrom(r.Body.Bytes()); err != nil { - t.Fatal(err) - } - if len(containers.Data) != beginLen+1 { - t.Fatalf("Expected %d container, %d found (started with: %d)", beginLen+1, len(containers.Data), beginLen) - } - if id := containers.Data[0].Get("Id"); id != containerID { - t.Fatalf("Container ID mismatch. 
Expected: %s, received: %s\n", containerID, id) - } -} - -func TestGetContainersExport(t *testing.T) { - eng := NewTestEngine(t) - defer mkDaemonFromEngine(eng, t).Nuke() - - // Create a container and remove a file - containerID := createTestContainer(eng, - &runconfig.Config{ - Image: unitTestImageID, - Cmd: []string{"touch", "/test"}, - }, - t, - ) - containerRun(eng, containerID, t) - - r := httptest.NewRecorder() - - req, err := http.NewRequest("GET", "/containers/"+containerID+"/export", nil) - if err != nil { - t.Fatal(err) - } - if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { - t.Fatal(err) - } - assertHttpNotError(r, t) - - if r.Code != http.StatusOK { - t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) - } - - found := false - for tarReader := tar.NewReader(r.Body); ; { - h, err := tarReader.Next() - if err != nil { - if err == io.EOF { - break - } - t.Fatal(err) - } - if h.Name == "test" { - found = true - break - } - } - if !found { - t.Fatalf("The created test file has not been found in the exported image") - } -} - func TestSaveImageAndThenLoad(t *testing.T) { eng := NewTestEngine(t) defer mkDaemonFromEngine(eng, t).Nuke() @@ -186,46 +92,6 @@ func TestSaveImageAndThenLoad(t *testing.T) { } } -func TestGetContainersChanges(t *testing.T) { - eng := NewTestEngine(t) - defer mkDaemonFromEngine(eng, t).Nuke() - - // Create a container and remove a file - containerID := createTestContainer(eng, - &runconfig.Config{ - Image: unitTestImageID, - Cmd: []string{"/bin/rm", "/etc/passwd"}, - }, - t, - ) - containerRun(eng, containerID, t) - - r := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/containers/"+containerID+"/changes", nil) - if err != nil { - t.Fatal(err) - } - if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { - t.Fatal(err) - } - assertHttpNotError(r, t) - outs := engine.NewTable("", 0) - if _, err := outs.ReadListFrom(r.Body.Bytes()); err != nil { - t.Fatal(err) - } - - // 
Check the changelog - success := false - for _, elem := range outs.Data { - if elem.Get("Path") == "/etc/passwd" && elem.GetInt("Kind") == 2 { - success = true - } - } - if !success { - t.Fatalf("/etc/passwd as been removed but is not present in the diff") - } -} - func TestGetContainersTop(t *testing.T) { eng := NewTestEngine(t) defer mkDaemonFromEngine(eng, t).Nuke() @@ -919,8 +785,8 @@ func TestGetEnabledCors(t *testing.T) { if allowOrigin != "*" { t.Errorf("Expected header Access-Control-Allow-Origin to be \"*\", %s found.", allowOrigin) } - if allowHeaders != "Origin, X-Requested-With, Content-Type, Accept" { - t.Errorf("Expected header Access-Control-Allow-Headers to be \"Origin, X-Requested-With, Content-Type, Accept\", %s found.", allowHeaders) + if allowHeaders != "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth" { + t.Errorf("Expected header Access-Control-Allow-Headers to be \"Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth\", %s found.", allowHeaders) } if allowMethods != "GET, POST, DELETE, PUT, OPTIONS" { t.Errorf("Expected hearder Access-Control-Allow-Methods to be \"GET, POST, DELETE, PUT, OPTIONS\", %s found.", allowMethods) diff --git a/integration/commands_test.go b/integration/commands_test.go index 532e6f79fa..aa21791b50 100644 --- a/integration/commands_test.go +++ b/integration/commands_test.go @@ -9,12 +9,13 @@ import ( "testing" "time" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/api/client" "github.com/docker/docker/daemon" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/pkg/term" "github.com/docker/docker/utils" "github.com/docker/libtrust" + "github.com/kr/pty" ) func closeWrap(args ...io.Closer) error { @@ -162,72 +163,20 @@ func TestRunDisconnect(t *testing.T) { }) } -// Expected behaviour: the process stay alive when the client disconnects -// but the client detaches. 
-func TestRunDisconnectTty(t *testing.T) { - - stdin, stdinPipe := io.Pipe() - stdout, stdoutPipe := io.Pipe() - key, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) - defer cleanup(globalEngine, t) - - c1 := make(chan struct{}) - go func() { - defer close(c1) - // We're simulating a disconnect so the return value doesn't matter. What matters is the - // fact that CmdRun returns. - if err := cli.CmdRun("-i", "-t", unitTestImageID, "/bin/cat"); err != nil { - log.Debugf("Error CmdRun: %s", err) - } - }() - - container := waitContainerStart(t, 10*time.Second) - - state := setRaw(t, container) - defer unsetRaw(t, container, state) - - // Client disconnect after run -i should keep stdin out in TTY mode - setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() { - if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil { - t.Fatal(err) - } - }) - - // Close pipes (simulate disconnect) - if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil { - t.Fatal(err) - } - - // wait for CmdRun to return - setTimeout(t, "Waiting for CmdRun timed out", 5*time.Second, func() { - <-c1 - }) - - // In tty mode, we expect the process to stay alive even after client's stdin closes. - - // Give some time to monitor to do his thing - container.WaitStop(500 * time.Millisecond) - if !container.IsRunning() { - t.Fatalf("/bin/cat should still be running after closing stdin (tty mode)") - } -} - // TestRunDetach checks attaching and detaching with the escape sequence. 
func TestRunDetach(t *testing.T) { - - stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() + cpty, tty, err := pty.Open() + if err != nil { + t.Fatal(err) + } + key, err := libtrust.GenerateECP256PrivateKey() if err != nil { t.Fatal(err) } - cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) + cli := client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) ch := make(chan struct{}) @@ -242,22 +191,22 @@ func TestRunDetach(t *testing.T) { defer unsetRaw(t, container, state) setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() { - if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil { + if err := assertPipe("hello\n", "hello", stdout, cpty, 150); err != nil { t.Fatal(err) } }) setTimeout(t, "Escape sequence timeout", 5*time.Second, func() { - stdinPipe.Write([]byte{16}) + cpty.Write([]byte{16}) time.Sleep(100 * time.Millisecond) - stdinPipe.Write([]byte{17}) + cpty.Write([]byte{17}) }) // wait for CmdRun to return setTimeout(t, "Waiting for CmdRun timed out", 15*time.Second, func() { <-ch }) - closeWrap(stdin, stdinPipe, stdout, stdoutPipe) + closeWrap(cpty, stdout, stdoutPipe) time.Sleep(500 * time.Millisecond) if !container.IsRunning() { @@ -271,14 +220,18 @@ func TestRunDetach(t *testing.T) { // TestAttachDetach checks that attach in tty mode can be detached using the long container ID func TestAttachDetach(t *testing.T) { - stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() + cpty, tty, err := pty.Open() + if err != nil { + t.Fatal(err) + } + key, err := libtrust.GenerateECP256PrivateKey() if err != nil { t.Fatal(err) } - cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) + cli := client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) ch := make(chan 
struct{}) @@ -309,9 +262,13 @@ func TestAttachDetach(t *testing.T) { state := setRaw(t, container) defer unsetRaw(t, container, state) - stdin, stdinPipe = io.Pipe() stdout, stdoutPipe = io.Pipe() - cli = client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) + cpty, tty, err = pty.Open() + if err != nil { + t.Fatal(err) + } + + cli = client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) ch = make(chan struct{}) go func() { @@ -324,7 +281,7 @@ func TestAttachDetach(t *testing.T) { }() setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() { - if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil { + if err := assertPipe("hello\n", "hello", stdout, cpty, 150); err != nil { if err != io.ErrClosedPipe { t.Fatal(err) } @@ -332,9 +289,9 @@ func TestAttachDetach(t *testing.T) { }) setTimeout(t, "Escape sequence timeout", 5*time.Second, func() { - stdinPipe.Write([]byte{16}) + cpty.Write([]byte{16}) time.Sleep(100 * time.Millisecond) - stdinPipe.Write([]byte{17}) + cpty.Write([]byte{17}) }) // wait for CmdRun to return @@ -342,7 +299,7 @@ func TestAttachDetach(t *testing.T) { <-ch }) - closeWrap(stdin, stdinPipe, stdout, stdoutPipe) + closeWrap(cpty, stdout, stdoutPipe) time.Sleep(500 * time.Millisecond) if !container.IsRunning() { @@ -356,14 +313,18 @@ func TestAttachDetach(t *testing.T) { // TestAttachDetachTruncatedID checks that attach in tty mode can be detached func TestAttachDetachTruncatedID(t *testing.T) { - stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() + cpty, tty, err := pty.Open() + if err != nil { + t.Fatal(err) + } + key, err := libtrust.GenerateECP256PrivateKey() if err != nil { t.Fatal(err) } - cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) + cli := client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) defer 
cleanup(globalEngine, t) // Discard the CmdRun output @@ -379,9 +340,13 @@ func TestAttachDetachTruncatedID(t *testing.T) { state := setRaw(t, container) defer unsetRaw(t, container, state) - stdin, stdinPipe = io.Pipe() stdout, stdoutPipe = io.Pipe() - cli = client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) + cpty, tty, err = pty.Open() + if err != nil { + t.Fatal(err) + } + + cli = client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) ch := make(chan struct{}) go func() { @@ -394,7 +359,7 @@ func TestAttachDetachTruncatedID(t *testing.T) { }() setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() { - if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil { + if err := assertPipe("hello\n", "hello", stdout, cpty, 150); err != nil { if err != io.ErrClosedPipe { t.Fatal(err) } @@ -402,16 +367,16 @@ func TestAttachDetachTruncatedID(t *testing.T) { }) setTimeout(t, "Escape sequence timeout", 5*time.Second, func() { - stdinPipe.Write([]byte{16}) + cpty.Write([]byte{16}) time.Sleep(100 * time.Millisecond) - stdinPipe.Write([]byte{17}) + cpty.Write([]byte{17}) }) // wait for CmdRun to return setTimeout(t, "Waiting for CmdAttach timed out", 15*time.Second, func() { <-ch }) - closeWrap(stdin, stdinPipe, stdout, stdoutPipe) + closeWrap(cpty, stdout, stdoutPipe) time.Sleep(500 * time.Millisecond) if !container.IsRunning() { @@ -425,14 +390,18 @@ func TestAttachDetachTruncatedID(t *testing.T) { // Expected behaviour, the process stays alive when the client disconnects func TestAttachDisconnect(t *testing.T) { - stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() + cpty, tty, err := pty.Open() + if err != nil { + t.Fatal(err) + } + key, err := libtrust.GenerateECP256PrivateKey() if err != nil { t.Fatal(err) } - cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) + cli := 
client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) go func() { @@ -470,12 +439,12 @@ func TestAttachDisconnect(t *testing.T) { }() setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() { - if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil { + if err := assertPipe("hello\n", "hello", stdout, cpty, 150); err != nil { t.Fatal(err) } }) // Close pipes (client disconnects) - if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil { + if err := closeWrap(cpty, stdout, stdoutPipe); err != nil { t.Fatal(err) } diff --git a/integration/graph_test.go b/integration/graph_test.go index 203476cbb2..56e5a90642 100644 --- a/integration/graph_test.go +++ b/integration/graph_test.go @@ -74,7 +74,7 @@ func TestInterruptedRegister(t *testing.T) { Created: time.Now(), } w.CloseWithError(errors.New("But I'm not a tarball!")) // (Nobody's perfect, darling) - graph.Register(image, nil, badArchive) + graph.Register(image, badArchive) if _, err := graph.Get(image.ID); err == nil { t.Fatal("Image should not exist after Register is interrupted") } @@ -83,7 +83,7 @@ func TestInterruptedRegister(t *testing.T) { if err != nil { t.Fatal(err) } - if err := graph.Register(image, nil, goodArchive); err != nil { + if err := graph.Register(image, goodArchive); err != nil { t.Fatal(err) } } @@ -133,7 +133,7 @@ func TestRegister(t *testing.T) { Comment: "testing", Created: time.Now(), } - err = graph.Register(image, nil, archive) + err = graph.Register(image, archive) if err != nil { t.Fatal(err) } @@ -228,7 +228,7 @@ func TestDelete(t *testing.T) { t.Fatal(err) } // Test delete twice (pull -> rm -> pull -> rm) - if err := graph.Register(img1, nil, archive); err != nil { + if err := graph.Register(img1, archive); err != nil { t.Fatal(err) } if err := graph.Delete(img1.ID); err != nil { @@ -262,9 +262,9 @@ func TestByParent(t *testing.T) { Created: time.Now(), 
Parent: parentImage.ID, } - _ = graph.Register(parentImage, nil, archive1) - _ = graph.Register(childImage1, nil, archive2) - _ = graph.Register(childImage2, nil, archive3) + _ = graph.Register(parentImage, archive1) + _ = graph.Register(childImage1, archive2) + _ = graph.Register(childImage2, archive3) byParent, err := graph.ByParent() if err != nil { diff --git a/integration/runtime_test.go b/integration/runtime_test.go index 0a9a74cf9d..d173af1f7f 100644 --- a/integration/runtime_test.go +++ b/integration/runtime_test.go @@ -16,12 +16,13 @@ import ( "testing" "time" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon" + "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/engine" "github.com/docker/docker/image" "github.com/docker/docker/nat" "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/pkg/reexec" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" @@ -79,15 +80,6 @@ func cleanup(eng *engine.Engine, t *testing.T) error { return nil } -func layerArchive(tarfile string) (io.Reader, error) { - // FIXME: need to close f somewhere - f, err := os.Open(tarfile) - if err != nil { - return nil, err - } - return f, nil -} - func init() { // Always use the same driver (vfs) for all integration tests. // To test other drivers, we need a dedicated driver validation suite. 
@@ -661,7 +653,7 @@ func TestRestore(t *testing.T) { if err := container3.Run(); err != nil { t.Fatal(err) } - container2.SetStopped(0) + container2.SetStopped(&execdriver.ExitStatus{0, false}) } func TestDefaultContainerName(t *testing.T) { @@ -669,7 +661,7 @@ func TestDefaultContainerName(t *testing.T) { daemon := mkDaemonFromEngine(eng, t) defer nuke(daemon) - config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}, nil) + config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}) if err != nil { t.Fatal(err) } @@ -693,7 +685,7 @@ func TestRandomContainerName(t *testing.T) { daemon := mkDaemonFromEngine(eng, t) defer nuke(daemon) - config, _, _, err := parseRun([]string{GetTestImage(daemon).ID, "echo test"}, nil) + config, _, _, err := parseRun([]string{GetTestImage(daemon).ID, "echo test"}) if err != nil { t.Fatal(err) } @@ -724,7 +716,7 @@ func TestContainerNameValidation(t *testing.T) { {"abc-123_AAA.1", true}, {"\000asdf", false}, } { - config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}, nil) + config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}) if err != nil { if !test.Valid { continue @@ -765,7 +757,7 @@ func TestLinkChildContainer(t *testing.T) { daemon := mkDaemonFromEngine(eng, t) defer nuke(daemon) - config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}, nil) + config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}) if err != nil { t.Fatal(err) } @@ -781,7 +773,7 @@ func TestLinkChildContainer(t *testing.T) { t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID) } - config, _, _, err = parseRun([]string{GetTestImage(daemon).ID, "echo test"}, nil) + config, _, _, err = parseRun([]string{GetTestImage(daemon).ID, "echo test"}) if err != nil { t.Fatal(err) } @@ -807,7 +799,7 @@ func TestGetAllChildren(t *testing.T) { daemon := mkDaemonFromEngine(eng, t) defer nuke(daemon) - config, _, _, err := parseRun([]string{unitTestImageID, "echo 
test"}, nil) + config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}) if err != nil { t.Fatal(err) } @@ -823,7 +815,7 @@ func TestGetAllChildren(t *testing.T) { t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID) } - config, _, _, err = parseRun([]string{unitTestImageID, "echo test"}, nil) + config, _, _, err = parseRun([]string{unitTestImageID, "echo test"}) if err != nil { t.Fatal(err) } diff --git a/integration/server_test.go b/integration/server_test.go index a90399957d..1af7bbe22f 100644 --- a/integration/server_test.go +++ b/integration/server_test.go @@ -12,7 +12,7 @@ func TestCreateNumberHostname(t *testing.T) { eng := NewTestEngine(t) defer mkDaemonFromEngine(eng, t).Nuke() - config, _, _, err := parseRun([]string{"-h", "web.0", unitTestImageID, "echo test"}, nil) + config, _, _, err := parseRun([]string{"-h", "web.0", unitTestImageID, "echo test"}) if err != nil { t.Fatal(err) } @@ -24,7 +24,7 @@ func TestCommit(t *testing.T) { eng := NewTestEngine(t) defer mkDaemonFromEngine(eng, t).Nuke() - config, _, _, err := parseRun([]string{unitTestImageID, "/bin/cat"}, nil) + config, _, _, err := parseRun([]string{unitTestImageID, "/bin/cat"}) if err != nil { t.Fatal(err) } @@ -48,7 +48,7 @@ func TestMergeConfigOnCommit(t *testing.T) { container1, _, _ := mkContainer(runtime, []string{"-e", "FOO=bar", unitTestImageID, "echo test > /tmp/foo"}, t) defer runtime.Destroy(container1) - config, _, _, err := parseRun([]string{container1.ID, "cat /tmp/foo"}, nil) + config, _, _, err := parseRun([]string{container1.ID, "cat /tmp/foo"}) if err != nil { t.Error(err) } @@ -102,7 +102,7 @@ func TestRestartKillWait(t *testing.T) { runtime := mkDaemonFromEngine(eng, t) defer runtime.Nuke() - config, hostConfig, _, err := parseRun([]string{"-i", unitTestImageID, "/bin/cat"}, nil) + config, hostConfig, _, err := parseRun([]string{"-i", unitTestImageID, "/bin/cat"}) if err != nil { t.Fatal(err) } @@ -163,7 +163,7 @@ func 
TestCreateStartRestartStopStartKillRm(t *testing.T) { eng := NewTestEngine(t) defer mkDaemonFromEngine(eng, t).Nuke() - config, hostConfig, _, err := parseRun([]string{"-i", unitTestImageID, "/bin/cat"}, nil) + config, hostConfig, _, err := parseRun([]string{"-i", unitTestImageID, "/bin/cat"}) if err != nil { t.Fatal(err) } diff --git a/integration/utils_test.go b/integration/utils_test.go index e1abfa72fc..0c78a76170 100644 --- a/integration/utils_test.go +++ b/integration/utils_test.go @@ -9,6 +9,7 @@ import ( "net/http/httptest" "os" "path" + "path/filepath" "strings" "testing" "time" @@ -18,29 +19,27 @@ import ( "github.com/docker/docker/builtins" "github.com/docker/docker/daemon" "github.com/docker/docker/engine" - "github.com/docker/docker/pkg/log" flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" ) +type Fataler interface { + Fatal(...interface{}) +} + // This file contains utility functions for docker's unit test suite. // It has to be named XXX_test.go, apparently, in other to access private functions // from other XXX_test.go functions. // Create a temporary daemon suitable for unit testing. // Call t.Fatal() at the first error. -func mkDaemon(f log.Fataler) *daemon.Daemon { +func mkDaemon(f Fataler) *daemon.Daemon { eng := newTestEngine(f, false, "") return mkDaemonFromEngine(eng, f) - // FIXME: - // [...] - // Mtu: docker.GetDefaultNetworkMtu(), - // [...] 
} -func createNamedTestContainer(eng *engine.Engine, config *runconfig.Config, f log.Fataler, name string) (shortId string) { +func createNamedTestContainer(eng *engine.Engine, config *runconfig.Config, f Fataler, name string) (shortId string) { job := eng.Job("create", name) if err := job.ImportEnv(config); err != nil { f.Fatal(err) @@ -53,23 +52,23 @@ func createNamedTestContainer(eng *engine.Engine, config *runconfig.Config, f lo return engine.Tail(outputBuffer, 1) } -func createTestContainer(eng *engine.Engine, config *runconfig.Config, f log.Fataler) (shortId string) { +func createTestContainer(eng *engine.Engine, config *runconfig.Config, f Fataler) (shortId string) { return createNamedTestContainer(eng, config, f, "") } -func startContainer(eng *engine.Engine, id string, t log.Fataler) { +func startContainer(eng *engine.Engine, id string, t Fataler) { job := eng.Job("start", id) if err := job.Run(); err != nil { t.Fatal(err) } } -func containerRun(eng *engine.Engine, id string, t log.Fataler) { +func containerRun(eng *engine.Engine, id string, t Fataler) { startContainer(eng, id, t) containerWait(eng, id, t) } -func containerFileExists(eng *engine.Engine, id, dir string, t log.Fataler) bool { +func containerFileExists(eng *engine.Engine, id, dir string, t Fataler) bool { c := getContainer(eng, id, t) if err := c.Mount(); err != nil { t.Fatal(err) @@ -84,7 +83,7 @@ func containerFileExists(eng *engine.Engine, id, dir string, t log.Fataler) bool return true } -func containerAttach(eng *engine.Engine, id string, t log.Fataler) (io.WriteCloser, io.ReadCloser) { +func containerAttach(eng *engine.Engine, id string, t Fataler) (io.WriteCloser, io.ReadCloser) { c := getContainer(eng, id, t) i, err := c.StdinPipe() if err != nil { @@ -97,31 +96,31 @@ func containerAttach(eng *engine.Engine, id string, t log.Fataler) (io.WriteClos return i, o } -func containerWait(eng *engine.Engine, id string, t log.Fataler) int { +func containerWait(eng *engine.Engine, id string, t 
Fataler) int { ex, _ := getContainer(eng, id, t).WaitStop(-1 * time.Second) return ex } -func containerWaitTimeout(eng *engine.Engine, id string, t log.Fataler) error { +func containerWaitTimeout(eng *engine.Engine, id string, t Fataler) error { _, err := getContainer(eng, id, t).WaitStop(500 * time.Millisecond) return err } -func containerKill(eng *engine.Engine, id string, t log.Fataler) { +func containerKill(eng *engine.Engine, id string, t Fataler) { if err := eng.Job("kill", id).Run(); err != nil { t.Fatal(err) } } -func containerRunning(eng *engine.Engine, id string, t log.Fataler) bool { +func containerRunning(eng *engine.Engine, id string, t Fataler) bool { return getContainer(eng, id, t).IsRunning() } -func containerAssertExists(eng *engine.Engine, id string, t log.Fataler) { +func containerAssertExists(eng *engine.Engine, id string, t Fataler) { getContainer(eng, id, t) } -func containerAssertNotExists(eng *engine.Engine, id string, t log.Fataler) { +func containerAssertNotExists(eng *engine.Engine, id string, t Fataler) { daemon := mkDaemonFromEngine(eng, t) if c := daemon.Get(id); c != nil { t.Fatal(fmt.Errorf("Container %s should not exist", id)) @@ -130,7 +129,7 @@ func containerAssertNotExists(eng *engine.Engine, id string, t log.Fataler) { // assertHttpNotError expect the given response to not have an error. // Otherwise the it causes the test to fail. -func assertHttpNotError(r *httptest.ResponseRecorder, t log.Fataler) { +func assertHttpNotError(r *httptest.ResponseRecorder, t Fataler) { // Non-error http status are [200, 400) if r.Code < http.StatusOK || r.Code >= http.StatusBadRequest { t.Fatal(fmt.Errorf("Unexpected http error: %v", r.Code)) @@ -139,14 +138,14 @@ func assertHttpNotError(r *httptest.ResponseRecorder, t log.Fataler) { // assertHttpError expect the given response to have an error. // Otherwise the it causes the test to fail. 
-func assertHttpError(r *httptest.ResponseRecorder, t log.Fataler) { +func assertHttpError(r *httptest.ResponseRecorder, t Fataler) { // Non-error http status are [200, 400) if !(r.Code < http.StatusOK || r.Code >= http.StatusBadRequest) { t.Fatal(fmt.Errorf("Unexpected http success code: %v", r.Code)) } } -func getContainer(eng *engine.Engine, id string, t log.Fataler) *daemon.Container { +func getContainer(eng *engine.Engine, id string, t Fataler) *daemon.Container { daemon := mkDaemonFromEngine(eng, t) c := daemon.Get(id) if c == nil { @@ -155,7 +154,7 @@ func getContainer(eng *engine.Engine, id string, t log.Fataler) *daemon.Containe return c } -func mkDaemonFromEngine(eng *engine.Engine, t log.Fataler) *daemon.Daemon { +func mkDaemonFromEngine(eng *engine.Engine, t Fataler) *daemon.Daemon { iDaemon := eng.Hack_GetGlobalVar("httpapi.daemon") if iDaemon == nil { panic("Legacy daemon field not set in engine") @@ -167,7 +166,7 @@ func mkDaemonFromEngine(eng *engine.Engine, t log.Fataler) *daemon.Daemon { return daemon } -func newTestEngine(t log.Fataler, autorestart bool, root string) *engine.Engine { +func newTestEngine(t Fataler, autorestart bool, root string) *engine.Engine { if root == "" { if dir, err := newTestDirectory(unitTestStoreBase); err != nil { t.Fatal(err) @@ -189,6 +188,7 @@ func newTestEngine(t log.Fataler, autorestart bool, root string) *engine.Engine // Either InterContainerCommunication or EnableIptables must be set, // otherwise NewDaemon will fail because of conflicting settings. 
InterContainerCommunication: true, + TrustKeyPath: filepath.Join(root, "key.json"), } d, err := daemon.NewDaemon(cfg, eng) if err != nil { @@ -200,7 +200,7 @@ func newTestEngine(t log.Fataler, autorestart bool, root string) *engine.Engine return eng } -func NewTestEngine(t log.Fataler) *engine.Engine { +func NewTestEngine(t Fataler) *engine.Engine { return newTestEngine(t, false, "") } @@ -251,7 +251,7 @@ func readFile(src string, t *testing.T) (content string) { // The caller is responsible for destroying the container. // Call t.Fatal() at the first error. func mkContainer(r *daemon.Daemon, args []string, t *testing.T) (*daemon.Container, *runconfig.HostConfig, error) { - config, hc, _, err := parseRun(args, nil) + config, hc, _, err := parseRun(args) defer func() { if err != nil && t != nil { t.Fatal(err) @@ -352,9 +352,9 @@ func getImages(eng *engine.Engine, t *testing.T, all bool, filter string) *engin } -func parseRun(args []string, sysInfo *sysinfo.SysInfo) (*runconfig.Config, *runconfig.HostConfig, *flag.FlagSet, error) { +func parseRun(args []string) (*runconfig.Config, *runconfig.HostConfig, *flag.FlagSet, error) { cmd := flag.NewFlagSet("run", flag.ContinueOnError) cmd.SetOutput(ioutil.Discard) cmd.Usage = nil - return runconfig.Parse(cmd, args, sysInfo) + return runconfig.Parse(cmd, args) } diff --git a/links/links.go b/links/links.go index d2d699398e..fc4d95ab08 100644 --- a/links/links.go +++ b/links/links.go @@ -47,6 +47,20 @@ func (l *Link) Alias() string { return alias } +func nextContiguous(ports []nat.Port, value int, index int) int { + if index+1 == len(ports) { + return index + } + for i := index + 1; i < len(ports); i++ { + if ports[i].Int() > value+1 { + return i - 1 + } + + value++ + } + return len(ports) - 1 +} + func (l *Link) ToEnv() []string { env := []string{} alias := strings.Replace(strings.ToUpper(l.Alias()), "-", "_", -1) @@ -55,12 +69,35 @@ func (l *Link) ToEnv() []string { env = append(env, fmt.Sprintf("%s_PORT=%s://%s:%s", alias, 
p.Proto(), l.ChildIP, p.Port())) } - // Load exposed ports into the environment - for _, p := range l.Ports { + //sort the ports so that we can bulk the continuous ports together + nat.Sort(l.Ports, func(ip, jp nat.Port) bool { + // If the two ports have the same number, tcp takes priority + // Sort in desc order + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") + }) + + for i := 0; i < len(l.Ports); { + p := l.Ports[i] + j := nextContiguous(l.Ports, p.Int(), i) + if j > i+1 { + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_START=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_START=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) + + q := l.Ports[j] + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_END=%s://%s:%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Proto(), l.ChildIP, q.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_END=%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Port())) + + i = j + 1 + continue + } + env = append(env, fmt.Sprintf("%s_PORT_%s_%s=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) + i++ } // Load the linked container's name into the environment @@ -125,7 +162,7 @@ func (l *Link) toggle(action string, ignoreErrors bool) error { out := make([]string, len(l.Ports)) for i, p := 
range l.Ports { - out[i] = fmt.Sprintf("%s/%s", p.Port(), p.Proto()) + out[i] = string(p) } job.SetenvList("Ports", out) diff --git a/links/links_test.go b/links/links_test.go index c26559e599..7ba9513ea0 100644 --- a/links/links_test.go +++ b/links/links_test.go @@ -107,3 +107,52 @@ func TestLinkEnv(t *testing.T) { t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) } } + +func TestLinkMultipleEnv(t *testing.T) { + ports := make(nat.PortSet) + ports[nat.Port("6379/tcp")] = struct{}{} + ports[nat.Port("6380/tcp")] = struct{}{} + ports[nat.Port("6381/tcp")] = struct{}{} + + link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports, nil) + if err != nil { + t.Fatal(err) + } + + rawEnv := link.ToEnv() + env := make(map[string]string, len(rawEnv)) + for _, e := range rawEnv { + parts := strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) + } + if env["DOCKER_PORT_6379_TCP_START"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP_START"]) + } + if env["DOCKER_PORT_6379_TCP_END"] != "tcp://172.0.17.2:6381" { + t.Fatalf("Expected tcp://172.0.17.2:6381, got %s", env["DOCKER_PORT_6379_TCP_END"]) + } + if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { + t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) + } + if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { + t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_START"] != "6379" { + t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT_START"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_END"] != "6381" { + t.Fatalf("Expected 6381, got %s", env["DOCKER_PORT_6379_TCP_PORT_END"]) + } + if env["DOCKER_NAME"] != "/db/docker" { + t.Fatalf("Expected /db/docker, got %s", 
env["DOCKER_NAME"]) + } + if env["DOCKER_ENV_PASSWORD"] != "gordon" { + t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) + } +} diff --git a/nat/nat.go b/nat/nat.go index b0177289ce..1246626b0d 100644 --- a/nat/nat.go +++ b/nat/nat.go @@ -42,44 +42,37 @@ func ParsePort(rawPort string) (int, error) { } func (p Port) Proto() string { - parts := strings.Split(string(p), "/") - if len(parts) == 1 { - return "tcp" - } - return parts[1] + proto, _ := SplitProtoPort(string(p)) + return proto } func (p Port) Port() string { - return strings.Split(string(p), "/")[0] + _, port := SplitProtoPort(string(p)) + return port } func (p Port) Int() int { - i, err := ParsePort(p.Port()) + port, err := ParsePort(p.Port()) if err != nil { panic(err) } - return i + return port } // Splits a port in the format of proto/port func SplitProtoPort(rawPort string) (string, string) { - var port string - var proto string - parts := strings.Split(rawPort, "/") - - if len(parts) == 0 || parts[0] == "" { // we have "" or ""/ - port = "" - proto = "" - } else { // we have # or #/ or #/... - port = parts[0] - if len(parts) > 1 && parts[1] != "" { - proto = parts[1] // we have #/... 
- } else { - proto = "tcp" // we have # or #/ - } + l := len(parts) + if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { + return "", "" } - return proto, port + if l == 1 { + return "tcp", rawPort + } + if len(parts[1]) == 0 { + return "tcp", parts[0] + } + return parts[1], parts[0] } func validateProto(proto string) bool { diff --git a/nat/nat_test.go b/nat/nat_test.go index a8c2cb584e..4ae9f4ece5 100644 --- a/nat/nat_test.go +++ b/nat/nat_test.go @@ -76,13 +76,13 @@ func TestSplitProtoPort(t *testing.T) { proto, port = SplitProtoPort("") if proto != "" || port != "" { - t.Fatal("parsing an empty string yielded surprising results") + t.Fatal("parsing an empty string yielded surprising results", proto, port) } proto, port = SplitProtoPort("1234") if proto != "tcp" || port != "1234" { - t.Fatal("tcp is not the default protocol for portspec '1234'") + t.Fatal("tcp is not the default protocol for portspec '1234'", proto, port) } proto, port = SplitProtoPort("1234/") diff --git a/opts/opts.go b/opts/opts.go index 4ca7ec58ce..f15064ac69 100644 --- a/opts/opts.go +++ b/opts/opts.go @@ -5,7 +5,7 @@ import ( "net" "net/url" "os" - "path/filepath" + "path" "regexp" "strings" @@ -43,6 +43,10 @@ func MirrorListVar(values *[]string, names []string, usage string) { flag.Var(newListOptsRef(values, ValidateMirror), names, usage) } +func LabelListVar(values *[]string, names []string, usage string) { + flag.Var(newListOptsRef(values, ValidateLabel), names, usage) +} + // ListOpts type type ListOpts struct { values *[]string @@ -151,13 +155,13 @@ func ValidatePath(val string) (string, error) { splited := strings.SplitN(val, ":", 2) if len(splited) == 1 { containerPath = splited[0] - val = filepath.Clean(splited[0]) + val = path.Clean(splited[0]) } else { containerPath = splited[1] - val = fmt.Sprintf("%s:%s", splited[0], filepath.Clean(splited[1])) + val = fmt.Sprintf("%s:%s", splited[0], path.Clean(splited[1])) } - if !filepath.IsAbs(containerPath) { + if 
!path.IsAbs(containerPath) { return val, fmt.Errorf("%s is not an absolute path", containerPath) } return val, nil @@ -227,3 +231,10 @@ func ValidateMirror(val string) (string, error) { return fmt.Sprintf("%s://%s/v1/", uri.Scheme, uri.Host), nil } + +func ValidateLabel(val string) (string, error) { + if strings.Count(val, "=") != 1 { + return "", fmt.Errorf("bad attribute format: %s", val) + } + return val, nil +} diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index 74c6014506..ec45d8546d 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -18,8 +18,8 @@ import ( "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/system" @@ -34,6 +34,7 @@ type ( Excludes []string Compression Compression NoLchown bool + Name string } // Archiver allows the reuse of most utility functions of this package @@ -164,7 +165,15 @@ func (compression *Compression) Extension() string { return "" } -func addTarFile(path, name string, tw *tar.Writer, twBuf *bufio.Writer) error { +type tarAppender struct { + TarWriter *tar.Writer + Buffer *bufio.Writer + + // for hardlink mapping + SeenFiles map[uint64]string +} + +func (ta *tarAppender) addTarFile(path, name string) error { fi, err := os.Lstat(path) if err != nil { return err @@ -188,15 +197,23 @@ func addTarFile(path, name string, tw *tar.Writer, twBuf *bufio.Writer) error { hdr.Name = name - stat, ok := fi.Sys().(*syscall.Stat_t) - if ok { - // Currently go does not fill in the major/minors - if stat.Mode&syscall.S_IFBLK == syscall.S_IFBLK || - stat.Mode&syscall.S_IFCHR == syscall.S_IFCHR { - hdr.Devmajor = int64(major(uint64(stat.Rdev))) - hdr.Devminor = int64(minor(uint64(stat.Rdev))) - } + nlink, inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys()) + if 
err != nil { + return err + } + // if it's a regular file and has more than 1 link, + // it's hardlinked, so set the type flag accordingly + if fi.Mode().IsRegular() && nlink > 1 { + // a link should have a name that it links too + // and that linked name should be first in the tar archive + if oldpath, ok := ta.SeenFiles[inode]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 // This Must be here for the writer math to add up! + } else { + ta.SeenFiles[inode] = name + } } capability, _ := system.Lgetxattr(path, "security.capability") @@ -205,7 +222,7 @@ func addTarFile(path, name string, tw *tar.Writer, twBuf *bufio.Writer) error { hdr.Xattrs["security.capability"] = string(capability) } - if err := tw.WriteHeader(hdr); err != nil { + if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } @@ -215,17 +232,17 @@ func addTarFile(path, name string, tw *tar.Writer, twBuf *bufio.Writer) error { return err } - twBuf.Reset(tw) - _, err = io.Copy(twBuf, file) + ta.Buffer.Reset(ta.TarWriter) + defer ta.Buffer.Reset(nil) + _, err = io.Copy(ta.Buffer, file) file.Close() if err != nil { return err } - err = twBuf.Flush() + err = ta.Buffer.Flush() if err != nil { return err } - twBuf.Reset(nil) } return nil @@ -270,7 +287,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L mode |= syscall.S_IFIFO } - if err := syscall.Mknod(path, mode, int(mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { + if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { return err } @@ -370,9 +387,15 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) return nil, err } - tw := tar.NewWriter(compressWriter) - go func() { + ta := &tarAppender{ + TarWriter: tar.NewWriter(compressWriter), + Buffer: pools.BufioWriter32KPool.Get(nil), + SeenFiles: make(map[uint64]string), + } + // this buffer is needed for the duration of this piped stream + defer 
pools.BufioWriter32KPool.Put(ta.Buffer) + // In general we log errors here but ignore them because // during e.g. a diff operation the container can continue // mutating the filesystem and we can see transient errors @@ -382,9 +405,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) options.Includes = []string{"."} } - twBuf := pools.BufioWriter32KPool.Get(nil) - defer pools.BufioWriter32KPool.Put(twBuf) - + var renamedRelFilePath string // For when tar.Options.Name is set for _, include := range options.Includes { filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error { if err != nil { @@ -393,7 +414,9 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) } relFilePath, err := filepath.Rel(srcPath, filePath) - if err != nil { + if err != nil || (relFilePath == "." && f.IsDir()) { + // Error getting relative path OR we are looking + // at the root path. Skip in both situations. return nil } @@ -410,7 +433,16 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) return nil } - if err := addTarFile(filePath, relFilePath, tw, twBuf); err != nil { + // Rename the base resource + if options.Name != "" && filePath == srcPath+"/"+filepath.Base(relFilePath) { + renamedRelFilePath = relFilePath + } + // Set this to make sure the items underneath also get renamed + if options.Name != "" { + relFilePath = strings.Replace(relFilePath, renamedRelFilePath, options.Name, 1) + } + + if err := ta.addTarFile(filePath, relFilePath); err != nil { log.Debugf("Can't add file %s to tar: %s", srcPath, err) } return nil @@ -418,7 +450,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) } // Make sure to check the error on Close. 
- if err := tw.Close(); err != nil { + if err := ta.TarWriter.Close(); err != nil { log.Debugf("Can't close tar writer: %s", err) } if err := compressWriter.Close(); err != nil { @@ -737,17 +769,33 @@ func NewTempArchive(src Archive, dir string) (*TempArchive, error) { return nil, err } size := st.Size() - return &TempArchive{f, size}, nil + return &TempArchive{File: f, Size: size}, nil } type TempArchive struct { *os.File - Size int64 // Pre-computed from Stat().Size() as a convenience + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. +func (archive *TempArchive) Close() error { + if archive.closed { + return nil + } + + archive.closed = true + + return archive.File.Close() } func (archive *TempArchive) Read(data []byte) (int, error) { n, err := archive.File.Read(data) - if err != nil { + archive.read += int64(n) + if err != nil || archive.read == archive.Size { + archive.Close() os.Remove(archive.File.Name()) } return n, err diff --git a/pkg/archive/archive_test.go b/pkg/archive/archive_test.go index 7c9db44434..fdba6fb87c 100644 --- a/pkg/archive/archive_test.go +++ b/pkg/archive/archive_test.go @@ -9,6 +9,8 @@ import ( "os/exec" "path" "path/filepath" + "strings" + "syscall" "testing" "time" @@ -64,6 +66,50 @@ func TestCmdStreamGood(t *testing.T) { } } +func TestTarFiles(t *testing.T) { + // try without hardlinks + if err := checkNoChanges(1000, false); err != nil { + t.Fatal(err) + } + // try with hardlinks + if err := checkNoChanges(1000, true); err != nil { + t.Fatal(err) + } +} + +func checkNoChanges(fileNum int, hardlinks bool) error { + srcDir, err := ioutil.TempDir("", "docker-test-srcDir") + if err != nil { + return err + } + defer os.RemoveAll(srcDir) + + destDir, err := ioutil.TempDir("", "docker-test-destDir") + if err != nil { + return err + } + defer 
os.RemoveAll(destDir) + + _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) + if err != nil { + return err + } + + err = TarUntar(srcDir, destDir) + if err != nil { + return err + } + + changes, err := ChangesDirs(destDir, srcDir) + if err != nil { + return err + } + if len(changes) > 0 { + return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) + } + return nil +} + func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { archive, err := TarWithOptions(origin, options) if err != nil { @@ -210,13 +256,100 @@ func TestUntarUstarGnuConflict(t *testing.T) { } } -func prepareUntarSourceDirectory(numberOfFiles int, targetPath string) (int, error) { +func TestTarWithHardLink(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := os.Link(path.Join(origin, "1"), path.Join(origin, "2")); err != nil { + t.Fatal(err) + } + + var i1, i2 uint64 + if i1, err = getNlink(path.Join(origin, "1")); err != nil { + t.Fatal(err) + } + // sanity check that we can hardlink + if i1 != 2 { + t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1) + } + + dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dest) + + // we'll do this in two steps to separate failure + fh, err := Tar(origin, Uncompressed) + if err != nil { + t.Fatal(err) + } + + // ensure we can read the whole thing with no error, before writing back out + buf, err := ioutil.ReadAll(fh) + if err != nil { + t.Fatal(err) + } + + bRdr := bytes.NewReader(buf) + err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) + if err != nil { + t.Fatal(err) + } + + if i1, err = getInode(path.Join(dest, "1")); err != nil { + 
t.Fatal(err) + } + if i2, err = getInode(path.Join(dest, "2")); err != nil { + t.Fatal(err) + } + + if i1 != i2 { + t.Errorf("expected matching inodes, but got %d and %d", i1, i2) + } +} + +func getNlink(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + statT, ok := stat.Sys().(*syscall.Stat_t) + if !ok { + return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys()) + } + return statT.Nlink, nil +} + +func getInode(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + statT, ok := stat.Sys().(*syscall.Stat_t) + if !ok { + return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys()) + } + return statT.Ino, nil +} + +func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { fileData := []byte("fooo") for n := 0; n < numberOfFiles; n++ { fileName := fmt.Sprintf("file-%d", n) if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { return 0, err } + if makeLinks { + if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } } totalSize := numberOfFiles * len(fileData) return totalSize, nil @@ -232,14 +365,43 @@ func BenchmarkTarUntar(b *testing.B) { b.Fatal(err) } target := path.Join(tempDir, "dest") - n, err := prepareUntarSourceDirectory(100, origin) + n, err := prepareUntarSourceDirectory(100, origin, false) if err != nil { b.Fatal(err) } - b.ResetTimer() - b.SetBytes(int64(n)) defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) + + b.ResetTimer() + b.SetBytes(int64(n)) + for n := 0; n < b.N; n++ { + err := TarUntar(origin, target) + if err != nil { + b.Fatal(err) + } + os.RemoveAll(target) + } +} + +func BenchmarkTarUntarWithLinks(b *testing.B) { + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + b.Fatal(err) + } + tempDir, err := ioutil.TempDir("", 
"docker-test-untar-destination") + if err != nil { + b.Fatal(err) + } + target := path.Join(tempDir, "dest") + n, err := prepareUntarSourceDirectory(100, origin, true) + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(origin) + defer os.RemoveAll(tempDir) + + b.ResetTimer() + b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := TarUntar(origin, target) if err != nil { @@ -446,3 +608,18 @@ func TestUntarInvalidSymlink(t *testing.T) { } } } + +func TestTempArchiveCloseMultipleTimes(t *testing.T) { + reader := ioutil.NopCloser(strings.NewReader("hello")) + tempArchive, err := NewTempArchive(reader, "") + buf := make([]byte, 10) + n, err := tempArchive.Read(buf) + if n != 5 { + t.Fatalf("Expected to read 5 bytes. Read %d instead", n) + } + for i := 0; i < 3; i++ { + if err = tempArchive.Close(); err != nil { + t.Fatalf("i=%d. Unexpected error closing temp archive: %v", i, err) + } + } +} diff --git a/pkg/archive/archive_unix.go b/pkg/archive/archive_unix.go new file mode 100644 index 0000000000..c0e8aee93c --- /dev/null +++ b/pkg/archive/archive_unix.go @@ -0,0 +1,39 @@ +// +build !windows + +package archive + +import ( + "errors" + "syscall" + + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" +) + +func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + err = errors.New("cannot convert stat value to syscall.Stat_t") + return + } + + nlink = uint32(s.Nlink) + inode = uint64(s.Ino) + + // Currently go does not fil in the major/minors + if s.Mode&syscall.S_IFBLK == syscall.S_IFBLK || + s.Mode&syscall.S_IFCHR == syscall.S_IFCHR { + hdr.Devmajor = int64(major(uint64(s.Rdev))) + hdr.Devminor = int64(minor(uint64(s.Rdev))) + } + + return +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} 
diff --git a/pkg/archive/archive_windows.go b/pkg/archive/archive_windows.go new file mode 100644 index 0000000000..3cc2493f6f --- /dev/null +++ b/pkg/archive/archive_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package archive + +import ( + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" +) + +func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) { + // do nothing. no notion of Rdev, Inode, Nlink in stat on Windows + return +} diff --git a/pkg/archive/changes.go b/pkg/archive/changes.go index 5fbdcc90af..85217f6e08 100644 --- a/pkg/archive/changes.go +++ b/pkg/archive/changes.go @@ -12,7 +12,7 @@ import ( "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" ) @@ -135,7 +135,7 @@ func Changes(layers []string, rw string) ([]Change, error) { type FileInfo struct { parent *FileInfo name string - stat syscall.Stat_t + stat *system.Stat children map[string]*FileInfo capability []byte added bool @@ -168,7 +168,7 @@ func (info *FileInfo) path() string { } func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.Mode&syscall.S_IFDIR == syscall.S_IFDIR + return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR == syscall.S_IFDIR } func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { @@ -199,21 +199,21 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { oldChild, _ := oldChildren[name] if oldChild != nil { // change? - oldStat := &oldChild.stat - newStat := &newChild.stat + oldStat := oldChild.stat + newStat := newChild.stat // Note: We can't compare inode or ctime or blocksize here, because these change // when copying a file into a container. 
However, that is not generally a problem // because any content change will change mtime, and any status change should // be visible when actually comparing the stat fields. The only time this // breaks down is if some code intentionally hides a change by setting // back mtime - if oldStat.Mode != newStat.Mode || - oldStat.Uid != newStat.Uid || - oldStat.Gid != newStat.Gid || - oldStat.Rdev != newStat.Rdev || + if oldStat.Mode() != newStat.Mode() || + oldStat.Uid() != newStat.Uid() || + oldStat.Gid() != newStat.Gid() || + oldStat.Rdev() != newStat.Rdev() || // Don't look at size for dirs, its not a good measure of change - (oldStat.Size != newStat.Size && oldStat.Mode&syscall.S_IFDIR != syscall.S_IFDIR) || - !sameFsTimeSpec(system.GetLastModification(oldStat), system.GetLastModification(newStat)) || + (oldStat.Size() != newStat.Size() && oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR) || + !sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || bytes.Compare(oldChild.capability, newChild.capability) != 0 { change := Change{ Path: newChild.path(), @@ -299,9 +299,11 @@ func collectFileInfo(sourceDir string) (*FileInfo, error) { parent: parent, } - if err := syscall.Lstat(path, &info.stat); err != nil { + s, err := system.Lstat(path) + if err != nil { return err } + info.stat = s info.capability, _ = system.Lgetxattr(path, "security.capability") @@ -333,6 +335,8 @@ func ChangesDirs(newDir, oldDir string) ([]Change, error) { newRoot, err2 = collectFileInfo(newDir) errs <- err2 }() + + // block until both routines have returned for i := 0; i < 2; i++ { if err := <-errs; err != nil { return nil, err @@ -357,22 +361,18 @@ func ChangesSize(newDir string, changes []Change) int64 { return size } -func major(device uint64) uint64 { - return (device >> 8) & 0xfff -} - -func minor(device uint64) uint64 { - return (device & 0xff) | ((device >> 12) & 0xfff00) -} - // ExportChanges produces an Archive from the provided changes, relative to dir. 
func ExportChanges(dir string, changes []Change) (Archive, error) { reader, writer := io.Pipe() - tw := tar.NewWriter(writer) - go func() { - twBuf := pools.BufioWriter32KPool.Get(nil) - defer pools.BufioWriter32KPool.Put(twBuf) + ta := &tarAppender{ + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + SeenFiles: make(map[uint64]string), + } + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + // In general we log errors here but ignore them because // during e.g. a diff operation the container can continue // mutating the filesystem and we can see transient errors @@ -390,22 +390,24 @@ func ExportChanges(dir string, changes []Change) (Archive, error) { AccessTime: timestamp, ChangeTime: timestamp, } - if err := tw.WriteHeader(hdr); err != nil { + if err := ta.TarWriter.WriteHeader(hdr); err != nil { log.Debugf("Can't write whiteout header: %s", err) } } else { path := filepath.Join(dir, change.Path) - if err := addTarFile(path, change.Path[1:], tw, twBuf); err != nil { + if err := ta.addTarFile(path, change.Path[1:]); err != nil { log.Debugf("Can't add file %s to tar: %s", path, err) } } } // Make sure to check the error on Close. - if err := tw.Close(); err != nil { + if err := ta.TarWriter.Close(); err != nil { log.Debugf("Can't close layer: %s", err) } - writer.Close() + if err := writer.Close(); err != nil { + log.Debugf("failed close Changes writer: %s", err) + } }() return reader, nil } diff --git a/pkg/archive/diff.go b/pkg/archive/diff.go index 80bb197468..ba22c41f3c 100644 --- a/pkg/archive/diff.go +++ b/pkg/archive/diff.go @@ -12,15 +12,9 @@ import ( "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" ) -// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. 
-// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, -// then the top 12 bits of the minor -func mkdev(major int64, minor int64) uint32 { - return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) -} - func UnpackLayer(dest string, layer ArchiveReader) error { tr := tar.NewReader(layer) trBuf := pools.BufioReader32KPool.Get(tr) @@ -155,11 +149,15 @@ func UnpackLayer(dest string, layer ArchiveReader) error { // applies it to the directory `dest`. func ApplyLayer(dest string, layer ArchiveReader) error { dest = filepath.Clean(dest) - // We need to be able to set any perms - oldmask := syscall.Umask(0) - defer syscall.Umask(oldmask) - layer, err := DecompressStream(layer) + // We need to be able to set any perms + oldmask, err := system.Umask(0) + if err != nil { + return err + } + defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform + + layer, err = DecompressStream(layer) if err != nil { return err } diff --git a/pkg/archive/example_changes.go b/pkg/archive/example_changes.go new file mode 100644 index 0000000000..cedd46a408 --- /dev/null +++ b/pkg/archive/example_changes.go @@ -0,0 +1,97 @@ +// +build ignore + +// Simple tool to create an archive stream from an old and new directory +// +// By default it will stream the comparison of two temporary directories with junk files +package main + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/archive" +) + +var ( + flDebug = flag.Bool("D", false, "debugging output") + flNewDir = flag.String("newdir", "", "") + flOldDir = flag.String("olddir", "", "") + log = logrus.New() +) + +func main() { + flag.Usage = func() { + fmt.Println("Produce a tar from comparing two directory paths. 
By default a demo tar is created of around 200 files (including hardlinks)") + fmt.Printf("%s [OPTIONS]\n", os.Args[0]) + flag.PrintDefaults() + } + flag.Parse() + log.Out = os.Stderr + if (len(os.Getenv("DEBUG")) > 0) || *flDebug { + logrus.SetLevel(logrus.DebugLevel) + } + var newDir, oldDir string + + if len(*flNewDir) == 0 { + var err error + newDir, err = ioutil.TempDir("", "docker-test-newDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(newDir) + if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { + log.Fatal(err) + } + } else { + newDir = *flNewDir + } + + if len(*flOldDir) == 0 { + oldDir, err := ioutil.TempDir("", "docker-test-oldDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(oldDir) + } else { + oldDir = *flOldDir + } + + changes, err := archive.ChangesDirs(newDir, oldDir) + if err != nil { + log.Fatal(err) + } + + a, err := archive.ExportChanges(newDir, changes) + if err != nil { + log.Fatal(err) + } + defer a.Close() + + i, err := io.Copy(os.Stdout, a) + if err != nil && err != io.EOF { + log.Fatal(err) + } + fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) +} + +func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + if makeLinks { + if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} diff --git a/pkg/broadcastwriter/broadcastwriter.go b/pkg/broadcastwriter/broadcastwriter.go index 1898302e79..232cf3dfc8 100644 --- a/pkg/broadcastwriter/broadcastwriter.go +++ b/pkg/broadcastwriter/broadcastwriter.go @@ -6,8 +6,8 @@ import ( "sync" "time" + log "github.com/Sirupsen/logrus" 
"github.com/docker/docker/pkg/jsonlog" - "github.com/docker/docker/pkg/log" ) // BroadcastWriter accumulate multiple io.WriteCloser by stream. diff --git a/pkg/devicemapper/MAINTAINERS b/pkg/devicemapper/MAINTAINERS new file mode 100644 index 0000000000..4428dec019 --- /dev/null +++ b/pkg/devicemapper/MAINTAINERS @@ -0,0 +1 @@ +Vincent Batts (@vbatts) diff --git a/daemon/graphdriver/devmapper/attach_loopback.go b/pkg/devicemapper/attach_loopback.go similarity index 96% rename from daemon/graphdriver/devmapper/attach_loopback.go rename to pkg/devicemapper/attach_loopback.go index 9cfa18a4d3..d39cbc6cf5 100644 --- a/daemon/graphdriver/devmapper/attach_loopback.go +++ b/pkg/devicemapper/attach_loopback.go @@ -1,13 +1,13 @@ // +build linux -package devmapper +package devicemapper import ( "fmt" "os" "syscall" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) func stringToLoopName(src string) [LoNameSize]uint8 { @@ -84,7 +84,7 @@ func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.Fil // attachLoopDevice attaches the given sparse file to the next // available loopback device. It returns an opened *os.File. -func attachLoopDevice(sparseName string) (loop *os.File, err error) { +func AttachLoopDevice(sparseName string) (loop *os.File, err error) { // Try to retrieve the next available loopback device via syscall. 
// If it fails, we discard error and start loopking for a diff --git a/daemon/graphdriver/devmapper/devmapper.go b/pkg/devicemapper/devmapper.go similarity index 67% rename from daemon/graphdriver/devmapper/devmapper.go rename to pkg/devicemapper/devmapper.go index d09e740749..c23a3624db 100644 --- a/daemon/graphdriver/devmapper/devmapper.go +++ b/pkg/devicemapper/devmapper.go @@ -1,6 +1,6 @@ // +build linux -package devmapper +package devicemapper import ( "errors" @@ -9,11 +9,11 @@ import ( "runtime" "syscall" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) type DevmapperLogger interface { - log(level int, file string, line int, dmError int, message string) + DMLog(level int, file string, line int, dmError int, message string) } const ( @@ -51,6 +51,7 @@ var ( ErrTaskSetRo = errors.New("dm_task_set_ro failed") ErrTaskAddTarget = errors.New("dm_task_add_target failed") ErrTaskSetSector = errors.New("dm_task_set_sector failed") + ErrTaskGetDeps = errors.New("dm_task_get_deps failed") ErrTaskGetInfo = errors.New("dm_task_get_info failed") ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed") ErrTaskSetCookie = errors.New("dm_task_set_cookie failed") @@ -61,11 +62,12 @@ var ( ErrSetDevDir = errors.New("dm_set_dev_dir failed") ErrGetLibraryVersion = errors.New("dm_get_library_version failed") ErrCreateRemoveTask = errors.New("Can't create task of type DeviceRemove") - ErrRunRemoveDevice = errors.New("running removeDevice failed") - ErrInvalidAddNode = errors.New("Invalide AddNoce type") + ErrRunRemoveDevice = errors.New("running RemoveDevice failed") + ErrInvalidAddNode = errors.New("Invalid AddNode type") ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") ErrLoopbackSetCapacity = errors.New("Unable set loopback capacity") ErrBusy = errors.New("Device is Busy") + ErrDeviceIdExists = errors.New("Device Id Exists") dmSawBusy bool dmSawExist bool @@ -75,6 +77,11 @@ type ( Task struct { unmanaged 
*CDmTask } + Deps struct { + Count uint32 + Filler uint32 + Device []uint64 + } Info struct { Exists int Suspended int @@ -91,6 +98,16 @@ type ( AddNodeType int ) +// Returns whether error conveys the information about device Id already +// exist or not. This will be true if device creation or snap creation +// operation fails if device or snap device already exists in pool. +// Current implementation is little crude as it scans the error string +// for exact pattern match. Replacing it with more robust implementation +// is desirable. +func DeviceIdExists(err error) bool { + return fmt.Sprint(err) == fmt.Sprint(ErrDeviceIdExists) +} + func (t *Task) destroy() { if t != nil { DmTaskDestroy(t.unmanaged) @@ -98,6 +115,20 @@ func (t *Task) destroy() { } } +// TaskCreateNamed is a convenience function for TaskCreate when a name +// will be set on the task as well +func TaskCreateNamed(t TaskType, name string) (*Task, error) { + task := TaskCreate(t) + if task == nil { + return nil, fmt.Errorf("Can't create task of type %d", int(t)) + } + if err := task.SetName(name); err != nil { + return nil, fmt.Errorf("Can't set task name %s", name) + } + return task, nil +} + +// TaskCreate initializes a devicemapper task of tasktype func TaskCreate(tasktype TaskType) *Task { Ctask := DmTaskCreate(int(tasktype)) if Ctask == nil { @@ -171,6 +202,14 @@ func (t *Task) AddTarget(start, size uint64, ttype, params string) error { return nil } +func (t *Task) GetDeps() (*Deps, error) { + var deps *Deps + if deps = DmTaskGetDeps(t.unmanaged); deps == nil { + return nil, ErrTaskGetDeps + } + return deps, nil +} + func (t *Task) GetInfo() (*Info, error) { info := &Info{} if res := DmTaskGetInfo(t.unmanaged, info); res != 1 { @@ -258,7 +297,8 @@ func LogInitVerbose(level int) { var dmLogger DevmapperLogger = nil -func logInit(logger DevmapperLogger) { +// initialize the logger for the device mapper library +func LogInit(logger DevmapperLogger) { dmLogger = logger LogWithErrnoInit() } @@ 
-281,17 +321,27 @@ func GetLibraryVersion() (string, error) { // Useful helper for cleanup func RemoveDevice(name string) error { - task := TaskCreate(DeviceRemove) + log.Debugf("[devmapper] RemoveDevice START") + defer log.Debugf("[devmapper] RemoveDevice END") + task, err := TaskCreateNamed(DeviceRemove, name) if task == nil { - return ErrCreateRemoveTask - } - if err := task.SetName(name); err != nil { - log.Debugf("Can't set task name %s", name) return err } - if err := task.Run(); err != nil { - return ErrRunRemoveDevice + + var cookie uint = 0 + if err := task.SetCookie(&cookie, 0); err != nil { + return fmt.Errorf("Can not set cookie: %s", err) } + defer UdevWait(cookie) + + dmSawBusy = false // reset before the task is run + if err = task.Run(); err != nil { + if dmSawBusy { + return ErrBusy + } + return fmt.Errorf("Error running RemoveDevice %s", err) + } + return nil } @@ -328,8 +378,8 @@ func BlockDeviceDiscard(path string) error { } // This is the programmatic example of "dmsetup create" -func createPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { - task, err := createTask(DeviceCreate, poolName) +func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { + task, err := TaskCreateNamed(DeviceCreate, poolName) if task == nil { return err } @@ -345,21 +395,21 @@ func createPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize } var cookie uint = 0 - if err := task.SetCookie(&cookie, 0); err != nil { + var flags uint16 = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag + if err := task.SetCookie(&cookie, flags); err != nil { return fmt.Errorf("Can't set cookie %s", err) } + defer UdevWait(cookie) if err := task.Run(); err != nil { - return fmt.Errorf("Error running DeviceCreate (createPool) %s", err) + return fmt.Errorf("Error running DeviceCreate (CreatePool) %s", err) } - UdevWait(cookie) - return nil } -func 
reloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { - task, err := createTask(DeviceReload, poolName) +func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { + task, err := TaskCreateNamed(DeviceReload, poolName) if task == nil { return err } @@ -381,19 +431,19 @@ func reloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize return nil } -func createTask(t TaskType, name string) (*Task, error) { - task := TaskCreate(t) +func GetDeps(name string) (*Deps, error) { + task, err := TaskCreateNamed(DeviceDeps, name) if task == nil { - return nil, fmt.Errorf("Can't create task of type %d", int(t)) + return nil, err } - if err := task.SetName(name); err != nil { - return nil, fmt.Errorf("Can't set task name %s", name) + if err := task.Run(); err != nil { + return nil, err } - return task, nil + return task.GetDeps() } -func getInfo(name string) (*Info, error) { - task, err := createTask(DeviceInfo, name) +func GetInfo(name string) (*Info, error) { + task, err := TaskCreateNamed(DeviceInfo, name) if task == nil { return nil, err } @@ -403,7 +453,7 @@ func getInfo(name string) (*Info, error) { return task.GetInfo() } -func getDriverVersion() (string, error) { +func GetDriverVersion() (string, error) { task := TaskCreate(DeviceVersion) if task == nil { return "", fmt.Errorf("Can't create DeviceVersion task") @@ -414,24 +464,24 @@ func getDriverVersion() (string, error) { return task.GetDriverVersion() } -func getStatus(name string) (uint64, uint64, string, string, error) { - task, err := createTask(DeviceStatus, name) +func GetStatus(name string) (uint64, uint64, string, string, error) { + task, err := TaskCreateNamed(DeviceStatus, name) if task == nil { - log.Debugf("getStatus: Error createTask: %s", err) + log.Debugf("GetStatus: Error TaskCreateNamed: %s", err) return 0, 0, "", "", err } if err := task.Run(); err != nil { - log.Debugf("getStatus: Error Run: %s", err) + 
log.Debugf("GetStatus: Error Run: %s", err) return 0, 0, "", "", err } devinfo, err := task.GetInfo() if err != nil { - log.Debugf("getStatus: Error GetInfo: %s", err) + log.Debugf("GetStatus: Error GetInfo: %s", err) return 0, 0, "", "", err } if devinfo.Exists == 0 { - log.Debugf("getStatus: Non existing device %s", name) + log.Debugf("GetStatus: Non existing device %s", name) return 0, 0, "", "", fmt.Errorf("Non existing device %s", name) } @@ -439,8 +489,8 @@ func getStatus(name string) (uint64, uint64, string, string, error) { return start, length, targetType, params, nil } -func setTransactionId(poolName string, oldId uint64, newId uint64) error { - task, err := createTask(DeviceTargetMsg, poolName) +func SetTransactionId(poolName string, oldId uint64, newId uint64) error { + task, err := TaskCreateNamed(DeviceTargetMsg, poolName) if task == nil { return err } @@ -454,13 +504,13 @@ func setTransactionId(poolName string, oldId uint64, newId uint64) error { } if err := task.Run(); err != nil { - return fmt.Errorf("Error running setTransactionId %s", err) + return fmt.Errorf("Error running SetTransactionId %s", err) } return nil } -func suspendDevice(name string) error { - task, err := createTask(DeviceSuspend, name) +func SuspendDevice(name string) error { + task, err := TaskCreateNamed(DeviceSuspend, name) if task == nil { return err } @@ -470,8 +520,8 @@ func suspendDevice(name string) error { return nil } -func resumeDevice(name string) error { - task, err := createTask(DeviceResume, name) +func ResumeDevice(name string) error { + task, err := TaskCreateNamed(DeviceResume, name) if task == nil { return err } @@ -480,49 +530,44 @@ func resumeDevice(name string) error { if err := task.SetCookie(&cookie, 0); err != nil { return fmt.Errorf("Can't set cookie %s", err) } + defer UdevWait(cookie) if err := task.Run(); err != nil { return fmt.Errorf("Error running DeviceResume %s", err) } - UdevWait(cookie) - return nil } -func createDevice(poolName string, deviceId 
*int) error { - log.Debugf("[devmapper] createDevice(poolName=%v, deviceId=%v)", poolName, *deviceId) +func CreateDevice(poolName string, deviceId int) error { + log.Debugf("[devmapper] CreateDevice(poolName=%v, deviceId=%v)", poolName, deviceId) + task, err := TaskCreateNamed(DeviceTargetMsg, poolName) + if task == nil { + return err + } - for { - task, err := createTask(DeviceTargetMsg, poolName) - if task == nil { - return err - } + if err := task.SetSector(0); err != nil { + return fmt.Errorf("Can't set sector %s", err) + } - if err := task.SetSector(0); err != nil { - return fmt.Errorf("Can't set sector %s", err) - } + if err := task.SetMessage(fmt.Sprintf("create_thin %d", deviceId)); err != nil { + return fmt.Errorf("Can't set message %s", err) + } - if err := task.SetMessage(fmt.Sprintf("create_thin %d", *deviceId)); err != nil { - return fmt.Errorf("Can't set message %s", err) + dmSawExist = false // reset before the task is run + if err := task.Run(); err != nil { + // Caller wants to know about ErrDeviceIdExists so that it can try with a different device id. 
+ if dmSawExist { + return ErrDeviceIdExists + } else { + return fmt.Errorf("Error running CreateDevice %s", err) } - - dmSawExist = false - if err := task.Run(); err != nil { - if dmSawExist { - // Already exists, try next id - *deviceId++ - continue - } - return fmt.Errorf("Error running createDevice %s", err) - } - break } return nil } -func deleteDevice(poolName string, deviceId int) error { - task, err := createTask(DeviceTargetMsg, poolName) +func DeleteDevice(poolName string, deviceId int) error { + task, err := TaskCreateNamed(DeviceTargetMsg, poolName) if task == nil { return err } @@ -536,30 +581,13 @@ func deleteDevice(poolName string, deviceId int) error { } if err := task.Run(); err != nil { - return fmt.Errorf("Error running deleteDevice %s", err) + return fmt.Errorf("Error running DeleteDevice %s", err) } return nil } -func removeDevice(name string) error { - log.Debugf("[devmapper] removeDevice START") - defer log.Debugf("[devmapper] removeDevice END") - task, err := createTask(DeviceRemove, name) - if task == nil { - return err - } - dmSawBusy = false - if err = task.Run(); err != nil { - if dmSawBusy { - return ErrBusy - } - return fmt.Errorf("Error running removeDevice %s", err) - } - return nil -} - -func activateDevice(poolName string, name string, deviceId int, size uint64) error { - task, err := createTask(DeviceCreate, name) +func ActivateDevice(poolName string, name string, deviceId int, size uint64) error { + task, err := TaskCreateNamed(DeviceCreate, name) if task == nil { return err } @@ -577,67 +605,62 @@ func activateDevice(poolName string, name string, deviceId int, size uint64) err return fmt.Errorf("Can't set cookie %s", err) } - if err := task.Run(); err != nil { - return fmt.Errorf("Error running DeviceCreate (activateDevice) %s", err) - } + defer UdevWait(cookie) - UdevWait(cookie) + if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeviceCreate (ActivateDevice) %s", err) + } return nil } -func 
createSnapDevice(poolName string, deviceId *int, baseName string, baseDeviceId int) error { - devinfo, _ := getInfo(baseName) +func CreateSnapDevice(poolName string, deviceId int, baseName string, baseDeviceId int) error { + devinfo, _ := GetInfo(baseName) doSuspend := devinfo != nil && devinfo.Exists != 0 if doSuspend { - if err := suspendDevice(baseName); err != nil { + if err := SuspendDevice(baseName); err != nil { return err } } - for { - task, err := createTask(DeviceTargetMsg, poolName) - if task == nil { - if doSuspend { - resumeDevice(baseName) - } - return err + task, err := TaskCreateNamed(DeviceTargetMsg, poolName) + if task == nil { + if doSuspend { + ResumeDevice(baseName) } + return err + } - if err := task.SetSector(0); err != nil { - if doSuspend { - resumeDevice(baseName) - } - return fmt.Errorf("Can't set sector %s", err) + if err := task.SetSector(0); err != nil { + if doSuspend { + ResumeDevice(baseName) } + return fmt.Errorf("Can't set sector %s", err) + } - if err := task.SetMessage(fmt.Sprintf("create_snap %d %d", *deviceId, baseDeviceId)); err != nil { - if doSuspend { - resumeDevice(baseName) - } - return fmt.Errorf("Can't set message %s", err) + if err := task.SetMessage(fmt.Sprintf("create_snap %d %d", deviceId, baseDeviceId)); err != nil { + if doSuspend { + ResumeDevice(baseName) } + return fmt.Errorf("Can't set message %s", err) + } - dmSawExist = false - if err := task.Run(); err != nil { - if dmSawExist { - // Already exists, try next id - *deviceId++ - continue - } - - if doSuspend { - resumeDevice(baseName) - } + dmSawExist = false // reset before the task is run + if err := task.Run(); err != nil { + if doSuspend { + ResumeDevice(baseName) + } + // Caller wants to know about ErrDeviceIdExists so that it can try with a different device id. 
+ if dmSawExist { + return ErrDeviceIdExists + } else { return fmt.Errorf("Error running DeviceCreate (createSnapDevice) %s", err) } - - break } if doSuspend { - if err := resumeDevice(baseName); err != nil { + if err := ResumeDevice(baseName); err != nil { return err } } diff --git a/daemon/graphdriver/devmapper/devmapper_log.go b/pkg/devicemapper/devmapper_log.go similarity index 83% rename from daemon/graphdriver/devmapper/devmapper_log.go rename to pkg/devicemapper/devmapper_log.go index ec7809cc51..d6550bd626 100644 --- a/daemon/graphdriver/devmapper/devmapper_log.go +++ b/pkg/devicemapper/devmapper_log.go @@ -1,6 +1,6 @@ // +build linux -package devmapper +package devicemapper import "C" @@ -25,6 +25,6 @@ func DevmapperLogCallback(level C.int, file *C.char, line C.int, dm_errno_or_cla } if dmLogger != nil { - dmLogger.log(int(level), C.GoString(file), int(line), int(dm_errno_or_class), msg) + dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dm_errno_or_class), msg) } } diff --git a/daemon/graphdriver/devmapper/devmapper_wrapper.go b/pkg/devicemapper/devmapper_wrapper.go similarity index 91% rename from daemon/graphdriver/devmapper/devmapper_wrapper.go rename to pkg/devicemapper/devmapper_wrapper.go index bd1c6fd5b6..499405a10d 100644 --- a/daemon/graphdriver/devmapper/devmapper_wrapper.go +++ b/pkg/devicemapper/devmapper_wrapper.go @@ -1,6 +1,6 @@ // +build linux -package devmapper +package devicemapper /* #cgo LDFLAGS: -L. 
-ldevmapper @@ -38,9 +38,7 @@ static void log_with_errno_init() */ import "C" -import ( - "unsafe" -) +import "unsafe" type ( CDmTask C.struct_dm_task @@ -84,6 +82,12 @@ const ( LoNameSize = C.LO_NAME_SIZE ) +const ( + DmUdevDisableSubsystemRulesFlag = C.DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG + DmUdevDisableDiskRulesFlag = C.DM_UDEV_DISABLE_DISK_RULES_FLAG + DmUdevDisableOtherRulesFlag = C.DM_UDEV_DISABLE_OTHER_RULES_FLAG +) + var ( DmGetLibraryVersion = dmGetLibraryVersionFct DmGetNextTarget = dmGetNextTargetFct @@ -92,6 +96,7 @@ var ( DmTaskAddTarget = dmTaskAddTargetFct DmTaskCreate = dmTaskCreateFct DmTaskDestroy = dmTaskDestroyFct + DmTaskGetDeps = dmTaskGetDepsFct DmTaskGetInfo = dmTaskGetInfoFct DmTaskGetDriverVersion = dmTaskGetDriverVersionFct DmTaskRun = dmTaskRunFct @@ -168,6 +173,21 @@ func dmTaskAddTargetFct(task *CDmTask, return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams)) } +func dmTaskGetDepsFct(task *CDmTask) *Deps { + Cdeps := C.dm_task_get_deps((*C.struct_dm_task)(task)) + if Cdeps == nil { + return nil + } + deps := &Deps{ + Count: uint32(Cdeps.count), + Filler: uint32(Cdeps.filler), + } + for _, device := range Cdeps.device { + deps.Device = append(deps.Device, (uint64)(device)) + } + return deps +} + func dmTaskGetInfoFct(task *CDmTask, info *Info) int { Cinfo := C.struct_dm_info{} defer func() { diff --git a/daemon/graphdriver/devmapper/ioctl.go b/pkg/devicemapper/ioctl.go similarity index 98% rename from daemon/graphdriver/devmapper/ioctl.go rename to pkg/devicemapper/ioctl.go index 29caab0664..f97e9d1682 100644 --- a/daemon/graphdriver/devmapper/ioctl.go +++ b/pkg/devicemapper/ioctl.go @@ -1,6 +1,6 @@ // +build linux -package devmapper +package devicemapper import ( "syscall" diff --git a/pkg/fileutils/fileutils.go b/pkg/fileutils/fileutils.go index acc27f55b5..4e4a91b91a 100644 --- a/pkg/fileutils/fileutils.go +++ b/pkg/fileutils/fileutils.go @@ -1,7 +1,7 @@ package fileutils 
import ( - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "path/filepath" ) diff --git a/pkg/graphdb/conn_sqlite3.go b/pkg/graphdb/conn_sqlite3.go index b6a8027a81..455790ac28 100644 --- a/pkg/graphdb/conn_sqlite3.go +++ b/pkg/graphdb/conn_sqlite3.go @@ -4,31 +4,15 @@ package graphdb import ( "database/sql" - "os" _ "code.google.com/p/gosqlite/sqlite3" // registers sqlite ) func NewSqliteConn(root string) (*Database, error) { - initDatabase := false - - stat, err := os.Stat(root) - if err != nil { - if os.IsNotExist(err) { - initDatabase = true - } else { - return nil, err - } - } - - if stat != nil && stat.Size() == 0 { - initDatabase = true - } - conn, err := sql.Open("sqlite3", root) if err != nil { return nil, err } - return NewDatabase(conn, initDatabase) + return NewDatabase(conn) } diff --git a/pkg/graphdb/graphdb.go b/pkg/graphdb/graphdb.go index 59873fefb3..62342033ac 100644 --- a/pkg/graphdb/graphdb.go +++ b/pkg/graphdb/graphdb.go @@ -73,45 +73,55 @@ func IsNonUniqueNameError(err error) bool { } // Create a new graph database initialized with a root entity -func NewDatabase(conn *sql.DB, init bool) (*Database, error) { +func NewDatabase(conn *sql.DB) (*Database, error) { if conn == nil { return nil, fmt.Errorf("Database connection cannot be nil") } db := &Database{conn: conn} - if init { - if _, err := conn.Exec(createEntityTable); err != nil { - return nil, err - } - if _, err := conn.Exec(createEdgeTable); err != nil { - return nil, err - } - if _, err := conn.Exec(createEdgeIndices); err != nil { - return nil, err - } - - rollback := func() { - conn.Exec("ROLLBACK") - } - - // Create root entities - if _, err := conn.Exec("BEGIN"); err != nil { - return nil, err - } - if _, err := conn.Exec("INSERT INTO entity (id) VALUES (?);", "0"); err != nil { - rollback() - return nil, err - } - - if _, err := conn.Exec("INSERT INTO edge (entity_id, name) VALUES(?,?);", "0", "/"); err != nil { - rollback() - return nil, err - } - - if _, err 
:= conn.Exec("COMMIT"); err != nil { - return nil, err - } + if _, err := conn.Exec(createEntityTable); err != nil { + return nil, err } + if _, err := conn.Exec(createEdgeTable); err != nil { + return nil, err + } + if _, err := conn.Exec(createEdgeIndices); err != nil { + return nil, err + } + + rollback := func() { + conn.Exec("ROLLBACK") + } + + // Create root entities + if _, err := conn.Exec("BEGIN"); err != nil { + return nil, err + } + + if _, err := conn.Exec("DELETE FROM entity where id = ?", "0"); err != nil { + rollback() + return nil, err + } + + if _, err := conn.Exec("INSERT INTO entity (id) VALUES (?);", "0"); err != nil { + rollback() + return nil, err + } + + if _, err := conn.Exec("DELETE FROM edge where entity_id=? and name=?", "0", "/"); err != nil { + rollback() + return nil, err + } + + if _, err := conn.Exec("INSERT INTO edge (entity_id, name) VALUES(?,?);", "0", "/"); err != nil { + rollback() + return nil, err + } + + if _, err := conn.Exec("COMMIT"); err != nil { + return nil, err + } + return db, nil } @@ -131,8 +141,8 @@ func (db *Database) Set(fullPath, id string) (*Entity, error) { if _, err := db.conn.Exec("BEGIN EXCLUSIVE"); err != nil { return nil, err } - var entityId string - if err := db.conn.QueryRow("SELECT id FROM entity WHERE id = ?;", id).Scan(&entityId); err != nil { + var entityID string + if err := db.conn.QueryRow("SELECT id FROM entity WHERE id = ?;", id).Scan(&entityID); err != nil { if err == sql.ErrNoRows { if _, err := db.conn.Exec("INSERT INTO entity (id) VALUES(?);", id); err != nil { rollback() @@ -320,14 +330,14 @@ func (db *Database) RefPaths(id string) Edges { for rows.Next() { var name string - var parentId string - if err := rows.Scan(&name, &parentId); err != nil { + var parentID string + if err := rows.Scan(&name, &parentID); err != nil { return refs } refs = append(refs, &Edge{ EntityID: id, Name: name, - ParentID: parentId, + ParentID: parentID, }) } return refs @@ -443,11 +453,11 @@ func (db *Database) 
children(e *Entity, name string, depth int, entities []WalkM defer rows.Close() for rows.Next() { - var entityId, entityName string - if err := rows.Scan(&entityId, &entityName); err != nil { + var entityID, entityName string + if err := rows.Scan(&entityID, &entityName); err != nil { return nil, err } - child := &Entity{entityId} + child := &Entity{entityID} edge := &Edge{ ParentID: e.id, Name: entityName, @@ -490,11 +500,11 @@ func (db *Database) parents(e *Entity) (parents []string, err error) { defer rows.Close() for rows.Next() { - var parentId string - if err := rows.Scan(&parentId); err != nil { + var parentID string + if err := rows.Scan(&parentID); err != nil { return nil, err } - parents = append(parents, parentId) + parents = append(parents, parentID) } return parents, nil diff --git a/pkg/graphdb/graphdb_test.go b/pkg/graphdb/graphdb_test.go index 7568e66de4..f22828560c 100644 --- a/pkg/graphdb/graphdb_test.go +++ b/pkg/graphdb/graphdb_test.go @@ -14,7 +14,7 @@ import ( func newTestDb(t *testing.T) (*Database, string) { p := path.Join(os.TempDir(), "sqlite.db") conn, err := sql.Open("sqlite3", p) - db, err := NewDatabase(conn, true) + db, err := NewDatabase(conn) if err != nil { t.Fatal(err) } diff --git a/pkg/httputils/resumablerequestreader.go b/pkg/httputils/resumablerequestreader.go index 3cd1f49179..10edd43a98 100644 --- a/pkg/httputils/resumablerequestreader.go +++ b/pkg/httputils/resumablerequestreader.go @@ -6,7 +6,7 @@ import ( "net/http" "time" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) type resumableRequestReader struct { diff --git a/pkg/iptables/MAINTAINERS b/pkg/iptables/MAINTAINERS index 1e998f8ac1..134b02a071 100644 --- a/pkg/iptables/MAINTAINERS +++ b/pkg/iptables/MAINTAINERS @@ -1 +1,2 @@ Michael Crosby (@crosbymichael) +Jessie Frazelle (@jfrazelle) diff --git a/pkg/iptables/iptables.go b/pkg/iptables/iptables.go index 88d8b5f352..b783347fa3 100644 --- a/pkg/iptables/iptables.go +++ 
b/pkg/iptables/iptables.go @@ -4,11 +4,12 @@ import ( "errors" "fmt" "net" - "os" "os/exec" "regexp" "strconv" "strings" + + log "github.com/Sirupsen/logrus" ) type Action string @@ -19,9 +20,9 @@ const ( ) var ( - ErrIptablesNotFound = errors.New("Iptables not found") nat = []string{"-t", "nat"} supportsXlock = false + ErrIptablesNotFound = errors.New("Iptables not found") ) type Chain struct { @@ -29,6 +30,15 @@ type Chain struct { Bridge string } +type ChainError struct { + Chain string + Output []byte +} + +func (e *ChainError) Error() string { + return fmt.Sprintf("Error iptables %s: %s", e.Chain, string(e.Output)) +} + func init() { supportsXlock = exec.Command("iptables", "--wait", "-L", "-n").Run() == nil } @@ -77,7 +87,7 @@ func (c *Chain) Forward(action Action, ip net.IP, port int, proto, dest_addr str "--to-destination", net.JoinHostPort(dest_addr, strconv.Itoa(dest_port))); err != nil { return err } else if len(output) != 0 { - return fmt.Errorf("Error iptables forward: %s", output) + return &ChainError{Chain: "FORWARD", Output: output} } fAction := action @@ -93,7 +103,7 @@ func (c *Chain) Forward(action Action, ip net.IP, port int, proto, dest_addr str "-j", "ACCEPT"); err != nil { return err } else if len(output) != 0 { - return fmt.Errorf("Error iptables forward: %s", output) + return &ChainError{Chain: "FORWARD", Output: output} } return nil @@ -107,7 +117,7 @@ func (c *Chain) Prerouting(action Action, args ...string) error { if output, err := Raw(append(a, "-j", c.Name)...); err != nil { return err } else if len(output) != 0 { - return fmt.Errorf("Error iptables prerouting: %s", output) + return &ChainError{Chain: "PREROUTING", Output: output} } return nil } @@ -120,7 +130,7 @@ func (c *Chain) Output(action Action, args ...string) error { if output, err := Raw(append(a, "-j", c.Name)...); err != nil { return err } else if len(output) != 0 { - return fmt.Errorf("Error iptables output: %s", output) + return &ChainError{Chain: "OUTPUT", Output: 
output} } return nil } @@ -175,9 +185,7 @@ func Raw(args ...string) ([]byte, error) { args = append([]string{"--wait"}, args...) } - if os.Getenv("DEBUG") != "" { - fmt.Fprintf(os.Stderr, fmt.Sprintf("[debug] %s, %v\n", path, args)) - } + log.Debugf("%s, %v", path, args) output, err := exec.Command(path, args...).CombinedOutput() if err != nil { diff --git a/pkg/jsonlog/jsonlog.go b/pkg/jsonlog/jsonlog.go index b0c61a803f..3a96d86f82 100644 --- a/pkg/jsonlog/jsonlog.go +++ b/pkg/jsonlog/jsonlog.go @@ -4,8 +4,9 @@ import ( "encoding/json" "fmt" "io" - "log" "time" + + log "github.com/Sirupsen/logrus" ) type JSONLog struct { diff --git a/pkg/log/log.go b/pkg/log/log.go deleted file mode 100644 index 53be6cf182..0000000000 --- a/pkg/log/log.go +++ /dev/null @@ -1,83 +0,0 @@ -package log - -import ( - "fmt" - "io" - "os" - "runtime" - "strings" -) - -type priority int - -const ( - errorFormat = "[%s] %s:%d %s\n" - logFormat = "[%s] %s\n" - - fatal priority = iota - error - info - debug -) - -// A common interface to access the Fatal method of -// both testing.B and testing.T. -type Fataler interface { - Fatal(args ...interface{}) -} - -func (p priority) String() string { - switch p { - case fatal: - return "fatal" - case error: - return "error" - case info: - return "info" - case debug: - return "debug" - } - - return "" -} - -// Debug function, if the debug flag is set, then display. Do nothing otherwise -// If Docker is in damon mode, also send the debug info on the socket -func Debugf(format string, a ...interface{}) { - if os.Getenv("DEBUG") != "" { - logf(os.Stderr, debug, format, a...) - } -} - -func Infof(format string, a ...interface{}) { - logf(os.Stdout, info, format, a...) -} - -func Errorf(format string, a ...interface{}) { - logf(os.Stderr, error, format, a...) -} - -func Fatalf(format string, a ...interface{}) { - logf(os.Stderr, fatal, format, a...) 
- os.Exit(1) -} - -func logf(stream io.Writer, level priority, format string, a ...interface{}) { - var prefix string - - if level <= error || level == debug { - // Retrieve the stack infos - _, file, line, ok := runtime.Caller(2) - if !ok { - file = "" - line = -1 - } else { - file = file[strings.LastIndex(file, "/")+1:] - } - prefix = fmt.Sprintf(errorFormat, level.String(), file, line, format) - } else { - prefix = fmt.Sprintf(logFormat, level.String(), format) - } - - fmt.Fprintf(stream, prefix, a...) -} diff --git a/pkg/log/log_test.go b/pkg/log/log_test.go deleted file mode 100644 index 83ba5fd27c..0000000000 --- a/pkg/log/log_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package log - -import ( - "bytes" - "regexp" - - "testing" -) - -func TestLogFatalf(t *testing.T) { - var output *bytes.Buffer - - tests := []struct { - Level priority - Format string - Values []interface{} - ExpectedPattern string - }{ - {fatal, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[fatal\\] testing.go:\\d+ 1 \\+ 1 = 2"}, - {error, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[error\\] testing.go:\\d+ 1 \\+ 1 = 2"}, - {info, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[info\\] 1 \\+ 1 = 2"}, - {debug, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[debug\\] testing.go:\\d+ 1 \\+ 1 = 2"}, - } - - for i, test := range tests { - output = &bytes.Buffer{} - logf(output, test.Level, test.Format, test.Values...) - - expected := regexp.MustCompile(test.ExpectedPattern) - if !expected.MatchString(output.String()) { - t.Errorf("[%d] Log output does not match expected pattern:\n\tExpected: %s\n\tOutput: %s", - i, - expected.String(), - output.String()) - } - } -} diff --git a/pkg/mflag/flag.go b/pkg/mflag/flag.go index b40f911769..a30c41b045 100644 --- a/pkg/mflag/flag.go +++ b/pkg/mflag/flag.go @@ -23,12 +23,12 @@ flag.Var(&flagVal, []string{"name"}, "help message for flagname") For such flags, the default value is just the initial value of the variable. 
- You can also add "deprecated" flags, they are still usable, bur are not shown + You can also add "deprecated" flags, they are still usable, but are not shown in the usage and will display a warning when you try to use them: - var ip = flag.Int([]string{"f", "#flagname", "-flagname"}, 1234, "help message for flagname") - this will display: `Warning: '-flagname' is deprecated, it will be replaced by '--flagname' soon. See usage.` and + var ip = flag.Int([]string{"#f", "#flagname", "-flagname2"}, 1234, "help message for flagname") + this will display: `Warning: '--flagname' is deprecated, it will be replaced by '--flagname2' soon. See usage.` and var ip = flag.Int([]string{"f", "#flagname"}, 1234, "help message for flagname") - will display: `Warning: '-t' is deprecated, it will be removed soon. See usage.` + will display: `Warning: '-f' is deprecated, it will be removed soon. See usage.` You can also group one letter flags, bif you declare var v = flag.Bool([]string{"v", "-verbose"}, false, "help message for verbose") @@ -394,12 +394,22 @@ func (f *FlagSet) Lookup(name string) *Flag { return f.formal[name] } +// Indicates whether the specified flag was specified at all on the cmd line +func (f *FlagSet) IsSet(name string) bool { + return f.actual[name] != nil +} + // Lookup returns the Flag structure of the named command-line flag, // returning nil if none exists. func Lookup(name string) *Flag { return CommandLine.formal[name] } +// Indicates whether the specified flag was specified at all on the cmd line +func IsSet(name string) bool { + return CommandLine.IsSet(name) +} + // Set sets the value of the named flag. 
func (f *FlagSet) Set(name, value string) error { flag, ok := f.formal[name] diff --git a/pkg/mflag/flag_test.go b/pkg/mflag/flag_test.go index 340a1cb175..622e8a9bfc 100644 --- a/pkg/mflag/flag_test.go +++ b/pkg/mflag/flag_test.go @@ -168,11 +168,14 @@ func testParse(f *FlagSet, t *testing.T) { } boolFlag := f.Bool([]string{"bool"}, false, "bool value") bool2Flag := f.Bool([]string{"bool2"}, false, "bool2 value") + f.Bool([]string{"bool3"}, false, "bool3 value") + bool4Flag := f.Bool([]string{"bool4"}, false, "bool4 value") intFlag := f.Int([]string{"-int"}, 0, "int value") int64Flag := f.Int64([]string{"-int64"}, 0, "int64 value") uintFlag := f.Uint([]string{"uint"}, 0, "uint value") uint64Flag := f.Uint64([]string{"-uint64"}, 0, "uint64 value") stringFlag := f.String([]string{"string"}, "0", "string value") + f.String([]string{"string2"}, "0", "string2 value") singleQuoteFlag := f.String([]string{"squote"}, "", "single quoted value") doubleQuoteFlag := f.String([]string{"dquote"}, "", "double quoted value") mixedQuoteFlag := f.String([]string{"mquote"}, "", "mixed quoted value") @@ -185,6 +188,7 @@ func testParse(f *FlagSet, t *testing.T) { args := []string{ "-bool", "-bool2=true", + "-bool4=false", "--int", "22", "--int64", "0x23", "-uint", "24", @@ -212,6 +216,18 @@ func testParse(f *FlagSet, t *testing.T) { if *bool2Flag != true { t.Error("bool2 flag should be true, is ", *bool2Flag) } + if !f.IsSet("bool2") { + t.Error("bool2 should be marked as set") + } + if f.IsSet("bool3") { + t.Error("bool3 should not be marked as set") + } + if !f.IsSet("bool4") { + t.Error("bool4 should be marked as set") + } + if *bool4Flag != false { + t.Error("bool4 flag should be false, is ", *bool4Flag) + } if *intFlag != 22 { t.Error("int flag should be 22, is ", *intFlag) } @@ -227,6 +243,12 @@ func testParse(f *FlagSet, t *testing.T) { if *stringFlag != "hello" { t.Error("string flag should be `hello`, is ", *stringFlag) } + if !f.IsSet("string") { + t.Error("string flag 
should be marked as set") + } + if f.IsSet("string2") { + t.Error("string2 flag should not be marked as set") + } if *singleQuoteFlag != "single" { t.Error("single quote string flag should be `single`, is ", *singleQuoteFlag) } diff --git a/pkg/mount/flags.go b/pkg/mount/flags.go index 742698e8d3..17dbd7a64c 100644 --- a/pkg/mount/flags.go +++ b/pkg/mount/flags.go @@ -37,7 +37,14 @@ func parseOptions(options string) (int, string) { "nodiratime": {false, NODIRATIME}, "bind": {false, BIND}, "rbind": {false, RBIND}, + "unbindable": {false, UNBINDABLE}, + "runbindable": {false, RUNBINDABLE}, "private": {false, PRIVATE}, + "rprivate": {false, RPRIVATE}, + "shared": {false, SHARED}, + "rshared": {false, RSHARED}, + "slave": {false, SLAVE}, + "rslave": {false, RSLAVE}, "relatime": {false, RELATIME}, "norelatime": {true, RELATIME}, "strictatime": {false, STRICTATIME}, diff --git a/pkg/mount/flags_freebsd.go b/pkg/mount/flags_freebsd.go index 4ddf4d7090..a59b58960b 100644 --- a/pkg/mount/flags_freebsd.go +++ b/pkg/mount/flags_freebsd.go @@ -19,7 +19,14 @@ const ( MANDLOCK = 0 NODEV = 0 NODIRATIME = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 RBIND = 0 RELATIVE = 0 RELATIME = 0 diff --git a/pkg/mount/flags_linux.go b/pkg/mount/flags_linux.go index 0bb47d8c90..9986621c8f 100644 --- a/pkg/mount/flags_linux.go +++ b/pkg/mount/flags_linux.go @@ -17,7 +17,14 @@ const ( NODIRATIME = syscall.MS_NODIRATIME BIND = syscall.MS_BIND RBIND = syscall.MS_BIND | syscall.MS_REC + UNBINDABLE = syscall.MS_UNBINDABLE + RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC PRIVATE = syscall.MS_PRIVATE + RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC + SLAVE = syscall.MS_SLAVE + RSLAVE = syscall.MS_SLAVE | syscall.MS_REC + SHARED = syscall.MS_SHARED + RSHARED = syscall.MS_SHARED | syscall.MS_REC RELATIME = syscall.MS_RELATIME STRICTATIME = syscall.MS_STRICTATIME ) diff --git a/pkg/mount/flags_unsupported.go 
b/pkg/mount/flags_unsupported.go index 5a14108880..c4f82176b8 100644 --- a/pkg/mount/flags_unsupported.go +++ b/pkg/mount/flags_unsupported.go @@ -11,7 +11,14 @@ const ( NODIRATIME = 0 NOEXEC = 0 NOSUID = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 RBIND = 0 RELATIME = 0 RELATIVE = 0 diff --git a/pkg/mount/mountinfo.go b/pkg/mount/mountinfo.go index 78b83ced4a..ec8e8bca2a 100644 --- a/pkg/mount/mountinfo.go +++ b/pkg/mount/mountinfo.go @@ -1,7 +1,7 @@ package mount type MountInfo struct { - Id, Parent, Major, Minor int - Root, Mountpoint, Opts string - Fstype, Source, VfsOpts string + Id, Parent, Major, Minor int + Root, Mountpoint, Opts, Optional string + Fstype, Source, VfsOpts string } diff --git a/pkg/mount/mountinfo_freebsd.go b/pkg/mount/mountinfo_freebsd.go index a16bdb84f8..2fe91862d8 100644 --- a/pkg/mount/mountinfo_freebsd.go +++ b/pkg/mount/mountinfo_freebsd.go @@ -32,6 +32,8 @@ func parseMountTable() ([]*MountInfo, error) { for _, entry := range entries { var mountinfo MountInfo mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) + mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) + mountinfo.Fstype = C.GoString(&entry.f_fstypename[0]) out = append(out, &mountinfo) } return out, nil diff --git a/pkg/mount/mountinfo_linux.go b/pkg/mount/mountinfo_linux.go index 84bf5516b5..0eb018e231 100644 --- a/pkg/mount/mountinfo_linux.go +++ b/pkg/mount/mountinfo_linux.go @@ -1,3 +1,5 @@ +// +build linux + package mount import ( @@ -23,7 +25,7 @@ const ( (9) filesystem type: name of filesystem of the form "type[.subtype]" (10) mount source: filesystem specific information or "none" (11) super options: per super block options*/ - mountinfoFormat = "%d %d %d:%d %s %s %s " + mountinfoFormat = "%d %d %d:%d %s %s %s %s" ) // Parse /proc/self/mountinfo because comparing Dev and ino does not work from bind mounts @@ -49,13 +51,14 @@ func parseInfoFile(r io.Reader) ([]*MountInfo, error) { } var 
( - p = &MountInfo{} - text = s.Text() + p = &MountInfo{} + text = s.Text() + optionalFields string ) if _, err := fmt.Sscanf(text, mountinfoFormat, &p.Id, &p.Parent, &p.Major, &p.Minor, - &p.Root, &p.Mountpoint, &p.Opts); err != nil { + &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil { return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) } // Safe as mountinfo encodes mountpoints with spaces as \040. @@ -65,6 +68,10 @@ func parseInfoFile(r io.Reader) ([]*MountInfo, error) { return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) } + if optionalFields != "-" { + p.Optional = optionalFields + } + p.Fstype = postSeparatorFields[0] p.Source = postSeparatorFields[1] p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") @@ -72,3 +79,14 @@ func parseInfoFile(r io.Reader) ([]*MountInfo, error) { } return out, nil } + +// PidMountInfo collects the mounts for a specific Pid +func PidMountInfo(pid int) ([]*MountInfo, error) { + f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} diff --git a/pkg/mount/mountinfo_linux_test.go b/pkg/mount/mountinfo_linux_test.go index 3c214476df..e92b7e2c74 100644 --- a/pkg/mount/mountinfo_linux_test.go +++ b/pkg/mount/mountinfo_linux_test.go @@ -446,3 +446,32 @@ func TestParseGentooMountinfo(t *testing.T) { t.Fatal(err) } } + +func TestParseFedoraMountinfoFields(t *testing.T) { + r := bytes.NewBuffer([]byte(fedoraMountinfo)) + infos, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } + expectedLength := 58 + if len(infos) != expectedLength { + t.Fatalf("Expected %d entries, got %d", expectedLength, len(infos)) + } + mi := MountInfo{ + Id: 15, + Parent: 35, + Major: 0, + Minor: 3, + Root: "/", + Mountpoint: "/proc", + Opts: "rw,nosuid,nodev,noexec,relatime", + Optional: "shared:5", + Fstype: "proc", + Source: "proc", + VfsOpts: "rw", + } + + if *infos[0] != mi { + t.Fatalf("expected %#v, got 
%#v", mi, infos[0]) + } +} diff --git a/pkg/mount/sharedsubtree_linux.go b/pkg/mount/sharedsubtree_linux.go new file mode 100644 index 0000000000..cd9b86cefa --- /dev/null +++ b/pkg/mount/sharedsubtree_linux.go @@ -0,0 +1,54 @@ +// +build linux + +package mount + +func MakeShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "shared") +} + +func MakeRShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "rshared") +} + +func MakePrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "private") +} + +func MakeRPrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "rprivate") +} + +func MakeSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "slave") +} + +func MakeRSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "rslave") +} + +func MakeUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "unbindable") +} + +func MakeRUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "runbindable") +} + +func ensureMountedAs(mountPoint, options string) error { + mounted, err := Mounted(mountPoint) + if err != nil { + return err + } + + if !mounted { + if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { + return err + } + } + mounted, err = Mounted(mountPoint) + if err != nil { + return err + } + + return ForceMount("", mountPoint, "none", options) +} diff --git a/pkg/mount/sharedsubtree_linux_test.go b/pkg/mount/sharedsubtree_linux_test.go new file mode 100644 index 0000000000..0986bd9c75 --- /dev/null +++ b/pkg/mount/sharedsubtree_linux_test.go @@ -0,0 +1,331 @@ +// +build linux + +package mount + +import ( + "os" + "path" + "syscall" + "testing" +) + +// nothing is propogated in or out +func TestSubtreePrivate(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, 
"source") + targetDir = path.Join(tmp, "target") + outside1Dir = path.Join(tmp, "outside1") + outside2Dir = path.Join(tmp, "outside2") + + outside1Path = path.Join(outside1Dir, "file.txt") + outside2Path = path.Join(outside2Dir, "file.txt") + outside1CheckPath = path.Join(targetDir, "a", "file.txt") + outside2CheckPath = path.Join(sourceDir, "b", "file.txt") + ) + if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside1Dir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside2Dir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outside1Path); err != nil { + t.Fatal(err) + } + if err := createFile(outside2Path); err != nil { + t.Fatal(err) + } + + // mount the shared directory to a target + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // next, make the target private + if err := MakePrivate(targetDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an outside path to a mounted path inside the _source_ + if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(sourceDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _target_ + if _, err := os.Stat(outside1CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not be visible, but is", outside1CheckPath) + } + + // next mount outside2Dir into the _target_ + if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { + 
t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(targetDir, "b")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _source_ + if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not be visible, but is", outside2CheckPath) + } +} + +// Testing that when a target is a shared mount, +// then child mounts propogate to the source +func TestSubtreeShared(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + outsideDir = path.Join(tmp, "outside") + + outsidePath = path.Join(outsideDir, "file.txt") + sourceCheckPath = path.Join(sourceDir, "a", "file.txt") + ) + + if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outsideDir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outsidePath); err != nil { + t.Fatal(err) + } + + // mount the source as shared + if err := MakeShared(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // mount the shared directory to a target + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an outside path to a mounted path inside the target + if err := Mount(outsideDir, path.Join(targetDir, "a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(targetDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // NOW, check that the file from the outside directory is avaible in the source directory + if _, err 
:= os.Stat(sourceCheckPath); err != nil { + t.Fatal(err) + } +} + +// testing that mounts to a shared source show up in the slave target, +// and that mounts into a slave target do _not_ show up in the shared source +func TestSubtreeSharedSlave(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + outside1Dir = path.Join(tmp, "outside1") + outside2Dir = path.Join(tmp, "outside2") + + outside1Path = path.Join(outside1Dir, "file.txt") + outside2Path = path.Join(outside2Dir, "file.txt") + outside1CheckPath = path.Join(targetDir, "a", "file.txt") + outside2CheckPath = path.Join(sourceDir, "b", "file.txt") + ) + if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside1Dir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside2Dir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outside1Path); err != nil { + t.Fatal(err) + } + if err := createFile(outside2Path); err != nil { + t.Fatal(err) + } + + // mount the source as shared + if err := MakeShared(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // mount the shared directory to a target + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // next, make the target slave + if err := MakeSlave(targetDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an outside path to a mounted path 
inside the _source_ + if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(sourceDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_ show in the _target_ + if _, err := os.Stat(outside1CheckPath); err != nil { + t.Fatal(err) + } + + // next mount outside2Dir into the _target_ + if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(targetDir, "b")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _source_ + if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not be visible, but is", outside2CheckPath) + } +} + +func TestSubtreeUnbindable(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + ) + if err := os.MkdirAll(sourceDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(targetDir, 0777); err != nil { + t.Fatal(err) + } + + // next, make the source unbindable + if err := MakeUnbindable(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // then attempt to mount it to target. 
It should fail + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil && err != syscall.EINVAL { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not have been bindable", sourceDir) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() +} + +func createFile(path string) error { + f, err := os.Create(path) + if err != nil { + return err + } + f.WriteString("hello world!") + return f.Close() +} diff --git a/pkg/namesgenerator/names-generator.go b/pkg/namesgenerator/names-generator.go index ebb5850bda..b641e915fc 100644 --- a/pkg/namesgenerator/names-generator.go +++ b/pkg/namesgenerator/names-generator.go @@ -7,7 +7,7 @@ import ( ) var ( - left = [...]string{"happy", "jolly", "dreamy", "sad", "angry", "pensive", "focused", "sleepy", "grave", "distracted", "determined", "stoic", "stupefied", "sharp", "agitated", "cocky", "tender", "goofy", "furious", "desperate", "hopeful", "compassionate", "silly", "lonely", "condescending", "naughty", "kickass", "drunk", "boring", "nostalgic", "ecstatic", "insane", "cranky", "mad", "jovial", "sick", "hungry", "thirsty", "elegant", "backstabbing", "clever", "trusting", "loving", "suspicious", "berserk", "high", "romantic", "prickly", "evil"} + left = [...]string{"happy", "jolly", "dreamy", "sad", "angry", "pensive", "focused", "sleepy", "grave", "distracted", "determined", "stoic", "stupefied", "sharp", "agitated", "cocky", "tender", "goofy", "furious", "desperate", "hopeful", "compassionate", "silly", "lonely", "condescending", "naughty", "kickass", "drunk", "boring", "nostalgic", "ecstatic", "insane", "cranky", "mad", "jovial", "sick", "hungry", "thirsty", "elegant", "backstabbing", "clever", "trusting", "loving", "suspicious", "berserk", "high", "romantic", "prickly", "evil", "admiring", "adoring", "reverent", "serene", "fervent", "modest", "gloomy", "elated"} // Docker 0.7.x generates names from notable scientists and hackers. 
// // Ada Lovelace invented the first algorithm. http://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull) @@ -22,6 +22,7 @@ var ( // Charles Babbage invented the concept of a programmable computer. http://en.wikipedia.org/wiki/Charles_Babbage. // Charles Darwin established the principles of natural evolution. http://en.wikipedia.org/wiki/Charles_Darwin. // Dennis Ritchie and Ken Thompson created UNIX and the C programming language. http://en.wikipedia.org/wiki/Dennis_Ritchie http://en.wikipedia.org/wiki/Ken_Thompson + // Dorothy Hodgkin was a British biochemist, credited with the development of protein crystallography. She was awarded the Nobel Prize in Chemistry in 1964. http://en.wikipedia.org/wiki/Dorothy_Hodgkin // Douglas Engelbart gave the mother of all demos: http://en.wikipedia.org/wiki/Douglas_Engelbart // Elizabeth Blackwell - American doctor and first American woman to receive a medical degree - http://en.wikipedia.org/wiki/Elizabeth_Blackwell // Emmett Brown invented time travel. http://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff) @@ -31,6 +32,7 @@ var ( // Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was fundamental in identifying HIV as the cause of AIDS. http://en.wikipedia.org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi // Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. http://en.wikipedia.org/wiki/Galileo_Galilei // Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - http://en.wikipedia.org/wiki/Gertrude_Elion + // Gerty Theresa Cori - American biochemist who became the third woman—and first American woman—to win a Nobel Prize in science, and the first woman to be awarded the Nobel Prize in Physiology or Medicine. Cori was born in Prague. 
http://en.wikipedia.org/wiki/Gerty_Cori // Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing the term "debugging" for fixing computer glitches. http://en.wikipedia.org/wiki/Grace_Hopper // Henry Poincare made fundamental contributions in several fields of mathematics. http://en.wikipedia.org/wiki/Henri_Poincar%C3%A9 // Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - http://en.wikipedia.org/wiki/Hypatia @@ -56,7 +58,7 @@ var ( // Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - http://en.wikipedia.org/wiki/Mary_Leakey // Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding father of astronomy. http://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB // Niels Bohr is the father of quantum theory. http://en.wikipedia.org/wiki/Niels_Bohr. - // Nikola Tesla invented the AC electric system and every gaget ever used by a James Bond villain. http://en.wikipedia.org/wiki/Nikola_Tesla + // Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. http://en.wikipedia.org/wiki/Nikola_Tesla // Pierre de Fermat pioneered several aspects of modern mathematics. http://en.wikipedia.org/wiki/Pierre_de_Fermat // Rachel Carson - American marine biologist and conservationist, her book Silent Spring and other writings are credited with advancing the global environmental movement. http://en.wikipedia.org/wiki/Rachel_Carson // Radia Perlman is a software designer and network engineer and most famous for her invention of the spanning-tree protocol (STP). http://en.wikipedia.org/wiki/Radia_Perlman @@ -64,6 +66,7 @@ var ( // Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. 
He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. http://en.wikiquote.org/wiki/Richard_Stallman // Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. http://en.wikipedia.org/wiki/Rob_Pike // Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - http://en.wikipedia.org/wiki/Rosalind_Franklin + // Rosalyn Sussman Yalow - Rosalyn Sussman Yalow was an American medical physicist, and a co-winner of the 1977 Nobel Prize in Physiology or Medicine for development of the radioimmunoassay technique. http://en.wikipedia.org/wiki/Rosalyn_Sussman_Yalow // Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - http://en.wikipedia.org/wiki/Sofia_Kovalevskaya // Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. http://en.wikipedia.org/wiki/Sophie_Wilson // Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. 
http://en.wikipedia.org/wiki/Stephen_Hawking @@ -73,7 +76,8 @@ var ( // http://en.wikipedia.org/wiki/John_Bardeen // http://en.wikipedia.org/wiki/Walter_Houser_Brattain // http://en.wikipedia.org/wiki/William_Shockley - right = [...]string{"albattani", "almeida", "archimedes", "ardinghelli", "babbage", "bardeen", "bartik", "bell", "blackwell", "bohr", "brattain", "brown", "carson", "colden", "curie", "darwin", "davinci", "einstein", "elion", "engelbart", "euclid", "fermat", "fermi", "feynman", "franklin", "galileo", "goldstine", "goodall", "hawking", "heisenberg", "hoover", "hopper", "hypatia", "jones", "kirch", "kowalevski", "lalande", "leakey", "lovelace", "lumiere", "mayer", "mccarthy", "mcclintock", "mclean", "meitner", "mestorf", "morse", "newton", "nobel", "pare", "pasteur", "perlman", "pike", "poincare", "ptolemy", "ritchie", "rosalind", "sammet", "shockley", "sinoussi", "stallman", "tesla", "thompson", "torvalds", "turing", "wilson", "wozniak", "wright", "yonath"} + // Yeong-Sil Jang was a Korean scientist and astronomer during the Joseon Dynasty; he invented the first metal printing press and water gauge. 
http://en.wikipedia.org/wiki/Jang_Yeong-sil + right = [...]string{"albattani", "almeida", "archimedes", "ardinghelli", "babbage", "bardeen", "bartik", "bell", "blackwell", "bohr", "brattain", "brown", "carson", "colden", "cori", "curie", "darwin", "davinci", "einstein", "elion", "engelbart", "euclid", "fermat", "fermi", "feynman", "franklin", "galileo", "goldstine", "goodall", "hawking", "heisenberg", "hodgkin", "hoover", "hopper", "hypatia", "jang", "jones", "kirch", "kowalevski", "lalande", "leakey", "lovelace", "lumiere", "mayer", "mccarthy", "mcclintock", "mclean", "meitner", "mestorf", "morse", "newton", "nobel", "pare", "pasteur", "perlman", "pike", "poincare", "ptolemy", "ritchie", "rosalind", "sammet", "shockley", "sinoussi", "stallman", "tesla", "thompson", "torvalds", "turing", "wilson", "wozniak", "wright", "yalow", "yonath"} ) func GetRandomName(retry int) string { diff --git a/pkg/networkfs/etchosts/etchosts.go b/pkg/networkfs/etchosts/etchosts.go index 6cf29b046f..d7edef27f6 100644 --- a/pkg/networkfs/etchosts/etchosts.go +++ b/pkg/networkfs/etchosts/etchosts.go @@ -3,40 +3,54 @@ package etchosts import ( "bytes" "fmt" + "io" "io/ioutil" "regexp" ) -var defaultContent = map[string]string{ - "localhost": "127.0.0.1", - "localhost ip6-localhost ip6-loopback": "::1", - "ip6-localnet": "fe00::0", - "ip6-mcastprefix": "ff00::0", - "ip6-allnodes": "ff02::1", - "ip6-allrouters": "ff02::2", +type Record struct { + Hosts string + IP string } -func Build(path, IP, hostname, domainname string, extraContent *map[string]string) error { +func (r Record) WriteTo(w io.Writer) (int64, error) { + n, err := fmt.Fprintf(w, "%s\t%s\n", r.IP, r.Hosts) + return int64(n), err +} + +var defaultContent = []Record{ + {Hosts: "localhost", IP: "127.0.0.1"}, + {Hosts: "localhost ip6-localhost ip6-loopback", IP: "::1"}, + {Hosts: "ip6-localnet", IP: "fe00::0"}, + {Hosts: "ip6-mcastprefix", IP: "ff00::0"}, + {Hosts: "ip6-allnodes", IP: "ff02::1"}, + {Hosts: "ip6-allrouters", IP: 
"ff02::2"}, +} + +func Build(path, IP, hostname, domainname string, extraContent []Record) error { content := bytes.NewBuffer(nil) if IP != "" { + var mainRec Record + mainRec.IP = IP if domainname != "" { - content.WriteString(fmt.Sprintf("%s\t%s.%s %s\n", IP, hostname, domainname, hostname)) + mainRec.Hosts = fmt.Sprintf("%s.%s %s", hostname, domainname, hostname) } else { - content.WriteString(fmt.Sprintf("%s\t%s\n", IP, hostname)) + mainRec.Hosts = hostname } - } - - for hosts, ip := range defaultContent { - if _, err := content.WriteString(fmt.Sprintf("%s\t%s\n", ip, hosts)); err != nil { + if _, err := mainRec.WriteTo(content); err != nil { return err } } - if extraContent != nil { - for hosts, ip := range *extraContent { - if _, err := content.WriteString(fmt.Sprintf("%s\t%s\n", ip, hosts)); err != nil { - return err - } + for _, r := range defaultContent { + if _, err := r.WriteTo(content); err != nil { + return err + } + } + + for _, r := range extraContent { + if _, err := r.WriteTo(content); err != nil { + return err } } diff --git a/pkg/networkfs/etchosts/etchosts_test.go b/pkg/networkfs/etchosts/etchosts_test.go index 05a4f447f7..c033904c31 100644 --- a/pkg/networkfs/etchosts/etchosts_test.go +++ b/pkg/networkfs/etchosts/etchosts_test.go @@ -7,6 +7,32 @@ import ( "testing" ) +func TestBuildDefault(t *testing.T) { + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(file.Name()) + + // check that /etc/hosts has consistent ordering + for i := 0; i <= 5; i++ { + err = Build(file.Name(), "", "", "", nil) + if err != nil { + t.Fatal(err) + } + + content, err := ioutil.ReadFile(file.Name()) + if err != nil { + t.Fatal(err) + } + expected := "127.0.0.1\tlocalhost\n::1\tlocalhost ip6-localhost ip6-loopback\nfe00::0\tip6-localnet\nff00::0\tip6-mcastprefix\nff02::1\tip6-allnodes\nff02::2\tip6-allrouters\n" + + if expected != string(content) { + t.Fatalf("Expected to find '%s' got '%s'", expected, content) + } + } +} + 
func TestBuildHostnameDomainname(t *testing.T) { file, err := ioutil.TempFile("", "") if err != nil { diff --git a/pkg/parsers/filters/parse.go b/pkg/parsers/filters/parse.go index 27c7132e8e..8b045a3098 100644 --- a/pkg/parsers/filters/parse.go +++ b/pkg/parsers/filters/parse.go @@ -3,6 +3,7 @@ package filters import ( "encoding/json" "errors" + "regexp" "strings" ) @@ -28,7 +29,9 @@ func ParseFlag(arg string, prev Args) (Args, error) { } f := strings.SplitN(arg, "=", 2) - filters[f[0]] = append(filters[f[0]], f[1]) + name := strings.ToLower(strings.TrimSpace(f[0])) + value := strings.TrimSpace(f[1]) + filters[name] = append(filters[name], value) return filters, nil } @@ -61,3 +64,22 @@ func FromParam(p string) (Args, error) { } return args, nil } + +func (filters Args) Match(field, source string) bool { + fieldValues := filters[field] + + //do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + for _, name2match := range fieldValues { + match, err := regexp.MatchString(name2match, source) + if err != nil { + continue + } + if match { + return true + } + } + return false +} diff --git a/pkg/parsers/operatingsystem/operatingsystem_test.go b/pkg/parsers/operatingsystem/operatingsystem_test.go index d264b35f03..b7d54cbb1c 100644 --- a/pkg/parsers/operatingsystem/operatingsystem_test.go +++ b/pkg/parsers/operatingsystem/operatingsystem_test.go @@ -38,12 +38,13 @@ BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`) ) dir := os.TempDir() + etcOsRelease = filepath.Join(dir, "etcOsRelease") + defer func() { + os.Remove(etcOsRelease) etcOsRelease = backup - os.RemoveAll(dir) }() - etcOsRelease = filepath.Join(dir, "etcOsRelease") for expect, osRelease := range map[string][]byte{ "Ubuntu 14.04 LTS": ubuntuTrusty, "Gentoo/Linux": gentoo, @@ -92,13 +93,13 @@ func TestIsContainerized(t *testing.T) { ) dir := os.TempDir() - defer func() { - proc1Cgroup = backup - os.RemoveAll(dir) - }() - proc1Cgroup = 
filepath.Join(dir, "proc1Cgroup") + defer func() { + os.Remove(proc1Cgroup) + proc1Cgroup = backup + }() + if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroup, 0600); err != nil { t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) } diff --git a/pkg/parsers/parsers.go b/pkg/parsers/parsers.go index e6e3718b40..2851fe163a 100644 --- a/pkg/parsers/parsers.go +++ b/pkg/parsers/parsers.go @@ -7,63 +7,59 @@ import ( ) // FIXME: Change this not to receive default value as parameter -func ParseHost(defaultHost string, defaultUnix, addr string) (string, error) { - var ( - proto string - host string - port int - ) +func ParseHost(defaultTCPAddr, defaultUnixAddr, addr string) (string, error) { addr = strings.TrimSpace(addr) - switch { - case addr == "tcp://": - return "", fmt.Errorf("Invalid bind address format: %s", addr) - case strings.HasPrefix(addr, "unix://"): - proto = "unix" - addr = strings.TrimPrefix(addr, "unix://") - if addr == "" { - addr = defaultUnix - } - case strings.HasPrefix(addr, "tcp://"): - proto = "tcp" - addr = strings.TrimPrefix(addr, "tcp://") - case strings.HasPrefix(addr, "fd://"): + if addr == "" { + addr = fmt.Sprintf("unix://%s", defaultUnixAddr) + } + addrParts := strings.Split(addr, "://") + if len(addrParts) == 1 { + addrParts = []string{"tcp", addrParts[0]} + } + + switch addrParts[0] { + case "tcp": + return ParseTCPAddr(addrParts[1], defaultTCPAddr) + case "unix": + return ParseUnixAddr(addrParts[1], defaultUnixAddr) + case "fd": return addr, nil - case addr == "": - proto = "unix" - addr = defaultUnix default: - if strings.Contains(addr, "://") { - return "", fmt.Errorf("Invalid bind address protocol: %s", addr) - } - proto = "tcp" - } - - if proto != "unix" && strings.Contains(addr, ":") { - hostParts := strings.Split(addr, ":") - if len(hostParts) != 2 { - return "", fmt.Errorf("Invalid bind address format: %s", addr) - } - if hostParts[0] != "" { - host = hostParts[0] - } else { - host = defaultHost - } - - if p, 
err := strconv.Atoi(hostParts[1]); err == nil && p != 0 { - port = p - } else { - return "", fmt.Errorf("Invalid bind address format: %s", addr) - } - - } else if proto == "tcp" && !strings.Contains(addr, ":") { return "", fmt.Errorf("Invalid bind address format: %s", addr) - } else { - host = addr } - if proto == "unix" { - return fmt.Sprintf("%s://%s", proto, host), nil +} + +func ParseUnixAddr(addr string, defaultAddr string) (string, error) { + addr = strings.TrimPrefix(addr, "unix://") + if strings.Contains(addr, "://") { + return "", fmt.Errorf("Invalid proto, expected unix: %s", addr) } - return fmt.Sprintf("%s://%s:%d", proto, host, port), nil + if addr == "" { + addr = defaultAddr + } + return fmt.Sprintf("unix://%s", addr), nil +} + +func ParseTCPAddr(addr string, defaultAddr string) (string, error) { + addr = strings.TrimPrefix(addr, "tcp://") + if strings.Contains(addr, "://") || addr == "" { + return "", fmt.Errorf("Invalid proto, expected tcp: %s", addr) + } + + hostParts := strings.Split(addr, ":") + if len(hostParts) != 2 { + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } + host := hostParts[0] + if host == "" { + host = defaultAddr + } + + p, err := strconv.Atoi(hostParts[1]) + if err != nil && p == 0 { + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } + return fmt.Sprintf("tcp://%s:%d", host, p), nil } // Get a repos name and returns the right reposName + tag diff --git a/pkg/proxy/tcp_proxy.go b/pkg/proxy/tcp_proxy.go index 1aa6d9fd70..eacf1427a3 100644 --- a/pkg/proxy/tcp_proxy.go +++ b/pkg/proxy/tcp_proxy.go @@ -2,9 +2,10 @@ package proxy import ( "io" - "log" "net" "syscall" + + log "github.com/Sirupsen/logrus" ) type TCPProxy struct { diff --git a/pkg/proxy/udp_proxy.go b/pkg/proxy/udp_proxy.go index ae6a7bbc42..a3fcf116e3 100644 --- a/pkg/proxy/udp_proxy.go +++ b/pkg/proxy/udp_proxy.go @@ -2,17 +2,18 @@ package proxy import ( "encoding/binary" - "log" "net" "strings" "sync" "syscall" "time" + + log 
"github.com/Sirupsen/logrus" ) const ( UDPConnTrackTimeout = 90 * time.Second - UDPBufSize = 2048 + UDPBufSize = 65507 ) // A net.Addr where the IP is split into two fields so you can use it as a key diff --git a/pkg/signal/signal_unix.go b/pkg/signal/signal_unix.go new file mode 100644 index 0000000000..613e30e57c --- /dev/null +++ b/pkg/signal/signal_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package signal + +import ( + "syscall" +) + +// Signals used in api/client (no windows equivalent, use +// invalid signals so they don't get handled) +const SIGCHLD = syscall.SIGCHLD +const SIGWINCH = syscall.SIGWINCH diff --git a/pkg/signal/signal_windows.go b/pkg/signal/signal_windows.go new file mode 100644 index 0000000000..9f00b99994 --- /dev/null +++ b/pkg/signal/signal_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package signal + +import ( + "syscall" +) + +// Signals used in api/client (no windows equivalent, use +// invalid signals so they don't get handled) +const SIGCHLD = syscall.Signal(0xff) +const SIGWINCH = syscall.Signal(0xff) diff --git a/pkg/signal/trap.go b/pkg/signal/trap.go index cbdfd1ff17..78a709b30a 100644 --- a/pkg/signal/trap.go +++ b/pkg/signal/trap.go @@ -1,11 +1,12 @@ package signal import ( - "log" "os" gosignal "os/signal" "sync/atomic" "syscall" + + log "github.com/Sirupsen/logrus" ) // Trap sets up a simplified signal "trap", appropriate for common @@ -28,14 +29,13 @@ func Trap(cleanup func()) { interruptCount := uint32(0) for sig := range c { go func(sig os.Signal) { - log.Printf("Received signal '%v', starting shutdown of docker...\n", sig) + log.Infof("Received signal '%v', starting shutdown of docker...", sig) switch sig { case os.Interrupt, syscall.SIGTERM: // If the user really wants to interrupt, let him do so. 
if atomic.LoadUint32(&interruptCount) < 3 { - atomic.AddUint32(&interruptCount, 1) // Initiate the cleanup only once - if atomic.LoadUint32(&interruptCount) == 1 { + if atomic.AddUint32(&interruptCount, 1) == 1 { // Call cleanup handler cleanup() os.Exit(0) @@ -43,7 +43,7 @@ func Trap(cleanup func()) { return } } else { - log.Printf("Force shutdown of docker, interrupting cleanup\n") + log.Infof("Force shutdown of docker, interrupting cleanup") } case syscall.SIGQUIT: } diff --git a/pkg/stdcopy/stdcopy.go b/pkg/stdcopy/stdcopy.go index 79e15bc852..a61779ce53 100644 --- a/pkg/stdcopy/stdcopy.go +++ b/pkg/stdcopy/stdcopy.go @@ -5,7 +5,7 @@ import ( "errors" "io" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) const ( diff --git a/pkg/sysinfo/sysinfo.go b/pkg/sysinfo/sysinfo.go index 0c28719f61..001111f43d 100644 --- a/pkg/sysinfo/sysinfo.go +++ b/pkg/sysinfo/sysinfo.go @@ -2,10 +2,10 @@ package sysinfo import ( "io/ioutil" - "log" "os" "path" + log "github.com/Sirupsen/logrus" "github.com/docker/libcontainer/cgroups" ) diff --git a/pkg/system/lstat.go b/pkg/system/lstat.go new file mode 100644 index 0000000000..9ef82d5523 --- /dev/null +++ b/pkg/system/lstat.go @@ -0,0 +1,16 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +func Lstat(path string) (*Stat, error) { + s := &syscall.Stat_t{} + err := syscall.Lstat(path, s) + if err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/pkg/system/lstat_test.go b/pkg/system/lstat_test.go new file mode 100644 index 0000000000..9bab4d7b0c --- /dev/null +++ b/pkg/system/lstat_test.go @@ -0,0 +1,27 @@ +package system + +import ( + "os" + "testing" +) + +func TestLstat(t *testing.T) { + file, invalid, _, dir := prepareFiles(t) + defer os.RemoveAll(dir) + + statFile, err := Lstat(file) + if err != nil { + t.Fatal(err) + } + if statFile == nil { + t.Fatal("returned empty stat for existing file") + } + + statInvalid, err := Lstat(invalid) + if err == nil { + 
t.Fatal("did not return error for non-existing file") + } + if statInvalid != nil { + t.Fatal("returned non-nil stat for non-existing file") + } +} diff --git a/pkg/system/lstat_windows.go b/pkg/system/lstat_windows.go new file mode 100644 index 0000000000..213a7c7ade --- /dev/null +++ b/pkg/system/lstat_windows.go @@ -0,0 +1,8 @@ +// +build windows + +package system + +func Lstat(path string) (*Stat, error) { + // should not be called on cli code path + return nil, ErrNotSupportedPlatform +} diff --git a/pkg/system/meminfo.go b/pkg/system/meminfo.go new file mode 100644 index 0000000000..3b6e947e67 --- /dev/null +++ b/pkg/system/meminfo.go @@ -0,0 +1,17 @@ +package system + +// MemInfo contains memory statistics of the host system. +type MemInfo struct { + // Total usable RAM (i.e. physical RAM minus a few reserved bits and the + // kernel binary code). + MemTotal int64 + + // Amount of free memory. + MemFree int64 + + // Total amount of swap space available. + SwapTotal int64 + + // Amount of swap space that is currently unused. + SwapFree int64 +} diff --git a/pkg/system/meminfo_linux.go b/pkg/system/meminfo_linux.go new file mode 100644 index 0000000000..b7de3ff776 --- /dev/null +++ b/pkg/system/meminfo_linux.go @@ -0,0 +1,67 @@ +package system + +import ( + "bufio" + "errors" + "io" + "os" + "strconv" + "strings" + + "github.com/docker/docker/pkg/units" +) + +var ( + ErrMalformed = errors.New("malformed file") +) + +// Retrieve memory statistics of the host system and parse them into a MemInfo +// type. +func ReadMemInfo() (*MemInfo, error) { + file, err := os.Open("/proc/meminfo") + if err != nil { + return nil, err + } + defer file.Close() + return parseMemInfo(file) +} + +func parseMemInfo(reader io.Reader) (*MemInfo, error) { + meminfo := &MemInfo{} + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + // Expected format: ["MemTotal:", "1234", "kB"] + parts := strings.Fields(scanner.Text()) + + // Sanity checks: Skip malformed entries. 
+ if len(parts) < 3 || parts[2] != "kB" { + continue + } + + // Convert to bytes. + size, err := strconv.Atoi(parts[1]) + if err != nil { + continue + } + bytes := int64(size) * units.KiB + + switch parts[0] { + case "MemTotal:": + meminfo.MemTotal = bytes + case "MemFree:": + meminfo.MemFree = bytes + case "SwapTotal:": + meminfo.SwapTotal = bytes + case "SwapFree:": + meminfo.SwapFree = bytes + } + + } + + // Handle errors that may have occurred during the reading of the file. + if err := scanner.Err(); err != nil { + return nil, err + } + + return meminfo, nil +} diff --git a/pkg/system/meminfo_linux_test.go b/pkg/system/meminfo_linux_test.go new file mode 100644 index 0000000000..377405ea69 --- /dev/null +++ b/pkg/system/meminfo_linux_test.go @@ -0,0 +1,37 @@ +package system + +import ( + "strings" + "testing" + + "github.com/docker/docker/pkg/units" +) + +func TestMemInfo(t *testing.T) { + const input = ` + MemTotal: 1 kB + MemFree: 2 kB + SwapTotal: 3 kB + SwapFree: 4 kB + Malformed1: + Malformed2: 1 + Malformed3: 2 MB + Malformed4: X kB + ` + meminfo, err := parseMemInfo(strings.NewReader(input)) + if err != nil { + t.Fatal(err) + } + if meminfo.MemTotal != 1*units.KiB { + t.Fatalf("Unexpected MemTotal: %d", meminfo.MemTotal) + } + if meminfo.MemFree != 2*units.KiB { + t.Fatalf("Unexpected MemFree: %d", meminfo.MemFree) + } + if meminfo.SwapTotal != 3*units.KiB { + t.Fatalf("Unexpected SwapTotal: %d", meminfo.SwapTotal) + } + if meminfo.SwapFree != 4*units.KiB { + t.Fatalf("Unexpected SwapFree: %d", meminfo.SwapFree) + } +} diff --git a/pkg/system/meminfo_unsupported.go b/pkg/system/meminfo_unsupported.go new file mode 100644 index 0000000000..63b8b16e05 --- /dev/null +++ b/pkg/system/meminfo_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux + +package system + +func ReadMemInfo() (*MemInfo, error) { + return nil, ErrNotSupportedPlatform +} diff --git a/pkg/system/mknod.go b/pkg/system/mknod.go new file mode 100644 index 0000000000..06f9c6afbb --- /dev/null 
+++ b/pkg/system/mknod.go @@ -0,0 +1,18 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +func Mknod(path string, mode uint32, dev int) error { + return syscall.Mknod(path, mode, dev) +} + +// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. +// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, +// then the top 12 bits of the minor +func Mkdev(major int64, minor int64) uint32 { + return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) +} diff --git a/pkg/system/mknod_windows.go b/pkg/system/mknod_windows.go new file mode 100644 index 0000000000..b4020c11b6 --- /dev/null +++ b/pkg/system/mknod_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package system + +func Mknod(path string, mode uint32, dev int) error { + // should not be called on cli code path + return ErrNotSupportedPlatform +} + +func Mkdev(major int64, minor int64) uint32 { + panic("Mkdev not implemented on windows, should not be called on cli code") +} diff --git a/pkg/system/stat.go b/pkg/system/stat.go new file mode 100644 index 0000000000..5d47494d21 --- /dev/null +++ b/pkg/system/stat.go @@ -0,0 +1,42 @@ +package system + +import ( + "syscall" +) + +type Stat struct { + mode uint32 + uid uint32 + gid uint32 + rdev uint64 + size int64 + mtim syscall.Timespec +} + +func (s Stat) Mode() uint32 { + return s.mode +} + +func (s Stat) Uid() uint32 { + return s.uid +} + +func (s Stat) Gid() uint32 { + return s.gid +} + +func (s Stat) Rdev() uint64 { + return s.rdev +} + +func (s Stat) Size() int64 { + return s.size +} + +func (s Stat) Mtim() syscall.Timespec { + return s.mtim +} + +func (s Stat) GetLastModification() syscall.Timespec { + return s.Mtim() +} diff --git a/pkg/system/stat_linux.go b/pkg/system/stat_linux.go index e702200360..47cebef5cf 100644 --- a/pkg/system/stat_linux.go +++ b/pkg/system/stat_linux.go @@ -4,10 +4,11 @@ import ( "syscall" ) -func GetLastAccess(stat 
*syscall.Stat_t) syscall.Timespec { - return stat.Atim -} - -func GetLastModification(stat *syscall.Stat_t) syscall.Timespec { - return stat.Mtim +func fromStatT(s *syscall.Stat_t) (*Stat, error) { + return &Stat{size: s.Size, + mode: s.Mode, + uid: s.Uid, + gid: s.Gid, + rdev: s.Rdev, + mtim: s.Mtim}, nil } diff --git a/pkg/system/stat_test.go b/pkg/system/stat_test.go new file mode 100644 index 0000000000..abcc8ea7a6 --- /dev/null +++ b/pkg/system/stat_test.go @@ -0,0 +1,36 @@ +package system + +import ( + "os" + "syscall" + "testing" +) + +func TestFromStatT(t *testing.T) { + file, _, _, dir := prepareFiles(t) + defer os.RemoveAll(dir) + + stat := &syscall.Stat_t{} + err := syscall.Lstat(file, stat) + + s, err := fromStatT(stat) + if err != nil { + t.Fatal(err) + } + + if stat.Mode != s.Mode() { + t.Fatal("got invalid mode") + } + if stat.Uid != s.Uid() { + t.Fatal("got invalid uid") + } + if stat.Gid != s.Gid() { + t.Fatal("got invalid gid") + } + if stat.Rdev != s.Rdev() { + t.Fatal("got invalid rdev") + } + if stat.Mtim != s.Mtim() { + t.Fatal("got invalid mtim") + } +} diff --git a/pkg/system/stat_unsupported.go b/pkg/system/stat_unsupported.go index 4686a4c346..c4d53e6cd6 100644 --- a/pkg/system/stat_unsupported.go +++ b/pkg/system/stat_unsupported.go @@ -1,13 +1,16 @@ -// +build !linux +// +build !linux,!windows package system -import "syscall" +import ( + "syscall" +) -func GetLastAccess(stat *syscall.Stat_t) syscall.Timespec { - return stat.Atimespec -} - -func GetLastModification(stat *syscall.Stat_t) syscall.Timespec { - return stat.Mtimespec +func fromStatT(s *syscall.Stat_t) (*Stat, error) { + return &Stat{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil } diff --git a/pkg/system/stat_windows.go b/pkg/system/stat_windows.go new file mode 100644 index 0000000000..584e8940cc --- /dev/null +++ b/pkg/system/stat_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package system + +import ( 
+ "errors" + "syscall" +) + +func fromStatT(s *syscall.Win32FileAttributeData) (*Stat, error) { + return nil, errors.New("fromStatT should not be called on windows path") +} diff --git a/pkg/system/umask.go b/pkg/system/umask.go new file mode 100644 index 0000000000..fddbecd390 --- /dev/null +++ b/pkg/system/umask.go @@ -0,0 +1,11 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +func Umask(newmask int) (oldmask int, err error) { + return syscall.Umask(newmask), nil +} diff --git a/pkg/system/umask_windows.go b/pkg/system/umask_windows.go new file mode 100644 index 0000000000..3be563f89e --- /dev/null +++ b/pkg/system/umask_windows.go @@ -0,0 +1,8 @@ +// +build windows + +package system + +func Umask(newmask int) (oldmask int, err error) { + // should not be called on cli code path + return 0, ErrNotSupportedPlatform +} diff --git a/pkg/system/utimes_test.go b/pkg/system/utimes_test.go index 38e4020cb5..1dea47cc15 100644 --- a/pkg/system/utimes_test.go +++ b/pkg/system/utimes_test.go @@ -8,7 +8,7 @@ import ( "testing" ) -func prepareFiles(t *testing.T) (string, string, string) { +func prepareFiles(t *testing.T) (string, string, string, string) { dir, err := ioutil.TempDir("", "docker-system-test") if err != nil { t.Fatal(err) @@ -26,11 +26,12 @@ func prepareFiles(t *testing.T) (string, string, string) { t.Fatal(err) } - return file, invalid, symlink + return file, invalid, symlink, dir } func TestLUtimesNano(t *testing.T) { - file, invalid, symlink := prepareFiles(t) + file, invalid, symlink, dir := prepareFiles(t) + defer os.RemoveAll(dir) before, err := os.Stat(file) if err != nil { diff --git a/pkg/tarsum/MAINTAINER b/pkg/tarsum/MAINTAINER deleted file mode 100644 index bd492e8394..0000000000 --- a/pkg/tarsum/MAINTAINER +++ /dev/null @@ -1 +0,0 @@ -Eric Windisch (@ewindisch) diff --git a/pkg/tarsum/MAINTAINERS b/pkg/tarsum/MAINTAINERS new file mode 100644 index 0000000000..9571a14a38 --- /dev/null +++ b/pkg/tarsum/MAINTAINERS @@ -0,0 +1,4 @@ 
+Derek McGowan (github: dmcgowan) +Eric Windisch (github: ewindisch) +Josh Hawn (github: jlhawn) +Vincent Batts (github: vbatts) diff --git a/pkg/tarsum/tarsum.go b/pkg/tarsum/tarsum.go index 6581f3f234..ba09d4a121 100644 --- a/pkg/tarsum/tarsum.go +++ b/pkg/tarsum/tarsum.go @@ -7,13 +7,11 @@ import ( "encoding/hex" "hash" "io" - "sort" - "strconv" "strings" "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) const ( @@ -29,18 +27,18 @@ const ( // including the byte payload of the image's json metadata as well, and for // calculating the checksums for buildcache. func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) { - if _, ok := tarSumVersions[v]; !ok { - return nil, ErrVersionNotImplemented - } - return &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v}, nil + return NewTarSumHash(r, dc, v, DefaultTHash) } // Create a new TarSum, providing a THash to use rather than the DefaultTHash func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) { - if _, ok := tarSumVersions[v]; !ok { - return nil, ErrVersionNotImplemented + headerSelector, err := getTarHeaderSelector(v) + if err != nil { + return nil, err } - return &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, tHash: tHash}, nil + ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash} + err = ts.initTarSum() + return ts, err } // TarSum is the generic interface for calculating fixed time @@ -69,8 +67,9 @@ type tarSum struct { currentFile string finished bool first bool - DisableCompression bool // false by default. When false, the output gzip compressed. - tarSumVersion Version // this field is not exported so it can not be mutated during use + DisableCompression bool // false by default. When false, the output gzip compressed. 
+ tarSumVersion Version // this field is not exported so it can not be mutated during use + headerSelector tarHeaderSelector // handles selecting and ordering headers for files in the archive } func (ts tarSum) Hash() THash { @@ -103,49 +102,12 @@ type simpleTHash struct { func (sth simpleTHash) Name() string { return sth.n } func (sth simpleTHash) Hash() hash.Hash { return sth.h() } -func (ts tarSum) selectHeaders(h *tar.Header, v Version) (set [][2]string) { - for _, elem := range [][2]string{ - {"name", h.Name}, - {"mode", strconv.Itoa(int(h.Mode))}, - {"uid", strconv.Itoa(h.Uid)}, - {"gid", strconv.Itoa(h.Gid)}, - {"size", strconv.Itoa(int(h.Size))}, - {"mtime", strconv.Itoa(int(h.ModTime.UTC().Unix()))}, - {"typeflag", string([]byte{h.Typeflag})}, - {"linkname", h.Linkname}, - {"uname", h.Uname}, - {"gname", h.Gname}, - {"devmajor", strconv.Itoa(int(h.Devmajor))}, - {"devminor", strconv.Itoa(int(h.Devminor))}, - } { - if v >= VersionDev && elem[0] == "mtime" { - continue - } - set = append(set, elem) - } - return -} - func (ts *tarSum) encodeHeader(h *tar.Header) error { - for _, elem := range ts.selectHeaders(h, ts.Version()) { + for _, elem := range ts.headerSelector.selectHeaders(h) { if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { return err } } - - // include the additional pax headers, from an ordered list - if ts.Version() >= VersionDev { - var keys []string - for k := range h.Xattrs { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - if _, err := ts.h.Write([]byte(k + h.Xattrs[k])); err != nil { - return err - } - } - } return nil } @@ -170,12 +132,6 @@ func (ts *tarSum) initTarSum() error { } func (ts *tarSum) Read(buf []byte) (int, error) { - if ts.writer == nil { - if err := ts.initTarSum(); err != nil { - return 0, err - } - } - if ts.finished { return ts.bufWriter.Read(buf) } diff --git a/pkg/tarsum/tarsum_spec.md b/pkg/tarsum/tarsum_spec.md new file mode 100644 index 0000000000..7a6f8edc7c --- 
/dev/null +++ b/pkg/tarsum/tarsum_spec.md @@ -0,0 +1,225 @@ +page_title: TarSum checksum specification +page_description: Documentation for algorithms used in the TarSum checksum calculation +page_keywords: docker, checksum, validation, tarsum + +# TarSum Checksum Specification + +## Abstract + +This document describes the algorithms used in performing the TarSum checksum +calculation on filesystem layers, the need for this method over existing +methods, and the versioning of this calculation. + + +## Introduction + +The transportation of filesystems, regarding Docker, is done with tar(1) +archives. There are a variety of tar serialization formats [2], and a key +concern here is ensuring a repeatable checksum given a set of inputs from a +generic tar archive. Types of transportation include distribution to and from a +registry endpoint, saving and loading through commands or Docker daemon APIs, +transferring the build context from client to Docker daemon, and committing the +filesystem of a container to become an image. + +As tar archives are used for transit, but not preserved in many situations, the +focus of the algorithm is to ensure the integrity of the preserved filesystem, +while maintaining a deterministic accountability. This includes neither +constraining the ordering or manipulation of the files during the creation or +unpacking of the archive, nor include additional metadata state about the file +system attributes. + +## Intended Audience + +This document is outlining the methods used for consistent checksum calculation +for filesystems transported via tar archives. + +Auditing these methodologies is an open and iterative process. This document +should accommodate the review of source code. Ultimately, this document should +be the starting point of further refinements to the algorithm and its future +versions. + +## Concept + +The checksum mechanism must ensure the integrity and assurance of the +filesystem payload. 
+ +## Checksum Algorithm Profile + +A checksum mechanism must define the following operations and attributes: + +* Associated hashing cipher - used to checksum each file payload and attribute + information. +* Checksum list - each file of the filesystem archive has its checksum + calculated from the payload and attributes of the file. The final checksum is + calculated from this list, with specific ordering. +* Version - as the algorithm adapts to requirements, there are behaviors of the + algorithm to manage by versioning. +* Archive being calculated - the tar archive having its checksum calculated + +## Elements of TarSum checksum + +The calculated sum output is a text string. The elements included in the output +of the calculated sum comprise the information needed for validation of the sum +(TarSum version and hashing cipher used) and the expected checksum in hexadecimal +form. + +There are two delimiters used: +* '+' separates TarSum version from hashing cipher +* ':' separates calculation mechanics from expected hash + +Example: + +``` + "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e" + | | \ | + | | \ | + |_version_|_cipher__|__ | + | \ | + |_calculation_mechanics_|______________________expected_sum_______________________| +``` + +## Versioning + +Versioning was introduced [0] to accommodate differences in calculation needed, +and ability to maintain reverse compatibility. + +The general algorithm will be describe further in the 'Calculation'. + +### Version0 + +This is the initial version of TarSum. + +Its element in the TarSum checksum string is `tarsum`. + +### Version1 + +Its element in the TarSum checksum is `tarsum.v1`. + +The notable changes in this version: +* Exclusion of file `mtime` from the file information headers, in each file + checksum calculation +* Inclusion of extended attributes (`xattrs`. 
Also seen as `SCHILY.xattr.` prefixed Pax + tar file info headers) keys and values in each file checksum calculation + +### VersionDev + +*Do not use unless validating refinements to the checksum algorithm* + +Its element in the TarSum checksum is `tarsum.dev`. + +This is a floating place holder for a next version and grounds for testing +changes. The methods used for calculation are subject to change without notice, +and this version is for testing and not for production use. + +## Ciphers + +The official default and standard hashing cipher used in the calculation mechanic +is `sha256`. This refers to SHA256 hash algorithm as defined in FIPS 180-4. + +Though the TarSum algorithm itself is not exclusively bound to the single +hashing cipher `sha256`, support for alternate hashing ciphers was later added +[1]. Use cases for alternate cipher could include future-proofing TarSum +checksum format and using faster cipher hashes for tar filesystem checksums. + +## Calculation + +### Requirement + +As mentioned earlier, the calculation is such that it takes into consideration +the lifecycle of the tar archive. In that the tar archive is not an immutable, +permanent artifact. Otherwise options like relying on a known hashing cipher +checksum of the archive itself would be reliable enough. The tar archive of the +filesystem is used as a transportation medium for Docker images, and the +archive is discarded once its contents are extracted. Therefore, for consistent +validation items such as order of files in the tar archive and time stamps are +subject to change once an image is received. + +### Process + +The method is typically iterative due to reading tar info headers from the +archive stream, though this is not a strict requirement. + +#### Files + +Each file in the tar archive have their contents (headers and body) checksummed +individually using the designated associated hashing cipher. 
The ordered +headers of the file are written to the checksum calculation first, and then the +payload of the file body. + +The resulting checksum of the file is appended to the list of file sums. The +sum is encoded as a string of the hexadecimal digest. Additionally, the file +name and position in the archive is kept as reference for special ordering. + +#### Headers + +The following headers are read, in this +order ( and the corresponding representation of its value): +* 'name' - string +* 'mode' - string of the base10 integer +* 'uid' - string of the integer +* 'gid' - string of the integer +* 'size' - string of the integer +* 'mtime' (_Version0 only_) - string of integer of the seconds since 1970-01-01 00:00:00 UTC +* 'typeflag' - string of the char +* 'linkname' - string +* 'uname' - string +* 'gname' - string +* 'devmajor' - string of the integer +* 'devminor' - string of the integer + +For >= Version1, the extented attribute headers ("SCHILY.xattr." prefixed pax +headers) included after the above list. These xattrs key/values are first +sorted by the keys. + +#### Header Format + +The ordered headers are written to the hash in the format of + + "{.key}{.value}" + +with no newline. + +#### Body + +After the order headers of the file have been added to the checksum for the +file, the body of the file is written to the hash. + +#### List of file sums + +The list of file sums is sorted by the string of the hexadecimal digest. + +If there are two files in the tar with matching paths, the order of occurrence +for that path is reflected for the sums of the corresponding file header and +body. + +#### Final Checksum + +Begin with a fresh or initial state of the associated hash cipher. If there is +additional payload to include in the TarSum calculation for the archive, it is +written first. Then each checksum from the ordered list of file sums is written +to the hash. 
+ +The resulting digest is formatted per the Elements of TarSum checksum, +including the TarSum version, the associated hash cipher and the hexadecimal +encoded checksum digest. + +## Security Considerations + +The initial version of TarSum has undergone one update that could invalidate +handcrafted tar archives. The tar archive format supports appending of files +with same names as prior files in the archive. The latter file will clobber the +prior file of the same path. Due to this the algorithm now accounts for files +with matching paths, and orders the list of file sums accordingly [3]. + +## Footnotes + +* [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0 +* [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e +* [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29 +* [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31 + +## Acknowledgements + +Joffrey F (shin-) and Guillaume J. Charmes (creack) on the initial work of the +TarSum calculation. + diff --git a/pkg/tarsum/tarsum_test.go b/pkg/tarsum/tarsum_test.go index 1e06cda178..26f12cc847 100644 --- a/pkg/tarsum/tarsum_test.go +++ b/pkg/tarsum/tarsum_test.go @@ -132,6 +132,7 @@ func sizedTar(opts sizedOptions) io.Reader { fh = bytes.NewBuffer([]byte{}) } tarW := tar.NewWriter(fh) + defer tarW.Close() for i := int64(0); i < opts.num; i++ { err := tarW.WriteHeader(&tar.Header{ Name: fmt.Sprintf("/testdata%d", i), @@ -230,6 +231,17 @@ func TestEmptyTar(t *testing.T) { if resultSum != expectedSum { t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) } + + // Test without ever actually writing anything. 
+ if ts, err = NewTarSum(bytes.NewReader([]byte{}), true, Version0); err != nil { + t.Fatal(err) + } + + resultSum = ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } } var ( @@ -318,6 +330,153 @@ func TestTarSums(t *testing.T) { } } +func TestIteration(t *testing.T) { + headerTests := []struct { + expectedSum string // TODO(vbatts) it would be nice to get individual sums of each + version Version + hdr *tar.Header + data []byte + }{ + { + "tarsum+sha256:626c4a2e9a467d65c33ae81f7f3dedd4de8ccaee72af73223c4bc4718cbc7bbd", + Version0, + &tar.Header{ + Name: "file.txt", + Size: 0, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte(""), + }, + { + "tarsum.dev+sha256:6ffd43a1573a9913325b4918e124ee982a99c0f3cba90fc032a65f5e20bdd465", + VersionDev, + &tar.Header{ + Name: "file.txt", + Size: 0, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte(""), + }, + { + "tarsum.dev+sha256:b38166c059e11fb77bef30bf16fba7584446e80fcc156ff46d47e36c5305d8ef", + VersionDev, + &tar.Header{ + Name: "another.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte("test"), + }, + { + "tarsum.dev+sha256:4cc2e71ac5d31833ab2be9b4f7842a14ce595ec96a37af4ed08f87bc374228cd", + VersionDev, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.key1": "value1", + "user.key2": "value2", + }, + }, + []byte("test"), + }, + { + "tarsum.dev+sha256:65f4284fa32c0d4112dd93c3637697805866415b570587e4fd266af241503760", + VersionDev, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.KEY1": "value1", // adding different case to ensure different sum + "user.key2": 
"value2", + }, + }, + []byte("test"), + }, + { + "tarsum+sha256:c12bb6f1303a9ddbf4576c52da74973c00d14c109bcfa76b708d5da1154a07fa", + Version0, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.NOT": "CALCULATED", + }, + }, + []byte("test"), + }, + } + for _, htest := range headerTests { + s, err := renderSumForHeader(htest.version, htest.hdr, htest.data) + if err != nil { + t.Fatal(err) + } + + if s != htest.expectedSum { + t.Errorf("expected sum: %q, got: %q", htest.expectedSum, s) + } + } + +} + +func renderSumForHeader(v Version, h *tar.Header, data []byte) (string, error) { + buf := bytes.NewBuffer(nil) + // first build our test tar + tw := tar.NewWriter(buf) + if err := tw.WriteHeader(h); err != nil { + return "", err + } + if _, err := tw.Write(data); err != nil { + return "", err + } + tw.Close() + + ts, err := NewTarSum(buf, true, v) + if err != nil { + return "", err + } + tr := tar.NewReader(ts) + for { + hdr, err := tr.Next() + if hdr == nil || err == io.EOF { + // Signals the end of the archive. 
+ break + } + if err != nil { + return "", err + } + if _, err = io.Copy(ioutil.Discard, tr); err != nil { + return "", err + } + } + return ts.Sum(nil), nil +} + func Benchmark9kTar(b *testing.B) { buf := bytes.NewBuffer([]byte{}) fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") @@ -328,10 +487,13 @@ func Benchmark9kTar(b *testing.B) { n, err := io.Copy(buf, fh) fh.Close() + reader := bytes.NewReader(buf.Bytes()) + b.SetBytes(n) b.ResetTimer() for i := 0; i < b.N; i++ { - ts, err := NewTarSum(buf, true, Version0) + reader.Seek(0, 0) + ts, err := NewTarSum(reader, true, Version0) if err != nil { b.Error(err) return @@ -351,10 +513,13 @@ func Benchmark9kTarGzip(b *testing.B) { n, err := io.Copy(buf, fh) fh.Close() + reader := bytes.NewReader(buf.Bytes()) + b.SetBytes(n) b.ResetTimer() for i := 0; i < b.N; i++ { - ts, err := NewTarSum(buf, false, Version0) + reader.Seek(0, 0) + ts, err := NewTarSum(reader, false, Version0) if err != nil { b.Error(err) return diff --git a/pkg/tarsum/versioning.go b/pkg/tarsum/versioning.go index e1161fc5ab..3a656612ff 100644 --- a/pkg/tarsum/versioning.go +++ b/pkg/tarsum/versioning.go @@ -2,7 +2,11 @@ package tarsum import ( "errors" + "sort" + "strconv" "strings" + + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) // versioning of the TarSum algorithm @@ -10,11 +14,11 @@ import ( // i.e. 
"tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b" type Version int +// Prefix of "tarsum" const ( - // Prefix of "tarsum" Version0 Version = iota - // Prefix of "tarsum.dev" - // NOTE: this variable will be of an unsettled next-version of the TarSum calculation + Version1 + // NOTE: this variable will be either the latest or an unsettled next-version of the TarSum calculation VersionDev ) @@ -28,8 +32,9 @@ func GetVersions() []Version { } var tarSumVersions = map[Version]string{ - 0: "tarsum", - 1: "tarsum.dev", + Version0: "tarsum", + Version1: "tarsum.v1", + VersionDev: "tarsum.dev", } func (tsv Version) String() string { @@ -50,7 +55,78 @@ func GetVersionFromTarsum(tarsum string) (Version, error) { return -1, ErrNotVersion } +// Errors that may be returned by functions in this package var ( ErrNotVersion = errors.New("string does not include a TarSum Version") ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented") ) + +// tarHeaderSelector is the interface which different versions +// of tarsum should use for selecting and ordering tar headers +// for each item in the archive. 
+type tarHeaderSelector interface { + selectHeaders(h *tar.Header) (orderedHeaders [][2]string) +} + +type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string) + +func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) { + return f(h) +} + +func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + return [][2]string{ + {"name", h.Name}, + {"mode", strconv.Itoa(int(h.Mode))}, + {"uid", strconv.Itoa(h.Uid)}, + {"gid", strconv.Itoa(h.Gid)}, + {"size", strconv.Itoa(int(h.Size))}, + {"mtime", strconv.Itoa(int(h.ModTime.UTC().Unix()))}, + {"typeflag", string([]byte{h.Typeflag})}, + {"linkname", h.Linkname}, + {"uname", h.Uname}, + {"gname", h.Gname}, + {"devmajor", strconv.Itoa(int(h.Devmajor))}, + {"devminor", strconv.Itoa(int(h.Devminor))}, + } +} + +func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + // Get extended attributes. + xAttrKeys := make([]string, len(h.Xattrs)) + for k := range h.Xattrs { + xAttrKeys = append(xAttrKeys, k) + } + sort.Strings(xAttrKeys) + + // Make the slice with enough capacity to hold the 11 basic headers + // we want from the v0 selector plus however many xattrs we have. + orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys)) + + // Copy all headers from v0 excluding the 'mtime' header (the 5th element). + v0headers := v0TarHeaderSelect(h) + orderedHeaders = append(orderedHeaders, v0headers[0:5]...) + orderedHeaders = append(orderedHeaders, v0headers[6:]...) + + // Finally, append the sorted xattrs. 
+ for _, k := range xAttrKeys { + orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]}) + } + + return +} + +var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{ + Version0: v0TarHeaderSelect, + Version1: v1TarHeaderSelect, + VersionDev: v1TarHeaderSelect, +} + +func getTarHeaderSelector(v Version) (tarHeaderSelector, error) { + headerSelector, ok := registeredHeaderSelectors[v] + if !ok { + return nil, ErrVersionNotImplemented + } + + return headerSelector, nil +} diff --git a/pkg/tarsum/versioning_test.go b/pkg/tarsum/versioning_test.go index b851c3be6f..4ddb72ec55 100644 --- a/pkg/tarsum/versioning_test.go +++ b/pkg/tarsum/versioning_test.go @@ -11,11 +11,17 @@ func TestVersion(t *testing.T) { t.Errorf("expected %q, got %q", expected, v.String()) } - expected = "tarsum.dev" + expected = "tarsum.v1" v = 1 if v.String() != expected { t.Errorf("expected %q, got %q", expected, v.String()) } + + expected = "tarsum.dev" + v = 2 + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } } func TestGetVersion(t *testing.T) { diff --git a/pkg/term/console_windows.go b/pkg/term/console_windows.go new file mode 100644 index 0000000000..6335b2b837 --- /dev/null +++ b/pkg/term/console_windows.go @@ -0,0 +1,87 @@ +// +build windows + +package term + +import ( + "syscall" + "unsafe" +) + +const ( + // Consts for Get/SetConsoleMode function + // see http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx + ENABLE_ECHO_INPUT = 0x0004 + ENABLE_INSERT_MODE = 0x0020 + ENABLE_LINE_INPUT = 0x0002 + ENABLE_MOUSE_INPUT = 0x0010 + ENABLE_PROCESSED_INPUT = 0x0001 + ENABLE_QUICK_EDIT_MODE = 0x0040 + ENABLE_WINDOW_INPUT = 0x0008 + // If parameter is a screen buffer handle, additional values + ENABLE_PROCESSED_OUTPUT = 0x0001 + ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 +) + +var kernel32DLL = syscall.NewLazyDLL("kernel32.dll") + +var ( + setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode") + 
getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") +) + +func GetConsoleMode(fileDesc uintptr) (uint32, error) { + var mode uint32 + err := syscall.GetConsoleMode(syscall.Handle(fileDesc), &mode) + return mode, err +} + +func SetConsoleMode(fileDesc uintptr, mode uint32) error { + r, _, err := setConsoleModeProc.Call(fileDesc, uintptr(mode), 0) + if r == 0 { + if err != nil { + return err + } + return syscall.EINVAL + } + return nil +} + +// types for calling GetConsoleScreenBufferInfo +// see http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093(v=vs.85).aspx +type ( + SHORT int16 + + SMALL_RECT struct { + Left SHORT + Top SHORT + Right SHORT + Bottom SHORT + } + + COORD struct { + X SHORT + Y SHORT + } + + WORD uint16 + + CONSOLE_SCREEN_BUFFER_INFO struct { + dwSize COORD + dwCursorPosition COORD + wAttributes WORD + srWindow SMALL_RECT + dwMaximumWindowSize COORD + } +) + +func GetConsoleScreenBufferInfo(fileDesc uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { + var info CONSOLE_SCREEN_BUFFER_INFO + r, _, err := getConsoleScreenBufferInfoProc.Call(uintptr(fileDesc), uintptr(unsafe.Pointer(&info)), 0) + if r == 0 { + if err != nil { + return nil, err + } + return nil, syscall.EINVAL + } + return &info, nil +} diff --git a/pkg/term/tc_linux_cgo.go b/pkg/term/tc_linux_cgo.go new file mode 100644 index 0000000000..ae9516c99c --- /dev/null +++ b/pkg/term/tc_linux_cgo.go @@ -0,0 +1,47 @@ +// +build linux,cgo + +package term + +import ( + "syscall" + "unsafe" +) + +// #include +import "C" + +type Termios syscall.Termios + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
+func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if err := tcget(fd, &oldState.termios); err != 0 { + return nil, err + } + + newState := oldState.termios + + C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState))) + if err := tcset(fd, &newState); err != 0 { + return nil, err + } + return &oldState, nil +} + +func tcget(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} + +func tcset(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} diff --git a/pkg/term/tc_other.go b/pkg/term/tc_other.go new file mode 100644 index 0000000000..266039bac3 --- /dev/null +++ b/pkg/term/tc_other.go @@ -0,0 +1,19 @@ +// +build !windows +// +build !linux !cgo + +package term + +import ( + "syscall" + "unsafe" +) + +func tcget(fd uintptr, p *Termios) syscall.Errno { + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p))) + return err +} + +func tcset(fd uintptr, p *Termios) syscall.Errno { + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p))) + return err +} diff --git a/pkg/term/term.go b/pkg/term/term.go index ea94b44ade..8d807d8d44 100644 --- a/pkg/term/term.go +++ b/pkg/term/term.go @@ -1,3 +1,5 @@ +// +build !windows + package term import ( @@ -45,8 +47,7 @@ func SetWinsize(fd uintptr, ws *Winsize) error { // IsTerminal returns true if the given file descriptor is a terminal. 
func IsTerminal(fd uintptr) bool { var termios Termios - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&termios))) - return err == 0 + return tcget(fd, &termios) == 0 } // Restore restores the terminal connected to the given file descriptor to a @@ -55,8 +56,7 @@ func RestoreTerminal(fd uintptr, state *State) error { if state == nil { return ErrInvalidState } - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&state.termios))) - if err != 0 { + if err := tcset(fd, &state.termios); err != 0 { return err } return nil @@ -64,7 +64,7 @@ func RestoreTerminal(fd uintptr, state *State) error { func SaveState(fd uintptr) (*State, error) { var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + if err := tcget(fd, &oldState.termios); err != 0 { return nil, err } @@ -75,7 +75,7 @@ func DisableEcho(fd uintptr, state *State) error { newState := state.termios newState.Lflag &^= syscall.ECHO - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { + if err := tcset(fd, &newState); err != 0 { return err } handleInterrupt(fd, state) diff --git a/pkg/term/term_windows.go b/pkg/term/term_windows.go new file mode 100644 index 0000000000..d372e86a88 --- /dev/null +++ b/pkg/term/term_windows.go @@ -0,0 +1,89 @@ +// +build windows +
+package term +
+type State struct { + mode uint32 +} +
+type Winsize struct { + Height uint16 + Width uint16 + x uint16 + y uint16 +} +
+func GetWinsize(fd uintptr) (*Winsize, error) { + ws := &Winsize{} + var info *CONSOLE_SCREEN_BUFFER_INFO + info, err := GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil, err + } + ws.Height = uint16(info.srWindow.Bottom - info.srWindow.Top + 1) + ws.Width = uint16(info.srWindow.Right - info.srWindow.Left + 1) + + ws.x = 0 // todo azlinux -- this is the pixel size of 
the Window, and not currently used by any caller + ws.y = 0 + + return ws, nil +} + +func SetWinsize(fd uintptr, ws *Winsize) error { + return nil +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + _, e := GetConsoleMode(fd) + return e == nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func RestoreTerminal(fd uintptr, state *State) error { + return SetConsoleMode(fd, state.mode) +} + +func SaveState(fd uintptr) (*State, error) { + mode, e := GetConsoleMode(fd) + if e != nil { + return nil, e + } + return &State{mode}, nil +} + +// see http://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx for these flag settings +func DisableEcho(fd uintptr, state *State) error { + state.mode &^= (ENABLE_ECHO_INPUT) + state.mode |= (ENABLE_PROCESSED_INPUT | ENABLE_LINE_INPUT) + return SetConsoleMode(fd, state.mode) +} + +func SetRawTerminal(fd uintptr) (*State, error) { + oldState, err := MakeRaw(fd) + if err != nil { + return nil, err + } + // TODO (azlinux): implement handling interrupt and restore state of terminal + return oldState, err +} + +// MakeRaw puts the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
+func MakeRaw(fd uintptr) (*State, error) { + var state *State + state, err := SaveState(fd) + if err != nil { + return nil, err + } + + // see http://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx for these flag settings + state.mode &^= (ENABLE_ECHO_INPUT | ENABLE_PROCESSED_INPUT | ENABLE_LINE_INPUT) + err = SetConsoleMode(fd, state.mode) + if err != nil { + return nil, err + } + return state, nil +} diff --git a/pkg/term/termios_linux.go b/pkg/term/termios_linux.go index 4a717c84a7..024187ff06 100644 --- a/pkg/term/termios_linux.go +++ b/pkg/term/termios_linux.go @@ -1,3 +1,5 @@ +// +build !cgo + package term import ( diff --git a/pkg/timeutils/json.go b/pkg/timeutils/json.go index 19f107bffe..8043d69d18 100644 --- a/pkg/timeutils/json.go +++ b/pkg/timeutils/json.go @@ -6,18 +6,21 @@ import ( ) const ( - // Define our own version of RFC339Nano because we want one + // RFC3339NanoFixed is our own version of RFC339Nano because we want one // that pads the nano seconds part with zeros to ensure // the timestamps are aligned in the logs. RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" - JSONFormat = `"` + time.RFC3339Nano + `"` + // JSONFormat is the format used by FastMarshalJSON + JSONFormat = `"` + time.RFC3339Nano + `"` ) +// FastMarshalJSON avoids one of the extra allocations that +// time.MarshalJSON is making. func FastMarshalJSON(t time.Time) (string, error) { if y := t.Year(); y < 0 || y >= 10000 { // RFC 3339 is clear that years are 4 digits exactly. // See golang.org/issue/4556#c15 for more discussion. 
- return "", errors.New("Time.MarshalJSON: year outside of range [0,9999]") + return "", errors.New("time.MarshalJSON: year outside of range [0,9999]") } return t.Format(JSONFormat), nil } diff --git a/pkg/truncindex/truncindex.go b/pkg/truncindex/truncindex.go index 89aa88d6b7..c5b71752b5 100644 --- a/pkg/truncindex/truncindex.go +++ b/pkg/truncindex/truncindex.go @@ -10,7 +10,9 @@ import ( ) var ( - ErrNoID = errors.New("prefix can't be empty") + // ErrNoID is thrown when attempting to use empty prefixes + ErrNoID = errors.New("prefix can't be empty") + errDuplicateID = errors.New("multiple IDs were found") ) func init() { @@ -27,56 +29,62 @@ type TruncIndex struct { ids map[string]struct{} } +// NewTruncIndex creates a new TruncIndex and initializes with a list of IDs func NewTruncIndex(ids []string) (idx *TruncIndex) { idx = &TruncIndex{ ids: make(map[string]struct{}), trie: patricia.NewTrie(), } for _, id := range ids { - idx.addId(id) + idx.addID(id) } return } -func (idx *TruncIndex) addId(id string) error { +func (idx *TruncIndex) addID(id string) error { if strings.Contains(id, " ") { - return fmt.Errorf("Illegal character: ' '") + return fmt.Errorf("illegal character: ' '") } if id == "" { return ErrNoID } if _, exists := idx.ids[id]; exists { - return fmt.Errorf("Id already exists: '%s'", id) + return fmt.Errorf("id already exists: '%s'", id) } idx.ids[id] = struct{}{} if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted { - return fmt.Errorf("Failed to insert id: %s", id) + return fmt.Errorf("failed to insert id: %s", id) } return nil } +// Add adds a new ID to the TruncIndex func (idx *TruncIndex) Add(id string) error { idx.Lock() defer idx.Unlock() - if err := idx.addId(id); err != nil { + if err := idx.addID(id); err != nil { return err } return nil } +// Delete removes an ID from the TruncIndex. If there are multiple IDs +// with the given prefix, an error is thrown. 
func (idx *TruncIndex) Delete(id string) error { idx.Lock() defer idx.Unlock() if _, exists := idx.ids[id]; !exists || id == "" { - return fmt.Errorf("No such id: '%s'", id) + return fmt.Errorf("no such id: '%s'", id) } delete(idx.ids, id) if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted { - return fmt.Errorf("No such id: '%s'", id) + return fmt.Errorf("no such id: '%s'", id) } return nil } +// Get retrieves an ID from the TruncIndex. If there are multiple IDs +// with the given prefix, an error is thrown. func (idx *TruncIndex) Get(s string) (string, error) { idx.RLock() defer idx.RUnlock() @@ -90,17 +98,17 @@ func (idx *TruncIndex) Get(s string) (string, error) { if id != "" { // we haven't found the ID if there are two or more IDs id = "" - return fmt.Errorf("we've found two entries") + return errDuplicateID } id = string(prefix) return nil } if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil { - return "", fmt.Errorf("No such id: %s", s) + return "", fmt.Errorf("no such id: %s", s) } if id != "" { return id, nil } - return "", fmt.Errorf("No such id: %s", s) + return "", fmt.Errorf("no such id: %s", s) } diff --git a/pkg/units/MAINTAINERS b/pkg/units/MAINTAINERS index 68a97d2fc2..96abeae570 100644 --- a/pkg/units/MAINTAINERS +++ b/pkg/units/MAINTAINERS @@ -1,2 +1,2 @@ -Michael Crosby (@crosbymichael) Victor Vieux (@vieux) +Jessie Frazelle (@jfrazelle) diff --git a/pkg/units/size.go b/pkg/units/size.go index ea39bbddf7..264f388225 100644 --- a/pkg/units/size.go +++ b/pkg/units/size.go @@ -10,6 +10,7 @@ import ( // See: http://en.wikipedia.org/wiki/Binary_prefix const ( // Decimal + KB = 1000 MB = 1000 * KB GB = 1000 * MB @@ -17,6 +18,7 @@ const ( PB = 1000 * TB // Binary + KiB = 1024 MiB = 1024 * KiB GiB = 1024 * MiB @@ -32,18 +34,26 @@ var ( sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`) ) -var unitAbbrs = [...]string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} +var decimapAbbrs = []string{"B", 
"kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} +var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} // HumanSize returns a human-readable approximation of a size // using SI standard (eg. "44kB", "17MB") func HumanSize(size int64) string { + return intToString(float64(size), 1000.0, decimapAbbrs) +} + +func BytesSize(size float64) string { + return intToString(size, 1024.0, binaryAbbrs) +} + +func intToString(size, unit float64, _map []string) string { i := 0 - sizef := float64(size) - for sizef >= 1000.0 { - sizef = sizef / 1000.0 + for size >= unit { + size = size / unit i++ } - return fmt.Sprintf("%.4g %s", sizef, unitAbbrs[i]) + return fmt.Sprintf("%.4g %s", size, _map[i]) } // FromHumanSize returns an integer from a human-readable specification of a @@ -52,7 +62,7 @@ func FromHumanSize(size string) (int64, error) { return parseSize(size, decimalMap) } -// Parses a human-readable string representing an amount of RAM +// RAMInBytes parses a human-readable string representing an amount of RAM // in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and // returns the number of bytes, or -1 if the string is unparseable. // Units are case-insensitive, and the 'b' suffix is optional. 
@@ -64,7 +74,7 @@ func RAMInBytes(size string) (int64, error) { func parseSize(sizeStr string, uMap unitMap) (int64, error) { matches := sizeRegex.FindStringSubmatch(sizeStr) if len(matches) != 3 { - return -1, fmt.Errorf("Invalid size: '%s'", sizeStr) + return -1, fmt.Errorf("invalid size: '%s'", sizeStr) } size, err := strconv.ParseInt(matches[1], 10, 0) diff --git a/pkg/units/size_test.go b/pkg/units/size_test.go index 8dae7e716b..5b329fcf68 100644 --- a/pkg/units/size_test.go +++ b/pkg/units/size_test.go @@ -7,6 +7,16 @@ import ( "testing" ) +func TestBytesSize(t *testing.T) { + assertEquals(t, "1 KiB", BytesSize(1024)) + assertEquals(t, "1 MiB", BytesSize(1024*1024)) + assertEquals(t, "1 MiB", BytesSize(1048576)) + assertEquals(t, "2 MiB", BytesSize(2*MiB)) + assertEquals(t, "3.42 GiB", BytesSize(3.42*GiB)) + assertEquals(t, "5.372 TiB", BytesSize(5.372*TiB)) + assertEquals(t, "2.22 PiB", BytesSize(2.22*PiB)) +} + func TestHumanSize(t *testing.T) { assertEquals(t, "1 kB", HumanSize(1000)) assertEquals(t, "1.024 kB", HumanSize(1024)) diff --git a/pkg/urlutil/git.go b/pkg/urlutil/git.go new file mode 100644 index 0000000000..ba88ddf6e6 --- /dev/null +++ b/pkg/urlutil/git.go @@ -0,0 +1,30 @@ +package urlutil + +import "strings" + +var ( + validPrefixes = []string{ + "git://", + "github.com/", + "git@", + } +) + +// IsGitURL returns true if the provided str is a git repository URL. +func IsGitURL(str string) bool { + if IsURL(str) && strings.HasSuffix(str, ".git") { + return true + } + for _, prefix := range validPrefixes { + if strings.HasPrefix(str, prefix) { + return true + } + } + return false +} + +// IsGitTransport returns true if the provided str is a git transport by inspecting +// the prefix of the string for known protocols used in git. 
+func IsGitTransport(str string) bool { + return IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") +} diff --git a/pkg/urlutil/git_test.go b/pkg/urlutil/git_test.go new file mode 100644 index 0000000000..01dcea7da3 --- /dev/null +++ b/pkg/urlutil/git_test.go @@ -0,0 +1,43 @@ +package urlutil +
+import "testing" +
+var ( + gitUrls = []string{ + "git://github.com/docker/docker", + "git@github.com:docker/docker.git", + "git@bitbucket.org:atlassianlabs/atlassian-docker.git", + "https://github.com/docker/docker.git", + "http://github.com/docker/docker.git", + } + incompleteGitUrls = []string{ + "github.com/docker/docker", + } +) +
+func TestValidGitTransport(t *testing.T) { + for _, url := range gitUrls { + if IsGitTransport(url) == false { + t.Fatalf("%q should be detected as valid Git prefix", url) + } + } +
+ for _, url := range incompleteGitUrls { + if IsGitTransport(url) == true { + t.Fatalf("%q should not be detected as valid Git prefix", url) + } + } +} +
+func TestIsGIT(t *testing.T) { + for _, url := range gitUrls { + if IsGitURL(url) == false { + t.Fatalf("%q should be detected as valid Git url", url) + } + } + for _, url := range incompleteGitUrls { + if IsGitURL(url) == false { + t.Fatalf("%q should be detected as valid Git url", url) + } + } +} diff --git a/pkg/urlutil/url.go b/pkg/urlutil/url.go new file mode 100644 index 0000000000..eeae56efe7 --- /dev/null +++ b/pkg/urlutil/url.go @@ -0,0 +1,19 @@ +package urlutil +
+import "strings" +
+var validUrlPrefixes = []string{ + "http://", + "https://", +} +
+// IsURL returns true if the provided str is a valid URL by doing +// a simple check for the transport of the url. 
+func IsURL(str string) bool { + for _, prefix := range validUrlPrefixes { + if strings.HasPrefix(str, prefix) { + return true + } + } + return false +} diff --git a/pkg/version/version.go b/pkg/version/version.go index 6a7d63544b..cc802a654c 100644 --- a/pkg/version/version.go +++ b/pkg/version/version.go @@ -5,53 +5,59 @@ import ( "strings" ) +// Version provides utility methods for comparing versions. type Version string -func (me Version) compareTo(other Version) int { +func (v Version) compareTo(other Version) int { var ( - meTab = strings.Split(string(me), ".") + currTab = strings.Split(string(v), ".") otherTab = strings.Split(string(other), ".") ) - max := len(meTab) + max := len(currTab) if len(otherTab) > max { max = len(otherTab) } for i := 0; i < max; i++ { - var meInt, otherInt int + var currInt, otherInt int - if len(meTab) > i { - meInt, _ = strconv.Atoi(meTab[i]) + if len(currTab) > i { + currInt, _ = strconv.Atoi(currTab[i]) } if len(otherTab) > i { otherInt, _ = strconv.Atoi(otherTab[i]) } - if meInt > otherInt { + if currInt > otherInt { return 1 } - if otherInt > meInt { + if otherInt > currInt { return -1 } } return 0 } -func (me Version) LessThan(other Version) bool { - return me.compareTo(other) == -1 +// LessThan checks if a version is less than another version +func (v Version) LessThan(other Version) bool { + return v.compareTo(other) == -1 } -func (me Version) LessThanOrEqualTo(other Version) bool { - return me.compareTo(other) <= 0 +// LessThanOrEqualTo checks if a version is less than or equal to another +func (v Version) LessThanOrEqualTo(other Version) bool { + return v.compareTo(other) <= 0 } -func (me Version) GreaterThan(other Version) bool { - return me.compareTo(other) == 1 +// GreaterThan checks if a version is greater than another one +func (v Version) GreaterThan(other Version) bool { + return v.compareTo(other) == 1 } -func (me Version) GreaterThanOrEqualTo(other Version) bool { - return me.compareTo(other) >= 0 +// 
GreaterThanOrEqualTo checks if a version is greater than or equal to another +func (v Version) GreaterThanOrEqualTo(other Version) bool { + return v.compareTo(other) >= 0 } -func (me Version) Equal(other Version) bool { - return me.compareTo(other) == 0 +// Equal checks if a version is equal to another +func (v Version) Equal(other Version) bool { + return v.compareTo(other) == 0 } diff --git a/hack/CONTRIBUTORS.md b/project/CONTRIBUTORS.md similarity index 100% rename from hack/CONTRIBUTORS.md rename to project/CONTRIBUTORS.md diff --git a/project/GOVERNANCE.md b/project/GOVERNANCE.md new file mode 100644 index 0000000000..52a8bf05d6 --- /dev/null +++ b/project/GOVERNANCE.md @@ -0,0 +1,17 @@ +# Docker Governance Advisory Board Meetings +
+In the spirit of openness, Docker created a Governance Advisory Board, and committed to make all materials and notes from the meetings of this group public. +All output from the meetings should be considered proposals only, and are subject to the review and approval of the community and the project leadership. 
+ +The materials from the first Docker Governance Advisory Board meeting, held on October 28, 2014, are available at +[Google Docs Folder](http://goo.gl/Alfj8r) + +These include: + +* First Meeting Notes +* DGAB Charter +* Presentation 1: Introductory Presentation, including State of The Project +* Presentation 2: Overall Contribution Structure/Docker Project Core Proposal +* Presentation 3: Long Term Roadmap/Statement of Direction + + diff --git a/hack/MAINTAINERS b/project/MAINTAINERS similarity index 100% rename from hack/MAINTAINERS rename to project/MAINTAINERS diff --git a/hack/MAINTAINERS.md b/project/MAINTAINERS.md similarity index 100% rename from hack/MAINTAINERS.md rename to project/MAINTAINERS.md diff --git a/hack/PACKAGERS.md b/project/PACKAGERS.md similarity index 97% rename from hack/PACKAGERS.md rename to project/PACKAGERS.md index 265f7d676b..ae3d7dfddd 100644 --- a/hack/PACKAGERS.md +++ b/project/PACKAGERS.md @@ -162,6 +162,12 @@ SELinux, you will need to use the `selinux` build tag: export DOCKER_BUILDTAGS='selinux' ``` +If your version of btrfs-progs is < 3.16.1 (also called btrfs-tools), then you +will need the following tag to not check for btrfs version headers: +```bash +export DOCKER_BUILDTAGS='btrfs_noversion' +``` + There are build tags for disabling graphdrivers as well. By default, support for all graphdrivers are built in. 
@@ -267,6 +273,7 @@ installed and available at runtime: * iptables version 1.4 or later * procps (or similar provider of a "ps" executable) +* e2fsprogs version 1.4.12 or later (in use: mkfs.ext4, mkfs.xfs, tune2fs) * XZ Utils version 4.9 or later * a [properly mounted](https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount) diff --git a/hack/PRINCIPLES.md b/project/PRINCIPLES.md similarity index 100% rename from hack/PRINCIPLES.md rename to project/PRINCIPLES.md diff --git a/hack/README.md b/project/README.md similarity index 100% rename from hack/README.md rename to project/README.md diff --git a/hack/RELEASE-CHECKLIST.md b/project/RELEASE-CHECKLIST.md similarity index 100% rename from hack/RELEASE-CHECKLIST.md rename to project/RELEASE-CHECKLIST.md diff --git a/project/ROADMAP.md b/project/ROADMAP.md new file mode 100644 index 0000000000..bee2ea83a6 --- /dev/null +++ b/project/ROADMAP.md @@ -0,0 +1,43 @@ +# Docker: Statement of Direction + +This document is a high-level overview of where we want to take Docker. +It is a curated selection of planned improvements which are either important, difficult, or both. + +For a more complete view of planned and requested improvements, see [the Github issues](https://github.com/docker/docker/issues). + +To suggest changes to the roadmap, including additions, please write the change as if it were already in effect, and make a pull request. + + +## Orchestration + +Orchestration touches on several aspects of multi-container applications.  These include provisioning hosts with the Docker daemon, organizing and maintaining multiple Docker hosts as a cluster, composing an application using multiple containers, and handling the networking between the containers across the hosts. + +Today, users accomplish this using a combination of glue scripts and various tools, like Shipper, Deis, Pipeworks, etc. 
+ +We want the Docker API to support all aspects of orchestration natively, so that these tools can cleanly and seamlessly integrate into the Docker user experience, and remain interoperable with each other. + +## Networking + +The current Docker networking model works for communication between containers all residing on the same host.  Since Docker applications in production are made up of many containers deployed across multiple hosts (and sometimes multiple data centers), Docker’s networking model will evolve to accommodate this.  An aspect of this evolution includes providing a Networking API to enable alternative implementations. + +## Storage + +Currently, stateful Docker containers are pinned to specific hosts during their lifetime.  To support additional resiliency, capacity management, and load balancing we want to enable live stateful containers to dynamically migrate between hosts.  While the Docker Project will provide a “batteries included” implementation for a great out-of-box experience, we will also provide an API for alternative implementations. + +## Microsoft Windows + +The next Microsoft Windows Server will ship with primitives to support container-based process isolation and resource management.  The Docker Project will guide contributors and maintainers developing native Microsoft versions of the Docker Remote API client and Docker daemon to take advantage of these primitives. + +## Provenance + +When assembling Docker applications we want users to be confident that images they didn’t create themselves are safe to use and build upon.  Provenance gives users the capability to digitally verify the inputs and processes constituting an image’s origins and lifecycle events. + +## Plugin API + +We want Docker to run everywhere, and to integrate with every devops tool. Those are ambitious goals, and the only way to reach them is with the Docker community. 
For the community to participate fully, we need an API which allows Docker to be deeply and easily customized. + +We are working on a plugin API which will make Docker very customization-friendly. We believe it will facilitate the integrations listed above – and many more we didn’t even think about. + +## Multi-Architecture Support + +Our goal is to make Docker run everywhere. However, currently Docker only runs on x86_64 systems. We plan on expanding architecture support, so that Docker containers can be created and used on more architectures, including ARM, Joyent SmartOS, and Microsoft. diff --git a/hack/allmaintainers.sh b/project/allmaintainers.sh similarity index 100% rename from hack/allmaintainers.sh rename to project/allmaintainers.sh diff --git a/hack/dind b/project/dind similarity index 100% rename from hack/dind rename to project/dind diff --git a/hack/generate-authors.sh b/project/generate-authors.sh similarity index 82% rename from hack/generate-authors.sh rename to project/generate-authors.sh index 83f61df373..0994662767 100755 --- a/hack/generate-authors.sh +++ b/project/generate-authors.sh @@ -8,7 +8,7 @@ cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.." { cat <<-'EOH' # This file lists all individuals having contributed content to the repository. - # For how it is generated, see `hack/generate-authors.sh`. + # For how it is generated, see `project/generate-authors.sh`. EOH echo git log --format='%aN <%aE>' | sort -uf diff --git a/hack/getmaintainer.sh b/project/getmaintainer.sh similarity index 100% rename from hack/getmaintainer.sh rename to project/getmaintainer.sh diff --git a/hack/install.sh b/project/install.sh similarity index 92% rename from hack/install.sh rename to project/install.sh index 9652e4672d..8678562460 100755 --- a/hack/install.sh +++ b/project/install.sh @@ -70,22 +70,33 @@ if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then lsb_dist="$(. 
/etc/lsb-release && echo "$DISTRIB_ID")" fi if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then - lsb_dist='Debian' + lsb_dist='debian' fi if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then - lsb_dist='Fedora' + lsb_dist='fedora' +fi +if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then + lsb_dist="$(. /etc/os-release && echo "$ID")" fi +lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')" case "$lsb_dist" in - Fedora) - ( - set -x - $sh_c 'sleep 3; yum -y -q install docker-io' - ) + amzn|fedora) + if [ "$lsb_dist" = 'amzn' ]; then + ( + set -x + $sh_c 'sleep 3; yum -y -q install docker' + ) + else + ( + set -x + $sh_c 'sleep 3; yum -y -q install docker-io' + ) + fi if command_exists docker && [ -e /var/run/docker.sock ]; then ( set -x - $sh_c 'docker run --rm hello-world' + $sh_c 'docker version' ) || true fi your_user=your-user @@ -101,7 +112,7 @@ case "$lsb_dist" in exit 0 ;; - Ubuntu|Debian|LinuxMint) + ubuntu|debian|linuxmint) export DEBIAN_FRONTEND=noninteractive did_apt_get_update= @@ -162,7 +173,7 @@ case "$lsb_dist" in if command_exists docker && [ -e /var/run/docker.sock ]; then ( set -x - $sh_c 'docker run --rm hello-world' + $sh_c 'docker version' ) || true fi your_user=your-user @@ -178,7 +189,7 @@ case "$lsb_dist" in exit 0 ;; - Gentoo) + gentoo) if [ "$url" = "https://test.docker.com/" ]; then echo >&2 echo >&2 ' You appear to be trying to install the latest nightly build in Gentoo.' diff --git a/hack/make.sh b/project/make.sh similarity index 96% rename from hack/make.sh rename to project/make.sh index d6da3057fa..2b3a530ea1 100755 --- a/hack/make.sh +++ b/project/make.sh @@ -96,11 +96,19 @@ fi # Use these flags when compiling the tests and final binary LDFLAGS=' - -w -X '$DOCKER_PKG'/dockerversion.GITCOMMIT "'$GITCOMMIT'" -X '$DOCKER_PKG'/dockerversion.VERSION "'$VERSION'" ' + +if [ -z "$DEBUG" ]; then + LDFLAGS="-w $LDFLAGS" +fi + LDFLAGS_STATIC='-linkmode external' +# Cgo -H windows is incompatible with -linkmode external. 
+if [ "$(go env GOOS)" == 'windows' ]; then + LDFLAGS_STATIC='' +fi EXTLDFLAGS_STATIC='-static' # ORIG_BUILDFLAGS is necessary for the cross target which cannot always build # with options like -race. @@ -215,7 +223,7 @@ bundle() { bundle=$(basename $bundlescript) echo "---> Making bundle: $bundle (in bundles/$VERSION/$bundle)" mkdir -p bundles/$VERSION/$bundle - source $bundlescript $(pwd)/bundles/$VERSION/$bundle + source "$bundlescript" "$(pwd)/bundles/$VERSION/$bundle" } main() { diff --git a/hack/make/.ensure-busybox b/project/make/.ensure-busybox similarity index 100% rename from hack/make/.ensure-busybox rename to project/make/.ensure-busybox diff --git a/hack/make/.ensure-scratch b/project/make/.ensure-scratch similarity index 100% rename from hack/make/.ensure-scratch rename to project/make/.ensure-scratch diff --git a/hack/make/.go-compile-test-dir b/project/make/.go-compile-test-dir similarity index 100% rename from hack/make/.go-compile-test-dir rename to project/make/.go-compile-test-dir diff --git a/hack/make/.validate b/project/make/.validate similarity index 100% rename from hack/make/.validate rename to project/make/.validate diff --git a/hack/make/README.md b/project/make/README.md similarity index 100% rename from hack/make/README.md rename to project/make/README.md diff --git a/project/make/binary b/project/make/binary new file mode 100755 index 0000000000..6b988b1708 --- /dev/null +++ b/project/make/binary @@ -0,0 +1,28 @@ +#!/bin/bash +set -e + +DEST=$1 +BINARY_NAME="docker-$VERSION" +BINARY_EXTENSION= +if [ "$(go env GOOS)" = 'windows' ]; then + BINARY_EXTENSION='.exe' +fi +BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION" + +# Cygdrive paths don't play well with go build -o. 
+if [[ "$(uname -s)" == CYGWIN* ]]; then + DEST=$(cygpath -mw $DEST) +fi + +go build \ + -o "$DEST/$BINARY_FULLNAME" \ + "${BUILDFLAGS[@]}" \ + -ldflags " + $LDFLAGS + $LDFLAGS_STATIC_DOCKER + " \ + ./docker +echo "Created binary: $DEST/$BINARY_FULLNAME" +ln -sf "$BINARY_FULLNAME" "$DEST/docker$BINARY_EXTENSION" + +hash_files "$DEST/$BINARY_FULLNAME" diff --git a/hack/make/cover b/project/make/cover similarity index 100% rename from hack/make/cover rename to project/make/cover diff --git a/hack/make/cross b/project/make/cross similarity index 100% rename from hack/make/cross rename to project/make/cross diff --git a/hack/make/dynbinary b/project/make/dynbinary similarity index 100% rename from hack/make/dynbinary rename to project/make/dynbinary diff --git a/hack/make/dyntest-integration b/project/make/dyntest-integration similarity index 100% rename from hack/make/dyntest-integration rename to project/make/dyntest-integration diff --git a/hack/make/dyntest-unit b/project/make/dyntest-unit similarity index 100% rename from hack/make/dyntest-unit rename to project/make/dyntest-unit diff --git a/hack/make/test-integration b/project/make/test-integration similarity index 100% rename from hack/make/test-integration rename to project/make/test-integration diff --git a/hack/make/test-integration-cli b/project/make/test-integration-cli similarity index 100% rename from hack/make/test-integration-cli rename to project/make/test-integration-cli diff --git a/hack/make/test-unit b/project/make/test-unit similarity index 68% rename from hack/make/test-unit rename to project/make/test-unit index 5040e37d6b..910b887a8e 100644 --- a/hack/make/test-unit +++ b/project/make/test-unit @@ -22,29 +22,31 @@ bundle_test_unit() { if [ -z "$TESTDIRS" ]; then TESTDIRS=$(find_dirs '*_test.go') fi - - if command -v parallel &> /dev/null; then ( - # accomodate parallel to be able to access variables - export SHELL="$BASH" - export HOME="$(mktemp -d)" - mkdir -p "$HOME/.parallel" - touch 
"$HOME/.parallel/ignored_vars" + ( export LDFLAGS="$LDFLAGS $LDFLAGS_STATIC_DOCKER" export TESTFLAGS export HAVE_GO_TEST_COVER export DEST - # some hack to export array variables - export BUILDFLAGS_FILE="$HOME/buildflags_file" - ( IFS=$'\n'; echo "${BUILDFLAGS[*]}" ) > "$BUILDFLAGS_FILE" + if command -v parallel &> /dev/null; then + # accomodate parallel to be able to access variables + export SHELL="$BASH" + export HOME="$(mktemp -d)" + mkdir -p "$HOME/.parallel" + touch "$HOME/.parallel/ignored_vars" - echo "$TESTDIRS" | parallel --jobs "$PARALLEL_JOBS" --halt 2 --env _ "$(dirname "$BASH_SOURCE")/.go-compile-test-dir" - rm -rf "$HOME" - ) else - # aww, no "parallel" available - fall back to boring - for test_dir in $TESTDIRS; do - "$(dirname "$BASH_SOURCE")/.go-compile-test-dir" "$test_dir" - done - fi + # some hack to export array variables + export BUILDFLAGS_FILE="$HOME/buildflags_file" + ( IFS=$'\n'; echo "${BUILDFLAGS[*]}" ) > "$BUILDFLAGS_FILE" + + echo "$TESTDIRS" | parallel --jobs "$PARALLEL_JOBS" --halt 2 --env _ "$(dirname "$BASH_SOURCE")/.go-compile-test-dir" + rm -rf "$HOME" + else + # aww, no "parallel" available - fall back to boring + for test_dir in $TESTDIRS; do + "$(dirname "$BASH_SOURCE")/.go-compile-test-dir" "$test_dir" + done + fi + ) echo "$TESTDIRS" | go_run_test_dir } } diff --git a/hack/make/tgz b/project/make/tgz similarity index 100% rename from hack/make/tgz rename to project/make/tgz diff --git a/hack/make/ubuntu b/project/make/ubuntu similarity index 100% rename from hack/make/ubuntu rename to project/make/ubuntu diff --git a/hack/make/validate-dco b/project/make/validate-dco similarity index 100% rename from hack/make/validate-dco rename to project/make/validate-dco diff --git a/hack/make/validate-gofmt b/project/make/validate-gofmt similarity index 100% rename from hack/make/validate-gofmt rename to project/make/validate-gofmt diff --git a/hack/release.sh b/project/release.sh similarity index 100% rename from hack/release.sh 
rename to project/release.sh diff --git a/hack/stats.sh b/project/stats.sh similarity index 100% rename from hack/stats.sh rename to project/stats.sh diff --git a/hack/vendor.sh b/project/vendor.sh similarity index 91% rename from hack/vendor.sh rename to project/vendor.sh index 7ecb1a5cd4..6ebce73ca7 100755 --- a/hack/vendor.sh +++ b/project/vendor.sh @@ -51,7 +51,9 @@ clone hg code.google.com/p/go.net 84a4013f96e0 clone hg code.google.com/p/gosqlite 74691fb6f837 -clone git github.com/docker/libtrust d273ef2565ca +clone git github.com/docker/libtrust 230dfd18c232 + +clone git github.com/Sirupsen/logrus v0.6.0 # get Go tip's archive/tar, for xattr support and improved performance # TODO after Go 1.4 drops, bump our minimum supported version and drop this vendored dep @@ -64,7 +66,7 @@ if [ "$1" = '--go' ]; then mv tmp-tar src/code.google.com/p/go/src/pkg/archive/tar fi -clone git github.com/docker/libcontainer 8d1d0ba38a7348c5cfdc05aea3be34d75aadc8de +clone git github.com/docker/libcontainer 53eca435e63db58b06cf796d3a9326db5fd42253 # see src/github.com/docker/libcontainer/update-vendor.sh which is the "source of truth" for libcontainer deps (just like this file) rm -rf src/github.com/docker/libcontainer/vendor eval "$(grep '^clone ' src/github.com/docker/libcontainer/update-vendor.sh | grep -v 'github.com/codegangsta/cli')" diff --git a/registry/auth.go b/registry/auth.go index dad58c1636..4276064083 100644 --- a/registry/auth.go +++ b/registry/auth.go @@ -126,8 +126,8 @@ func LoadConfig(rootPath string) (*ConfigFile, error) { return &configFile, err } authConfig.Auth = "" - configFile.Configs[k] = authConfig authConfig.ServerAddress = k + configFile.Configs[k] = authConfig } } return &configFile, nil @@ -229,7 +229,7 @@ func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, e return "", err } if resp.StatusCode == 200 { - status = "Login Succeeded" + return "Login Succeeded", nil } else if resp.StatusCode == 401 { return "", 
fmt.Errorf("Wrong login/password, please try again") } else if resp.StatusCode == 403 { @@ -237,12 +237,11 @@ func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, e return "", fmt.Errorf("Login: Account is not Active. Please check your e-mail for a confirmation link.") } return "", fmt.Errorf("Login: Account is not Active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress) - } else { - return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, resp.StatusCode, resp.Header) } - } else { - return "", fmt.Errorf("Registration: %s", reqBody) + return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, resp.StatusCode, resp.Header) } + return "", fmt.Errorf("Registration: %s", reqBody) + } else if reqStatusCode == 401 { // This case would happen with private registries where /v1/users is // protected, so people can use `docker login` as an auth check. @@ -258,7 +257,7 @@ func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, e return "", err } if resp.StatusCode == 200 { - status = "Login Succeeded" + return "Login Succeeded", nil } else if resp.StatusCode == 401 { return "", fmt.Errorf("Wrong login/password, please try again") } else { diff --git a/registry/endpoint.go b/registry/endpoint.go index d65fd7e8aa..c485a13d8f 100644 --- a/registry/endpoint.go +++ b/registry/endpoint.go @@ -9,14 +9,14 @@ import ( "net/url" "strings" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) // for mocking in unit tests var lookupIP = net.LookupIP // scans string for api version in the URL path. returns the trimmed hostname, if version found, string and API version. 
-func scanForApiVersion(hostname string) (string, APIVersion) { +func scanForAPIVersion(hostname string) (string, APIVersion) { var ( chunks []string apiVersionStr string @@ -77,7 +77,7 @@ func newEndpoint(hostname string, insecureRegistries []string) (*Endpoint, error if !strings.HasPrefix(hostname, "http") { hostname = "https://" + hostname } - trimmedHostname, endpoint.Version = scanForApiVersion(hostname) + trimmedHostname, endpoint.Version = scanForAPIVersion(hostname) endpoint.URL, err = url.Parse(trimmedHostname) if err != nil { return nil, err diff --git a/registry/registry.go b/registry/registry.go index e0285a2336..f3a4a340b5 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -14,7 +14,7 @@ import ( "strings" "time" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/utils" ) @@ -35,15 +35,16 @@ const ( ConnectTimeout ) -func newClient(jar http.CookieJar, roots *x509.CertPool, cert *tls.Certificate, timeout TimeoutType, secure bool) *http.Client { +func newClient(jar http.CookieJar, roots *x509.CertPool, certs []tls.Certificate, timeout TimeoutType, secure bool) *http.Client { tlsConfig := tls.Config{ RootCAs: roots, // Avoid fallback to SSL protocols < TLS1.0 - MinVersion: tls.VersionTLS10, + MinVersion: tls.VersionTLS10, + Certificates: certs, } - if cert != nil { - tlsConfig.Certificates = append(tlsConfig.Certificates, *cert) + if !secure { + tlsConfig.InsecureSkipVerify = true } if !secure { @@ -60,7 +61,9 @@ func newClient(jar http.CookieJar, roots *x509.CertPool, cert *tls.Certificate, case ConnectTimeout: httpTransport.Dial = func(proto string, addr string) (net.Conn, error) { // Set the connect timeout to 5 seconds - conn, err := net.DialTimeout(proto, addr, 5*time.Second) + d := net.Dialer{Timeout: 5 * time.Second, DualStack: true} + + conn, err := d.Dial(proto, addr) if err != nil { return nil, err } @@ -70,7 +73,9 @@ func newClient(jar http.CookieJar, roots *x509.CertPool, cert 
*tls.Certificate, } case ReceiveTimeout: httpTransport.Dial = func(proto string, addr string) (net.Conn, error) { - conn, err := net.Dial(proto, addr) + d := net.Dialer{DualStack: true} + + conn, err := d.Dial(proto, addr) if err != nil { return nil, err } @@ -89,7 +94,7 @@ func newClient(jar http.CookieJar, roots *x509.CertPool, cert *tls.Certificate, func doRequest(req *http.Request, jar http.CookieJar, timeout TimeoutType, secure bool) (*http.Response, *http.Client, error) { var ( pool *x509.CertPool - certs []*tls.Certificate + certs []tls.Certificate ) if secure && req.URL.Scheme == "https" { @@ -132,7 +137,7 @@ func doRequest(req *http.Request, jar http.CookieJar, timeout TimeoutType, secur if err != nil { return nil, nil, err } - certs = append(certs, &cert) + certs = append(certs, cert) } if strings.HasSuffix(f.Name(), ".key") { keyName := f.Name() @@ -154,16 +159,9 @@ func doRequest(req *http.Request, jar http.CookieJar, timeout TimeoutType, secur return res, client, nil } - for i, cert := range certs { - client := newClient(jar, pool, cert, timeout, secure) - res, err := client.Do(req) - // If this is the last cert, otherwise, continue to next cert if 403 or 5xx - if i == len(certs)-1 || err == nil && res.StatusCode != 403 && res.StatusCode < 500 { - return res, client, err - } - } - - return nil, nil, nil + client := newClient(jar, pool, certs, timeout, secure) + res, err := client.Do(req) + return res, client, err } func validateRepositoryName(repositoryName string) error { diff --git a/registry/registry_mock_test.go b/registry/registry_mock_test.go index 50724f0f99..887d2ef6f2 100644 --- a/registry/registry_mock_test.go +++ b/registry/registry_mock_test.go @@ -17,7 +17,7 @@ import ( "github.com/gorilla/mux" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) var ( @@ -230,8 +230,8 @@ func handlerGetImage(w http.ResponseWriter, r *http.Request) { return } writeHeaders(w) - layer_size := len(layer["layer"]) - 
w.Header().Add("X-Docker-Size", strconv.Itoa(layer_size)) + layerSize := len(layer["layer"]) + w.Header().Add("X-Docker-Size", strconv.Itoa(layerSize)) io.WriteString(w, layer[vars["action"]]) } @@ -240,16 +240,16 @@ func handlerPutImage(w http.ResponseWriter, r *http.Request) { return } vars := mux.Vars(r) - image_id := vars["image_id"] + imageID := vars["image_id"] action := vars["action"] - layer, exists := testLayers[image_id] + layer, exists := testLayers[imageID] if !exists { if action != "json" { http.NotFound(w, r) return } layer = make(map[string]string) - testLayers[image_id] = layer + testLayers[imageID] = layer } if checksum := r.Header.Get("X-Docker-Checksum"); checksum != "" { if checksum != layer["checksum_simple"] && checksum != layer["checksum_tarsum"] { @@ -349,9 +349,9 @@ func handlerImages(w http.ResponseWriter, r *http.Request) { return } images := []map[string]string{} - for image_id, layer := range testLayers { + for imageID, layer := range testLayers { image := make(map[string]string) - image["id"] = image_id + image["id"] = imageID image["checksum"] = layer["checksum_tarsum"] image["Tag"] = "latest" images = append(images, image) diff --git a/registry/registry_test.go b/registry/registry_test.go index 1ffb44f313..d24a5f5751 100644 --- a/registry/registry_test.go +++ b/registry/registry_test.go @@ -11,9 +11,12 @@ import ( ) var ( - IMAGE_ID = "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d" - TOKEN = []string{"fake-token"} - REPO = "foo42/bar" + token = []string{"fake-token"} +) + +const ( + imageID = "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d" + REPO = "foo42/bar" ) func spawnTestRegistrySession(t *testing.T) *Session { @@ -43,27 +46,27 @@ func TestPingRegistryEndpoint(t *testing.T) { func TestGetRemoteHistory(t *testing.T) { r := spawnTestRegistrySession(t) - hist, err := r.GetRemoteHistory(IMAGE_ID, makeURL("/v1/"), TOKEN) + hist, err := r.GetRemoteHistory(imageID, makeURL("/v1/"), token) if err != 
nil { t.Fatal(err) } assertEqual(t, len(hist), 2, "Expected 2 images in history") - assertEqual(t, hist[0], IMAGE_ID, "Expected "+IMAGE_ID+"as first ancestry") + assertEqual(t, hist[0], imageID, "Expected "+imageID+"as first ancestry") assertEqual(t, hist[1], "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", "Unexpected second ancestry") } func TestLookupRemoteImage(t *testing.T) { r := spawnTestRegistrySession(t) - found := r.LookupRemoteImage(IMAGE_ID, makeURL("/v1/"), TOKEN) + found := r.LookupRemoteImage(imageID, makeURL("/v1/"), token) assertEqual(t, found, true, "Expected remote lookup to succeed") - found = r.LookupRemoteImage("abcdef", makeURL("/v1/"), TOKEN) + found = r.LookupRemoteImage("abcdef", makeURL("/v1/"), token) assertEqual(t, found, false, "Expected remote lookup to fail") } func TestGetRemoteImageJSON(t *testing.T) { r := spawnTestRegistrySession(t) - json, size, err := r.GetRemoteImageJSON(IMAGE_ID, makeURL("/v1/"), TOKEN) + json, size, err := r.GetRemoteImageJSON(imageID, makeURL("/v1/"), token) if err != nil { t.Fatal(err) } @@ -72,7 +75,7 @@ func TestGetRemoteImageJSON(t *testing.T) { t.Fatal("Expected non-empty json") } - _, _, err = r.GetRemoteImageJSON("abcdef", makeURL("/v1/"), TOKEN) + _, _, err = r.GetRemoteImageJSON("abcdef", makeURL("/v1/"), token) if err == nil { t.Fatal("Expected image not found error") } @@ -80,7 +83,7 @@ func TestGetRemoteImageJSON(t *testing.T) { func TestGetRemoteImageLayer(t *testing.T) { r := spawnTestRegistrySession(t) - data, err := r.GetRemoteImageLayer(IMAGE_ID, makeURL("/v1/"), TOKEN, 0) + data, err := r.GetRemoteImageLayer(imageID, makeURL("/v1/"), token, 0) if err != nil { t.Fatal(err) } @@ -88,7 +91,7 @@ func TestGetRemoteImageLayer(t *testing.T) { t.Fatal("Expected non-nil data result") } - _, err = r.GetRemoteImageLayer("abcdef", makeURL("/v1/"), TOKEN, 0) + _, err = r.GetRemoteImageLayer("abcdef", makeURL("/v1/"), token, 0) if err == nil { t.Fatal("Expected image not found error") 
} @@ -96,14 +99,14 @@ func TestGetRemoteImageLayer(t *testing.T) { func TestGetRemoteTags(t *testing.T) { r := spawnTestRegistrySession(t) - tags, err := r.GetRemoteTags([]string{makeURL("/v1/")}, REPO, TOKEN) + tags, err := r.GetRemoteTags([]string{makeURL("/v1/")}, REPO, token) if err != nil { t.Fatal(err) } assertEqual(t, len(tags), 1, "Expected one tag") - assertEqual(t, tags["latest"], IMAGE_ID, "Expected tag latest to map to "+IMAGE_ID) + assertEqual(t, tags["latest"], imageID, "Expected tag latest to map to "+imageID) - _, err = r.GetRemoteTags([]string{makeURL("/v1/")}, "foo42/baz", TOKEN) + _, err = r.GetRemoteTags([]string{makeURL("/v1/")}, "foo42/baz", token) if err == nil { t.Fatal("Expected error when fetching tags for bogus repo") } @@ -111,11 +114,11 @@ func TestGetRemoteTags(t *testing.T) { func TestGetRepositoryData(t *testing.T) { r := spawnTestRegistrySession(t) - parsedUrl, err := url.Parse(makeURL("/v1/")) + parsedURL, err := url.Parse(makeURL("/v1/")) if err != nil { t.Fatal(err) } - host := "http://" + parsedUrl.Host + "/v1/" + host := "http://" + parsedURL.Host + "/v1/" data, err := r.GetRepositoryData("foo42/bar") if err != nil { t.Fatal(err) @@ -137,7 +140,7 @@ func TestPushImageJSONRegistry(t *testing.T) { Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", } - err := r.PushImageJSONRegistry(imgData, []byte{0x42, 0xdf, 0x0}, makeURL("/v1/"), TOKEN) + err := r.PushImageJSONRegistry(imgData, []byte{0x42, 0xdf, 0x0}, makeURL("/v1/"), token) if err != nil { t.Fatal(err) } @@ -146,7 +149,7 @@ func TestPushImageJSONRegistry(t *testing.T) { func TestPushImageLayerRegistry(t *testing.T) { r := spawnTestRegistrySession(t) layer := strings.NewReader("") - _, _, err := r.PushImageLayerRegistry(IMAGE_ID, layer, makeURL("/v1/"), TOKEN, []byte{}) + _, _, err := r.PushImageLayerRegistry(imageID, layer, makeURL("/v1/"), token, []byte{}) if err != nil { t.Fatal(err) } @@ -180,7 +183,7 @@ func TestResolveRepositoryName(t 
*testing.T) { func TestPushRegistryTag(t *testing.T) { r := spawnTestRegistrySession(t) - err := r.PushRegistryTag("foo42/bar", IMAGE_ID, "stable", makeURL("/v1/"), TOKEN) + err := r.PushRegistryTag("foo42/bar", imageID, "stable", makeURL("/v1/"), token) if err != nil { t.Fatal(err) } diff --git a/registry/session.go b/registry/session.go index 28959967de..4b2f55225f 100644 --- a/registry/session.go +++ b/registry/session.go @@ -3,6 +3,7 @@ package registry import ( "bytes" "crypto/sha256" + // this is required for some certificates _ "crypto/sha512" "encoding/hex" "encoding/json" @@ -16,8 +17,8 @@ import ( "strings" "time" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/httputils" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/pkg/tarsum" "github.com/docker/docker/utils" ) @@ -229,11 +230,7 @@ func (r *Session) GetRemoteTags(registries []string, repository string, token [] } result := make(map[string]string) - rawJSON, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, err - } - if err := json.Unmarshal(rawJSON, &result); err != nil { + if err := json.NewDecoder(res.Body).Decode(&result); err != nil { return nil, err } return result, nil @@ -243,11 +240,11 @@ func (r *Session) GetRemoteTags(registries []string, repository string, token [] func buildEndpointsList(headers []string, indexEp string) ([]string, error) { var endpoints []string - parsedUrl, err := url.Parse(indexEp) + parsedURL, err := url.Parse(indexEp) if err != nil { return nil, err } - var urlScheme = parsedUrl.Scheme + var urlScheme = parsedURL.Scheme // The Registry's URL scheme has to match the Index' for _, ep := range headers { epList := strings.Split(ep, ",") @@ -304,12 +301,8 @@ func (r *Session) GetRepositoryData(remote string) (*RepositoryData, error) { endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", r.indexEndpoint.URL.Scheme, req.URL.Host)) } - checksumsJSON, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, err - } 
remoteChecksums := []*ImgData{} - if err := json.Unmarshal(checksumsJSON, &remoteChecksums); err != nil { + if err := json.NewDecoder(res.Body).Decode(&remoteChecksums); err != nil { return nil, err } @@ -469,7 +462,6 @@ func (r *Session) PushRegistryTag(remote, revision, tag, registry string, token func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) { cleanImgList := []*ImgData{} - if validate { for _, elem := range imgList { if elem.Checksum != "" { @@ -491,43 +483,28 @@ func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.VersionString(1), remote, suffix) log.Debugf("[registry] PUT %s", u) log.Debugf("Image list pushed to index:\n%s", imgListJSON) - req, err := r.reqFactory.NewRequest("PUT", u, bytes.NewReader(imgListJSON)) - if err != nil { - return nil, err + headers := map[string][]string{ + "Content-type": {"application/json"}, + "X-Docker-Token": {"true"}, } - req.Header.Add("Content-type", "application/json") - req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password) - req.ContentLength = int64(len(imgListJSON)) - req.Header.Set("X-Docker-Token", "true") if validate { - req.Header["X-Docker-Endpoints"] = regs + headers["X-Docker-Endpoints"] = regs } - res, _, err := r.doRequest(req) - if err != nil { - return nil, err - } - defer res.Body.Close() - // Redirect if necessary - for res.StatusCode >= 300 && res.StatusCode < 400 { - log.Debugf("Redirected to %s", res.Header.Get("Location")) - req, err = r.reqFactory.NewRequest("PUT", res.Header.Get("Location"), bytes.NewReader(imgListJSON)) - if err != nil { + var res *http.Response + for { + if res, err = r.putImageRequest(u, headers, imgListJSON); err != nil { return nil, err } - req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password) - req.ContentLength = int64(len(imgListJSON)) - req.Header.Set("X-Docker-Token", "true") - if validate { - 
req.Header["X-Docker-Endpoints"] = regs + if !shouldRedirect(res) { + break } - res, _, err := r.doRequest(req) - if err != nil { - return nil, err - } - defer res.Body.Close() + res.Body.Close() + u = res.Header.Get("Location") + log.Debugf("Redirected to %s", u) } + defer res.Body.Close() var tokens, endpoints []string if !validate { @@ -570,6 +547,27 @@ func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate }, nil } +func (r *Session) putImageRequest(u string, headers map[string][]string, body []byte) (*http.Response, error) { + req, err := r.reqFactory.NewRequest("PUT", u, bytes.NewReader(body)) + if err != nil { + return nil, err + } + req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password) + req.ContentLength = int64(len(body)) + for k, v := range headers { + req.Header[k] = v + } + response, _, err := r.doRequest(req) + if err != nil { + return nil, err + } + return response, nil +} + +func shouldRedirect(response *http.Response) bool { + return response.StatusCode >= 300 && response.StatusCode < 400 +} + func (r *Session) SearchRepositories(term string) (*SearchResults, error) { log.Debugf("Index server: %s", r.indexEndpoint) u := r.indexEndpoint.VersionString(1) + "search?q=" + url.QueryEscape(term) @@ -589,12 +587,8 @@ func (r *Session) SearchRepositories(term string) (*SearchResults, error) { if res.StatusCode != 200 { return nil, utils.NewHTTPRequestError(fmt.Sprintf("Unexepected status code %d", res.StatusCode), res) } - rawData, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, err - } result := new(SearchResults) - err = json.Unmarshal(rawData, result) + err = json.NewDecoder(res.Body).Decode(result) return result, err } diff --git a/registry/session_v2.go b/registry/session_v2.go index c0bc19b337..20e9e2ee9c 100644 --- a/registry/session_v2.go +++ b/registry/session_v2.go @@ -8,7 +8,7 @@ import ( "net/url" "strconv" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" 
"github.com/docker/docker/utils" "github.com/gorilla/mux" ) diff --git a/runconfig/config.go b/runconfig/config.go index c00110bf71..ca5c3240b6 100644 --- a/runconfig/config.go +++ b/runconfig/config.go @@ -31,6 +31,7 @@ type Config struct { WorkingDir string Entrypoint []string NetworkDisabled bool + MacAddress string OnBuild []string } @@ -52,6 +53,7 @@ func ContainerConfigFromJob(job *engine.Job) *Config { Image: job.Getenv("Image"), WorkingDir: job.Getenv("WorkingDir"), NetworkDisabled: job.GetenvBool("NetworkDisabled"), + MacAddress: job.Getenv("MacAddress"), } job.GetenvJson("ExposedPorts", &config.ExposedPorts) job.GetenvJson("Volumes", &config.Volumes) diff --git a/runconfig/config_test.go b/runconfig/config_test.go index d94ec4ec55..f856c87f54 100644 --- a/runconfig/config_test.go +++ b/runconfig/config_test.go @@ -9,7 +9,7 @@ import ( ) func parse(t *testing.T, args string) (*Config, *HostConfig, error) { - config, hostConfig, _, err := parseRun(strings.Split(args+" ubuntu bash", " "), nil) + config, hostConfig, _, err := parseRun(strings.Split(args+" ubuntu bash", " ")) return config, hostConfig, err } diff --git a/runconfig/exec.go b/runconfig/exec.go index 07de3e43bc..1ced70a86a 100644 --- a/runconfig/exec.go +++ b/runconfig/exec.go @@ -1,6 +1,8 @@ package runconfig import ( + "fmt" + "github.com/docker/docker/engine" flag "github.com/docker/docker/pkg/mflag" ) @@ -17,21 +19,25 @@ type ExecConfig struct { Cmd []string } -func ExecConfigFromJob(job *engine.Job) *ExecConfig { +func ExecConfigFromJob(job *engine.Job) (*ExecConfig, error) { execConfig := &ExecConfig{ - User: job.Getenv("User"), - Privileged: job.GetenvBool("Privileged"), + // TODO(vishh): Expose 'User' once it is supported. + //User: job.Getenv("User"), + // TODO(vishh): Expose 'Privileged' once it is supported. 
+ //Privileged: job.GetenvBool("Privileged"), Tty: job.GetenvBool("Tty"), - Container: job.Getenv("Container"), AttachStdin: job.GetenvBool("AttachStdin"), AttachStderr: job.GetenvBool("AttachStderr"), AttachStdout: job.GetenvBool("AttachStdout"), } - if cmd := job.GetenvList("Cmd"); cmd != nil { - execConfig.Cmd = cmd + cmd := job.GetenvList("Cmd") + if len(cmd) == 0 { + return nil, fmt.Errorf("No exec command specified") } - return execConfig + execConfig.Cmd = cmd + + return execConfig, nil } func ParseExec(cmd *flag.FlagSet, args []string) (*ExecConfig, error) { @@ -46,10 +52,11 @@ func ParseExec(cmd *flag.FlagSet, args []string) (*ExecConfig, error) { return nil, err } parsedArgs := cmd.Args() - if len(parsedArgs) > 1 { - container = cmd.Arg(0) - execCmd = parsedArgs[1:] + if len(parsedArgs) < 2 { + return nil, fmt.Errorf("not enough arguments to create exec command") } + container = cmd.Arg(0) + execCmd = parsedArgs[1:] execConfig := &ExecConfig{ // TODO(vishh): Expose '-u' flag once it is supported. 
diff --git a/runconfig/hostconfig.go b/runconfig/hostconfig.go index ae75434d41..b619e9c31c 100644 --- a/runconfig/hostconfig.go +++ b/runconfig/hostconfig.go @@ -28,6 +28,44 @@ func (n NetworkMode) IsNone() bool { return n == "none" } +type IpcMode string + +// IsPrivate indicates whether container use it's private ipc stack +func (n IpcMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +func (n IpcMode) IsHost() bool { + return n == "host" +} + +func (n IpcMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +func (n IpcMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + case "container": + if len(parts) != 2 || parts[1] == "" { + return false + } + default: + return false + } + return true +} + +func (n IpcMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + type DeviceMapping struct { PathOnHost string PathInContainer string @@ -53,6 +91,7 @@ type HostConfig struct { VolumesFrom []string Devices []DeviceMapping NetworkMode NetworkMode + IpcMode IpcMode CapAdd []string CapDrop []string RestartPolicy RestartPolicy @@ -85,6 +124,7 @@ func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { Privileged: job.GetenvBool("Privileged"), PublishAllPorts: job.GetenvBool("PublishAllPorts"), NetworkMode: NetworkMode(job.Getenv("NetworkMode")), + IpcMode: IpcMode(job.Getenv("IpcMode")), } job.GetenvJson("LxcConf", &hostConfig.LxcConf) diff --git a/runconfig/merge.go b/runconfig/merge.go index 64950bf625..9bc4748446 100644 --- a/runconfig/merge.go +++ b/runconfig/merge.go @@ -3,8 +3,8 @@ package runconfig import ( "strings" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/nat" - "github.com/docker/docker/pkg/log" ) func Merge(userConf, imageConf *Config) error { diff --git a/runconfig/parse.go b/runconfig/parse.go index 
43976f604b..0d682f35d3 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -10,7 +10,6 @@ import ( "github.com/docker/docker/opts" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/parsers" - "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/units" "github.com/docker/docker/utils" ) @@ -24,7 +23,7 @@ var ( ErrConflictHostNetworkAndLinks = fmt.Errorf("Conflicting options: --net=host can't be used with links. This would result in undefined behavior.") ) -func Parse(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) { +func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSet, error) { var ( // FIXME: use utils.ListOpts for attach and volumes? flAttach = opts.NewListOpts(opts.ValidateAttach) @@ -59,21 +58,23 @@ func Parse(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") flCpuset = cmd.String([]string{"-cpuset"}, "", "CPUs in which to allow execution (0-3, 0,1)") flNetMode = cmd.String([]string{"-net"}, "bridge", "Set the Network mode for the container\n'bridge': creates a new network stack for the container on the docker bridge\n'none': no networking for this container\n'container:': reuses another container network stack\n'host': use the host network stack inside the container. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure.") + flMacAddress = cmd.String([]string{"-mac-address"}, "", "Container MAC address (e.g. 92:d0:c6:0a:29:33)") + flIpcMode = cmd.String([]string{"-ipc"}, "", "Default is to create a private IPC namespace (POSIX SysV IPC) for the container\n'container:': reuses another container shared memory, semaphores and message queues\n'host': use the host shared memory,semaphores and message queues inside the container. 
Note: the host mode gives the container full access to local shared memory and is therefore considered insecure.") flRestartPolicy = cmd.String([]string{"-restart"}, "", "Restart policy to apply when a container exits (no, on-failure[:max-retry], always)") ) cmd.Var(&flAttach, []string{"a", "-attach"}, "Attach to STDIN, STDOUT or STDERR.") cmd.Var(&flVolumes, []string{"v", "-volume"}, "Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)") cmd.Var(&flLinks, []string{"#link", "-link"}, "Add link to another container in the form of name:alias") - cmd.Var(&flDevices, []string{"-device"}, "Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc)") + cmd.Var(&flDevices, []string{"-device"}, "Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)") cmd.Var(&flEnv, []string{"e", "-env"}, "Set environment variables") cmd.Var(&flEnvFile, []string{"-env-file"}, "Read in a line delimited file of environment variables") cmd.Var(&flPublish, []string{"p", "-publish"}, fmt.Sprintf("Publish a container's port to the host\nformat: %s\n(use 'docker port' to see the actual mapping)", nat.PortSpecTemplateFormat)) - cmd.Var(&flExpose, []string{"#expose", "-expose"}, "Expose a port from the container without publishing it to your host") + cmd.Var(&flExpose, []string{"#expose", "-expose"}, "Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host") cmd.Var(&flDns, []string{"#dns", "-dns"}, "Set custom DNS servers") - cmd.Var(&flDnsSearch, []string{"-dns-search"}, "Set custom DNS search domains") + cmd.Var(&flDnsSearch, []string{"-dns-search"}, "Set custom DNS search domains (Use --dns-search=. 
if you don't wish to set the search domain)") cmd.Var(&flExtraHosts, []string{"-add-host"}, "Add a custom host-to-IP mapping (host:ip)") cmd.Var(&flVolumesFrom, []string{"#volumes-from", "-volumes-from"}, "Mount volumes from the specified container(s)") cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "(lxc exec-driver only) Add custom lxc options --lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"") @@ -86,11 +87,6 @@ func Parse(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, return nil, nil, cmd, err } - // Check if the kernel supports memory limit cgroup. - if sysInfo != nil && *flMemoryString != "" && !sysInfo.MemoryLimit { - *flMemoryString = "" - } - // Validate input params if *flWorkingDir != "" && !path.IsAbs(*flWorkingDir) { return nil, nil, cmd, ErrInvalidWorkingDirectory @@ -197,9 +193,24 @@ func Parse(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, if strings.Contains(e, ":") { return nil, nil, cmd, fmt.Errorf("Invalid port format for --expose: %s", e) } - p := nat.NewPort(nat.SplitProtoPort(e)) - if _, exists := ports[p]; !exists { - ports[p] = struct{}{} + //support two formats for expose, original format /[] or /[] + if strings.Contains(e, "-") { + proto, port := nat.SplitProtoPort(e) + //parse the start and end port and create a sequence of ports to expose + parts := strings.Split(port, "-") + start, _ := strconv.Atoi(parts[0]) + end, _ := strconv.Atoi(parts[1]) + for i := start; i <= end; i++ { + p := nat.NewPort(proto, strconv.Itoa(i)) + if _, exists := ports[p]; !exists { + ports[p] = struct{}{} + } + } + } else { + p := nat.NewPort(nat.SplitProtoPort(e)) + if _, exists := ports[p]; !exists { + ports[p] = struct{}{} + } } } @@ -225,6 +236,11 @@ func Parse(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, // parse the '-e' and '--env' after, to allow override envVariables = append(envVariables, flEnv.GetAll()...) 
+ ipcMode := IpcMode(*flIpcMode) + if !ipcMode.Valid() { + return nil, nil, cmd, fmt.Errorf("--ipc: invalid IPC mode: %v", err) + } + netMode, err := parseNetMode(*flNetMode) if err != nil { return nil, nil, cmd, fmt.Errorf("--net: invalid net mode: %v", err) @@ -254,6 +270,7 @@ func Parse(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, Cmd: runCmd, Image: image, Volumes: flVolumes.GetMap(), + MacAddress: *flMacAddress, Entrypoint: entrypoint, WorkingDir: *flWorkingDir, } @@ -271,6 +288,7 @@ func Parse(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, ExtraHosts: flExtraHosts.GetAll(), VolumesFrom: flVolumesFrom.GetAll(), NetworkMode: netMode, + IpcMode: ipcMode, Devices: deviceMappings, CapAdd: flCapAdd.GetAll(), CapDrop: flCapDrop.GetAll(), @@ -278,11 +296,6 @@ func Parse(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, SecurityOpt: flSecurityOpt.GetAll(), } - if sysInfo != nil && flMemory > 0 && !sysInfo.SwapLimit { - //fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. 
Limitation discarded.\n") - config.MemorySwap = -1 - } - // When allocating stdin in attached mode, close stdin at client disconnect if config.OpenStdin && config.AttachStdin { config.StdinOnce = true diff --git a/runconfig/parse_test.go b/runconfig/parse_test.go index e807180d4c..cd90dc3a94 100644 --- a/runconfig/parse_test.go +++ b/runconfig/parse_test.go @@ -6,14 +6,13 @@ import ( flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/parsers" - "github.com/docker/docker/pkg/sysinfo" ) -func parseRun(args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) { +func parseRun(args []string) (*Config, *HostConfig, *flag.FlagSet, error) { cmd := flag.NewFlagSet("run", flag.ContinueOnError) cmd.SetOutput(ioutil.Discard) cmd.Usage = nil - return Parse(cmd, args, sysInfo) + return Parse(cmd, args) } func TestParseLxcConfOpt(t *testing.T) { @@ -34,27 +33,27 @@ func TestParseLxcConfOpt(t *testing.T) { } func TestNetHostname(t *testing.T) { - if _, _, _, err := parseRun([]string{"-h=name", "img", "cmd"}, nil); err != nil { + if _, _, _, err := parseRun([]string{"-h=name", "img", "cmd"}); err != nil { t.Fatalf("Unexpected error: %s", err) } - if _, _, _, err := parseRun([]string{"--net=host", "img", "cmd"}, nil); err != nil { + if _, _, _, err := parseRun([]string{"--net=host", "img", "cmd"}); err != nil { t.Fatalf("Unexpected error: %s", err) } - if _, _, _, err := parseRun([]string{"-h=name", "--net=bridge", "img", "cmd"}, nil); err != nil { + if _, _, _, err := parseRun([]string{"-h=name", "--net=bridge", "img", "cmd"}); err != nil { t.Fatalf("Unexpected error: %s", err) } - if _, _, _, err := parseRun([]string{"-h=name", "--net=none", "img", "cmd"}, nil); err != nil { + if _, _, _, err := parseRun([]string{"-h=name", "--net=none", "img", "cmd"}); err != nil { t.Fatalf("Unexpected error: %s", err) } - if _, _, _, err := parseRun([]string{"-h=name", "--net=host", "img", "cmd"}, nil); err != ErrConflictNetworkHostname { + if _, 
_, _, err := parseRun([]string{"-h=name", "--net=host", "img", "cmd"}); err != ErrConflictNetworkHostname { t.Fatalf("Expected error ErrConflictNetworkHostname, got: %s", err) } - if _, _, _, err := parseRun([]string{"-h=name", "--net=container:other", "img", "cmd"}, nil); err != ErrConflictNetworkHostname { + if _, _, _, err := parseRun([]string{"-h=name", "--net=container:other", "img", "cmd"}); err != ErrConflictNetworkHostname { t.Fatalf("Expected error ErrConflictNetworkHostname, got: %s", err) } } diff --git a/trust/service.go b/trust/service.go index c056ac7191..324a478f10 100644 --- a/trust/service.go +++ b/trust/service.go @@ -4,8 +4,8 @@ import ( "fmt" "time" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" - "github.com/docker/docker/pkg/log" "github.com/docker/libtrust" ) diff --git a/trust/trusts.go b/trust/trusts.go index a3c0f5f548..f5e317e9e3 100644 --- a/trust/trusts.go +++ b/trust/trusts.go @@ -12,7 +12,7 @@ import ( "sync" "time" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/libtrust/trustgraph" ) @@ -182,7 +182,6 @@ func (t *TrustStore) fetch() { go func() { err := t.reload() if err != nil { - // TODO log log.Infof("Reload of trust graph failed: %s", err) } }() diff --git a/utils/http.go b/utils/http.go index c877eefdd2..bcf1865e2e 100644 --- a/utils/http.go +++ b/utils/http.go @@ -5,7 +5,7 @@ import ( "net/http" "strings" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) // VersionInfo is used to model entities which has a version. 
diff --git a/utils/jsonmessage.go b/utils/jsonmessage.go index 3752c997f1..bdc47f0e1d 100644 --- a/utils/jsonmessage.go +++ b/utils/jsonmessage.go @@ -97,7 +97,7 @@ func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { // [2K = erase entire current line fmt.Fprintf(out, "%c[2K\r", 27) endl = "\r" - } else if jm.Progress != nil { //disable progressbar in non-terminal + } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal return nil } if jm.Time != 0 { @@ -109,7 +109,7 @@ func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { if jm.From != "" { fmt.Fprintf(out, "(from %s) ", jm.From) } - if jm.Progress != nil { + if jm.Progress != nil && isTerminal { fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) } else if jm.ProgressMessage != "" { //deprecated fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) diff --git a/utils/tmpdir.go b/utils/tmpdir.go index 921a8f697c..e200f340db 100644 --- a/utils/tmpdir.go +++ b/utils/tmpdir.go @@ -1,12 +1,16 @@ -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd - package utils import ( "os" + "path/filepath" ) // TempDir returns the default directory to use for temporary files. -func TempDir(rootdir string) (string error) { - return os.TempDir(), nil +func TempDir(rootDir string) (string, error) { + var tmpDir string + if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" { + tmpDir = filepath.Join(rootDir, "tmp") + } + err := os.MkdirAll(tmpDir, 0700) + return tmpDir, err } diff --git a/utils/tmpdir_unix.go b/utils/tmpdir_unix.go deleted file mode 100644 index 30d7c3a192..0000000000 --- a/utils/tmpdir_unix.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build darwin dragonfly freebsd linux netbsd openbsd - -package utils - -import ( - "os" - "path/filepath" -) - -// TempDir returns the default directory to use for temporary files. 
-func TempDir(rootDir string) (string, error) { - var tmpDir string - if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" { - tmpDir = filepath.Join(rootDir, "tmp") - } - err := os.MkdirAll(tmpDir, 0700) - return tmpDir, err -} diff --git a/utils/utils.go b/utils/utils.go index 4c65f136aa..8d3b3eb73e 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -18,12 +18,12 @@ import ( "strconv" "strings" "sync" - "syscall" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/log" ) type KeyValuePair struct { @@ -252,14 +252,6 @@ func HashData(src io.Reader) (string, error) { return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil } -// FIXME: this is deprecated by CopyWithTar in archive.go -func CopyDirectory(source, dest string) error { - if output, err := exec.Command("cp", "-ra", source, dest).CombinedOutput(); err != nil { - return fmt.Errorf("Error copy: %s (%s)", err, output) - } - return nil -} - type WriteFlusher struct { sync.Mutex w io.Writer @@ -298,17 +290,7 @@ func NewHTTPRequestError(msg string, res *http.Response) error { } } -func IsURL(str string) bool { - return strings.HasPrefix(str, "http://") || strings.HasPrefix(str, "https://") -} - -func IsGIT(str string) bool { - return strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "github.com/") || strings.HasPrefix(str, "git@github.com:") || (strings.HasSuffix(str, ".git") && IsURL(str)) -} - -var ( - localHostRx = regexp.MustCompile(`(?m)^nameserver 127[^\n]+\n*`) -) +var localHostRx = regexp.MustCompile(`(?m)^nameserver 127[^\n]+\n*`) // RemoveLocalDns looks into the /etc/resolv.conf, // and removes any local nameserver entries. 
@@ -379,7 +361,7 @@ func TestDirectory(templateDir string) (dir string, err error) { return } if templateDir != "" { - if err = CopyDirectory(templateDir, dir); err != nil { + if err = archive.CopyWithTar(templateDir, dir); err != nil { return } } @@ -458,36 +440,6 @@ func ReadSymlinkedDirectory(path string) (string, error) { return realPath, nil } -// TreeSize walks a directory tree and returns its total size in bytes. -func TreeSize(dir string) (size int64, err error) { - data := make(map[uint64]struct{}) - err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, e error) error { - // Ignore directory sizes - if fileInfo == nil { - return nil - } - - s := fileInfo.Size() - if fileInfo.IsDir() || s == 0 { - return nil - } - - // Check inode to handle hard links correctly - inode := fileInfo.Sys().(*syscall.Stat_t).Ino - // inode is not a uint64 on all platforms. Cast it to avoid issues. - if _, exists := data[uint64(inode)]; exists { - return nil - } - // inode is not a uint64 on all platforms. Cast it to avoid issues. - data[uint64(inode)] = struct{}{} - - size += s - - return nil - }) - return -} - // ValidateContextDirectory checks if all the contents of the directory // can be read and returns an error if some files can't be read // symlinks which point to non-existing files don't trigger an error diff --git a/utils/utils_daemon.go b/utils/utils_daemon.go new file mode 100644 index 0000000000..098e227367 --- /dev/null +++ b/utils/utils_daemon.go @@ -0,0 +1,39 @@ +// +build daemon + +package utils + +import ( + "os" + "path/filepath" + "syscall" +) + +// TreeSize walks a directory tree and returns its total size in bytes. 
+func TreeSize(dir string) (size int64, err error) { + data := make(map[uint64]struct{}) + err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, e error) error { + // Ignore directory sizes + if fileInfo == nil { + return nil + } + + s := fileInfo.Size() + if fileInfo.IsDir() || s == 0 { + return nil + } + + // Check inode to handle hard links correctly + inode := fileInfo.Sys().(*syscall.Stat_t).Ino + // inode is not a uint64 on all platforms. Cast it to avoid issues. + if _, exists := data[uint64(inode)]; exists { + return nil + } + // inode is not a uint64 on all platforms. Cast it to avoid issues. + data[uint64(inode)] = struct{}{} + + size += s + + return nil + }) + return +} diff --git a/vendor/src/github.com/Sirupsen/logrus/.gitignore b/vendor/src/github.com/Sirupsen/logrus/.gitignore new file mode 100644 index 0000000000..66be63a005 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/.gitignore @@ -0,0 +1 @@ +logrus diff --git a/vendor/src/github.com/Sirupsen/logrus/.travis.yml b/vendor/src/github.com/Sirupsen/logrus/.travis.yml new file mode 100644 index 0000000000..d5a559f840 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/.travis.yml @@ -0,0 +1,9 @@ +language: go +go: + - 1.2 + - 1.3 + - tip +install: + - go get github.com/stretchr/testify + - go get github.com/stvp/go-udp-testing + - go get github.com/tobi/airbrake-go diff --git a/vendor/src/github.com/Sirupsen/logrus/LICENSE b/vendor/src/github.com/Sirupsen/logrus/LICENSE new file mode 100644 index 0000000000..f090cb42f3 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of 
the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/src/github.com/Sirupsen/logrus/README.md b/vendor/src/github.com/Sirupsen/logrus/README.md new file mode 100644 index 0000000000..01769c723f --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/README.md @@ -0,0 +1,342 @@ +# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) + +Logrus is a structured logger for Go (golang), completely API compatible with +the standard library logger. [Godoc][godoc]. 
**Please note the Logrus API is not +yet stable (pre 1.0), the core API is unlikely change much but please version +control your Logrus to make sure you aren't fetching latest `master` on every +build.** + +Nicely color-coded in development (when a TTY is attached, otherwise just +plain text): + +![Colored](http://i.imgur.com/PY7qMwd.png) + +With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash +or Splunk: + +```json +{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the +ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} + +{"level":"warning","msg":"The group's number increased tremendously!", +"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"A giant walrus appears!", +"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", +"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} + +{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, +"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} +``` + +With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not +attached, the output is compatible with the +[l2met](http://r.32k.io/l2met-introduction) format: + +```text +time="2014-04-20 15:36:23.830442383 -0400 EDT" level="info" msg="A group of walrus emerges from the ocean" animal="walrus" size=10 +time="2014-04-20 15:36:23.830584199 -0400 EDT" level="warning" msg="The group's number increased tremendously!" omg=true number=122 +time="2014-04-20 15:36:23.830596521 -0400 EDT" level="info" msg="A giant walrus appears!" animal="walrus" size=10 +time="2014-04-20 15:36:23.830611837 -0400 EDT" level="info" msg="Tremendously sized cow enters the ocean." animal="walrus" size=9 +time="2014-04-20 15:36:23.830626464 -0400 EDT" level="fatal" msg="The ice breaks!" 
omg=true number=100 +``` + +#### Example + +The simplest way to use Logrus is simply the package-level exported logger: + +```go +package main + +import ( + log "github.com/Sirupsen/logrus" +) + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + }).Info("A walrus appears") +} +``` + +Note that it's completely api-compatible with the stdlib logger, so you can +replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` +and you'll now have the flexibility of Logrus. You can customize it all you +want: + +```go +package main + +import ( + "os" + log "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus/hooks/airbrake" +) + +func init() { + // Log as JSON instead of the default ASCII formatter. + log.SetFormatter(&log.JSONFormatter{}) + + // Use the Airbrake hook to report errors that have Error severity or above to + // an exception tracker. You can create custom hooks, see the Hooks section. + log.AddHook(&logrus_airbrake.AirbrakeHook{}) + + // Output to stderr instead of stdout, could also be a file. + log.SetOutput(os.Stderr) + + // Only log the warning severity or above. + log.SetLevel(log.WarnLevel) +} + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(log.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(log.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") +} +``` + +For more advanced usage such as logging to multiple locations from the same +application, you can also create an instance of the `logrus` Logger: + +```go +package main + +import ( + "github.com/Sirupsen/logrus" +) + +// Create a new instance of the logger. You can have any number of instances. +var log = logrus.New() + +func main() { + // The API for setting attributes is a little different than the package level + // exported logger. See Godoc. 
+ log.Out = os.Stderr + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") +} +``` + +#### Fields + +Logrus encourages careful, structured logging though logging fields instead of +long, unparseable error messages. For example, instead of: `log.Fatalf("Failed +to send event %s to topic %s with key %d")`, you should log the much more +discoverable: + +```go +log.WithFields(log.Fields{ + "event": event, + "topic": topic, + "key": key, +}).Fatal("Failed to send event") +``` + +We've found this API forces you to think about logging in a way that produces +much more useful logging messages. We've been in countless situations where just +a single added field to a log statement that was already there would've saved us +hours. The `WithFields` call is optional. + +In general, with Logrus using any of the `printf`-family functions should be +seen as a hint you should add a field, however, you can still use the +`printf`-family functions with Logrus. + +#### Hooks + +You can add hooks for logging levels. For example to send errors to an exception +tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to +multiple places simultaneously, e.g. syslog. + +```go +// Not the real implementation of the Airbrake hook. Just a simple sample. +import ( + log "github.com/Sirupsen/logrus" +) + +func init() { + log.AddHook(new(AirbrakeHook)) +} + +type AirbrakeHook struct{} + +// `Fire()` takes the entry that the hook is fired for. `entry.Data[]` contains +// the fields for the entry. See the Fields section of the README. +func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error { + err := airbrake.Notify(entry.Data["error"].(error)) + if err != nil { + log.WithFields(log.Fields{ + "source": "airbrake", + "endpoint": airbrake.Endpoint, + }).Info("Failed to send error to Airbrake") + } + + return nil +} + +// `Levels()` returns a slice of `Levels` the hook is fired for. 
+func (hook *AirbrakeHook) Levels() []log.Level { + return []log.Level{ + log.ErrorLevel, + log.FatalLevel, + log.PanicLevel, + } +} +``` + +Logrus comes with built-in hooks. Add those, or your custom hook, in `init`: + +```go +import ( + log "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus/hooks/airbrake" + "github.com/Sirupsen/logrus/hooks/syslog" +) + +func init() { + log.AddHook(new(logrus_airbrake.AirbrakeHook)) + log.AddHook(logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")) +} +``` + +* [`github.com/Sirupsen/logrus/hooks/airbrake`](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) + Send errors to an exception tracking service compatible with the Airbrake API. + Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. + +* [`github.com/Sirupsen/logrus/hooks/papertrail`](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) + Send errors to the Papertrail hosted logging service via UDP. + +* [`github.com/Sirupsen/logrus/hooks/syslog`](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) + Send errors to remote syslog server. + Uses standard library `log/syslog` behind the scenes. + +* [`github.com/nubo/hiprus`](https://github.com/nubo/hiprus) + Send errors to a channel in hipchat. + +#### Level logging + +Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. + +```go +log.Debug("Useful debugging information.") +log.Info("Something noteworthy happened!") +log.Warn("You should probably take a look at this.") +log.Error("Something failed but I'm not quitting.") +// Calls os.Exit(1) after logging +log.Fatal("Bye.") +// Calls panic() after logging +log.Panic("I'm bailing.") +``` + +You can set the logging level on a `Logger`, then it will only log entries with +that severity or anything above it: + +```go +// Will log anything that is info or above (warn, error, fatal, panic). Default. 
+log.SetLevel(log.InfoLevel) +``` + +It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose +environment if your application has that. + +#### Entries + +Besides the fields added with `WithField` or `WithFields` some fields are +automatically added to all logging events: + +1. `time`. The timestamp when the entry was created. +2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after + the `AddFields` call. E.g. `Failed to send event.` +3. `level`. The logging level. E.g. `info`. + +#### Environments + +Logrus has no notion of environment. + +If you wish for hooks and formatters to only be used in specific environments, +you should handle that yourself. For example, if your application has a global +variable `Environment`, which is a string representation of the environment you +could do: + +```go +import ( + log "github.com/Sirupsen/logrus" +) + +init() { + // do something here to set environment depending on an environment variable + // or command-line flag + if Environment == "production" { + log.SetFormatter(logrus.JSONFormatter) + } else { + // The TextFormatter is default, you don't actually have to do this. + log.SetFormatter(logrus.TextFormatter) + } +} +``` + +This configuration is how `logrus` was intended to be used, but JSON in +production is mostly only useful if you do log aggregation with tools like +Splunk or Logstash. + +#### Formatters + +The built-in logging formatters are: + +* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise + without colors. + * *Note:* to force colored output when there is no TTY, set the `ForceColors` + field to `true`. To force no colored output even if there is a TTY set the + `DisableColors` field to `true` +* `logrus.JSONFormatter`. Logs fields as JSON. + +Third party logging formatters: + +* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. 
+ +You can define your formatter by implementing the `Formatter` interface, +requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a +`Fields` type (`map[string]interface{}`) with all your fields as well as the +default ones (see Entries section above): + +```go +type MyJSONFormatter struct { +} + +log.SetFormatter(new(MyJSONFormatter)) + +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + // Note this doesn't include Time, Level and Message which are available on + // the Entry. Consult `godoc` on information about those fields or read the + // source of the official loggers. + serialized, err := json.Marshal(entry.Data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} +``` + +#### Rotation + +Log rotation is not provided with Logrus. Log rotation should be done by an +external program (like `logrotated(8)`) that can compress and delete old log +entries. It should not be a feature of the application-level logger. + + +[godoc]: https://godoc.org/github.com/Sirupsen/logrus diff --git a/vendor/src/github.com/Sirupsen/logrus/entry.go b/vendor/src/github.com/Sirupsen/logrus/entry.go new file mode 100644 index 0000000000..a77c4b0ed1 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/entry.go @@ -0,0 +1,248 @@ +package logrus + +import ( + "bytes" + "fmt" + "io" + "os" + "time" +) + +// An entry is the final or intermediate Logrus logging entry. It contains all +// the fields passed with WithField{,s}. It's finally logged when Debug, Info, +// Warn, Error, Fatal or Panic is called on it. These objects can be reused and +// passed around as much as you wish to avoid field duplication. +type Entry struct { + Logger *Logger + + // Contains all the fields set by the user. 
+ Data Fields + + // Time at which the log entry was created + Time time.Time + + // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic + Level Level + + // Message passed to Debug, Info, Warn, Error, Fatal or Panic + Message string +} + +func NewEntry(logger *Logger) *Entry { + return &Entry{ + Logger: logger, + // Default is three fields, give a little extra room + Data: make(Fields, 5), + } +} + +// Returns a reader for the entry, which is a proxy to the formatter. +func (entry *Entry) Reader() (*bytes.Buffer, error) { + serialized, err := entry.Logger.Formatter.Format(entry) + return bytes.NewBuffer(serialized), err +} + +// Returns the string representation from the reader and ultimately the +// formatter. +func (entry *Entry) String() (string, error) { + reader, err := entry.Reader() + if err != nil { + return "", err + } + + return reader.String(), err +} + +// Add a single field to the Entry. +func (entry *Entry) WithField(key string, value interface{}) *Entry { + return entry.WithFields(Fields{key: value}) +} + +// Add a map of fields to the Entry. 
+func (entry *Entry) WithFields(fields Fields) *Entry { + data := Fields{} + for k, v := range entry.Data { + data[k] = v + } + for k, v := range fields { + data[k] = v + } + return &Entry{Logger: entry.Logger, Data: data} +} + +func (entry *Entry) log(level Level, msg string) { + entry.Time = time.Now() + entry.Level = level + entry.Message = msg + + if err := entry.Logger.Hooks.Fire(level, entry); err != nil { + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + entry.Logger.mu.Unlock() + } + + reader, err := entry.Reader() + if err != nil { + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + entry.Logger.mu.Unlock() + } + + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + + _, err = io.Copy(entry.Logger.Out, reader) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here. + if level <= PanicLevel { + panic(reader.String()) + } +} + +func (entry *Entry) Debug(args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.log(DebugLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Print(args ...interface{}) { + entry.Info(args...) 
+} + +func (entry *Entry) Info(args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.log(InfoLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warn(args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.log(WarnLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Error(args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.log(ErrorLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Fatal(args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.log(FatalLevel, fmt.Sprint(args...)) + } + os.Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.log(PanicLevel, fmt.Sprint(args...)) + } + panic(fmt.Sprint(args...)) +} + +// Entry Printf family functions + +func (entry *Entry) Debugf(format string, args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.Debug(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.Info(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) +} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.Warn(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) 
+} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.Error(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.Fatal(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.Panic(fmt.Sprintf(format, args...)) + } +} + +// Entry Println family functions + +func (entry *Entry) Debugln(args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.Debug(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Infoln(args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.Info(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) +} + +func (entry *Entry) Warnln(args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.Warn(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Warningln(args ...interface{}) { + entry.Warnln(args...) +} + +func (entry *Entry) Errorln(args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.Error(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Fatalln(args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.Fatal(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Panicln(args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.Panic(entry.sprintlnn(args...)) + } +} + +// Sprintlnn => Sprint no newline. This is to get the behavior of how +// fmt.Sprintln where spaces are always added between operands, regardless of +// their type. Instead of vendoring the Sprintln implementation to spare a +// string allocation, we do the simplest thing. +func (entry *Entry) sprintlnn(args ...interface{}) string { + msg := fmt.Sprintln(args...) 
+ return msg[:len(msg)-1] +} diff --git a/vendor/src/github.com/Sirupsen/logrus/examples/basic/basic.go b/vendor/src/github.com/Sirupsen/logrus/examples/basic/basic.go new file mode 100644 index 0000000000..35945509c3 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/examples/basic/basic.go @@ -0,0 +1,29 @@ +package main + +import ( + "github.com/Sirupsen/logrus" +) + +var log = logrus.New() + +func init() { + log.Formatter = new(logrus.JSONFormatter) + log.Formatter = new(logrus.TextFormatter) // default +} + +func main() { + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") +} diff --git a/vendor/src/github.com/Sirupsen/logrus/examples/hook/hook.go b/vendor/src/github.com/Sirupsen/logrus/examples/hook/hook.go new file mode 100644 index 0000000000..42e7a4c982 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/examples/hook/hook.go @@ -0,0 +1,35 @@ +package main + +import ( + "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus/hooks/airbrake" + "github.com/tobi/airbrake-go" +) + +var log = logrus.New() + +func init() { + log.Formatter = new(logrus.TextFormatter) // default + log.Hooks.Add(new(logrus_airbrake.AirbrakeHook)) +} + +func main() { + airbrake.Endpoint = "https://exceptions.whatever.com/notifier_api/v2/notices.xml" + airbrake.ApiKey = "whatever" + airbrake.Environment = "production" + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") +} diff --git 
a/vendor/src/github.com/Sirupsen/logrus/exported.go b/vendor/src/github.com/Sirupsen/logrus/exported.go new file mode 100644 index 0000000000..0e2d59f19a --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/exported.go @@ -0,0 +1,177 @@ +package logrus + +import ( + "io" +) + +var ( + // std is the name of the standard logger in stdlib `log` + std = New() +) + +// SetOutput sets the standard logger output. +func SetOutput(out io.Writer) { + std.mu.Lock() + defer std.mu.Unlock() + std.Out = out +} + +// SetFormatter sets the standard logger formatter. +func SetFormatter(formatter Formatter) { + std.mu.Lock() + defer std.mu.Unlock() + std.Formatter = formatter +} + +// SetLevel sets the standard logger level. +func SetLevel(level Level) { + std.mu.Lock() + defer std.mu.Unlock() + std.Level = level +} + +// AddHook adds a hook to the standard logger hooks. +func AddHook(hook Hook) { + std.mu.Lock() + defer std.mu.Unlock() + std.Hooks.Add(hook) +} + +// WithField creates an entry from the standard logger and adds a field to +// it. If you want multiple fields, use `WithFields`. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithField(key string, value interface{}) *Entry { + return std.WithField(key, value) +} + +// WithFields creates an entry from the standard logger and adds multiple +// fields to it. This is simply a helper for `WithField`, invoking it +// once for each field. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithFields(fields Fields) *Entry { + return std.WithFields(fields) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + std.Debug(args...) +} + +// Print logs a message at level Info on the standard logger. +func Print(args ...interface{}) { + std.Print(args...) +} + +// Info logs a message at level Info on the standard logger. 
+func Info(args ...interface{}) { + std.Info(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + std.Warn(args...) +} + +// Warning logs a message at level Warn on the standard logger. +func Warning(args ...interface{}) { + std.Warning(args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + std.Error(args...) +} + +// Panic logs a message at level Panic on the standard logger. +func Panic(args ...interface{}) { + std.Panic(args...) +} + +// Fatal logs a message at level Fatal on the standard logger. +func Fatal(args ...interface{}) { + std.Fatal(args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + std.Debugf(format, args...) +} + +// Printf logs a message at level Info on the standard logger. +func Printf(format string, args ...interface{}) { + std.Printf(format, args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + std.Infof(format, args...) +} + +// Warnf logs a message at level Warn on the standard logger. +func Warnf(format string, args ...interface{}) { + std.Warnf(format, args...) +} + +// Warningf logs a message at level Warn on the standard logger. +func Warningf(format string, args ...interface{}) { + std.Warningf(format, args...) +} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + std.Errorf(format, args...) +} + +// Panicf logs a message at level Panic on the standard logger. +func Panicf(format string, args ...interface{}) { + std.Panicf(format, args...) +} + +// Fatalf logs a message at level Fatal on the standard logger. +func Fatalf(format string, args ...interface{}) { + std.Fatalf(format, args...) +} + +// Debugln logs a message at level Debug on the standard logger. 
+func Debugln(args ...interface{}) { + std.Debugln(args...) +} + +// Println logs a message at level Info on the standard logger. +func Println(args ...interface{}) { + std.Println(args...) +} + +// Infoln logs a message at level Info on the standard logger. +func Infoln(args ...interface{}) { + std.Infoln(args...) +} + +// Warnln logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + std.Warnln(args...) +} + +// Warningln logs a message at level Warn on the standard logger. +func Warningln(args ...interface{}) { + std.Warningln(args...) +} + +// Errorln logs a message at level Error on the standard logger. +func Errorln(args ...interface{}) { + std.Errorln(args...) +} + +// Panicln logs a message at level Panic on the standard logger. +func Panicln(args ...interface{}) { + std.Panicln(args...) +} + +// Fatalln logs a message at level Fatal on the standard logger. +func Fatalln(args ...interface{}) { + std.Fatalln(args...) +} diff --git a/vendor/src/github.com/Sirupsen/logrus/formatter.go b/vendor/src/github.com/Sirupsen/logrus/formatter.go new file mode 100644 index 0000000000..74c49a0e0e --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/formatter.go @@ -0,0 +1,44 @@ +package logrus + +// The Formatter interface is used to implement a custom Formatter. It takes an +// `Entry`. It exposes all the fields, including the default ones: +// +// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. +// * `entry.Data["time"]`. The timestamp. +// * `entry.Data["level"]. The level the entry was logged at. +// +// Any additional fields added with `WithField` or `WithFields` are also in +// `entry.Data`. Format is expected to return an array of bytes which are then +// logged to `logger.Out`. +type Formatter interface { + Format(*Entry) ([]byte, error) +} + +// This is to not silently overwrite `time`, `msg` and `level` fields when +// dumping it. 
If this code wasn't there doing: +// +// logrus.WithField("level", 1).Info("hello") +// +// Would just silently drop the user provided level. Instead with this code +// it'll logged as: +// +// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} +// +// It's not exported because it's still using Data in an opinionated way. It's to +// avoid code duplication between the two default formatters. +func prefixFieldClashes(entry *Entry) { + _, ok := entry.Data["time"] + if ok { + entry.Data["fields.time"] = entry.Data["time"] + } + + _, ok = entry.Data["msg"] + if ok { + entry.Data["fields.msg"] = entry.Data["msg"] + } + + _, ok = entry.Data["level"] + if ok { + entry.Data["fields.level"] = entry.Data["level"] + } +} diff --git a/vendor/src/github.com/Sirupsen/logrus/formatter_bench_test.go b/vendor/src/github.com/Sirupsen/logrus/formatter_bench_test.go new file mode 100644 index 0000000000..77989da629 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/formatter_bench_test.go @@ -0,0 +1,88 @@ +package logrus + +import ( + "testing" + "time" +) + +// smallFields is a small size data set for benchmarking +var smallFields = Fields{ + "foo": "bar", + "baz": "qux", + "one": "two", + "three": "four", +} + +// largeFields is a large size data set for benchmarking +var largeFields = Fields{ + "foo": "bar", + "baz": "qux", + "one": "two", + "three": "four", + "five": "six", + "seven": "eight", + "nine": "ten", + "eleven": "twelve", + "thirteen": "fourteen", + "fifteen": "sixteen", + "seventeen": "eighteen", + "nineteen": "twenty", + "a": "b", + "c": "d", + "e": "f", + "g": "h", + "i": "j", + "k": "l", + "m": "n", + "o": "p", + "q": "r", + "s": "t", + "u": "v", + "w": "x", + "y": "z", + "this": "will", + "make": "thirty", + "entries": "yeah", +} + +func BenchmarkSmallTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields) +} + +func BenchmarkLargeTextFormatter(b *testing.B) { + doBenchmark(b, 
&TextFormatter{DisableColors: true}, largeFields) +} + +func BenchmarkSmallColoredTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields) +} + +func BenchmarkLargeColoredTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields) +} + +func BenchmarkSmallJSONFormatter(b *testing.B) { + doBenchmark(b, &JSONFormatter{}, smallFields) +} + +func BenchmarkLargeJSONFormatter(b *testing.B) { + doBenchmark(b, &JSONFormatter{}, largeFields) +} + +func doBenchmark(b *testing.B, formatter Formatter, fields Fields) { + entry := &Entry{ + Time: time.Time{}, + Level: InfoLevel, + Message: "message", + Data: fields, + } + var d []byte + var err error + for i := 0; i < b.N; i++ { + d, err = formatter.Format(entry) + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(d))) + } +} diff --git a/vendor/src/github.com/Sirupsen/logrus/hook_test.go b/vendor/src/github.com/Sirupsen/logrus/hook_test.go new file mode 100644 index 0000000000..13f34cb6f8 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/hook_test.go @@ -0,0 +1,122 @@ +package logrus + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +type TestHook struct { + Fired bool +} + +func (hook *TestHook) Fire(entry *Entry) error { + hook.Fired = true + return nil +} + +func (hook *TestHook) Levels() []Level { + return []Level{ + DebugLevel, + InfoLevel, + WarnLevel, + ErrorLevel, + FatalLevel, + PanicLevel, + } +} + +func TestHookFires(t *testing.T) { + hook := new(TestHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + assert.Equal(t, hook.Fired, false) + + log.Print("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, true) + }) +} + +type ModifyHook struct { +} + +func (hook *ModifyHook) Fire(entry *Entry) error { + entry.Data["wow"] = "whale" + return nil +} + +func (hook *ModifyHook) Levels() []Level { + return []Level{ + DebugLevel, + InfoLevel, + WarnLevel, + ErrorLevel, + FatalLevel, + 
PanicLevel, + } +} + +func TestHookCanModifyEntry(t *testing.T) { + hook := new(ModifyHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.WithField("wow", "elephant").Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["wow"], "whale") + }) +} + +func TestCanFireMultipleHooks(t *testing.T) { + hook1 := new(ModifyHook) + hook2 := new(TestHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook1) + log.Hooks.Add(hook2) + + log.WithField("wow", "elephant").Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["wow"], "whale") + assert.Equal(t, hook2.Fired, true) + }) +} + +type ErrorHook struct { + Fired bool +} + +func (hook *ErrorHook) Fire(entry *Entry) error { + hook.Fired = true + return nil +} + +func (hook *ErrorHook) Levels() []Level { + return []Level{ + ErrorLevel, + } +} + +func TestErrorHookShouldntFireOnInfo(t *testing.T) { + hook := new(ErrorHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.Info("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, false) + }) +} + +func TestErrorHookShouldFireOnError(t *testing.T) { + hook := new(ErrorHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.Error("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, true) + }) +} diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks.go b/vendor/src/github.com/Sirupsen/logrus/hooks.go new file mode 100644 index 0000000000..0da2b3653f --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/hooks.go @@ -0,0 +1,34 @@ +package logrus + +// A hook to be fired when logging on the logging levels returned from +// `Levels()` on your implementation of the interface. Note that this is not +// fired in a goroutine or a channel with workers, you should handle such +// functionality yourself if your call is non-blocking and you don't wish for +// the logging calls for levels returned from `Levels()` to block. 
+type Hook interface { + Levels() []Level + Fire(*Entry) error +} + +// Internal type for storing the hooks on a logger instance. +type levelHooks map[Level][]Hook + +// Add a hook to an instance of logger. This is called with +// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. +func (hooks levelHooks) Add(hook Hook) { + for _, level := range hook.Levels() { + hooks[level] = append(hooks[level], hook) + } +} + +// Fire all the hooks for the passed level. Used by `entry.log` to fire +// appropriate hooks for a log entry. +func (hooks levelHooks) Fire(level Level, entry *Entry) error { + for _, hook := range hooks[level] { + if err := hook.Fire(entry); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go b/vendor/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go new file mode 100644 index 0000000000..880d21ecdc --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go @@ -0,0 +1,54 @@ +package logrus_airbrake + +import ( + "github.com/Sirupsen/logrus" + "github.com/tobi/airbrake-go" +) + +// AirbrakeHook to send exceptions to an exception-tracking service compatible +// with the Airbrake API. You must set: +// * airbrake.Endpoint +// * airbrake.ApiKey +// * airbrake.Environment (only sends exceptions when set to "production") +// +// Before using this hook, to send an error. Entries that trigger an Error, +// Fatal or Panic should now include an "error" field to send to Airbrake. 
+type AirbrakeHook struct{} + +func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error { + if entry.Data["error"] == nil { + entry.Logger.WithFields(logrus.Fields{ + "source": "airbrake", + "endpoint": airbrake.Endpoint, + }).Warn("Exceptions sent to Airbrake must have an 'error' key with the error") + return nil + } + + err, ok := entry.Data["error"].(error) + if !ok { + entry.Logger.WithFields(logrus.Fields{ + "source": "airbrake", + "endpoint": airbrake.Endpoint, + }).Warn("Exceptions sent to Airbrake must have an `error` key of type `error`") + return nil + } + + airErr := airbrake.Notify(err) + if airErr != nil { + entry.Logger.WithFields(logrus.Fields{ + "source": "airbrake", + "endpoint": airbrake.Endpoint, + "error": airErr, + }).Warn("Failed to send error to Airbrake") + } + + return nil +} + +func (hook *AirbrakeHook) Levels() []logrus.Level { + return []logrus.Level{ + logrus.ErrorLevel, + logrus.FatalLevel, + logrus.PanicLevel, + } +} diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md b/vendor/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md new file mode 100644 index 0000000000..ae61e9229a --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md @@ -0,0 +1,28 @@ +# Papertrail Hook for Logrus :walrus: + +[Papertrail](https://papertrailapp.com) provides hosted log management. Once stored in Papertrail, you can [group](http://help.papertrailapp.com/kb/how-it-works/groups/) your logs on various dimensions, [search](http://help.papertrailapp.com/kb/how-it-works/search-syntax) them, and trigger [alerts](http://help.papertrailapp.com/kb/how-it-works/alerts). + +In most deployments, you'll want to send logs to Papertrail via their [remote_syslog](http://help.papertrailapp.com/kb/configuration/configuring-centralized-logging-from-text-log-files-in-unix/) daemon, which requires no application-specific configuration. 
This hook is intended for relatively low-volume logging, likely in managed cloud hosting deployments where installing `remote_syslog` is not possible. + +## Usage + +You can find your Papertrail UDP port on your [Papertrail account page](https://papertrailapp.com/account/destinations). Substitute it below for `YOUR_PAPERTRAIL_UDP_PORT`. + +For `YOUR_APP_NAME`, substitute a short string that will readily identify your application or service in the logs. + +```go +import ( + "log/syslog" + "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus/hooks/papertrail" +) + +func main() { + log := logrus.New() + hook, err := logrus_papertrail.NewPapertrailHook("logs.papertrailapp.com", YOUR_PAPERTRAIL_UDP_PORT, YOUR_APP_NAME) + + if err == nil { + log.Hooks.Add(hook) + } +} +``` diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go b/vendor/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go new file mode 100644 index 0000000000..48e2feaeb5 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go @@ -0,0 +1,54 @@ +package logrus_papertrail + +import ( + "fmt" + "net" + "os" + "time" + + "github.com/Sirupsen/logrus" +) + +const ( + format = "Jan 2 15:04:05" +) + +// PapertrailHook to send logs to a logging service compatible with the Papertrail API. +type PapertrailHook struct { + Host string + Port int + AppName string + UDPConn net.Conn +} + +// NewPapertrailHook creates a hook to be added to an instance of logger. +func NewPapertrailHook(host string, port int, appName string) (*PapertrailHook, error) { + conn, err := net.Dial("udp", fmt.Sprintf("%s:%d", host, port)) + return &PapertrailHook{host, port, appName, conn}, err +} + +// Fire is called when a log event is fired. 
+func (hook *PapertrailHook) Fire(entry *logrus.Entry) error { + date := time.Now().Format(format) + payload := fmt.Sprintf("<22> %s %s: [%s] %s", date, hook.AppName, entry.Data["level"], entry.Message) + + bytesWritten, err := hook.UDPConn.Write([]byte(payload)) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to send log line to Papertrail via UDP. Wrote %d bytes before error: %v", bytesWritten, err) + return err + } + + return nil +} + +// Levels returns the available logging levels. +func (hook *PapertrailHook) Levels() []logrus.Level { + return []logrus.Level{ + logrus.PanicLevel, + logrus.FatalLevel, + logrus.ErrorLevel, + logrus.WarnLevel, + logrus.InfoLevel, + logrus.DebugLevel, + } +} diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go b/vendor/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go new file mode 100644 index 0000000000..96318d0030 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go @@ -0,0 +1,26 @@ +package logrus_papertrail + +import ( + "fmt" + "testing" + + "github.com/Sirupsen/logrus" + "github.com/stvp/go-udp-testing" +) + +func TestWritingToUDP(t *testing.T) { + port := 16661 + udp.SetAddr(fmt.Sprintf(":%d", port)) + + hook, err := NewPapertrailHook("localhost", port, "test") + if err != nil { + t.Errorf("Unable to connect to local UDP server.") + } + + log := logrus.New() + log.Hooks.Add(hook) + + udp.ShouldReceive(t, "foo", func() { + log.Info("foo") + }) +} diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks/syslog/README.md b/vendor/src/github.com/Sirupsen/logrus/hooks/syslog/README.md new file mode 100644 index 0000000000..cd706bc1b1 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/hooks/syslog/README.md @@ -0,0 +1,20 @@ +# Syslog Hooks for Logrus :walrus: + +## Usage + +```go +import ( + "log/syslog" + "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus/hooks/syslog" +) + +func main() { + log := logrus.New() + hook, 
err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + + if err == nil { + log.Hooks.Add(hook) + } +} +``` \ No newline at end of file diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go b/vendor/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go new file mode 100644 index 0000000000..2a18ce6130 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go @@ -0,0 +1,59 @@ +package logrus_syslog + +import ( + "fmt" + "github.com/Sirupsen/logrus" + "log/syslog" + "os" +) + +// SyslogHook to send logs via syslog. +type SyslogHook struct { + Writer *syslog.Writer + SyslogNetwork string + SyslogRaddr string +} + +// Creates a hook to be added to an instance of logger. This is called with +// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")` +// `if err == nil { log.Hooks.Add(hook) }` +func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) { + w, err := syslog.Dial(network, raddr, priority, tag) + return &SyslogHook{w, network, raddr}, err +} + +func (hook *SyslogHook) Fire(entry *logrus.Entry) error { + line, err := entry.String() + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err) + return err + } + + switch entry.Data["level"] { + case "panic": + return hook.Writer.Crit(line) + case "fatal": + return hook.Writer.Crit(line) + case "error": + return hook.Writer.Err(line) + case "warn": + return hook.Writer.Warning(line) + case "info": + return hook.Writer.Info(line) + case "debug": + return hook.Writer.Debug(line) + default: + return nil + } +} + +func (hook *SyslogHook) Levels() []logrus.Level { + return []logrus.Level{ + logrus.PanicLevel, + logrus.FatalLevel, + logrus.ErrorLevel, + logrus.WarnLevel, + logrus.InfoLevel, + logrus.DebugLevel, + } +} diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go b/vendor/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go new file 
mode 100644 index 0000000000..42762dc10d --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go @@ -0,0 +1,26 @@ +package logrus_syslog + +import ( + "github.com/Sirupsen/logrus" + "log/syslog" + "testing" +) + +func TestLocalhostAddAndPrint(t *testing.T) { + log := logrus.New() + hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + + if err != nil { + t.Errorf("Unable to connect to local syslog.") + } + + log.Hooks.Add(hook) + + for _, level := range hook.Levels() { + if len(log.Hooks[level]) != 1 { + t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level])) + } + } + + log.Info("Congratulations!") +} diff --git a/vendor/src/github.com/Sirupsen/logrus/json_formatter.go b/vendor/src/github.com/Sirupsen/logrus/json_formatter.go new file mode 100644 index 0000000000..9d11b642d4 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/json_formatter.go @@ -0,0 +1,22 @@ +package logrus + +import ( + "encoding/json" + "fmt" + "time" +) + +type JSONFormatter struct{} + +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + prefixFieldClashes(entry) + entry.Data["time"] = entry.Time.Format(time.RFC3339) + entry.Data["msg"] = entry.Message + entry.Data["level"] = entry.Level.String() + + serialized, err := json.Marshal(entry.Data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} diff --git a/vendor/src/github.com/Sirupsen/logrus/logger.go b/vendor/src/github.com/Sirupsen/logrus/logger.go new file mode 100644 index 0000000000..7374fe365d --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/logger.go @@ -0,0 +1,161 @@ +package logrus + +import ( + "io" + "os" + "sync" +) + +type Logger struct { + // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a + // file, or leave it default which is `os.Stdout`. 
You can also set this to + // something more adventorous, such as logging to Kafka. + Out io.Writer + // Hooks for the logger instance. These allow firing events based on logging + // levels and log entries. For example, to send errors to an error tracking + // service, log to StatsD or dump the core on fatal errors. + Hooks levelHooks + // All log entries pass through the formatter before logged to Out. The + // included formatters are `TextFormatter` and `JSONFormatter` for which + // TextFormatter is the default. In development (when a TTY is attached) it + // logs with colors, but to a file it wouldn't. You can easily implement your + // own that implements the `Formatter` interface, see the `README` or included + // formatters for examples. + Formatter Formatter + // The logging level the logger should log at. This is typically (and defaults + // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be + // logged. `logrus.Debug` is useful in + Level Level + // Used to sync writing to the log. + mu sync.Mutex +} + +// Creates a new logger. Configuration should be set by changing `Formatter`, +// `Out` and `Hooks` directly on the default logger instance. You can also just +// instantiate your own: +// +// var log = &Logger{ +// Out: os.Stderr, +// Formatter: new(JSONFormatter), +// Hooks: make(levelHooks), +// Level: logrus.Debug, +// } +// +// It's recommended to make this a global instance called `log`. +func New() *Logger { + return &Logger{ + Out: os.Stdout, + Formatter: new(TextFormatter), + Hooks: make(levelHooks), + Level: InfoLevel, + } +} + +// Adds a field to the log entry, note that you it doesn't log until you call +// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. +// Ff you want multiple fields, use `WithFields`. +func (logger *Logger) WithField(key string, value interface{}) *Entry { + return NewEntry(logger).WithField(key, value) +} + +// Adds a struct of fields to the log entry. 
All it does is call `WithField` for +// each `Field`. +func (logger *Logger) WithFields(fields Fields) *Entry { + return NewEntry(logger).WithFields(fields) +} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + NewEntry(logger).Debugf(format, args...) +} + +func (logger *Logger) Infof(format string, args ...interface{}) { + NewEntry(logger).Infof(format, args...) +} + +func (logger *Logger) Printf(format string, args ...interface{}) { + NewEntry(logger).Printf(format, args...) +} + +func (logger *Logger) Warnf(format string, args ...interface{}) { + NewEntry(logger).Warnf(format, args...) +} + +func (logger *Logger) Warningf(format string, args ...interface{}) { + NewEntry(logger).Warnf(format, args...) +} + +func (logger *Logger) Errorf(format string, args ...interface{}) { + NewEntry(logger).Errorf(format, args...) +} + +func (logger *Logger) Fatalf(format string, args ...interface{}) { + NewEntry(logger).Fatalf(format, args...) +} + +func (logger *Logger) Panicf(format string, args ...interface{}) { + NewEntry(logger).Panicf(format, args...) +} + +func (logger *Logger) Debug(args ...interface{}) { + NewEntry(logger).Debug(args...) +} + +func (logger *Logger) Info(args ...interface{}) { + NewEntry(logger).Info(args...) +} + +func (logger *Logger) Print(args ...interface{}) { + NewEntry(logger).Info(args...) +} + +func (logger *Logger) Warn(args ...interface{}) { + NewEntry(logger).Warn(args...) +} + +func (logger *Logger) Warning(args ...interface{}) { + NewEntry(logger).Warn(args...) +} + +func (logger *Logger) Error(args ...interface{}) { + NewEntry(logger).Error(args...) +} + +func (logger *Logger) Fatal(args ...interface{}) { + NewEntry(logger).Fatal(args...) +} + +func (logger *Logger) Panic(args ...interface{}) { + NewEntry(logger).Panic(args...) +} + +func (logger *Logger) Debugln(args ...interface{}) { + NewEntry(logger).Debugln(args...) +} + +func (logger *Logger) Infoln(args ...interface{}) { + NewEntry(logger).Infoln(args...) 
+} + +func (logger *Logger) Println(args ...interface{}) { + NewEntry(logger).Println(args...) +} + +func (logger *Logger) Warnln(args ...interface{}) { + NewEntry(logger).Warnln(args...) +} + +func (logger *Logger) Warningln(args ...interface{}) { + NewEntry(logger).Warnln(args...) +} + +func (logger *Logger) Errorln(args ...interface{}) { + NewEntry(logger).Errorln(args...) +} + +func (logger *Logger) Fatalln(args ...interface{}) { + NewEntry(logger).Fatalln(args...) +} + +func (logger *Logger) Panicln(args ...interface{}) { + NewEntry(logger).Panicln(args...) +} diff --git a/vendor/src/github.com/Sirupsen/logrus/logrus.go b/vendor/src/github.com/Sirupsen/logrus/logrus.go new file mode 100644 index 0000000000..43ee12e90e --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/logrus.go @@ -0,0 +1,94 @@ +package logrus + +import ( + "fmt" + "log" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Level type +type Level uint8 + +// Convert the Level to a string. E.g. PanicLevel becomes "panic". +func (level Level) String() string { + switch level { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warning" + case ErrorLevel: + return "error" + case FatalLevel: + return "fatal" + case PanicLevel: + return "panic" + } + + return "unknown" +} + +// ParseLevel takes a string level and returns the Logrus log level constant. +func ParseLevel(lvl string) (Level, error) { + switch lvl { + case "panic": + return PanicLevel, nil + case "fatal": + return FatalLevel, nil + case "error": + return ErrorLevel, nil + case "warn", "warning": + return WarnLevel, nil + case "info": + return InfoLevel, nil + case "debug": + return DebugLevel, nil + } + + var l Level + return l, fmt.Errorf("not a valid logrus Level: %q", lvl) +} + +// These are the different logging levels. You can set the logging level to log +// on your instance of logger, obtained with `logrus.New()`. 
+const ( + // PanicLevel level, highest level of severity. Logs and then calls panic with the + // message passed to Debug, Info, ... + PanicLevel Level = iota + // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the + // logging level is set to Panic. + FatalLevel + // ErrorLevel level. Logs. Used for errors that should definitely be noted. + // Commonly used for hooks to send errors to an error tracking service. + ErrorLevel + // WarnLevel level. Non-critical entries that deserve eyes. + WarnLevel + // InfoLevel level. General operational entries about what's going on inside the + // application. + InfoLevel + // DebugLevel level. Usually only enabled when debugging. Very verbose logging. + DebugLevel +) + +// Won't compile if StdLogger can't be realized by a log.Logger +var _ StdLogger = &log.Logger{} + +// StdLogger is what your logrus-enabled library should take, that way +// it'll accept a stdlib logger and a logrus logger. There's no standard +// interface, this is the closest we get, unfortunately. 
+type StdLogger interface { + Print(...interface{}) + Printf(string, ...interface{}) + Println(...interface{}) + + Fatal(...interface{}) + Fatalf(string, ...interface{}) + Fatalln(...interface{}) + + Panic(...interface{}) + Panicf(string, ...interface{}) + Panicln(...interface{}) +} diff --git a/vendor/src/github.com/Sirupsen/logrus/logrus_test.go b/vendor/src/github.com/Sirupsen/logrus/logrus_test.go new file mode 100644 index 0000000000..15157d172d --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/logrus_test.go @@ -0,0 +1,247 @@ +package logrus + +import ( + "bytes" + "encoding/json" + "strconv" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) { + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + log(logger) + + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + assertions(fields) +} + +func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) { + var buffer bytes.Buffer + + logger := New() + logger.Out = &buffer + logger.Formatter = &TextFormatter{ + DisableColors: true, + } + + log(logger) + + fields := make(map[string]string) + for _, kv := range strings.Split(buffer.String(), " ") { + if !strings.Contains(kv, "=") { + continue + } + kvArr := strings.Split(kv, "=") + key := strings.TrimSpace(kvArr[0]) + val, err := strconv.Unquote(kvArr[1]) + assert.NoError(t, err) + fields[key] = val + } + assertions(fields) +} + +func TestPrint(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "info") + }) +} + +func TestInfo(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, 
fields["level"], "info") + }) +} + +func TestWarn(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Warn("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "warning") + }) +} + +func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln("test", "test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test test") + }) +} + +func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln("test", 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test 10") + }) +} + +func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln(10, 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "10 10") + }) +} + +func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln(10, 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "10 10") + }) +} + +func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test", 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test10") + }) +} + +func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test", "test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "testtest") + }) +} + +func TestWithFieldsShouldAllowAssignments(t *testing.T) { + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + localLog := logger.WithFields(Fields{ + "key1": "value1", + }) + + localLog.WithField("key2", "value2").Info("test") + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + assert.Equal(t, "value2", fields["key2"]) + assert.Equal(t, "value1", 
fields["key1"]) + + buffer = bytes.Buffer{} + fields = Fields{} + localLog.Info("test") + err = json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + _, ok := fields["key2"] + assert.Equal(t, false, ok) + assert.Equal(t, "value1", fields["key1"]) +} + +func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("msg", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + }) +} + +func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("msg", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["fields.msg"], "hello") + }) +} + +func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("time", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["fields.time"], "hello") + }) +} + +func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("level", 1).Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["level"], "info") + assert.Equal(t, fields["fields.level"], 1) + }) +} + +func TestDefaultFieldsAreNotPrefixed(t *testing.T) { + LogAndAssertText(t, func(log *Logger) { + ll := log.WithField("herp", "derp") + ll.Info("hello") + ll.Info("bye") + }, func(fields map[string]string) { + for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} { + if _, ok := fields[fieldName]; ok { + t.Fatalf("should not have prefixed %q: %v", fieldName, fields) + } + } + }) +} + +func TestConvertLevelToString(t *testing.T) { + assert.Equal(t, "debug", DebugLevel.String()) + assert.Equal(t, "info", InfoLevel.String()) + assert.Equal(t, "warning", WarnLevel.String()) + assert.Equal(t, "error", ErrorLevel.String()) + assert.Equal(t, "fatal", FatalLevel.String()) + assert.Equal(t, "panic", 
PanicLevel.String()) +} + +func TestParseLevel(t *testing.T) { + l, err := ParseLevel("panic") + assert.Nil(t, err) + assert.Equal(t, PanicLevel, l) + + l, err = ParseLevel("fatal") + assert.Nil(t, err) + assert.Equal(t, FatalLevel, l) + + l, err = ParseLevel("error") + assert.Nil(t, err) + assert.Equal(t, ErrorLevel, l) + + l, err = ParseLevel("warn") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("warning") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("info") + assert.Nil(t, err) + assert.Equal(t, InfoLevel, l) + + l, err = ParseLevel("debug") + assert.Nil(t, err) + assert.Equal(t, DebugLevel, l) + + l, err = ParseLevel("invalid") + assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error()) +} diff --git a/vendor/src/github.com/Sirupsen/logrus/terminal_darwin.go b/vendor/src/github.com/Sirupsen/logrus/terminal_darwin.go new file mode 100644 index 0000000000..8fe02a4aec --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/terminal_darwin.go @@ -0,0 +1,12 @@ +// Based on ssh/terminal: +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA + +type Termios syscall.Termios diff --git a/vendor/src/github.com/Sirupsen/logrus/terminal_freebsd.go b/vendor/src/github.com/Sirupsen/logrus/terminal_freebsd.go new file mode 100644 index 0000000000..0428ee5d52 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/terminal_freebsd.go @@ -0,0 +1,20 @@ +/* + Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin. 
+*/ +package logrus + +import ( + "syscall" +) + +const ioctlReadTermios = syscall.TIOCGETA + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed uint32 + Ospeed uint32 +} diff --git a/vendor/src/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/src/github.com/Sirupsen/logrus/terminal_linux.go new file mode 100644 index 0000000000..a2c0b40db6 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/terminal_linux.go @@ -0,0 +1,12 @@ +// Based on ssh/terminal: +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TCGETS + +type Termios syscall.Termios diff --git a/vendor/src/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/src/github.com/Sirupsen/logrus/terminal_notwindows.go new file mode 100644 index 0000000000..276447bd5c --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/terminal_notwindows.go @@ -0,0 +1,21 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux,!appengine darwin freebsd + +package logrus + +import ( + "syscall" + "unsafe" +) + +// IsTerminal returns true if the given file descriptor is a terminal. 
+func IsTerminal() bool { + fd := syscall.Stdout + var termios Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/src/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/src/github.com/Sirupsen/logrus/terminal_windows.go new file mode 100644 index 0000000000..2e09f6f7e3 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/terminal_windows.go @@ -0,0 +1,27 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package logrus + +import ( + "syscall" + "unsafe" +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal() bool { + fd := syscall.Stdout + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} diff --git a/vendor/src/github.com/Sirupsen/logrus/text_formatter.go b/vendor/src/github.com/Sirupsen/logrus/text_formatter.go new file mode 100644 index 0000000000..fc0a4082a7 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/text_formatter.go @@ -0,0 +1,95 @@ +package logrus + +import ( + "bytes" + "fmt" + "sort" + "strings" + "time" +) + +const ( + nocolor = 0 + red = 31 + green = 32 + yellow = 33 + blue = 34 +) + +var ( + baseTimestamp time.Time + isTerminal bool +) + +func init() { + baseTimestamp = time.Now() + isTerminal = IsTerminal() +} + +func miniTS() int { + return int(time.Since(baseTimestamp) / time.Second) +} + +type TextFormatter struct { + // Set to true to bypass checking for a TTY before outputting colors. 
+ ForceColors bool + DisableColors bool +} + +func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { + + var keys []string + for k := range entry.Data { + keys = append(keys, k) + } + sort.Strings(keys) + + b := &bytes.Buffer{} + + prefixFieldClashes(entry) + + isColored := (f.ForceColors || isTerminal) && !f.DisableColors + + if isColored { + printColored(b, entry, keys) + } else { + f.appendKeyValue(b, "time", entry.Time.Format(time.RFC3339)) + f.appendKeyValue(b, "level", entry.Level.String()) + f.appendKeyValue(b, "msg", entry.Message) + for _, key := range keys { + f.appendKeyValue(b, key, entry.Data[key]) + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func printColored(b *bytes.Buffer, entry *Entry, keys []string) { + var levelColor int + switch entry.Level { + case WarnLevel: + levelColor = yellow + case ErrorLevel, FatalLevel, PanicLevel: + levelColor = red + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String())[0:4] + + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) + for _, k := range keys { + v := entry.Data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v) + } +} + +func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key, value interface{}) { + switch value.(type) { + case string, error: + fmt.Fprintf(b, "%v=%q ", key, value) + default: + fmt.Fprintf(b, "%v=%v ", key, value) + } +} diff --git a/vendor/src/github.com/docker/libcontainer/.drone.yml b/vendor/src/github.com/docker/libcontainer/.drone.yml new file mode 100755 index 0000000000..80d298f218 --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/.drone.yml @@ -0,0 +1,9 @@ +image: dockercore/libcontainer +script: +# Setup the DockerInDocker environment. + - /dind + - sed -i 's!docker/docker!docker/libcontainer!' 
/go/src/github.com/docker/docker/hack/make/.validate + - bash /go/src/github.com/docker/docker/hack/make/validate-dco + - bash /go/src/github.com/docker/docker/hack/make/validate-gofmt + - export GOPATH="$GOPATH:/go:$(pwd)/vendor" # Drone mucks with our GOPATH + - make direct-test diff --git a/vendor/src/github.com/docker/libcontainer/.travis.yml b/vendor/src/github.com/docker/libcontainer/.travis.yml deleted file mode 100644 index 3ce0e27e45..0000000000 --- a/vendor/src/github.com/docker/libcontainer/.travis.yml +++ /dev/null @@ -1,36 +0,0 @@ -language: go -go: 1.3 - -# let us have pretty experimental Docker-based Travis workers -sudo: false - -env: - - TRAVIS_GLOBAL_WTF=1 - - _GOOS=linux _GOARCH=amd64 CGO_ENABLED=1 - - _GOOS=linux _GOARCH=amd64 CGO_ENABLED=0 -# - _GOOS=linux _GOARCH=386 CGO_ENABLED=1 # TODO add this once Travis can handle it (https://github.com/travis-ci/travis-ci/issues/2207#issuecomment-49625061) - - _GOOS=linux _GOARCH=386 CGO_ENABLED=0 - - _GOOS=linux _GOARCH=arm CGO_ENABLED=0 - -install: - - go get code.google.com/p/go.tools/cmd/cover - - mkdir -pv "${GOPATH%%:*}/src/github.com/docker" && [ -d "${GOPATH%%:*}/src/github.com/docker/libcontainer" ] || ln -sv "$(readlink -f .)" "${GOPATH%%:*}/src/github.com/docker/libcontainer" - - if [ -z "$TRAVIS_GLOBAL_WTF" ]; then - gvm cross "$_GOOS" "$_GOARCH"; - export GOOS="$_GOOS" GOARCH="$_GOARCH"; - fi - - export GOPATH="$GOPATH:$(pwd)/vendor" - - if [ -z "$TRAVIS_GLOBAL_WTF" ]; then go env; fi - - go get -d -v ./... # TODO remove this if /docker/docker gets purged from our includes - - if [ "$TRAVIS_GLOBAL_WTF" ]; then - export DOCKER_PATH="${GOPATH%%:*}/src/github.com/docker/docker"; - mkdir -p "$DOCKER_PATH/hack/make"; - ( cd "$DOCKER_PATH/hack/make" && wget -c 'https://raw.githubusercontent.com/docker/docker/master/hack/make/'{.validate,validate-dco,validate-gofmt} ); - sed -i 's!docker/docker!docker/libcontainer!' 
"$DOCKER_PATH/hack/make/.validate"; - fi - -script: - - if [ "$TRAVIS_GLOBAL_WTF" ]; then bash "$DOCKER_PATH/hack/make/validate-dco"; fi - - if [ "$TRAVIS_GLOBAL_WTF" ]; then bash "$DOCKER_PATH/hack/make/validate-gofmt"; fi - - if [ -z "$TRAVIS_GLOBAL_WTF" ]; then make direct-build; fi - - if [ -z "$TRAVIS_GLOBAL_WTF" -a "$GOARCH" != 'arm' ]; then make direct-test-short; fi diff --git a/vendor/src/github.com/docker/libcontainer/CONTRIBUTORS_GUIDE.md b/vendor/src/github.com/docker/libcontainer/CONTRIBUTING.md similarity index 84% rename from vendor/src/github.com/docker/libcontainer/CONTRIBUTORS_GUIDE.md rename to vendor/src/github.com/docker/libcontainer/CONTRIBUTING.md index 07bf22a031..667cc5a63f 100644 --- a/vendor/src/github.com/docker/libcontainer/CONTRIBUTORS_GUIDE.md +++ b/vendor/src/github.com/docker/libcontainer/CONTRIBUTING.md @@ -6,7 +6,7 @@ feels wrong or incomplete. ## Reporting Issues -When reporting [issues](https://github.com/docker/libcontainer/issues) +When reporting [issues](https://github.com/docker/libcontainer/issues) on GitHub please include your host OS (Ubuntu 12.04, Fedora 19, etc), the output of `uname -a`. Please include the steps required to reproduce the problem if possible and applicable. @@ -14,7 +14,60 @@ This information will help us review and fix your issue faster. ## Development Environment -*Add instructions on setting up the development environment.* +### Requirements + +For best results, use a Linux development environment. +The following packages are required to compile libcontainer natively. + +- Golang 1.3 +- GCC +- git +- cgutils + +You can develop on OSX, but you are limited to Dockerfile-based builds only. + +### Building libcontainer from Dockerfile + + make all + +This is the easiest way of building libcontainer. 
+As this build is done using Docker, you can even run this from [OSX](https://github.com/boot2docker/boot2docker) + +### Testing changes with "nsinit" + + make sh + +This will create an container that runs `nsinit exec sh` on a busybox rootfs with the configuration from ['minimal.json'](https://github.com/docker/libcontainer/blob/master/sample_configs/minimal.json). +Like the previous command, you can run this on OSX too! + +### Building libcontainer directly + +> Note: You should add the `vendor` directory to your GOPATH to use the vendored libraries + + ./update-vendor.sh + go get -d ./... + make direct-build + # Run the tests + make direct-test-short | egrep --color 'FAIL|$' + # Run all the test + make direct-test | egrep --color 'FAIL|$' + +### Testing Changes with "nsinit" directly + +To test a change: + + # Install nsinit + make direct-install + + # Optional, add a docker0 bridge + ip link add docker0 type bridge + ifconfig docker0 172.17.0.1/16 up + + mkdir testfs + curl -sSL https://github.com/jpetazzo/docker-busybox/raw/buildroot-2014.02/rootfs.tar | tar -xC testfs + cd testfs + cp container.json + nsinit exec sh ## Contribution Guidelines diff --git a/vendor/src/github.com/docker/libcontainer/Dockerfile b/vendor/src/github.com/docker/libcontainer/Dockerfile index 65bf5731d2..614e5979bf 100644 --- a/vendor/src/github.com/docker/libcontainer/Dockerfile +++ b/vendor/src/github.com/docker/libcontainer/Dockerfile @@ -1,21 +1,22 @@ FROM crosbymichael/golang RUN apt-get update && apt-get install -y gcc make -RUN go get code.google.com/p/go.tools/cmd/cover +RUN go get golang.org/x/tools/cmd/cover + +ENV GOPATH $GOPATH:/go/src/github.com/docker/libcontainer/vendor +RUN go get github.com/docker/docker/pkg/term # setup a playground for us to spawn containers in RUN mkdir /busybox && \ curl -sSL 'https://github.com/jpetazzo/docker-busybox/raw/buildroot-2014.02/rootfs.tar' | tar -xC /busybox -RUN curl -sSL 
https://raw.githubusercontent.com/docker/docker/master/hack/dind -o /dind && \ +RUN curl -sSL https://raw.githubusercontent.com/docker/docker/master/project/dind -o /dind && \ chmod +x /dind COPY . /go/src/github.com/docker/libcontainer WORKDIR /go/src/github.com/docker/libcontainer RUN cp sample_configs/minimal.json /busybox/container.json -ENV GOPATH $GOPATH:/go/src/github.com/docker/libcontainer/vendor - RUN go get -d -v ./... RUN make direct-install diff --git a/vendor/src/github.com/docker/libcontainer/MAINTAINERS b/vendor/src/github.com/docker/libcontainer/MAINTAINERS index 24011b0540..7295c6038f 100644 --- a/vendor/src/github.com/docker/libcontainer/MAINTAINERS +++ b/vendor/src/github.com/docker/libcontainer/MAINTAINERS @@ -2,5 +2,4 @@ Michael Crosby (@crosbymichael) Rohit Jnagal (@rjnagal) Victor Marmol (@vmarmol) Mrunal Patel (@mrunalp) -.travis.yml: Tianon Gravi (@tianon) update-vendor.sh: Tianon Gravi (@tianon) diff --git a/vendor/src/github.com/docker/libcontainer/Makefile b/vendor/src/github.com/docker/libcontainer/Makefile index 0ec995fc3c..0c4dda7c9b 100644 --- a/vendor/src/github.com/docker/libcontainer/Makefile +++ b/vendor/src/github.com/docker/libcontainer/Makefile @@ -12,10 +12,10 @@ sh: GO_PACKAGES = $(shell find . 
-not \( -wholename ./vendor -prune -o -wholename ./.git -prune \) -name '*.go' -print0 | xargs -0n1 dirname | sort -u) direct-test: - go test -cover -v $(GO_PACKAGES) + go test $(TEST_TAGS) -cover -v $(GO_PACKAGES) direct-test-short: - go test -cover -test.short -v $(GO_PACKAGES) + go test $(TEST_TAGS) -cover -test.short -v $(GO_PACKAGES) direct-build: go build -v $(GO_PACKAGES) diff --git a/vendor/src/github.com/docker/libcontainer/README.md b/vendor/src/github.com/docker/libcontainer/README.md index b80d2841f8..37047e68c8 100644 --- a/vendor/src/github.com/docker/libcontainer/README.md +++ b/vendor/src/github.com/docker/libcontainer/README.md @@ -1,4 +1,4 @@ -## libcontainer - reference implementation for containers [![Build Status](https://travis-ci.org/docker/libcontainer.png?branch=master)](https://travis-ci.org/docker/libcontainer) +## libcontainer - reference implementation for containers [![Build Status](https://ci.dockerproject.com/github.com/docker/libcontainer/status.svg?branch=master)](https://ci.dockerproject.com/github.com/docker/libcontainer) ### Note on API changes: @@ -56,7 +56,7 @@ Docs released under Creative commons. First of all, please familiarise yourself with the [libcontainer Principles](PRINCIPLES.md). -If you're a *contributor* or aspiring contributor, you should read the [Contributors' Guide](CONTRIBUTORS_GUIDE.md). +If you're a *contributor* or aspiring contributor, you should read the [Contributors' Guide](CONTRIBUTING.md). If you're a *maintainer* or aspiring maintainer, you should read the [Maintainers' Guide](MAINTAINERS_GUIDE.md) and "How can I become a maintainer?" in the Contributors' Guide. 
diff --git a/vendor/src/github.com/docker/libcontainer/SPEC.md b/vendor/src/github.com/docker/libcontainer/SPEC.md new file mode 100644 index 0000000000..f5afaadc51 --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/SPEC.md @@ -0,0 +1,321 @@ +## Container Specification - v1 + +This is the standard configuration for version 1 containers. It includes +namespaces, standard filesystem setup, a default Linux capability set, and +information about resource reservations. It also has information about any +populated environment settings for the processes running inside a container. + +Along with the configuration of how a container is created the standard also +discusses actions that can be performed on a container to manage and inspect +information about the processes running inside. + +The v1 profile is meant to be able to accommodate the majority of applications +with a strong security configuration. + +### System Requirements and Compatibility + +Minimum requirements: +* Kernel version - 3.8 recommended 2.6.2x minimum(with backported patches) +* Mounted cgroups with each subsystem in its own hierarchy + + +### Namespaces + +| Flag | Enabled | +| ------------ | ------- | +| CLONE_NEWPID | 1 | +| CLONE_NEWUTS | 1 | +| CLONE_NEWIPC | 1 | +| CLONE_NEWNET | 1 | +| CLONE_NEWNS | 1 | +| CLONE_NEWUSER | 0 | + +In v1 the user namespace is not enabled by default for support of older kernels +where the user namespace feature is not fully implemented. Namespaces are +created for the container via the `clone` syscall. + + +### Filesystem + +A root filesystem must be provided to a container for execution. The container +will use this root filesystem (rootfs) to jail and spawn processes inside where +the binaries and system libraries are local to that directory. Any binaries +to be executed must be contained within this rootfs. 
+ +Mounts that happen inside the container are automatically cleaned up when the +container exits as the mount namespace is destroyed and the kernel will +unmount all the mounts that were setup within that namespace. + +For a container to execute properly there are certain filesystems that +are required to be mounted within the rootfs that the runtime will setup. + +| Path | Type | Flags | Data | +| ----------- | ------ | -------------------------------------- | --------------------------------------- | +| /proc | proc | MS_NOEXEC,MS_NOSUID,MS_NODEV | | +| /dev | tmpfs | MS_NOEXEC,MS_STRICTATIME | mode=755 | +| /dev/shm | shm | MS_NOEXEC,MS_NOSUID,MS_NODEV | mode=1777,size=65536k | +| /dev/mqueue | mqueue | MS_NOEXEC,MS_NOSUID,MS_NODEV | | +| /dev/pts | devpts | MS_NOEXEC,MS_NOSUID | newinstance,ptmxmode=0666,mode=620,gid5 | +| /sys | sysfs | MS_NOEXEC,MS_NOSUID,MS_NODEV,MS_RDONLY | | + + +After a container's filesystems are mounted within the newly created +mount namespace `/dev` will need to be populated with a set of device nodes. +It is expected that a rootfs does not need to have any device nodes specified +for `/dev` witin the rootfs as the container will setup the correct devices +that are required for executing a container's process. + +| Path | Mode | Access | +| ------------ | ---- | ---------- | +| /dev/null | 0666 | rwm | +| /dev/zero | 0666 | rwm | +| /dev/full | 0666 | rwm | +| /dev/tty | 0666 | rwm | +| /dev/random | 0666 | rwm | +| /dev/urandom | 0666 | rwm | +| /dev/fuse | 0666 | rwm | + + +**ptmx** +`/dev/ptmx` will need to be a symlink to the host's `/dev/ptmx` within +the container. + +The use of a pseudo TTY is optional within a container and it should support both. +If a pseudo is provided to the container `/dev/console` will need to be +setup by binding the console in `/dev/` after it has been populated and mounted +in tmpfs. 
+ +| Source | Destination | UID GID | Mode | Type | +| --------------- | ------------ | ------- | ---- | ---- | +| *pty host path* | /dev/console | 0 0 | 0600 | bind | + + +After `/dev/null` has been setup we check for any external links between +the container's io, STDIN, STDOUT, STDERR. If the container's io is pointing +to `/dev/null` outside the container we close and `dup2` the the `/dev/null` +that is local to the container's rootfs. + + +After the container has `/proc` mounted a few standard symlinks are setup +within `/dev/` for the io. + +| Source | Destination | +| ------------ | ----------- | +| /proc/1/fd | /dev/fd | +| /proc/1/fd/0 | /dev/stdin | +| /proc/1/fd/1 | /dev/stdout | +| /proc/1/fd/2 | /dev/stderr | + +A `pivot_root` is used to change the root for the process, effectively +jailing the process inside the rootfs. + +```c +put_old = mkdir(...); +pivot_root(rootfs, put_old); +chdir("/"); +unmount(put_old, MS_DETACH); +rmdir(put_old); +``` + +For container's running with a rootfs inside `ramfs` a `MS_MOVE` combined +with a `chroot` is required as `pivot_root` is not supported in `ramfs`. + +```c +mount(rootfs, "/", NULL, MS_MOVE, NULL); +chroot("."); +chdir("/"); +``` + +The `umask` is set back to `0022` after the filesystem setup has been completed. + +### Resources + +Cgroups are used to handle resource allocation for containers. This includes +system resources like cpu, memory, and device access. + +| Subsystem | Enabled | +| ---------- | ------- | +| devices | 1 | +| memory | 1 | +| cpu | 1 | +| cpuacct | 1 | +| cpuset | 1 | +| blkio | 1 | +| perf_event | 1 | +| freezer | 1 | + + +All cgroup subsystem are joined so that statistics can be collected from +each of the subsystems. Freezer does not expose any stats but is joined +so that containers can be paused and resumed. + +The parent process of the container's init must place the init pid inside +the correct cgroups before the initialization begins. 
This is done so +that no processes or threads escape the cgroups. This sync is +done via a pipe ( specified in the runtime section below ) that the container's +init process will block waiting for the parent to finish setup. + +### Security + +The standard set of Linux capabilities that are set in a container +provide a good default for security and flexibility for the applications. + + +| Capability | Enabled | +| -------------------- | ------- | +| CAP_NET_RAW | 1 | +| CAP_NET_BIND_SERVICE | 1 | +| CAP_AUDIT_WRITE | 1 | +| CAP_DAC_OVERRIDE | 1 | +| CAP_SETFCAP | 1 | +| CAP_SETPCAP | 1 | +| CAP_SETGID | 1 | +| CAP_SETUID | 1 | +| CAP_MKNOD | 1 | +| CAP_CHOWN | 1 | +| CAP_FOWNER | 1 | +| CAP_FSETID | 1 | +| CAP_KILL | 1 | +| CAP_SYS_CHROOT | 1 | +| CAP_NET_BROADCAST | 0 | +| CAP_SYS_MODULE | 0 | +| CAP_SYS_RAWIO | 0 | +| CAP_SYS_PACCT | 0 | +| CAP_SYS_ADMIN | 0 | +| CAP_SYS_NICE | 0 | +| CAP_SYS_RESOURCE | 0 | +| CAP_SYS_TIME | 0 | +| CAP_SYS_TTY_CONFIG | 0 | +| CAP_AUDIT_CONTROL | 0 | +| CAP_MAC_OVERRIDE | 0 | +| CAP_MAC_ADMIN | 0 | +| CAP_NET_ADMIN | 0 | +| CAP_SYSLOG | 0 | +| CAP_DAC_READ_SEARCH | 0 | +| CAP_LINUX_IMMUTABLE | 0 | +| CAP_IPC_LOCK | 0 | +| CAP_IPC_OWNER | 0 | +| CAP_SYS_PTRACE | 0 | +| CAP_SYS_BOOT | 0 | +| CAP_LEASE | 0 | +| CAP_WAKE_ALARM | 0 | +| CAP_BLOCK_SUSPE | 0 | + + +Additional security layers like [apparmor](https://wiki.ubuntu.com/AppArmor) +and [selinux](http://selinuxproject.org/page/Main_Page) can be used with +the containers. A container should support setting an apparmor profile or +selinux process and mount labels if provided in the configuration. 
+ +Standard apparmor profile: +```c +#include +profile flags=(attach_disconnected,mediate_deleted) { + #include + network, + capability, + file, + umount, + + mount fstype=tmpfs, + mount fstype=mqueue, + mount fstype=fuse.*, + mount fstype=binfmt_misc -> /proc/sys/fs/binfmt_misc/, + mount fstype=efivarfs -> /sys/firmware/efi/efivars/, + mount fstype=fusectl -> /sys/fs/fuse/connections/, + mount fstype=securityfs -> /sys/kernel/security/, + mount fstype=debugfs -> /sys/kernel/debug/, + mount fstype=proc -> /proc/, + mount fstype=sysfs -> /sys/, + + deny @{PROC}/sys/fs/** wklx, + deny @{PROC}/sysrq-trigger rwklx, + deny @{PROC}/mem rwklx, + deny @{PROC}/kmem rwklx, + deny @{PROC}/sys/kernel/[^s][^h][^m]* wklx, + deny @{PROC}/sys/kernel/*/** wklx, + + deny mount options=(ro, remount) -> /, + deny mount fstype=debugfs -> /var/lib/ureadahead/debugfs/, + deny mount fstype=devpts, + + deny /sys/[^f]*/** wklx, + deny /sys/f[^s]*/** wklx, + deny /sys/fs/[^c]*/** wklx, + deny /sys/fs/c[^g]*/** wklx, + deny /sys/fs/cg[^r]*/** wklx, + deny /sys/firmware/efi/efivars/** rwklx, + deny /sys/kernel/security/** rwklx, +} +``` + +*TODO: seccomp work is being done to find a good default config* + +### Runtime and Init Process + +During container creation the parent process needs to talk to the container's init +process and have a form of synchronization. This is accomplished by creating +a pipe that is passed to the container's init. When the init process first spawns +it will block on its side of the pipe until the parent closes its side. This +allows the parent to have time to set the new process inside a cgroup hierarchy +and/or write any uid/gid mappings required for user namespaces. +The pipe is passed to the init process via FD 3. + +The application consuming libcontainer should be compiled statically. libcontainer +does not define any init process and the arguments provided are used to `exec` the +process inside the application. 
There should be no long running init within the +container spec. + +If a pseudo tty is provided to a container it will open and `dup2` the console +as the container's STDIN, STDOUT, STDERR as well as mounting the console +as `/dev/console`. + +An extra set of mounts are provided to a container and setup for use. A container's +rootfs can contain some non portable files inside that can cause side effects during +execution of a process. These files are usually created and populated with the container +specific information via the runtime. + +**Extra runtime files:** +* /etc/hosts +* /etc/resolv.conf +* /etc/hostname +* /etc/localtime + + +#### Defaults + +There are a few defaults that can be overridden by users, but in their omission +these apply to processes within a container. + +| Type | Value | +| ------------------- | ------------------------------ | +| Parent Death Signal | SIGKILL | +| UID | 0 | +| GID | 0 | +| GROUPS | 0, NULL | +| CWD | "/" | +| $HOME | Current user's home dir or "/" | +| Readonly rootfs | false | +| Pseudo TTY | false | + + +## Actions + +After a container is created there is a standard set of actions that can +be done to the container. These actions are part of the public API for +a container. 
+ +| Action | Description | +| -------------- | ------------------------------------------------------------------ | +| Get processes | Return all the pids for processes running inside a container | +| Get Stats | Return resource statistics for the container as a whole | +| Wait | Wait waits on the container's init process ( pid 1 ) | +| Wait Process | Wait on any of the container's processes returning the exit status | +| Destroy | Kill the container's init process and remove any filesystem state | +| Signal | Send a signal to the container's init process | +| Signal Process | Send a signal to any of the container's processes | +| Pause | Pause all processes inside the container | +| Resume | Resume all processes inside the container if paused | +| Exec | Execute a new process inside of the container ( requires setns ) | + + diff --git a/vendor/src/github.com/docker/libcontainer/api_temp.go b/vendor/src/github.com/docker/libcontainer/api_temp.go index 9b2c520774..5c682ee344 100644 --- a/vendor/src/github.com/docker/libcontainer/api_temp.go +++ b/vendor/src/github.com/docker/libcontainer/api_temp.go @@ -5,30 +5,17 @@ package libcontainer import ( "github.com/docker/libcontainer/cgroups/fs" - "github.com/docker/libcontainer/cgroups/systemd" "github.com/docker/libcontainer/network" ) // TODO(vmarmol): Complete Stats() in final libcontainer API and move users to that. // DEPRECATED: The below portions are only to be used during the transition to the official API. // Returns all available stats for the given container. 
-func GetStats(container *Config, state *State) (*ContainerStats, error) { - var ( - err error - stats = &ContainerStats{} - ) - - if systemd.UseSystemd() { - stats.CgroupStats, err = systemd.GetStats(container.Cgroups) - } else { - stats.CgroupStats, err = fs.GetStats(container.Cgroups) - } - - if err != nil { +func GetStats(container *Config, state *State) (stats *ContainerStats, err error) { + stats = &ContainerStats{} + if stats.CgroupStats, err = fs.GetStats(state.CgroupPaths); err != nil { return stats, err } - stats.NetworkStats, err = network.GetStats(&state.NetworkState) - return stats, err } diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/cgroups.go b/vendor/src/github.com/docker/libcontainer/cgroups/cgroups.go index 567e9a6c16..fe3600597b 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/cgroups.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/cgroups.go @@ -53,8 +53,3 @@ type Cgroup struct { Freezer FreezerState `json:"freezer,omitempty"` // set the freeze value for the process Slice string `json:"slice,omitempty"` // Parent slice to use for systemd } - -type ActiveCgroup interface { - Cleanup() error - Paths() (map[string]string, error) -} diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/cgutil/cgutil.go b/vendor/src/github.com/docker/libcontainer/cgroups/cgutil/cgutil.go deleted file mode 100644 index d1a66117f1..0000000000 --- a/vendor/src/github.com/docker/libcontainer/cgroups/cgutil/cgutil.go +++ /dev/null @@ -1,264 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "log" - "os" - "syscall" - "time" - - "github.com/codegangsta/cli" - "github.com/docker/libcontainer/cgroups" - "github.com/docker/libcontainer/cgroups/fs" - "github.com/docker/libcontainer/cgroups/systemd" -) - -var createCommand = cli.Command{ - Name: "create", - Usage: "Create a cgroup container using the supplied configuration and initial process.", - Flags: []cli.Flag{ - cli.StringFlag{Name: "config, c", Value: 
"cgroup.json", Usage: "path to container configuration (cgroups.Cgroup object)"}, - cli.IntFlag{Name: "pid, p", Value: 0, Usage: "pid of the initial process in the container"}, - }, - Action: createAction, -} - -var destroyCommand = cli.Command{ - Name: "destroy", - Usage: "Destroy an existing cgroup container.", - Flags: []cli.Flag{ - cli.StringFlag{Name: "name, n", Value: "", Usage: "container name"}, - cli.StringFlag{Name: "parent, p", Value: "", Usage: "container parent"}, - }, - Action: destroyAction, -} - -var statsCommand = cli.Command{ - Name: "stats", - Usage: "Get stats for cgroup", - Flags: []cli.Flag{ - cli.StringFlag{Name: "name, n", Value: "", Usage: "container name"}, - cli.StringFlag{Name: "parent, p", Value: "", Usage: "container parent"}, - }, - Action: statsAction, -} - -var pauseCommand = cli.Command{ - Name: "pause", - Usage: "Pause cgroup", - Flags: []cli.Flag{ - cli.StringFlag{Name: "name, n", Value: "", Usage: "container name"}, - cli.StringFlag{Name: "parent, p", Value: "", Usage: "container parent"}, - }, - Action: pauseAction, -} - -var resumeCommand = cli.Command{ - Name: "resume", - Usage: "Resume a paused cgroup", - Flags: []cli.Flag{ - cli.StringFlag{Name: "name, n", Value: "", Usage: "container name"}, - cli.StringFlag{Name: "parent, p", Value: "", Usage: "container parent"}, - }, - Action: resumeAction, -} - -var psCommand = cli.Command{ - Name: "ps", - Usage: "Get list of pids for a cgroup", - Flags: []cli.Flag{ - cli.StringFlag{Name: "name, n", Value: "", Usage: "container name"}, - cli.StringFlag{Name: "parent, p", Value: "", Usage: "container parent"}, - }, - Action: psAction, -} - -func getConfigFromFile(c *cli.Context) (*cgroups.Cgroup, error) { - f, err := os.Open(c.String("config")) - if err != nil { - return nil, err - } - defer f.Close() - - var config *cgroups.Cgroup - if err := json.NewDecoder(f).Decode(&config); err != nil { - log.Fatal(err) - } - return config, nil -} - -func openLog(name string) error { - f, err := 
os.OpenFile(name, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0755) - if err != nil { - return err - } - - log.SetOutput(f) - return nil -} - -func getConfig(context *cli.Context) (*cgroups.Cgroup, error) { - name := context.String("name") - if name == "" { - log.Fatal(fmt.Errorf("Missing container name")) - } - parent := context.String("parent") - return &cgroups.Cgroup{ - Name: name, - Parent: parent, - }, nil -} - -func killAll(config *cgroups.Cgroup) { - // We could use freezer here to prevent process spawning while we are trying - // to kill everything. But going with more portable solution of retrying for - // now. - pids := getPids(config) - retry := 10 - for len(pids) != 0 || retry > 0 { - killPids(pids) - time.Sleep(100 * time.Millisecond) - retry-- - pids = getPids(config) - } - if len(pids) != 0 { - log.Fatal(fmt.Errorf("Could not kill existing processes in the container.")) - } -} - -func getPids(config *cgroups.Cgroup) []int { - pids, err := fs.GetPids(config) - if err != nil { - log.Fatal(err) - } - return pids -} - -func killPids(pids []int) { - for _, pid := range pids { - // pids might go away on their own. Ignore errors. 
- syscall.Kill(pid, syscall.SIGKILL) - } -} - -func setFreezerState(context *cli.Context, state cgroups.FreezerState) { - config, err := getConfig(context) - if err != nil { - log.Fatal(err) - } - - if systemd.UseSystemd() { - err = systemd.Freeze(config, state) - } else { - err = fs.Freeze(config, state) - } - if err != nil { - log.Fatal(err) - } -} - -func createAction(context *cli.Context) { - config, err := getConfigFromFile(context) - if err != nil { - log.Fatal(err) - } - pid := context.Int("pid") - if pid <= 0 { - log.Fatal(fmt.Errorf("Invalid pid : %d", pid)) - } - if systemd.UseSystemd() { - _, err := systemd.Apply(config, pid) - if err != nil { - log.Fatal(err) - } - } else { - _, err := fs.Apply(config, pid) - if err != nil { - log.Fatal(err) - } - } -} - -func destroyAction(context *cli.Context) { - config, err := getConfig(context) - if err != nil { - log.Fatal(err) - } - - killAll(config) - // Systemd will clean up cgroup state for empty container. - if !systemd.UseSystemd() { - err := fs.Cleanup(config) - if err != nil { - log.Fatal(err) - } - } -} - -func statsAction(context *cli.Context) { - config, err := getConfig(context) - if err != nil { - log.Fatal(err) - } - stats, err := fs.GetStats(config) - if err != nil { - log.Fatal(err) - } - - out, err := json.MarshalIndent(stats, "", "\t") - if err != nil { - log.Fatal(err) - } - fmt.Printf("Usage stats for '%s':\n %v\n", config.Name, string(out)) -} - -func pauseAction(context *cli.Context) { - setFreezerState(context, cgroups.Frozen) -} - -func resumeAction(context *cli.Context) { - setFreezerState(context, cgroups.Thawed) -} - -func psAction(context *cli.Context) { - config, err := getConfig(context) - if err != nil { - log.Fatal(err) - } - - pids, err := fs.GetPids(config) - if err != nil { - log.Fatal(err) - } - - fmt.Printf("Pids in '%s':\n", config.Name) - fmt.Println(pids) -} - -func main() { - logPath := os.Getenv("log") - if logPath != "" { - if err := openLog(logPath); err != nil { - 
log.Fatal(err) - } - } - - app := cli.NewApp() - app.Name = "cgutil" - app.Usage = "Test utility for libcontainer cgroups package" - app.Version = "0.1" - - app.Commands = []cli.Command{ - createCommand, - destroyCommand, - statsCommand, - pauseCommand, - resumeCommand, - psCommand, - } - - if err := app.Run(os.Args); err != nil { - log.Fatal(err) - } -} diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/cgutil/sample_cgroup.json b/vendor/src/github.com/docker/libcontainer/cgroups/cgutil/sample_cgroup.json deleted file mode 100644 index 2d29784941..0000000000 --- a/vendor/src/github.com/docker/libcontainer/cgroups/cgutil/sample_cgroup.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "name": "luke", - "parent": "darth", - "allow_all_devices": true, - "memory": 1073741824, - "memory_swap": -1, - "cpu_shares": 2048, - "cpu_quota": 500000, - "cpu_period": 250000 -} diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go index 133241e472..6f85793dd2 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go @@ -57,49 +57,57 @@ type data struct { pid int } -func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { +func Apply(c *cgroups.Cgroup, pid int) (map[string]string, error) { d, err := getCgroupData(c, pid) if err != nil { return nil, err } - for _, sys := range subsystems { + paths := make(map[string]string) + defer func() { + if err != nil { + cgroups.RemovePaths(paths) + } + }() + for name, sys := range subsystems { if err := sys.Set(d); err != nil { - d.Cleanup() return nil, err } - } - - return d, nil -} - -func Cleanup(c *cgroups.Cgroup) error { - d, err := getCgroupData(c, 0) - if err != nil { - return fmt.Errorf("Could not get Cgroup data %s", err) - } - return d.Cleanup() -} - -func GetStats(c *cgroups.Cgroup) (*cgroups.Stats, error) { - stats := 
cgroups.NewStats() - - d, err := getCgroupData(c, 0) - if err != nil { - return nil, fmt.Errorf("getting CgroupData %s", err) - } - - for sysname, sys := range subsystems { - path, err := d.path(sysname) + // FIXME: Apply should, ideally, be reentrant or be broken up into a separate + // create and join phase so that the cgroup hierarchy for a container can be + // created then join consists of writing the process pids to cgroup.procs + p, err := d.path(name) if err != nil { - // Don't fail if a cgroup hierarchy was not found, just skip this subsystem if cgroups.IsNotFound(err) { continue } - return nil, err } + paths[name] = p + } + return paths, nil +} +// Symmetrical public function to update device based cgroups. Also available +// in the systemd implementation. +func ApplyDevices(c *cgroups.Cgroup, pid int) error { + d, err := getCgroupData(c, pid) + if err != nil { + return err + } + + devices := subsystems["devices"] + + return devices.Set(d) +} + +func GetStats(systemPaths map[string]string) (*cgroups.Stats, error) { + stats := cgroups.NewStats() + for name, path := range systemPaths { + sys, ok := subsystems[name] + if !ok { + continue + } if err := sys.GetStats(path, stats); err != nil { return nil, err } @@ -163,26 +171,6 @@ func (raw *data) parent(subsystem string) (string, error) { return filepath.Join(raw.root, subsystem, initPath), nil } -func (raw *data) Paths() (map[string]string, error) { - paths := make(map[string]string) - - for sysname := range subsystems { - path, err := raw.path(sysname) - if err != nil { - // Don't fail if a cgroup hierarchy was not found, just skip this subsystem - if cgroups.IsNotFound(err) { - continue - } - - return nil, err - } - - paths[sysname] = path - } - - return paths, nil -} - func (raw *data) path(subsystem string) (string, error) { // If the cgroup name/path is absolute do not look relative to the cgroup of the init process. 
if filepath.IsAbs(raw.cgroup) { @@ -221,13 +209,6 @@ func (raw *data) join(subsystem string) (string, error) { return path, nil } -func (raw *data) Cleanup() error { - for _, sys := range subsystems { - sys.Remove(raw) - } - return nil -} - func writeFile(dir, file, data string) error { return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700) } diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio.go index 261a97ff23..ce824d56c2 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio.go @@ -146,6 +146,26 @@ func getCFQStats(path string, stats *cgroups.Stats) error { } stats.BlkioStats.IoQueuedRecursive = blkioStats + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_service_time_recursive")); err != nil { + return err + } + stats.BlkioStats.IoServiceTimeRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_wait_time_recursive")); err != nil { + return err + } + stats.BlkioStats.IoWaitTimeRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_merged_recursive")); err != nil { + return err + } + stats.BlkioStats.IoMergedRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.time_recursive")); err != nil { + return err + } + stats.BlkioStats.IoTimeRecursive = blkioStats + return nil } diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go index 2a79d260f6..6cd38cbaba 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go @@ -26,7 +26,25 @@ Total 50` 8:0 Async 3 8:0 Total 5 Total 5` - throttleServiceBytes = `8:0 Read 11030528 + serviceTimeRecursiveContents = `8:0 Read 173959 +8:0 Write 0 
+8:0 Sync 0 +8:0 Async 173959 +8:0 Total 17395 +Total 17395` + waitTimeRecursiveContents = `8:0 Read 15571 +8:0 Write 0 +8:0 Sync 0 +8:0 Async 15571 +8:0 Total 15571` + mergedRecursiveContents = `8:0 Read 5 +8:0 Write 10 +8:0 Sync 0 +8:0 Async 0 +8:0 Total 15 +Total 15` + timeRecursiveContents = `8:0 8` + throttleServiceBytes = `8:0 Read 11030528 8:0 Write 23 8:0 Sync 42 8:0 Async 11030528 @@ -61,6 +79,10 @@ func TestBlkioStats(t *testing.T) { "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, "blkio.io_serviced_recursive": servicedRecursiveContents, "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, "blkio.sectors_recursive": sectorsRecursiveContents, }) @@ -93,6 +115,26 @@ func TestBlkioStats(t *testing.T) { appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 3, "Async") appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 5, "Total") + appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 173959, "Read") + appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 0, "Write") + appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 0, "Sync") + appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 173959, "Async") + appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 17395, "Total") + + appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 15571, "Read") + appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 0, "Write") + appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 0, "Sync") + appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 15571, "Async") + appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 15571, "Total") + + appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 5, 
"Read") + appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 10, "Write") + appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 0, "Sync") + appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 0, "Async") + appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 15, "Total") + + appendBlkioStatEntry(&expectedStats.IoTimeRecursive, 8, 0, 8, "") + expectBlkioStatsEquals(t, expectedStats, actualStats.BlkioStats) } @@ -103,6 +145,10 @@ func TestBlkioStatsNoSectorsFile(t *testing.T) { "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, "blkio.io_serviced_recursive": servicedRecursiveContents, "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, }) blkio := &BlkioGroup{} @@ -117,9 +163,13 @@ func TestBlkioStatsNoServiceBytesFile(t *testing.T) { helper := NewCgroupTestUtil("blkio", t) defer helper.cleanup() helper.writeFileContents(map[string]string{ - "blkio.io_serviced_recursive": servicedRecursiveContents, - "blkio.io_queued_recursive": queuedRecursiveContents, - "blkio.sectors_recursive": sectorsRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, }) blkio := &BlkioGroup{} @@ -137,6 +187,10 @@ func TestBlkioStatsNoServicedFile(t *testing.T) { "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, "blkio.io_queued_recursive": queuedRecursiveContents, "blkio.sectors_recursive": sectorsRecursiveContents, + 
"blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, }) blkio := &BlkioGroup{} @@ -154,6 +208,106 @@ func TestBlkioStatsNoQueuedFile(t *testing.T) { "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, "blkio.io_serviced_recursive": servicedRecursiveContents, "blkio.sectors_recursive": sectorsRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsNoServiceTimeFile(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsNoWaitTimeFile(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + 
helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsNoMergedFile(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsNoTimeFile(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + 
"blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, }) blkio := &BlkioGroup{} @@ -172,6 +326,10 @@ func TestBlkioStatsUnexpectedNumberOfFields(t *testing.T) { "blkio.io_serviced_recursive": servicedRecursiveContents, "blkio.io_queued_recursive": queuedRecursiveContents, "blkio.sectors_recursive": sectorsRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, }) blkio := &BlkioGroup{} @@ -190,6 +348,10 @@ func TestBlkioStatsUnexpectedFieldType(t *testing.T) { "blkio.io_serviced_recursive": servicedRecursiveContents, "blkio.io_queued_recursive": queuedRecursiveContents, "blkio.sectors_recursive": sectorsRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, }) blkio := &BlkioGroup{} @@ -208,6 +370,10 @@ func TestNonCFQBlkioStats(t *testing.T) { "blkio.io_serviced_recursive": "", "blkio.io_queued_recursive": "", "blkio.sectors_recursive": "", + "blkio.io_service_time_recursive": "", + "blkio.io_wait_time_recursive": "", + "blkio.io_merged_recursive": "", + "blkio.time_recursive": "", "blkio.throttle.io_service_bytes": throttleServiceBytes, "blkio.throttle.io_serviced": throttleServiced, }) diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go index 8847739464..54d2ed5725 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go @@ -14,17 +14,11 @@ type CpusetGroup struct { } func (s *CpusetGroup) 
Set(d *data) error { - // we don't want to join this cgroup unless it is specified - if d.c.CpusetCpus != "" { - dir, err := d.path("cpuset") - if err != nil { - return err - } - - return s.SetDir(dir, d.c.CpusetCpus, d.pid) + dir, err := d.path("cpuset") + if err != nil { + return err } - - return nil + return s.SetDir(dir, d.c.CpusetCpus, d.pid) } func (s *CpusetGroup) Remove(d *data) error { @@ -46,8 +40,12 @@ func (s *CpusetGroup) SetDir(dir, value string, pid int) error { return err } - if err := writeFile(dir, "cpuset.cpus", value); err != nil { - return err + // If we don't use --cpuset, the default cpuset.cpus is set in + // s.ensureParent, otherwise, use the value we set + if value != "" { + if err := writeFile(dir, "cpuset.cpus", value); err != nil { + return err + } } return nil diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/stats_util_test.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/stats_util_test.go index 7e7da754d0..1a9e590f59 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/stats_util_test.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/stats_util_test.go @@ -41,6 +41,26 @@ func expectBlkioStatsEquals(t *testing.T, expected, actual cgroups.BlkioStats) { log.Printf("blkio SectorsRecursive do not match - %s\n", err) t.Fail() } + + if err := blkioStatEntryEquals(expected.IoServiceTimeRecursive, actual.IoServiceTimeRecursive); err != nil { + log.Printf("blkio IoServiceTimeRecursive do not match - %s\n", err) + t.Fail() + } + + if err := blkioStatEntryEquals(expected.IoWaitTimeRecursive, actual.IoWaitTimeRecursive); err != nil { + log.Printf("blkio IoWaitTimeRecursive do not match - %s\n", err) + t.Fail() + } + + if err := blkioStatEntryEquals(expected.IoMergedRecursive, actual.IoMergedRecursive); err != nil { + log.Printf("blkio IoMergedRecursive do not match - %s vs %s\n", expected.IoMergedRecursive, actual.IoMergedRecursive) + t.Fail() + } + + if err := 
blkioStatEntryEquals(expected.IoTimeRecursive, actual.IoTimeRecursive); err != nil { + log.Printf("blkio IoTimeRecursive do not match - %s\n", err) + t.Fail() + } } func expectThrottlingDataEquals(t *testing.T, expected, actual cgroups.ThrottlingData) { diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/utils_test.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/utils_test.go index f1afd49411..8b19a84b27 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/utils_test.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/utils_test.go @@ -57,7 +57,7 @@ func TestGetCgroupParamsInt(t *testing.T) { if err != nil { t.Fatal(err) } else if value != 0 { - t.Fatalf("Expected %d to equal %f", value, 0) + t.Fatalf("Expected %d to equal %d", value, 0) } // Success with negative values lesser than min int64 @@ -70,7 +70,7 @@ func TestGetCgroupParamsInt(t *testing.T) { if err != nil { t.Fatal(err) } else if value != 0 { - t.Fatalf("Expected %d to equal %f", value, 0) + t.Fatalf("Expected %d to equal %d", value, 0) } // Not a float. 
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/stats.go b/vendor/src/github.com/docker/libcontainer/cgroups/stats.go index f52251395c..dc5dbb3c21 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/stats.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/stats.go @@ -27,7 +27,7 @@ type CpuUsage struct { type CpuStats struct { CpuUsage CpuUsage `json:"cpu_usage,omitempty"` - ThrottlingData ThrottlingData `json:"throlling_data,omitempty"` + ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` } type MemoryStats struct { @@ -52,8 +52,12 @@ type BlkioStatEntry struct { type BlkioStats struct { // number of bytes tranferred to and from the block device IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive,omitempty"` - IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recusrive,omitempty"` + IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive,omitempty"` IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive,omitempty"` + IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive,omitempty"` + IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive,omitempty"` + IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive,omitempty"` + IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive,omitempty"` SectorsRecursive []BlkioStatEntry `json:"sectors_recursive,omitempty"` } diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go index 685591090b..4b9a2f5b74 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go @@ -12,7 +12,7 @@ func UseSystemd() bool { return false } -func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { +func Apply(c *cgroups.Cgroup, pid int) (map[string]string, error) { return nil, 
fmt.Errorf("Systemd not supported") } @@ -20,10 +20,10 @@ func GetPids(c *cgroups.Cgroup) ([]int, error) { return nil, fmt.Errorf("Systemd not supported") } -func Freeze(c *cgroups.Cgroup, state cgroups.FreezerState) error { +func ApplyDevices(c *cgroups.Cgroup, pid int) error { return fmt.Errorf("Systemd not supported") } -func GetStats(c *cgroups.Cgroup) (*cgroups.Stats, error) { - return nil, fmt.Errorf("Systemd not supported") +func Freeze(c *cgroups.Cgroup, state cgroups.FreezerState) error { + return fmt.Errorf("Systemd not supported") } diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go index 7af4818e23..3d89811433 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go @@ -31,18 +31,15 @@ var ( connLock sync.Mutex theConn *systemd.Conn hasStartTransientUnit bool - subsystems = map[string]subsystem{ - "devices": &fs.DevicesGroup{}, - "memory": &fs.MemoryGroup{}, - "cpu": &fs.CpuGroup{}, - "cpuset": &fs.CpusetGroup{}, - "cpuacct": &fs.CpuacctGroup{}, - "blkio": &fs.BlkioGroup{}, - "perf_event": &fs.PerfEventGroup{}, - "freezer": &fs.FreezerGroup{}, - } ) +func newProp(name string, units interface{}) systemd.Property { + return systemd.Property{ + Name: name, + Value: dbus.MakeVariant(units), + } +} + func UseSystemd() bool { s, err := os.Stat("/run/systemd/system") if err != nil || !s.IsDir() { @@ -84,7 +81,7 @@ func getIfaceForUnit(unitName string) string { return "Unit" } -func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { +func Apply(c *cgroups.Cgroup, pid int) (map[string]string, error) { var ( unitName = getUnitName(c) slice = "system.slice" @@ -99,27 +96,27 @@ func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { } properties = append(properties, - systemd.Property{"Slice", 
dbus.MakeVariant(slice)}, - systemd.Property{"Description", dbus.MakeVariant("docker container " + c.Name)}, - systemd.Property{"PIDs", dbus.MakeVariant([]uint32{uint32(pid)})}, + systemd.PropSlice(slice), + systemd.PropDescription("docker container "+c.Name), + newProp("PIDs", []uint32{uint32(pid)}), ) // Always enable accounting, this gets us the same behaviour as the fs implementation, // plus the kernel has some problems with joining the memory cgroup at a later time. properties = append(properties, - systemd.Property{"MemoryAccounting", dbus.MakeVariant(true)}, - systemd.Property{"CPUAccounting", dbus.MakeVariant(true)}, - systemd.Property{"BlockIOAccounting", dbus.MakeVariant(true)}) + newProp("MemoryAccounting", true), + newProp("CPUAccounting", true), + newProp("BlockIOAccounting", true)) if c.Memory != 0 { properties = append(properties, - systemd.Property{"MemoryLimit", dbus.MakeVariant(uint64(c.Memory))}) + newProp("MemoryLimit", uint64(c.Memory))) } // TODO: MemoryReservation and MemorySwap not available in systemd if c.CpuShares != 0 { properties = append(properties, - systemd.Property{"CPUShares", dbus.MakeVariant(uint64(c.CpuShares))}) + newProp("CPUShares", uint64(c.CpuShares))) } if _, err := theConn.StartTransientUnit(unitName, "replace", properties...); err != nil { @@ -140,57 +137,42 @@ func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { } - // we need to manually join the freezer cgroup in systemd because it does not currently support it - // via the dbus api + // we need to manually join the freezer and cpuset cgroup in systemd + // because it does not currently support it via the dbus api. 
if err := joinFreezer(c, pid); err != nil { return nil, err } - if c.CpusetCpus != "" { - if err := joinCpuset(c, pid); err != nil { - return nil, err - } + if err := joinCpuset(c, pid); err != nil { + return nil, err } - return res, nil -} - -func writeFile(dir, file, data string) error { - return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700) -} - -func (c *systemdCgroup) Paths() (map[string]string, error) { paths := make(map[string]string) - - for sysname := range subsystems { - subsystemPath, err := getSubsystemPath(c.cgroup, sysname) + for _, sysname := range []string{ + "devices", + "memory", + "cpu", + "cpuset", + "cpuacct", + "blkio", + "perf_event", + "freezer", + } { + subsystemPath, err := getSubsystemPath(res.cgroup, sysname) if err != nil { // Don't fail if a cgroup hierarchy was not found, just skip this subsystem if cgroups.IsNotFound(err) { continue } - return nil, err } - paths[sysname] = subsystemPath } - return paths, nil } -func (c *systemdCgroup) Cleanup() error { - // systemd cleans up, we don't need to do much - paths, err := c.Paths() - if err != nil { - return err - } - - for _, path := range paths { - os.RemoveAll(path) - } - - return nil +func writeFile(dir, file, data string) error { + return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700) } func joinFreezer(c *cgroups.Cgroup, pid int) error { @@ -260,35 +242,6 @@ func getUnitName(c *cgroups.Cgroup) string { return fmt.Sprintf("%s-%s.scope", c.Parent, c.Name) } -/* - * This would be nicer to get from the systemd API when accounting - * is enabled, but sadly there is no way to do that yet. - * The lack of this functionality in the API & the approach taken - * is guided by - * http://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/#readingaccountinginformation. 
- */ -func GetStats(c *cgroups.Cgroup) (*cgroups.Stats, error) { - stats := cgroups.NewStats() - - for sysname, sys := range subsystems { - subsystemPath, err := getSubsystemPath(c, sysname) - if err != nil { - // Don't fail if a cgroup hierarchy was not found, just skip this subsystem - if cgroups.IsNotFound(err) { - continue - } - - return nil, err - } - - if err := sys.GetStats(subsystemPath, stats); err != nil { - return nil, err - } - } - - return stats, nil -} - // Atm we can't use the systemd device support because of two missing things: // * Support for wildcards to allow mknod on any device // * Support for wildcards to allow /dev/pts support @@ -327,6 +280,12 @@ func joinDevices(c *cgroups.Cgroup, pid int) error { return nil } +// Symmetrical public function to update device based cgroups. Also available +// in the fs implementation. +func ApplyDevices(c *cgroups.Cgroup, pid int) error { + return joinDevices(c, pid) +} + func joinMemory(c *cgroups.Cgroup, pid int) error { memorySwap := c.MemorySwap diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/utils.go b/vendor/src/github.com/docker/libcontainer/cgroups/utils.go index 77a3c0d7c5..224a20b9b2 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/utils.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/utils.go @@ -189,6 +189,17 @@ func EnterPid(cgroupPaths map[string]string, pid int) error { } } } - return nil } + +// RemovePaths iterates over the provided paths removing them. +// If an error is encountered the removal proceeds and the first error is +// returned to ensure a partial removal is not possible. 
+func RemovePaths(paths map[string]string) (err error) { + for _, path := range paths { + if rerr := os.RemoveAll(path); err == nil { + err = rerr + } + } + return err +} diff --git a/vendor/src/github.com/docker/libcontainer/config.go b/vendor/src/github.com/docker/libcontainer/config.go index 1fb377dcef..915e00660c 100644 --- a/vendor/src/github.com/docker/libcontainer/config.go +++ b/vendor/src/github.com/docker/libcontainer/config.go @@ -47,6 +47,9 @@ type Config struct { // Networks specifies the container's network setup to be created Networks []*Network `json:"networks,omitempty"` + // Ipc specifies the container's ipc setup to be created + IpcNsPath string `json:"ipc,omitempty"` + // Routes can be specified to create entries in the route table as the container is started Routes []*Route `json:"routes,omitempty"` @@ -65,6 +68,10 @@ type Config struct { // RestrictSys will remount /proc/sys, /sys, and mask over sysrq-trigger as well as /proc/irq and // /proc/bus RestrictSys bool `json:"restrict_sys,omitempty"` + + // Rlimits specifies the resource limits, such as max open files, to set in the container + // If Rlimits are not set, the container will inherit rlimits from the parent process + Rlimits []Rlimit `json:"rlimits,omitempty"` } // Routes can be specified to create entries in the route table as the container is started @@ -87,3 +94,9 @@ type Route struct { // The device to set this route up for, for example: eth0 InterfaceName string `json:"interface_name,omitempty"` } + +type Rlimit struct { + Type int `json:"type,omitempty"` + Hard uint64 `json:"hard,omitempty"` + Soft uint64 `json:"soft,omitempty"` +} diff --git a/vendor/src/github.com/docker/libcontainer/console/console.go b/vendor/src/github.com/docker/libcontainer/console/console.go index 346f537d53..438e670420 100644 --- a/vendor/src/github.com/docker/libcontainer/console/console.go +++ b/vendor/src/github.com/docker/libcontainer/console/console.go @@ -67,14 +67,14 @@ func OpenAndDup(consolePath 
string) error { // Unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. // Unlockpt should be called before opening the slave side of a pseudoterminal. func Unlockpt(f *os.File) error { - var u int + var u int32 return Ioctl(f.Fd(), syscall.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))) } // Ptsname retrieves the name of the first available pts for the given master. func Ptsname(f *os.File) (string, error) { - var n int + var n int32 if err := Ioctl(f.Fd(), syscall.TIOCGPTN, uintptr(unsafe.Pointer(&n))); err != nil { return "", err diff --git a/vendor/src/github.com/docker/libcontainer/devices/devices.go b/vendor/src/github.com/docker/libcontainer/devices/devices.go index 558f7f5f9c..8e86d95292 100644 --- a/vendor/src/github.com/docker/libcontainer/devices/devices.go +++ b/vendor/src/github.com/docker/libcontainer/devices/devices.go @@ -100,9 +100,10 @@ func getDeviceNodes(path string) ([]*Device, error) { out := []*Device{} for _, f := range files { - if f.IsDir() { + switch { + case f.IsDir(): switch f.Name() { - case "pts", "shm", "fd": + case "pts", "shm", "fd", "mqueue": continue default: sub, err := getDeviceNodes(filepath.Join(path, f.Name())) @@ -113,6 +114,8 @@ func getDeviceNodes(path string) ([]*Device, error) { out = append(out, sub...) 
continue } + case f.Name() == "console": + continue } device, err := GetDevice(filepath.Join(path, f.Name()), "rwm") diff --git a/vendor/src/github.com/docker/libcontainer/integration/doc.go b/vendor/src/github.com/docker/libcontainer/integration/doc.go new file mode 100644 index 0000000000..87545bc99c --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/integration/doc.go @@ -0,0 +1,2 @@ +// integration is used for integration testing of libcontainer +package integration diff --git a/vendor/src/github.com/docker/libcontainer/integration/exec_test.go b/vendor/src/github.com/docker/libcontainer/integration/exec_test.go new file mode 100644 index 0000000000..8f4dae0f9e --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/integration/exec_test.go @@ -0,0 +1,179 @@ +package integration + +import ( + "os" + "strings" + "testing" +) + +func TestExecPS(t *testing.T) { + if testing.Short() { + return + } + + rootfs, err := newRootFs() + if err != nil { + t.Fatal(err) + } + defer remove(rootfs) + + config := newTemplateConfig(rootfs) + buffers, exitCode, err := runContainer(config, "", "ps") + if err != nil { + t.Fatal(err) + } + + if exitCode != 0 { + t.Fatalf("exit code not 0. 
code %d stderr %q", exitCode, buffers.Stderr) + } + + lines := strings.Split(buffers.Stdout.String(), "\n") + if len(lines) < 2 { + t.Fatalf("more than one process running for output %q", buffers.Stdout.String()) + } + expected := `1 root ps` + actual := strings.Trim(lines[1], "\n ") + if actual != expected { + t.Fatalf("expected output %q but received %q", expected, actual) + } +} + +func TestIPCPrivate(t *testing.T) { + if testing.Short() { + return + } + + rootfs, err := newRootFs() + if err != nil { + t.Fatal(err) + } + defer remove(rootfs) + + l, err := os.Readlink("/proc/1/ns/ipc") + if err != nil { + t.Fatal(err) + } + + config := newTemplateConfig(rootfs) + config.Namespaces["NEWIPC"] = true + buffers, exitCode, err := runContainer(config, "", "readlink", "/proc/self/ns/ipc") + if err != nil { + t.Fatal(err) + } + + if exitCode != 0 { + t.Fatalf("exit code not 0. code %d stderr %q", exitCode, buffers.Stderr) + } + + if actual := strings.Trim(buffers.Stdout.String(), "\n"); actual == l { + t.Fatalf("ipc link should be private to the conatiner but equals host %q %q", actual, l) + } +} + +func TestIPCHost(t *testing.T) { + if testing.Short() { + return + } + + rootfs, err := newRootFs() + if err != nil { + t.Fatal(err) + } + defer remove(rootfs) + + l, err := os.Readlink("/proc/1/ns/ipc") + if err != nil { + t.Fatal(err) + } + + config := newTemplateConfig(rootfs) + config.Namespaces["NEWIPC"] = false + buffers, exitCode, err := runContainer(config, "", "readlink", "/proc/self/ns/ipc") + if err != nil { + t.Fatal(err) + } + + if exitCode != 0 { + t.Fatalf("exit code not 0. 
code %d stderr %q", exitCode, buffers.Stderr) + } + + if actual := strings.Trim(buffers.Stdout.String(), "\n"); actual != l { + t.Fatalf("ipc link not equal to host link %q %q", actual, l) + } +} + +func TestIPCJoinPath(t *testing.T) { + if testing.Short() { + return + } + + rootfs, err := newRootFs() + if err != nil { + t.Fatal(err) + } + defer remove(rootfs) + + l, err := os.Readlink("/proc/1/ns/ipc") + if err != nil { + t.Fatal(err) + } + + config := newTemplateConfig(rootfs) + config.Namespaces["NEWIPC"] = false + config.IpcNsPath = "/proc/1/ns/ipc" + + buffers, exitCode, err := runContainer(config, "", "readlink", "/proc/self/ns/ipc") + if err != nil { + t.Fatal(err) + } + + if exitCode != 0 { + t.Fatalf("exit code not 0. code %d stderr %q", exitCode, buffers.Stderr) + } + + if actual := strings.Trim(buffers.Stdout.String(), "\n"); actual != l { + t.Fatalf("ipc link not equal to host link %q %q", actual, l) + } +} + +func TestIPCBadPath(t *testing.T) { + if testing.Short() { + return + } + + rootfs, err := newRootFs() + if err != nil { + t.Fatal(err) + } + defer remove(rootfs) + + config := newTemplateConfig(rootfs) + config.Namespaces["NEWIPC"] = false + config.IpcNsPath = "/proc/1/ns/ipcc" + + _, _, err = runContainer(config, "", "true") + if err == nil { + t.Fatal("container succeded with bad ipc path") + } +} + +func TestRlimit(t *testing.T) { + if testing.Short() { + return + } + + rootfs, err := newRootFs() + if err != nil { + t.Fatal(err) + } + defer remove(rootfs) + + config := newTemplateConfig(rootfs) + out, _, err := runContainer(config, "", "/bin/sh", "-c", "ulimit -n") + if err != nil { + t.Fatal(err) + } + if limit := strings.TrimSpace(out.Stdout.String()); limit != "1024" { + t.Fatalf("expected rlimit to be 1024, got %s", limit) + } +} diff --git a/vendor/src/github.com/docker/libcontainer/integration/init_test.go b/vendor/src/github.com/docker/libcontainer/integration/init_test.go new file mode 100644 index 0000000000..9954c0f8e5 --- /dev/null 
+++ b/vendor/src/github.com/docker/libcontainer/integration/init_test.go @@ -0,0 +1,33 @@ +package integration + +import ( + "log" + "os" + "runtime" + + "github.com/docker/libcontainer/namespaces" +) + +// init runs the libcontainer initialization code because of the busybox style needs +// to work around the go runtime and the issues with forking +func init() { + if len(os.Args) < 2 || os.Args[1] != "init" { + return + } + runtime.LockOSThread() + + container, err := loadConfig() + if err != nil { + log.Fatal(err) + } + + rootfs, err := os.Getwd() + if err != nil { + log.Fatal(err) + } + + if err := namespaces.Init(container, rootfs, "", os.NewFile(3, "pipe"), os.Args[3:]); err != nil { + log.Fatalf("unable to initialize for container: %s", err) + } + os.Exit(1) +} diff --git a/vendor/src/github.com/docker/libcontainer/integration/template_test.go b/vendor/src/github.com/docker/libcontainer/integration/template_test.go new file mode 100644 index 0000000000..efcf6d5b90 --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/integration/template_test.go @@ -0,0 +1,73 @@ +package integration + +import ( + "syscall" + + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/cgroups" + "github.com/docker/libcontainer/devices" +) + +// newTemplateConfig returns a base template for running a container +// +// it uses a network strategy of just setting a loopback interface +// and the default setup for devices +func newTemplateConfig(rootfs string) *libcontainer.Config { + return &libcontainer.Config{ + RootFs: rootfs, + Tty: false, + Capabilities: []string{ + "CHOWN", + "DAC_OVERRIDE", + "FSETID", + "FOWNER", + "MKNOD", + "NET_RAW", + "SETGID", + "SETUID", + "SETFCAP", + "SETPCAP", + "NET_BIND_SERVICE", + "SYS_CHROOT", + "KILL", + "AUDIT_WRITE", + }, + Namespaces: map[string]bool{ + "NEWNS": true, + "NEWUTS": true, + "NEWIPC": true, + "NEWPID": true, + "NEWNET": true, + }, + Cgroups: &cgroups.Cgroup{ + Parent: "integration", + AllowAllDevices: false, + 
AllowedDevices: devices.DefaultAllowedDevices, + }, + + MountConfig: &libcontainer.MountConfig{ + DeviceNodes: devices.DefaultAutoCreatedDevices, + }, + Hostname: "integration", + Env: []string{ + "HOME=/root", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOSTNAME=integration", + "TERM=xterm", + }, + Networks: []*libcontainer.Network{ + { + Type: "loopback", + Address: "127.0.0.1/0", + Gateway: "localhost", + }, + }, + Rlimits: []libcontainer.Rlimit{ + { + Type: syscall.RLIMIT_NOFILE, + Hard: uint64(1024), + Soft: uint64(1024), + }, + }, + } +} diff --git a/vendor/src/github.com/docker/libcontainer/integration/utils_test.go b/vendor/src/github.com/docker/libcontainer/integration/utils_test.go new file mode 100644 index 0000000000..6393fb9982 --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/integration/utils_test.go @@ -0,0 +1,95 @@ +package integration + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/namespaces" +) + +func newStdBuffers() *stdBuffers { + return &stdBuffers{ + Stdin: bytes.NewBuffer(nil), + Stdout: bytes.NewBuffer(nil), + Stderr: bytes.NewBuffer(nil), + } +} + +type stdBuffers struct { + Stdin *bytes.Buffer + Stdout *bytes.Buffer + Stderr *bytes.Buffer +} + +func writeConfig(config *libcontainer.Config) error { + f, err := os.OpenFile(filepath.Join(config.RootFs, "container.json"), os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700) + if err != nil { + return err + } + defer f.Close() + return json.NewEncoder(f).Encode(config) +} + +func loadConfig() (*libcontainer.Config, error) { + f, err := os.Open(filepath.Join(os.Getenv("data_path"), "container.json")) + if err != nil { + return nil, err + } + defer f.Close() + + var container *libcontainer.Config + if err := json.NewDecoder(f).Decode(&container); err != nil { + return nil, err + } + return container, nil +} + +// newRootFs creates a new tmp 
directory and copies the busybox root filesystem +func newRootFs() (string, error) { + dir, err := ioutil.TempDir("", "") + if err != nil { + return "", err + } + if err := os.MkdirAll(dir, 0700); err != nil { + return "", err + } + if err := copyBusybox(dir); err != nil { + return "", nil + } + return dir, nil +} + +func remove(dir string) { + os.RemoveAll(dir) +} + +// copyBusybox copies the rootfs for a busybox container created for the test image +// into the new directory for the specific test +func copyBusybox(dest string) error { + out, err := exec.Command("sh", "-c", fmt.Sprintf("cp -R /busybox/* %s/", dest)).CombinedOutput() + if err != nil { + return fmt.Errorf("copy error %q: %q", err, out) + } + return nil +} + +// runContainer runs the container with the specific config and arguments +// +// buffers are returned containing the STDOUT and STDERR output for the run +// along with the exit code and any go error +func runContainer(config *libcontainer.Config, console string, args ...string) (buffers *stdBuffers, exitCode int, err error) { + if err := writeConfig(config); err != nil { + return nil, -1, err + } + + buffers = newStdBuffers() + exitCode, err = namespaces.Exec(config, buffers.Stdin, buffers.Stdout, buffers.Stderr, + console, config.RootFs, args, namespaces.DefaultCreateCommand, nil) + return +} diff --git a/vendor/src/github.com/docker/libcontainer/ipc/ipc.go b/vendor/src/github.com/docker/libcontainer/ipc/ipc.go new file mode 100644 index 0000000000..147cf5571e --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/ipc/ipc.go @@ -0,0 +1,29 @@ +package ipc + +import ( + "fmt" + "os" + "syscall" + + "github.com/docker/libcontainer/system" +) + +// Join the IPC Namespace of specified ipc path if it exists. +// If the path does not exist then you are not joining a container. 
+func Initialize(nsPath string) error { + if nsPath == "" { + return nil + } + f, err := os.OpenFile(nsPath, os.O_RDONLY, 0) + if err != nil { + return fmt.Errorf("failed get IPC namespace fd: %v", err) + } + + err = system.Setns(f.Fd(), syscall.CLONE_NEWIPC) + f.Close() + + if err != nil { + return fmt.Errorf("failed to setns current IPC namespace: %v", err) + } + return nil +} diff --git a/vendor/src/github.com/docker/libcontainer/label/label.go b/vendor/src/github.com/docker/libcontainer/label/label.go index ce60296ea1..5a540fd5a0 100644 --- a/vendor/src/github.com/docker/libcontainer/label/label.go +++ b/vendor/src/github.com/docker/libcontainer/label/label.go @@ -25,6 +25,10 @@ func SetFileLabel(path string, fileLabel string) error { return nil } +func SetFileCreateLabel(fileLabel string) error { + return nil +} + func Relabel(path string, fileLabel string, relabel string) error { return nil } @@ -43,3 +47,15 @@ func ReserveLabel(label string) error { func UnreserveLabel(label string) error { return nil } + +// DupSecOpt takes an process label and returns security options that +// can be used to set duplicate labels on future container processes +func DupSecOpt(src string) []string { + return nil +} + +// DisableSecOpt returns a security opt that can disable labeling +// support for future container processes +func DisableSecOpt() []string { + return nil +} diff --git a/vendor/src/github.com/docker/libcontainer/label/label_selinux.go b/vendor/src/github.com/docker/libcontainer/label/label_selinux.go index 65b84797b5..5983031ae0 100644 --- a/vendor/src/github.com/docker/libcontainer/label/label_selinux.go +++ b/vendor/src/github.com/docker/libcontainer/label/label_selinux.go @@ -17,7 +17,6 @@ func InitLabels(options []string) (string, string, error) { if !selinux.SelinuxEnabled() { return "", "", nil } - var err error processLabel, mountLabel := selinux.GetLxcContexts() if processLabel != "" { pcon := selinux.NewContext(processLabel) @@ -38,7 +37,7 @@ func 
InitLabels(options []string) (string, string, error) { processLabel = pcon.Get() mountLabel = mcon.Get() } - return processLabel, mountLabel, err + return processLabel, mountLabel, nil } // DEPRECATED: The GenLabels function is only to be used during the transition to the official API. @@ -88,6 +87,14 @@ func SetFileLabel(path string, fileLabel string) error { return nil } +// Tell the kernel the label for all files to be created +func SetFileCreateLabel(fileLabel string) error { + if selinux.SelinuxEnabled() { + return selinux.Setfscreatecon(fileLabel) + } + return nil +} + // Change the label of path to the filelabel string. If the relabel string // is "z", relabel will change the MCS label to s0. This will allow all // containers to share the content. If the relabel string is a "Z" then @@ -130,3 +137,15 @@ func UnreserveLabel(label string) error { selinux.FreeLxcContexts(label) return nil } + +// DupSecOpt takes an process label and returns security options that +// can be used to set duplicate labels on future container processes +func DupSecOpt(src string) []string { + return selinux.DupSecOpt(src) +} + +// DisableSecOpt returns a security opt that can disable labeling +// support for future container processes +func DisableSecOpt() []string { + return selinux.DisableSecOpt() +} diff --git a/vendor/src/github.com/docker/libcontainer/label/label_selinux_test.go b/vendor/src/github.com/docker/libcontainer/label/label_selinux_test.go index c83654f6b5..8629353f24 100644 --- a/vendor/src/github.com/docker/libcontainer/label/label_selinux_test.go +++ b/vendor/src/github.com/docker/libcontainer/label/label_selinux_test.go @@ -3,6 +3,7 @@ package label import ( + "strings" "testing" "github.com/docker/libcontainer/selinux" @@ -33,7 +34,7 @@ func TestInit(t *testing.T) { t.Fatal(err) } if plabel != "user_u:user_r:user_t:s0:c1,c15" || mlabel != "user_u:object_r:svirt_sandbox_file_t:s0:c1,c15" { - t.Log("InitLabels User Failed") + t.Log("InitLabels User Match Failed") 
t.Log(plabel, mlabel) t.Fatal(err) } @@ -46,3 +47,43 @@ func TestInit(t *testing.T) { } } } +func TestDuplicateLabel(t *testing.T) { + secopt := DupSecOpt("system_u:system_r:svirt_lxc_net_t:s0:c1,c2") + t.Log(secopt) + for _, opt := range secopt { + con := strings.SplitN(opt, ":", 3) + if len(con) != 3 || con[0] != "label" { + t.Errorf("Invalid DupSecOpt return value") + continue + } + if con[1] == "user" { + if con[2] != "system_u" { + t.Errorf("DupSecOpt Failed user incorrect") + } + continue + } + if con[1] == "role" { + if con[2] != "system_r" { + t.Errorf("DupSecOpt Failed role incorrect") + } + continue + } + if con[1] == "type" { + if con[2] != "svirt_lxc_net_t" { + t.Errorf("DupSecOpt Failed type incorrect") + } + continue + } + if con[1] == "level" { + if con[2] != "s0:c1,c2" { + t.Errorf("DupSecOpt Failed level incorrect") + } + continue + } + t.Errorf("DupSecOpt Failed invalid field %q", con[1]) + } + secopt = DisableSecOpt() + if secopt[0] != "label:disable" { + t.Errorf("DisableSecOpt Failed level incorrect") + } +} diff --git a/vendor/src/github.com/docker/libcontainer/mount/init.go b/vendor/src/github.com/docker/libcontainer/mount/init.go index ea2b732737..a2c3d52026 100644 --- a/vendor/src/github.com/docker/libcontainer/mount/init.go +++ b/vendor/src/github.com/docker/libcontainer/mount/init.go @@ -97,7 +97,7 @@ func InitializeMountNamespace(rootfs, console string, sysReadonly bool, mountCon return nil } -// mountSystem sets up linux specific system mounts like sys, proc, shm, and devpts +// mountSystem sets up linux specific system mounts like mqueue, sys, proc, shm, and devpts // inside the mount namespace func mountSystem(rootfs string, sysReadonly bool, mountConfig *MountConfig) error { for _, m := range newSystemMounts(rootfs, mountConfig.MountLabel, sysReadonly) { @@ -168,6 +168,7 @@ func newSystemMounts(rootfs, mountLabel string, sysReadonly bool) []mount { {source: "proc", path: filepath.Join(rootfs, "proc"), device: "proc", flags: 
defaultMountFlags}, {source: "tmpfs", path: filepath.Join(rootfs, "dev"), device: "tmpfs", flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME, data: label.FormatMountLabel("mode=755", mountLabel)}, {source: "shm", path: filepath.Join(rootfs, "dev", "shm"), device: "tmpfs", flags: defaultMountFlags, data: label.FormatMountLabel("mode=1777,size=65536k", mountLabel)}, + {source: "mqueue", path: filepath.Join(rootfs, "dev", "mqueue"), device: "mqueue", flags: defaultMountFlags}, {source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: label.FormatMountLabel("newinstance,ptmxmode=0666,mode=620,gid=5", mountLabel)}, } diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/exec.go b/vendor/src/github.com/docker/libcontainer/namespaces/exec.go index 4440ccd0d5..b7873edd0e 100644 --- a/vendor/src/github.com/docker/libcontainer/namespaces/exec.go +++ b/vendor/src/github.com/docker/libcontainer/namespaces/exec.go @@ -3,6 +3,7 @@ package namespaces import ( + "encoding/json" "io" "os" "os/exec" @@ -13,7 +14,6 @@ import ( "github.com/docker/libcontainer/cgroups/fs" "github.com/docker/libcontainer/cgroups/systemd" "github.com/docker/libcontainer/network" - "github.com/docker/libcontainer/syncpipe" "github.com/docker/libcontainer/system" ) @@ -22,19 +22,17 @@ import ( // Exec performs setup outside of a namespace so that a container can be // executed. Exec is a high level function for working with container namespaces. 
func Exec(container *libcontainer.Config, stdin io.Reader, stdout, stderr io.Writer, console, dataPath string, args []string, createCommand CreateCommand, startCallback func()) (int, error) { - var ( - err error - ) + var err error // create a pipe so that we can synchronize with the namespaced process and - // pass the veth name to the child - syncPipe, err := syncpipe.NewSyncPipe() + // pass the state and configuration to the child process + parent, child, err := newInitPipe() if err != nil { return -1, err } - defer syncPipe.Close() + defer parent.Close() - command := createCommand(container, console, dataPath, os.Args[0], syncPipe.Child(), args) + command := createCommand(container, console, dataPath, os.Args[0], child, args) // Note: these are only used in non-tty mode // if there is a tty for the container it will be opened within the namespace and the // fds will be duped to stdin, stdout, and stderr @@ -43,39 +41,42 @@ func Exec(container *libcontainer.Config, stdin io.Reader, stdout, stderr io.Wri command.Stderr = stderr if err := command.Start(); err != nil { + child.Close() return -1, err } + child.Close() - // Now we passed the pipe to the child, close our side - syncPipe.CloseChild() + terminate := func(terr error) (int, error) { + // TODO: log the errors for kill and wait + command.Process.Kill() + command.Wait() + return -1, terr + } started, err := system.GetProcessStartTime(command.Process.Pid) if err != nil { - return -1, err + return terminate(err) } // Do this before syncing with child so that no children // can escape the cgroup - cgroupRef, err := SetupCgroups(container, command.Process.Pid) + cgroupPaths, err := SetupCgroups(container, command.Process.Pid) if err != nil { - command.Process.Kill() - command.Wait() - return -1, err - } - defer cgroupRef.Cleanup() - - cgroupPaths, err := cgroupRef.Paths() - if err != nil { - command.Process.Kill() - command.Wait() - return -1, err + return terminate(err) } + defer 
cgroups.RemovePaths(cgroupPaths) var networkState network.NetworkState - if err := InitializeNetworking(container, command.Process.Pid, syncPipe, &networkState); err != nil { - command.Process.Kill() - command.Wait() - return -1, err + if err := InitializeNetworking(container, command.Process.Pid, &networkState); err != nil { + return terminate(err) + } + // send the state to the container's init process then shutdown writes for the parent + if err := json.NewEncoder(parent).Encode(networkState); err != nil { + return terminate(err) + } + // shutdown writes for the parent side of the pipe + if err := syscall.Shutdown(int(parent.Fd()), syscall.SHUT_WR); err != nil { + return terminate(err) } state := &libcontainer.State{ @@ -86,17 +87,18 @@ func Exec(container *libcontainer.Config, stdin io.Reader, stdout, stderr io.Wri } if err := libcontainer.SaveState(dataPath, state); err != nil { - command.Process.Kill() - command.Wait() - return -1, err + return terminate(err) } defer libcontainer.DeleteState(dataPath) - // Sync with child - if err := syncPipe.ReadFromChild(); err != nil { - command.Process.Kill() - command.Wait() - return -1, err + // wait for the child process to fully complete and receive an error message + // if one was encountered + var ierr *initError + if err := json.NewDecoder(parent).Decode(&ierr); err != nil && err != io.EOF { + return terminate(err) + } + if ierr != nil { + return terminate(ierr) } if startCallback != nil { @@ -108,7 +110,6 @@ func Exec(container *libcontainer.Config, stdin io.Reader, stdout, stderr io.Wri return -1, err } } - return command.ProcessState.Sys().(syscall.WaitStatus).ExitStatus(), nil } @@ -129,16 +130,6 @@ func DefaultCreateCommand(container *libcontainer.Config, console, dataPath, ini "data_path=" + dataPath, } - /* - TODO: move user and wd into env - if user != "" { - env = append(env, "user="+user) - } - if workingDir != "" { - env = append(env, "wd="+workingDir) - } - */ - command := exec.Command(init, 
append([]string{"init", "--"}, args...)...) // make sure the process is executed inside the context of the rootfs command.Dir = container.RootFs @@ -157,23 +148,20 @@ func DefaultCreateCommand(container *libcontainer.Config, console, dataPath, ini // SetupCgroups applies the cgroup restrictions to the process running in the container based // on the container's configuration -func SetupCgroups(container *libcontainer.Config, nspid int) (cgroups.ActiveCgroup, error) { +func SetupCgroups(container *libcontainer.Config, nspid int) (map[string]string, error) { if container.Cgroups != nil { c := container.Cgroups - if systemd.UseSystemd() { return systemd.Apply(c, nspid) } - return fs.Apply(c, nspid) } - - return nil, nil + return map[string]string{}, nil } // InitializeNetworking creates the container's network stack outside of the namespace and moves // interfaces into the container's net namespaces if necessary -func InitializeNetworking(container *libcontainer.Config, nspid int, pipe *syncpipe.SyncPipe, networkState *network.NetworkState) error { +func InitializeNetworking(container *libcontainer.Config, nspid int, networkState *network.NetworkState) error { for _, config := range container.Networks { strategy, err := network.GetStrategy(config.Type) if err != nil { @@ -183,18 +171,5 @@ func InitializeNetworking(container *libcontainer.Config, nspid int, pipe *syncp return err } } - return pipe.SendToChild(networkState) -} - -// GetNamespaceFlags parses the container's Namespaces options to set the correct -// flags on clone, unshare, and setns -func GetNamespaceFlags(namespaces map[string]bool) (flag int) { - for key, enabled := range namespaces { - if enabled { - if ns := GetNamespace(key); ns != nil { - flag |= ns.Value - } - } - } - return flag + return nil } diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/execin.go b/vendor/src/github.com/docker/libcontainer/namespaces/execin.go index 53e676ac7e..430dc72fe6 100644 --- 
a/vendor/src/github.com/docker/libcontainer/namespaces/execin.go +++ b/vendor/src/github.com/docker/libcontainer/namespaces/execin.go @@ -3,6 +3,7 @@ package namespaces import ( + "encoding/json" "fmt" "io" "os" @@ -15,7 +16,6 @@ import ( "github.com/docker/libcontainer/apparmor" "github.com/docker/libcontainer/cgroups" "github.com/docker/libcontainer/label" - "github.com/docker/libcontainer/syncpipe" "github.com/docker/libcontainer/system" ) @@ -41,11 +41,11 @@ func ExecIn(container *libcontainer.Config, state *libcontainer.State, userArgs } } - pipe, err := syncpipe.NewSyncPipe() + parent, child, err := newInitPipe() if err != nil { return -1, err } - defer pipe.Close() + defer parent.Close() // Note: these are only used in non-tty mode // if there is a tty for the container it will be opened within the namespace and the @@ -53,23 +53,28 @@ func ExecIn(container *libcontainer.Config, state *libcontainer.State, userArgs cmd.Stdin = stdin cmd.Stdout = stdout cmd.Stderr = stderr - - cmd.ExtraFiles = []*os.File{pipe.Child()} + cmd.ExtraFiles = []*os.File{child} if err := cmd.Start(); err != nil { + child.Close() return -1, err } - pipe.CloseChild() + child.Close() + + terminate := func(terr error) (int, error) { + // TODO: log the errors for kill and wait + cmd.Process.Kill() + cmd.Wait() + return -1, terr + } // Enter cgroups. 
if err := EnterCgroups(state, cmd.Process.Pid); err != nil { - return -1, err + return terminate(err) } - if err := pipe.SendToChild(container); err != nil { - cmd.Process.Kill() - cmd.Wait() - return -1, err + if err := json.NewEncoder(parent).Encode(container); err != nil { + return terminate(err) } if startCallback != nil { @@ -81,7 +86,6 @@ func ExecIn(container *libcontainer.Config, state *libcontainer.State, userArgs return -1, err } } - return cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus(), nil } @@ -107,7 +111,7 @@ func FinalizeSetns(container *libcontainer.Config, args []string) error { } } - if err := system.Execv(args[0], args[0:], container.Env); err != nil { + if err := system.Execv(args[0], args[0:], os.Environ()); err != nil { return err } diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/init.go b/vendor/src/github.com/docker/libcontainer/namespaces/init.go index 4c2b3327e5..7c83b13761 100644 --- a/vendor/src/github.com/docker/libcontainer/namespaces/init.go +++ b/vendor/src/github.com/docker/libcontainer/namespaces/init.go @@ -3,7 +3,9 @@ package namespaces import ( + "encoding/json" "fmt" + "io/ioutil" "os" "strings" "syscall" @@ -11,13 +13,13 @@ import ( "github.com/docker/libcontainer" "github.com/docker/libcontainer/apparmor" "github.com/docker/libcontainer/console" + "github.com/docker/libcontainer/ipc" "github.com/docker/libcontainer/label" "github.com/docker/libcontainer/mount" "github.com/docker/libcontainer/netlink" "github.com/docker/libcontainer/network" "github.com/docker/libcontainer/security/capabilities" "github.com/docker/libcontainer/security/restrict" - "github.com/docker/libcontainer/syncpipe" "github.com/docker/libcontainer/system" "github.com/docker/libcontainer/user" "github.com/docker/libcontainer/utils" @@ -29,11 +31,22 @@ import ( // and other options required for the new container. 
// The caller of Init function has to ensure that the go runtime is locked to an OS thread // (using runtime.LockOSThread) else system calls like setns called within Init may not work as intended. -func Init(container *libcontainer.Config, uncleanRootfs, consolePath string, syncPipe *syncpipe.SyncPipe, args []string) (err error) { +func Init(container *libcontainer.Config, uncleanRootfs, consolePath string, pipe *os.File, args []string) (err error) { defer func() { + // if we have an error during the initialization of the container's init then send it back to the + // parent process in the form of an initError. if err != nil { - syncPipe.ReportChildError(err) + // ensure that any data sent from the parent is consumed so it doesn't + // receive ECONNRESET when the child writes to the pipe. + ioutil.ReadAll(pipe) + if err := json.NewEncoder(pipe).Encode(initError{ + Message: err.Error(), + }); err != nil { + panic(err) + } } + // ensure that this pipe is always closed + pipe.Close() }() rootfs, err := utils.ResolveRootfs(uncleanRootfs) @@ -49,7 +62,7 @@ func Init(container *libcontainer.Config, uncleanRootfs, consolePath string, syn // We always read this as it is a way to sync with the parent as well var networkState *network.NetworkState - if err := syncPipe.ReadFromParent(&networkState); err != nil { + if err := json.NewDecoder(pipe).Decode(&networkState); err != nil { return err } @@ -66,6 +79,9 @@ func Init(container *libcontainer.Config, uncleanRootfs, consolePath string, syn return fmt.Errorf("setctty %s", err) } } + if err := ipc.Initialize(container.IpcNsPath); err != nil { + return fmt.Errorf("setup IPC %s", err) + } if err := setupNetwork(container, networkState); err != nil { return fmt.Errorf("setup networking %s", err) } @@ -73,6 +89,10 @@ func Init(container *libcontainer.Config, uncleanRootfs, consolePath string, syn return fmt.Errorf("setup route %s", err) } + if err := setupRlimits(container); err != nil { + return fmt.Errorf("setup rlimits %s", 
err) + } + label.Init() if err := mount.InitializeMountNamespace(rootfs, @@ -84,7 +104,7 @@ func Init(container *libcontainer.Config, uncleanRootfs, consolePath string, syn if container.Hostname != "" { if err := syscall.Sethostname([]byte(container.Hostname)); err != nil { - return fmt.Errorf("sethostname %s", err) + return fmt.Errorf("unable to sethostname %q: %s", container.Hostname, err) } } @@ -151,26 +171,43 @@ func RestoreParentDeathSignal(old int) error { // SetupUser changes the groups, gid, and uid for the user inside the container func SetupUser(u string) error { - uid, gid, suppGids, home, err := user.GetUserGroupSupplementaryHome(u, syscall.Getuid(), syscall.Getgid(), "/") + // Set up defaults. + defaultExecUser := user.ExecUser{ + Uid: syscall.Getuid(), + Gid: syscall.Getgid(), + Home: "/", + } + + passwdFile, err := user.GetPasswdFile() + if err != nil { + return err + } + + groupFile, err := user.GetGroupFile() + if err != nil { + return err + } + + execUser, err := user.GetExecUserFile(u, &defaultExecUser, passwdFile, groupFile) if err != nil { return fmt.Errorf("get supplementary groups %s", err) } - if err := syscall.Setgroups(suppGids); err != nil { + if err := syscall.Setgroups(execUser.Sgids); err != nil { return fmt.Errorf("setgroups %s", err) } - if err := syscall.Setgid(gid); err != nil { + if err := system.Setgid(execUser.Gid); err != nil { return fmt.Errorf("setgid %s", err) } - if err := syscall.Setuid(uid); err != nil { + if err := system.Setuid(execUser.Uid); err != nil { return fmt.Errorf("setuid %s", err) } // if we didn't get HOME already, set it based on the user's HOME if envHome := os.Getenv("HOME"); envHome == "" { - if err := os.Setenv("HOME", home); err != nil { + if err := os.Setenv("HOME", execUser.Home); err != nil { return fmt.Errorf("set HOME %s", err) } } @@ -205,6 +242,16 @@ func setupRoute(container *libcontainer.Config) error { return nil } +func setupRlimits(container *libcontainer.Config) error { + for _, rlimit := 
range container.Rlimits { + l := &syscall.Rlimit{Max: rlimit.Hard, Cur: rlimit.Soft} + if err := syscall.Setrlimit(rlimit.Type, l); err != nil { + return fmt.Errorf("error setting rlimit type %v: %v", rlimit.Type, err) + } + } + return nil +} + // FinalizeNamespace drops the caps, sets the correct user // and working dir, and closes any leaky file descriptors // before execing the command inside the namespace diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/nsenter/nsenter.c b/vendor/src/github.com/docker/libcontainer/namespaces/nsenter/nsenter.c index 2869dd14d6..f060f63b13 100644 --- a/vendor/src/github.com/docker/libcontainer/namespaces/nsenter/nsenter.c +++ b/vendor/src/github.com/docker/libcontainer/namespaces/nsenter/nsenter.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -88,6 +89,11 @@ void nsenter() return; } + if (prctl(PR_SET_CHILD_SUBREAPER, 1, 0, 0, 0) == -1) { + fprintf(stderr, "nsenter: failed to set child subreaper: %s", strerror(errno)); + exit(1); + } + static const struct option longopts[] = { {"nspid", required_argument, NULL, 'n'}, {"console", required_argument, NULL, 't'}, diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/utils.go b/vendor/src/github.com/docker/libcontainer/namespaces/utils.go new file mode 100644 index 0000000000..bf60cd8f0e --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/namespaces/utils.go @@ -0,0 +1,38 @@ +// +build linux + +package namespaces + +import ( + "os" + "syscall" +) + +type initError struct { + Message string `json:"message,omitempty"` +} + +func (i initError) Error() string { + return i.Message +} + +// New returns a newly initialized Pipe for communication between processes +func newInitPipe() (parent *os.File, child *os.File, err error) { + fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC, 0) + if err != nil { + return nil, nil, err + } + return os.NewFile(uintptr(fds[1]), "parent"), 
os.NewFile(uintptr(fds[0]), "child"), nil +} + +// GetNamespaceFlags parses the container's Namespaces options to set the correct +// flags on clone, unshare, and setns +func GetNamespaceFlags(namespaces map[string]bool) (flag int) { + for key, enabled := range namespaces { + if enabled { + if ns := GetNamespace(key); ns != nil { + flag |= ns.Value + } + } + } + return flag +} diff --git a/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go index 3083cf907a..1bf70430f2 100644 --- a/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go +++ b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go @@ -7,6 +7,7 @@ import ( "math/rand" "net" "os" + "path/filepath" "sync/atomic" "syscall" "unsafe" @@ -575,6 +576,31 @@ func NetworkSetMTU(iface *net.Interface, mtu int) error { return s.HandleAck(wb.Seq) } +// Set link queue length +// This is identical to running: ip link set dev $name txqueuelen $QLEN +func NetworkSetTxQueueLen(iface *net.Interface, txQueueLen int) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + msg.Type = syscall.RTM_SETLINK + msg.Flags = syscall.NLM_F_REQUEST + msg.Index = int32(iface.Index) + msg.Change = DEFAULT_CHANGE + wb.AddData(msg) + wb.AddData(uint32Attr(syscall.IFLA_TXQLEN, uint32(txQueueLen))) + + if err := s.Send(wb); err != nil { + return err + } + return s.HandleAck(wb.Seq) +} + func networkMasterAction(iface *net.Interface, rtattr *RtAttr) error { s, err := getNetlinkSocket() if err != nil { @@ -681,7 +707,7 @@ func NetworkChangeName(iface *net.Interface, newName string) error { // Add a new VETH pair link on the host // This is identical to running: ip link add name $name type veth peer name $peername -func NetworkCreateVethPair(name1, name2 string) error { +func 
NetworkCreateVethPair(name1, name2 string, txQueueLen int) error { s, err := getNetlinkSocket() if err != nil { return err @@ -696,6 +722,11 @@ func NetworkCreateVethPair(name1, name2 string) error { nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(name1)) wb.AddData(nameData) + txqLen := make([]byte, 4) + native.PutUint32(txqLen, uint32(txQueueLen)) + txqData := newRtAttr(syscall.IFLA_TXQLEN, txqLen) + wb.AddData(txqData) + nest1 := newRtAttr(syscall.IFLA_LINKINFO, nil) newRtAttrChild(nest1, IFLA_INFO_KIND, zeroTerminated("veth")) nest2 := newRtAttrChild(nest1, IFLA_INFO_DATA, nil) @@ -704,6 +735,10 @@ func NetworkCreateVethPair(name1, name2 string) error { newIfInfomsgChild(nest3, syscall.AF_UNSPEC) newRtAttrChild(nest3, syscall.IFLA_IFNAME, zeroTerminated(name2)) + txqLen2 := make([]byte, 4) + native.PutUint32(txqLen2, uint32(txQueueLen)) + newRtAttrChild(nest3, syscall.IFLA_TXQLEN, txqLen2) + wb.AddData(nest1) if err := s.Send(wb); err != nil { @@ -759,26 +794,38 @@ func NetworkLinkAddVlan(masterDev, vlanDev string, vlanId uint16) error { return s.HandleAck(wb.Seq) } -// Add MAC VLAN network interface with masterDev as its upper device -// This is identical to running: -// ip link add name $name link $masterdev type macvlan mode $mode -func NetworkLinkAddMacVlan(masterDev, macVlanDev string, mode string) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() +// MacVlan link has LowerDev, UpperDev and operates in Mode mode +// This simplifies the code when creating MacVlan or MacVtap interface +type MacVlanLink struct { + MasterDev string + SlaveDev string + mode string +} - macVlan := map[string]uint32{ +func (m MacVlanLink) Mode() uint32 { + modeMap := map[string]uint32{ "private": MACVLAN_MODE_PRIVATE, "vepa": MACVLAN_MODE_VEPA, "bridge": MACVLAN_MODE_BRIDGE, "passthru": MACVLAN_MODE_PASSTHRU, } + return modeMap[m.mode] +} + +// Add MAC VLAN network interface with masterDev as its upper device +// This is 
identical to running: +// ip link add name $name link $masterdev type macvlan mode $mode +func networkLinkMacVlan(dev_type string, mcvln *MacVlanLink) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) - masterDevIfc, err := net.InterfaceByName(masterDev) + masterDevIfc, err := net.InterfaceByName(mcvln.MasterDev) if err != nil { return err } @@ -787,16 +834,16 @@ func NetworkLinkAddMacVlan(masterDev, macVlanDev string, mode string) error { wb.AddData(msg) nest1 := newRtAttr(syscall.IFLA_LINKINFO, nil) - newRtAttrChild(nest1, IFLA_INFO_KIND, nonZeroTerminated("macvlan")) + newRtAttrChild(nest1, IFLA_INFO_KIND, nonZeroTerminated(dev_type)) nest2 := newRtAttrChild(nest1, IFLA_INFO_DATA, nil) macVlanData := make([]byte, 4) - native.PutUint32(macVlanData, macVlan[mode]) + native.PutUint32(macVlanData, mcvln.Mode()) newRtAttrChild(nest2, IFLA_MACVLAN_MODE, macVlanData) wb.AddData(nest1) wb.AddData(uint32Attr(syscall.IFLA_LINK, uint32(masterDevIfc.Index))) - wb.AddData(newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(macVlanDev))) + wb.AddData(newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(mcvln.SlaveDev))) if err := s.Send(wb); err != nil { return err @@ -804,6 +851,22 @@ func NetworkLinkAddMacVlan(masterDev, macVlanDev string, mode string) error { return s.HandleAck(wb.Seq) } +func NetworkLinkAddMacVlan(masterDev, macVlanDev string, mode string) error { + return networkLinkMacVlan("macvlan", &MacVlanLink{ + MasterDev: masterDev, + SlaveDev: macVlanDev, + mode: mode, + }) +} + +func NetworkLinkAddMacVtap(masterDev, macVlanDev string, mode string) error { + return networkLinkMacVlan("macvtap", &MacVlanLink{ + MasterDev: masterDev, + SlaveDev: macVlanDev, + mode: mode, + }) +} + func networkLinkIpAction(action, flags int, ifa IfAddr) error { s, err := getNetlinkSocket() if err != nil { @@ -993,28 +1056,23 @@ func 
AddRoute(destination, source, gateway, device string) error { } if source != "" { - srcIP, srcNet, err := net.ParseCIDR(source) - if err != nil { - return fmt.Errorf("source CIDR %s couldn't be parsed", source) + srcIP := net.ParseIP(source) + if srcIP == nil { + return fmt.Errorf("source IP %s couldn't be parsed", source) } srcFamily := getIpFamily(srcIP) if currentFamily != -1 && currentFamily != srcFamily { return fmt.Errorf("source and destination ip were not the same IP family") } currentFamily = srcFamily - srcLen, bits := srcNet.Mask.Size() - if srcLen == 0 && bits == 0 { - return fmt.Errorf("source CIDR %s generated a non-canonical Mask", source) - } msg.Family = uint8(srcFamily) - msg.Src_len = uint8(srcLen) var srcData []byte if srcFamily == syscall.AF_INET { srcData = srcIP.To4() } else { srcData = srcIP.To16() } - rtAttrs = append(rtAttrs, newRtAttr(syscall.RTA_SRC, srcData)) + rtAttrs = append(rtAttrs, newRtAttr(syscall.RTA_PREFSRC, srcData)) } if gateway != "" { @@ -1195,6 +1253,28 @@ func SetMacAddress(name, addr string) error { return nil } +func SetHairpinMode(iface *net.Interface, enabled bool) error { + sysPath := filepath.Join("/sys/class/net", iface.Name, "brport/hairpin_mode") + + sysFile, err := os.OpenFile(sysPath, os.O_WRONLY, 0) + if err != nil { + return err + } + defer sysFile.Close() + + var writeVal []byte + if enabled { + writeVal = []byte("1") + } else { + writeVal = []byte("0") + } + if _, err := sysFile.Write(writeVal); err != nil { + return err + } + + return nil +} + func ChangeName(iface *net.Interface, newName string) error { if len(newName) >= IFNAMSIZ { return fmt.Errorf("Interface name %s too long", newName) @@ -1215,5 +1295,6 @@ func ChangeName(iface *net.Interface, newName string) error { if _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), syscall.SIOCSIFNAME, uintptr(unsafe.Pointer(&data[0]))); errno != 0 { return errno } + return nil } diff --git 
a/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go index 88c2e04a3a..3f6511abfe 100644 --- a/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go +++ b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go @@ -116,7 +116,7 @@ func TestNetworkSetMacAddress(t *testing.T) { ifcBeforeSet := readLink(t, tl.name) if err := NetworkSetMacAddress(ifcBeforeSet, macaddr); err != nil { - t.Fatalf("Could not set %s MAC address on %#v interface: err", macaddr, tl, err) + t.Fatalf("Could not set %s MAC address on %#v interface: %s", macaddr, tl, err) } ifcAfterSet := readLink(t, tl.name) @@ -140,7 +140,7 @@ func TestNetworkSetMTU(t *testing.T) { ifcBeforeSet := readLink(t, tl.name) if err := NetworkSetMTU(ifcBeforeSet, mtu); err != nil { - t.Fatalf("Could not set %d MTU on %#v interface: err", mtu, tl, err) + t.Fatalf("Could not set %d MTU on %#v interface: %s", mtu, tl, err) } ifcAfterSet := readLink(t, tl.name) @@ -248,6 +248,30 @@ func TestNetworkLinkAddMacVlan(t *testing.T) { readLink(t, tl.name) } +func TestNetworkLinkAddMacVtap(t *testing.T) { + if testing.Short() { + return + } + + tl := struct { + name string + mode string + }{ + name: "tstVtap", + mode: "private", + } + masterLink := testLink{"tstEth", "dummy"} + + addLink(t, masterLink.name, masterLink.linkType) + defer deleteLink(t, masterLink.name) + + if err := NetworkLinkAddMacVtap(masterLink.name, tl.name, tl.mode); err != nil { + t.Fatalf("Unable to create %#v MAC VTAP interface: %s", tl, err) + } + + readLink(t, tl.name) +} + func TestAddDelNetworkIp(t *testing.T) { if testing.Short() { return @@ -280,6 +304,34 @@ func TestAddDelNetworkIp(t *testing.T) { } } +func TestAddRouteSourceSelection(t *testing.T) { + tstIp := "127.1.1.1" + tl := testLink{name: "tstEth", linkType: "dummy"} + + addLink(t, tl.name, tl.linkType) + defer deleteLink(t, tl.name) + + ip := net.ParseIP(tstIp) + 
mask := net.IPv4Mask(255, 255, 255, 255) + ipNet := &net.IPNet{IP: ip, Mask: mask} + + iface, err := net.InterfaceByName(tl.name) + if err != nil { + t.Fatalf("Lost created link %#v", tl) + } + + if err := NetworkLinkAddIp(iface, ip, ipNet); err != nil { + t.Fatalf("Could not add IP address %s to interface %#v: %s", ip.String(), iface, err) + } + + upLink(t, tl.name) + defer downLink(t, tl.name) + + if err := AddRoute("127.0.0.0/8", tstIp, "", tl.name); err != nil { + t.Fatalf("Failed to add route with source address") + } +} + func TestCreateVethPair(t *testing.T) { if testing.Short() { return @@ -290,7 +342,7 @@ func TestCreateVethPair(t *testing.T) { name2 = "veth2" ) - if err := NetworkCreateVethPair(name1, name2); err != nil { + if err := NetworkCreateVethPair(name1, name2, 0); err != nil { t.Fatalf("Could not create veth pair %s %s: %s", name1, name2, err) } defer NetworkLinkDel(name1) diff --git a/vendor/src/github.com/docker/libcontainer/netlink/netlink_unsupported.go b/vendor/src/github.com/docker/libcontainer/netlink/netlink_unsupported.go index f6e84adf7e..4b11bf8ba5 100644 --- a/vendor/src/github.com/docker/libcontainer/netlink/netlink_unsupported.go +++ b/vendor/src/github.com/docker/libcontainer/netlink/netlink_unsupported.go @@ -47,7 +47,11 @@ func NetworkSetMTU(iface *net.Interface, mtu int) error { return ErrNotImplemented } -func NetworkCreateVethPair(name1, name2 string) error { +func NetworkSetTxQueueLen(iface *net.Interface, txQueueLen int) error { + return ErrNotImplemented +} + +func NetworkCreateVethPair(name1, name2 string, txQueueLen int) error { return ErrNotImplemented } diff --git a/vendor/src/github.com/docker/libcontainer/network/network.go b/vendor/src/github.com/docker/libcontainer/network/network.go index 014ba74315..ba8f6f74e7 100644 --- a/vendor/src/github.com/docker/libcontainer/network/network.go +++ b/vendor/src/github.com/docker/libcontainer/network/network.go @@ -32,8 +32,8 @@ func ChangeInterfaceName(old, newName string) 
error { return netlink.NetworkChangeName(iface, newName) } -func CreateVethPair(name1, name2 string) error { - return netlink.NetworkCreateVethPair(name1, name2) +func CreateVethPair(name1, name2 string, txQueueLen int) error { + return netlink.NetworkCreateVethPair(name1, name2, txQueueLen) } func SetInterfaceInNamespacePid(name string, nsPid int) error { @@ -95,3 +95,11 @@ func SetMtu(name string, mtu int) error { } return netlink.NetworkSetMTU(iface, mtu) } + +func SetHairpinMode(name string, enabled bool) error { + iface, err := net.InterfaceByName(name) + if err != nil { + return err + } + return netlink.SetHairpinMode(iface, enabled) +} diff --git a/vendor/src/github.com/docker/libcontainer/network/stats.go b/vendor/src/github.com/docker/libcontainer/network/stats.go index c8ece5c7b0..e2156c74da 100644 --- a/vendor/src/github.com/docker/libcontainer/network/stats.go +++ b/vendor/src/github.com/docker/libcontainer/network/stats.go @@ -2,7 +2,6 @@ package network import ( "io/ioutil" - "os" "path/filepath" "strconv" "strings" @@ -25,45 +24,51 @@ func GetStats(networkState *NetworkState) (*NetworkStats, error) { if networkState.VethHost == "" { return &NetworkStats{}, nil } - data, err := readSysfsNetworkStats(networkState.VethHost) - if err != nil { - return nil, err + + out := &NetworkStats{} + + type netStatsPair struct { + // Where to write the output. + Out *uint64 + + // The network stats file to read. + File string } // Ingress for host veth is from the container. Hence tx_bytes stat on the host veth is actually number of bytes received by the container. 
- return &NetworkStats{ - RxBytes: data["tx_bytes"], - RxPackets: data["tx_packets"], - RxErrors: data["tx_errors"], - RxDropped: data["tx_dropped"], - TxBytes: data["rx_bytes"], - TxPackets: data["rx_packets"], - TxErrors: data["rx_errors"], - TxDropped: data["rx_dropped"], - }, nil + netStats := []netStatsPair{ + {Out: &out.RxBytes, File: "tx_bytes"}, + {Out: &out.RxPackets, File: "tx_packets"}, + {Out: &out.RxErrors, File: "tx_errors"}, + {Out: &out.RxDropped, File: "tx_dropped"}, + + {Out: &out.TxBytes, File: "rx_bytes"}, + {Out: &out.TxPackets, File: "rx_packets"}, + {Out: &out.TxErrors, File: "rx_errors"}, + {Out: &out.TxDropped, File: "rx_dropped"}, + } + for _, netStat := range netStats { + data, err := readSysfsNetworkStats(networkState.VethHost, netStat.File) + if err != nil { + return nil, err + } + *(netStat.Out) = data + } + + return out, nil } -// Reads all the statistics available under /sys/class/net//statistics as a map with file name as key and data as integers. -func readSysfsNetworkStats(ethInterface string) (map[string]uint64, error) { - out := make(map[string]uint64) +// Reads the specified statistics available under /sys/class/net//statistics +func readSysfsNetworkStats(ethInterface, statsFile string) (uint64, error) { + fullPath := filepath.Join("/sys/class/net", ethInterface, "statistics", statsFile) + data, err := ioutil.ReadFile(fullPath) + if err != nil { + return 0, err + } + value, err := strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) + if err != nil { + return 0, err + } - fullPath := filepath.Join("/sys/class/net", ethInterface, "statistics/") - err := filepath.Walk(fullPath, func(path string, _ os.FileInfo, _ error) error { - // skip fullPath. 
- if path == fullPath { - return nil - } - base := filepath.Base(path) - data, err := ioutil.ReadFile(path) - if err != nil { - return err - } - value, err := strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) - if err != nil { - return err - } - out[base] = value - return nil - }) - return out, err + return value, err } diff --git a/vendor/src/github.com/docker/libcontainer/network/types.go b/vendor/src/github.com/docker/libcontainer/network/types.go index 383e27c81a..ea0741be1c 100644 --- a/vendor/src/github.com/docker/libcontainer/network/types.go +++ b/vendor/src/github.com/docker/libcontainer/network/types.go @@ -36,6 +36,11 @@ type Network struct { // container's interfaces if a pair is created, specifically in the case of type veth // Note: This does not apply to loopback interfaces. Mtu int `json:"mtu,omitempty"` + + // TxQueueLen sets the tx_queuelen value for the interface and will be mirrored on both the host and + // container's interfaces if a pair is created, specifically in the case of type veth + // Note: This does not apply to loopback interfaces. 
+ TxQueueLen int `json:"txqueuelen,omitempty"` } // Struct describing the network specific runtime state that will be maintained by libcontainer for all running containers diff --git a/vendor/src/github.com/docker/libcontainer/network/veth.go b/vendor/src/github.com/docker/libcontainer/network/veth.go index e5185de7c7..3d7dc8729e 100644 --- a/vendor/src/github.com/docker/libcontainer/network/veth.go +++ b/vendor/src/github.com/docker/libcontainer/network/veth.go @@ -19,8 +19,9 @@ const defaultDevice = "eth0" func (v *Veth) Create(n *Network, nspid int, networkState *NetworkState) error { var ( - bridge = n.Bridge - prefix = n.VethPrefix + bridge = n.Bridge + prefix = n.VethPrefix + txQueueLen = n.TxQueueLen ) if bridge == "" { return fmt.Errorf("bridge is not specified") @@ -28,7 +29,7 @@ func (v *Veth) Create(n *Network, nspid int, networkState *NetworkState) error { if prefix == "" { return fmt.Errorf("veth prefix is not specified") } - name1, name2, err := createVethPair(prefix) + name1, name2, err := createVethPair(prefix, txQueueLen) if err != nil { return err } @@ -96,7 +97,7 @@ func (v *Veth) Initialize(config *Network, networkState *NetworkState) error { // createVethPair will automatically generage two random names for // the veth pair and ensure that they have been created -func createVethPair(prefix string) (name1 string, name2 string, err error) { +func createVethPair(prefix string, txQueueLen int) (name1 string, name2 string, err error) { for i := 0; i < 10; i++ { if name1, err = utils.GenerateRandomName(prefix, 7); err != nil { return @@ -106,7 +107,7 @@ func createVethPair(prefix string) (name1 string, name2 string, err error) { return } - if err = CreateVethPair(name1, name2); err != nil { + if err = CreateVethPair(name1, name2, txQueueLen); err != nil { if err == netlink.ErrInterfaceExists { continue } diff --git a/vendor/src/github.com/docker/libcontainer/network/veth_test.go b/vendor/src/github.com/docker/libcontainer/network/veth_test.go index 
e09a6042c7..b92b284eb0 100644 --- a/vendor/src/github.com/docker/libcontainer/network/veth_test.go +++ b/vendor/src/github.com/docker/libcontainer/network/veth_test.go @@ -15,7 +15,7 @@ func TestGenerateVethNames(t *testing.T) { prefix := "veth" - name1, name2, err := createVethPair(prefix) + name1, name2, err := createVethPair(prefix, 0) if err != nil { t.Fatal(err) } @@ -36,13 +36,13 @@ func TestCreateDuplicateVethPair(t *testing.T) { prefix := "veth" - name1, name2, err := createVethPair(prefix) + name1, name2, err := createVethPair(prefix, 0) if err != nil { t.Fatal(err) } // retry to create the name interfaces and make sure that we get the correct error - err = CreateVethPair(name1, name2) + err = CreateVethPair(name1, name2, 0) if err == nil { t.Fatal("expected error to not be nil with duplicate interface") } diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/init.go b/vendor/src/github.com/docker/libcontainer/nsinit/init.go index c091ee1099..6df9b1d894 100644 --- a/vendor/src/github.com/docker/libcontainer/nsinit/init.go +++ b/vendor/src/github.com/docker/libcontainer/nsinit/init.go @@ -8,7 +8,6 @@ import ( "github.com/codegangsta/cli" "github.com/docker/libcontainer/namespaces" - "github.com/docker/libcontainer/syncpipe" ) var ( @@ -41,12 +40,8 @@ func initAction(context *cli.Context) { log.Fatal(err) } - syncPipe, err := syncpipe.NewSyncPipeFromFd(0, uintptr(pipeFd)) - if err != nil { - log.Fatalf("unable to create sync pipe: %s", err) - } - - if err := namespaces.Init(container, rootfs, console, syncPipe, []string(context.Args())); err != nil { + pipe := os.NewFile(uintptr(pipeFd), "pipe") + if err := namespaces.Init(container, rootfs, console, pipe, []string(context.Args())); err != nil { log.Fatalf("unable to initialize for container: %s", err) } } diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/utils.go b/vendor/src/github.com/docker/libcontainer/nsinit/utils.go index 7f5155942c..6a8aafbf17 100644 --- 
a/vendor/src/github.com/docker/libcontainer/nsinit/utils.go +++ b/vendor/src/github.com/docker/libcontainer/nsinit/utils.go @@ -8,7 +8,6 @@ import ( "github.com/codegangsta/cli" "github.com/docker/libcontainer" - "github.com/docker/libcontainer/syncpipe" ) // rFunc is a function registration for calling after an execin @@ -59,16 +58,13 @@ func findUserArgs() []string { // loadConfigFromFd loads a container's config from the sync pipe that is provided by // fd 3 when running a process func loadConfigFromFd() (*libcontainer.Config, error) { - syncPipe, err := syncpipe.NewSyncPipeFromFd(0, 3) - if err != nil { - return nil, err - } + pipe := os.NewFile(3, "pipe") + defer pipe.Close() var config *libcontainer.Config - if err := syncPipe.ReadFromParent(&config); err != nil { + if err := json.NewDecoder(pipe).Decode(&config); err != nil { return nil, err } - return config, nil } diff --git a/vendor/src/github.com/docker/libcontainer/sample_configs/route_source_address_selection.json b/vendor/src/github.com/docker/libcontainer/sample_configs/route_source_address_selection.json new file mode 100644 index 0000000000..d4baf94cde --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/sample_configs/route_source_address_selection.json @@ -0,0 +1,209 @@ +{ + "capabilities": [ + "CHOWN", + "DAC_OVERRIDE", + "FOWNER", + "MKNOD", + "NET_RAW", + "SETGID", + "SETUID", + "SETFCAP", + "SETPCAP", + "NET_BIND_SERVICE", + "SYS_CHROOT", + "KILL" + ], + "cgroups": { + "allowed_devices": [ + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 98 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 1, + "path": "/dev/console", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "path": "/dev/tty0", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "minor_number": 1, + "path": 
"/dev/tty1", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 136, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 2, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 10, + "minor_number": 200, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ], + "name": "docker-koye", + "parent": "docker" + }, + "restrict_sys": true, + "mount_config": { + "device_nodes": [ + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, 
+ "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ] + }, + "environment": [ + "HOME=/", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOSTNAME=koye", + "TERM=xterm" + ], + "hostname": "koye", + "namespaces": { + "NEWIPC": true, + "NEWNET": true, + "NEWNS": true, + "NEWPID": true, + "NEWUTS": true + }, + "networks": [ + { + "address": "127.0.0.1/0", + "gateway": "localhost", + "mtu": 1500, + "type": "loopback" + }, + { + "address": "172.17.0.101/16", + "bridge": "docker0", + "veth_prefix": "veth", + "mtu": 1500, + "type": "veth" + } + ], + "routes": [ + { + "destination": "0.0.0.0/0", + "source": "172.17.0.101", + "gateway": "172.17.42.1", + "interface_name": "eth0" + } + ], + "tty": true +} diff --git a/vendor/src/github.com/docker/libcontainer/selinux/selinux.go b/vendor/src/github.com/docker/libcontainer/selinux/selinux.go index e0c90ee551..e5bd820980 100644 --- a/vendor/src/github.com/docker/libcontainer/selinux/selinux.go +++ b/vendor/src/github.com/docker/libcontainer/selinux/selinux.go @@ -434,3 +434,28 @@ func Chcon(fpath string, scon string, recurse bool) error { return Setfilecon(fpath, scon) } + +// DupSecOpt takes an SELinux process label and returns security options that +// can will set the SELinux Type and Level for future container processes +func DupSecOpt(src string) []string { + if src == "" { + return nil + } + con := NewContext(src) + if con["user"] == "" || + con["role"] == "" || + con["type"] == "" || + con["level"] == "" { + return nil + } + return []string{"label:user:" + con["user"], + "label:role:" + con["role"], + "label:type:" + con["type"], + "label:level:" + con["level"]} +} + +// DisableSecOpt returns a security opt that can be used to disabling SELinux +// labeling support for future container processes +func DisableSecOpt() []string { + return []string{"label:disable"} +} diff --git a/vendor/src/github.com/docker/libcontainer/selinux/selinux_test.go 
b/vendor/src/github.com/docker/libcontainer/selinux/selinux_test.go index 34c3497441..228ad8361c 100644 --- a/vendor/src/github.com/docker/libcontainer/selinux/selinux_test.go +++ b/vendor/src/github.com/docker/libcontainer/selinux/selinux_test.go @@ -42,7 +42,7 @@ func TestSELinux(t *testing.T) { t.Log("getenforce ", selinux.SelinuxGetEnforce()) t.Log("getenforcemode ", selinux.SelinuxGetEnforceMode()) pid := os.Getpid() - t.Log("PID:%d MCS:%s\n", pid, selinux.IntToMcs(pid, 1023)) + t.Logf("PID:%d MCS:%s\n", pid, selinux.IntToMcs(pid, 1023)) err = selinux.Setfscreatecon("unconfined_u:unconfined_r:unconfined_t:s0") if err == nil { t.Log(selinux.Getfscreatecon()) diff --git a/vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe.go b/vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe.go deleted file mode 100644 index f73c354dbf..0000000000 --- a/vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe.go +++ /dev/null @@ -1,105 +0,0 @@ -package syncpipe - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "syscall" -) - -// SyncPipe allows communication to and from the child processes -// to it's parent and allows the two independent processes to -// syncronize their state. 
-type SyncPipe struct { - parent, child *os.File -} - -func NewSyncPipeFromFd(parentFd, childFd uintptr) (*SyncPipe, error) { - s := &SyncPipe{} - - if parentFd > 0 { - s.parent = os.NewFile(parentFd, "parentPipe") - } else if childFd > 0 { - s.child = os.NewFile(childFd, "childPipe") - } else { - return nil, fmt.Errorf("no valid sync pipe fd specified") - } - - return s, nil -} - -func (s *SyncPipe) Child() *os.File { - return s.child -} - -func (s *SyncPipe) Parent() *os.File { - return s.parent -} - -func (s *SyncPipe) SendToChild(v interface{}) error { - data, err := json.Marshal(v) - if err != nil { - return err - } - - s.parent.Write(data) - - return syscall.Shutdown(int(s.parent.Fd()), syscall.SHUT_WR) -} - -func (s *SyncPipe) ReadFromChild() error { - data, err := ioutil.ReadAll(s.parent) - if err != nil { - return err - } - - if len(data) > 0 { - return fmt.Errorf("%s", data) - } - - return nil -} - -func (s *SyncPipe) ReadFromParent(v interface{}) error { - data, err := ioutil.ReadAll(s.child) - if err != nil { - return fmt.Errorf("error reading from sync pipe %s", err) - } - - if len(data) > 0 { - if err := json.Unmarshal(data, v); err != nil { - return err - } - } - - return nil -} - -func (s *SyncPipe) ReportChildError(err error) { - // ensure that any data sent from the parent is consumed so it doesn't - // receive ECONNRESET when the child writes to the pipe. 
- ioutil.ReadAll(s.child) - - s.child.Write([]byte(err.Error())) - s.CloseChild() -} - -func (s *SyncPipe) Close() error { - if s.parent != nil { - s.parent.Close() - } - - if s.child != nil { - s.child.Close() - } - - return nil -} - -func (s *SyncPipe) CloseChild() { - if s.child != nil { - s.child.Close() - s.child = nil - } -} diff --git a/vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe_linux.go b/vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe_linux.go deleted file mode 100644 index bea4b52f9e..0000000000 --- a/vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe_linux.go +++ /dev/null @@ -1,20 +0,0 @@ -package syncpipe - -import ( - "os" - "syscall" -) - -func NewSyncPipe() (s *SyncPipe, err error) { - s = &SyncPipe{} - - fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC, 0) - if err != nil { - return nil, err - } - - s.child = os.NewFile(uintptr(fds[0]), "child syncpipe") - s.parent = os.NewFile(uintptr(fds[1]), "parent syncpipe") - - return s, nil -} diff --git a/vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe_test.go b/vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe_test.go deleted file mode 100644 index 906e6ed24d..0000000000 --- a/vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package syncpipe - -import ( - "fmt" - "syscall" - "testing" -) - -type testStruct struct { - Name string -} - -func TestSendErrorFromChild(t *testing.T) { - pipe, err := NewSyncPipe() - if err != nil { - t.Fatal(err) - } - defer func() { - if err := pipe.Close(); err != nil { - t.Fatal(err) - } - }() - - childfd, err := syscall.Dup(int(pipe.Child().Fd())) - if err != nil { - t.Fatal(err) - } - childPipe, _ := NewSyncPipeFromFd(0, uintptr(childfd)) - - pipe.CloseChild() - pipe.SendToChild(nil) - - expected := "something bad happened" - childPipe.ReportChildError(fmt.Errorf(expected)) - - childError := pipe.ReadFromChild() - if 
childError == nil { - t.Fatal("expected an error to be returned but did not receive anything") - } - - if childError.Error() != expected { - t.Fatalf("expected %q but received error message %q", expected, childError.Error()) - } -} - -func TestSendPayloadToChild(t *testing.T) { - pipe, err := NewSyncPipe() - if err != nil { - t.Fatal(err) - } - - defer func() { - if err := pipe.Close(); err != nil { - t.Fatal(err) - } - }() - - expected := "libcontainer" - - if err := pipe.SendToChild(testStruct{Name: expected}); err != nil { - t.Fatal(err) - } - - var s *testStruct - if err := pipe.ReadFromParent(&s); err != nil { - t.Fatal(err) - } - - if s.Name != expected { - t.Fatalf("expected name %q but received %q", expected, s.Name) - } -} diff --git a/vendor/src/github.com/docker/libcontainer/system/setns_linux.go b/vendor/src/github.com/docker/libcontainer/system/setns_linux.go index 32821ee2bf..228e6ccd7f 100644 --- a/vendor/src/github.com/docker/libcontainer/system/setns_linux.go +++ b/vendor/src/github.com/docker/libcontainer/system/setns_linux.go @@ -11,9 +11,12 @@ import ( // We need different setns values for the different platforms and arch // We are declaring the macro here because the SETNS syscall does not exist in th stdlib var setNsMap = map[string]uintptr{ - "linux/386": 346, - "linux/amd64": 308, - "linux/arm": 374, + "linux/386": 346, + "linux/amd64": 308, + "linux/arm": 374, + "linux/ppc64": 350, + "linux/ppc64le": 350, + "linux/s390x": 339, } func Setns(fd uintptr, flags uintptr) error { diff --git a/vendor/src/github.com/docker/libcontainer/system/syscall_linux_386.go b/vendor/src/github.com/docker/libcontainer/system/syscall_linux_386.go new file mode 100644 index 0000000000..2fcbf21309 --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/system/syscall_linux_386.go @@ -0,0 +1,24 @@ +// +build linux,386 +package system + +import ( + "syscall" +) + +// Setuid sets the uid of the calling thread to the specified uid. 
+func Setuid(uid int) (err error) { + _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +// Setgid sets the gid of the calling thread to the specified gid. +func Setgid(gid int) (err error) { + _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID32, uintptr(gid), 0, 0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/src/github.com/docker/libcontainer/system/syscall_linux_64.go b/vendor/src/github.com/docker/libcontainer/system/syscall_linux_64.go new file mode 100644 index 0000000000..6840c3770f --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/system/syscall_linux_64.go @@ -0,0 +1,25 @@ +// +build linux,amd64 linux,ppc64 linux,ppc64le linux,s390x + +package system + +import ( + "syscall" +) + +// Setuid sets the uid of the calling thread to the specified uid. +func Setuid(uid int) (err error) { + _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +// Setgid sets the gid of the calling thread to the specified gid. +func Setgid(gid int) (err error) { + _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/src/github.com/docker/libcontainer/system/syscall_linux_arm.go b/vendor/src/github.com/docker/libcontainer/system/syscall_linux_arm.go new file mode 100644 index 0000000000..7d8cda9d00 --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/system/syscall_linux_arm.go @@ -0,0 +1,24 @@ +// +build linux,arm +package system + +import ( + "syscall" +) + +// Setuid sets the uid of the calling thread to the specified uid. +func Setuid(uid int) (err error) { + _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID32, uintptr(uid), 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +// Setgid sets the gid of the calling thread to the specified gid. 
+func Setgid(gid int) (err error) { + _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID32, uintptr(gid), 0, 0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/src/github.com/docker/libcontainer/user/lookup.go b/vendor/src/github.com/docker/libcontainer/user/lookup.go new file mode 100644 index 0000000000..6f8a982ff7 --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/user/lookup.go @@ -0,0 +1,108 @@ +package user + +import ( + "errors" + "fmt" + "syscall" +) + +var ( + // The current operating system does not provide the required data for user lookups. + ErrUnsupported = errors.New("user lookup: operating system does not provide passwd-formatted data") +) + +func lookupUser(filter func(u User) bool) (User, error) { + // Get operating system-specific passwd reader-closer. + passwd, err := GetPasswd() + if err != nil { + return User{}, err + } + defer passwd.Close() + + // Get the users. + users, err := ParsePasswdFilter(passwd, filter) + if err != nil { + return User{}, err + } + + // No user entries found. + if len(users) == 0 { + return User{}, fmt.Errorf("no matching entries in passwd file") + } + + // Assume the first entry is the "correct" one. + return users[0], nil +} + +// CurrentUser looks up the current user by their user id in /etc/passwd. If the +// user cannot be found (or there is no /etc/passwd file on the filesystem), +// then CurrentUser returns an error. +func CurrentUser() (User, error) { + return LookupUid(syscall.Getuid()) +} + +// LookupUser looks up a user by their username in /etc/passwd. If the user +// cannot be found (or there is no /etc/passwd file on the filesystem), then +// LookupUser returns an error. +func LookupUser(username string) (User, error) { + return lookupUser(func(u User) bool { + return u.Name == username + }) +} + +// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot +// be found (or there is no /etc/passwd file on the filesystem), then LookupId +// returns an error. 
+func LookupUid(uid int) (User, error) { + return lookupUser(func(u User) bool { + return u.Uid == uid + }) +} + +func lookupGroup(filter func(g Group) bool) (Group, error) { + // Get operating system-specific group reader-closer. + group, err := GetGroup() + if err != nil { + return Group{}, err + } + defer group.Close() + + // Get the users. + groups, err := ParseGroupFilter(group, filter) + if err != nil { + return Group{}, err + } + + // No user entries found. + if len(groups) == 0 { + return Group{}, fmt.Errorf("no matching entries in group file") + } + + // Assume the first entry is the "correct" one. + return groups[0], nil +} + +// CurrentGroup looks up the current user's group by their primary group id's +// entry in /etc/passwd. If the group cannot be found (or there is no +// /etc/group file on the filesystem), then CurrentGroup returns an error. +func CurrentGroup() (Group, error) { + return LookupGid(syscall.Getgid()) +} + +// LookupGroup looks up a group by its name in /etc/group. If the group cannot +// be found (or there is no /etc/group file on the filesystem), then LookupGroup +// returns an error. +func LookupGroup(groupname string) (Group, error) { + return lookupGroup(func(g Group) bool { + return g.Name == groupname + }) +} + +// LookupGid looks up a group by its group id in /etc/group. If the group cannot +// be found (or there is no /etc/group file on the filesystem), then LookupGid +// returns an error. 
+func LookupGid(gid int) (Group, error) { + return lookupGroup(func(g Group) bool { + return g.Gid == gid + }) +} diff --git a/vendor/src/github.com/docker/libcontainer/user/lookup_unix.go b/vendor/src/github.com/docker/libcontainer/user/lookup_unix.go new file mode 100644 index 0000000000..409c114e26 --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/user/lookup_unix.go @@ -0,0 +1,30 @@ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package user + +import ( + "io" + "os" +) + +// Unix-specific path to the passwd and group formatted files. +const ( + unixPasswdFile = "/etc/passwd" + unixGroupFile = "/etc/group" +) + +func GetPasswdFile() (string, error) { + return unixPasswdFile, nil +} + +func GetPasswd() (io.ReadCloser, error) { + return os.Open(unixPasswdFile) +} + +func GetGroupFile() (string, error) { + return unixGroupFile, nil +} + +func GetGroup() (io.ReadCloser, error) { + return os.Open(unixGroupFile) +} diff --git a/vendor/src/github.com/docker/libcontainer/user/lookup_unsupported.go b/vendor/src/github.com/docker/libcontainer/user/lookup_unsupported.go new file mode 100644 index 0000000000..0f15c57d82 --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/user/lookup_unsupported.go @@ -0,0 +1,21 @@ +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package user + +import "io" + +func GetPasswdFile() (string, error) { + return "", ErrUnsupported +} + +func GetPasswd() (io.ReadCloser, error) { + return nil, ErrUnsupported +} + +func GetGroupFile() (string, error) { + return "", ErrUnsupported +} + +func GetGroup() (io.ReadCloser, error) { + return nil, ErrUnsupported +} diff --git a/vendor/src/github.com/docker/libcontainer/user/user.go b/vendor/src/github.com/docker/libcontainer/user/user.go index 493dd86f20..69387f2ef6 100644 --- a/vendor/src/github.com/docker/libcontainer/user/user.go +++ b/vendor/src/github.com/docker/libcontainer/user/user.go @@ -69,23 +69,36 @@ func parseLine(line 
string, v ...interface{}) { } } -func ParsePasswd() ([]*User, error) { - return ParsePasswdFilter(nil) -} - -func ParsePasswdFilter(filter func(*User) bool) ([]*User, error) { - f, err := os.Open("/etc/passwd") +func ParsePasswdFile(path string) ([]User, error) { + passwd, err := os.Open(path) if err != nil { return nil, err } - defer f.Close() - return parsePasswdFile(f, filter) + defer passwd.Close() + return ParsePasswd(passwd) } -func parsePasswdFile(r io.Reader, filter func(*User) bool) ([]*User, error) { +func ParsePasswd(passwd io.Reader) ([]User, error) { + return ParsePasswdFilter(passwd, nil) +} + +func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) { + passwd, err := os.Open(path) + if err != nil { + return nil, err + } + defer passwd.Close() + return ParsePasswdFilter(passwd, filter) +} + +func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { + if r == nil { + return nil, fmt.Errorf("nil source for passwd-formatted data") + } + var ( s = bufio.NewScanner(r) - out = []*User{} + out = []User{} ) for s.Scan() { @@ -103,7 +116,7 @@ func parsePasswdFile(r io.Reader, filter func(*User) bool) ([]*User, error) { // Name:Pass:Uid:Gid:Gecos:Home:Shell // root:x:0:0:root:/root:/bin/bash // adm:x:3:4:adm:/var/adm:/bin/false - p := &User{} + p := User{} parseLine( text, &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell, @@ -117,23 +130,36 @@ func parsePasswdFile(r io.Reader, filter func(*User) bool) ([]*User, error) { return out, nil } -func ParseGroup() ([]*Group, error) { - return ParseGroupFilter(nil) -} - -func ParseGroupFilter(filter func(*Group) bool) ([]*Group, error) { - f, err := os.Open("/etc/group") +func ParseGroupFile(path string) ([]Group, error) { + group, err := os.Open(path) if err != nil { return nil, err } - defer f.Close() - return parseGroupFile(f, filter) + defer group.Close() + return ParseGroup(group) } -func parseGroupFile(r io.Reader, filter func(*Group) bool) ([]*Group, error) 
{ +func ParseGroup(group io.Reader) ([]Group, error) { + return ParseGroupFilter(group, nil) +} + +func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) { + group, err := os.Open(path) + if err != nil { + return nil, err + } + defer group.Close() + return ParseGroupFilter(group, filter) +} + +func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { + if r == nil { + return nil, fmt.Errorf("nil source for group-formatted data") + } + var ( s = bufio.NewScanner(r) - out = []*Group{} + out = []Group{} ) for s.Scan() { @@ -151,7 +177,7 @@ func parseGroupFile(r io.Reader, filter func(*Group) bool) ([]*Group, error) { // Name:Pass:Gid:List // root:x:0:root // adm:x:4:root,adm,daemon - p := &Group{} + p := Group{} parseLine( text, &p.Name, &p.Pass, &p.Gid, &p.List, @@ -165,94 +191,160 @@ func parseGroupFile(r io.Reader, filter func(*Group) bool) ([]*Group, error) { return out, nil } -// Given a string like "user", "1000", "user:group", "1000:1000", returns the uid, gid, list of supplementary group IDs, and home directory, if available and/or applicable. -func GetUserGroupSupplementaryHome(userSpec string, defaultUid, defaultGid int, defaultHome string) (int, int, []int, string, error) { - var ( - uid = defaultUid - gid = defaultGid - suppGids = []int{} - home = defaultHome +type ExecUser struct { + Uid, Gid int + Sgids []int + Home string +} +// GetExecUserFile is a wrapper for GetExecUser. It reads data from each of the +// given file paths and uses that data as the arguments to GetExecUser. If the +// files cannot be opened for any reason, the error is ignored and a nil +// io.Reader is passed instead. 
+func GetExecUserFile(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) { + passwd, err := os.Open(passwdPath) + if err != nil { + passwd = nil + } else { + defer passwd.Close() + } + + group, err := os.Open(groupPath) + if err != nil { + group = nil + } else { + defer group.Close() + } + + return GetExecUser(userSpec, defaults, passwd, group) +} + +// GetExecUser parses a user specification string (using the passwd and group +// readers as sources for /etc/passwd and /etc/group data, respectively). In +// the case of blank fields or missing data from the sources, the values in +// defaults is used. +// +// GetExecUser will return an error if a user or group literal could not be +// found in any entry in passwd and group respectively. +// +// Examples of valid user specifications are: +// * "" +// * "user" +// * "uid" +// * "user:group" +// * "uid:gid +// * "user:gid" +// * "uid:group" +func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) { + var ( userArg, groupArg string + name string ) + if defaults == nil { + defaults = new(ExecUser) + } + + // Copy over defaults. + user := &ExecUser{ + Uid: defaults.Uid, + Gid: defaults.Gid, + Sgids: defaults.Sgids, + Home: defaults.Home, + } + + // Sgids slice *cannot* be nil. 
+ if user.Sgids == nil { + user.Sgids = []int{} + } + // allow for userArg to have either "user" syntax, or optionally "user:group" syntax parseLine(userSpec, &userArg, &groupArg) - users, err := ParsePasswdFilter(func(u *User) bool { + users, err := ParsePasswdFilter(passwd, func(u User) bool { if userArg == "" { - return u.Uid == uid + return u.Uid == user.Uid } return u.Name == userArg || strconv.Itoa(u.Uid) == userArg }) - if err != nil && !os.IsNotExist(err) { + if err != nil && passwd != nil { if userArg == "" { - userArg = strconv.Itoa(uid) + userArg = strconv.Itoa(user.Uid) } - return 0, 0, nil, "", fmt.Errorf("Unable to find user %v: %v", userArg, err) + return nil, fmt.Errorf("Unable to find user %v: %v", userArg, err) } haveUser := users != nil && len(users) > 0 if haveUser { // if we found any user entries that matched our filter, let's take the first one as "correct" - uid = users[0].Uid - gid = users[0].Gid - home = users[0].Home + name = users[0].Name + user.Uid = users[0].Uid + user.Gid = users[0].Gid + user.Home = users[0].Home } else if userArg != "" { // we asked for a user but didn't find them... let's check to see if we wanted a numeric user - uid, err = strconv.Atoi(userArg) + user.Uid, err = strconv.Atoi(userArg) if err != nil { // not numeric - we have to bail - return 0, 0, nil, "", fmt.Errorf("Unable to find user %v", userArg) + return nil, fmt.Errorf("Unable to find user %v", userArg) } - if uid < minId || uid > maxId { - return 0, 0, nil, "", ErrRange + + // Must be inside valid uid range. + if user.Uid < minId || user.Uid > maxId { + return nil, ErrRange } // if userArg couldn't be found in /etc/passwd but is numeric, just roll with it - this is legit } - if groupArg != "" || (haveUser && users[0].Name != "") { - groups, err := ParseGroupFilter(func(g *Group) bool { + if groupArg != "" || name != "" { + groups, err := ParseGroupFilter(group, func(g Group) bool { + // Explicit group format takes precedence. 
if groupArg != "" { return g.Name == groupArg || strconv.Itoa(g.Gid) == groupArg } + + // Check if user is a member. for _, u := range g.List { - if u == users[0].Name { + if u == name { return true } } + return false }) - if err != nil && !os.IsNotExist(err) { - return 0, 0, nil, "", fmt.Errorf("Unable to find groups for user %v: %v", users[0].Name, err) + if err != nil && group != nil { + return nil, fmt.Errorf("Unable to find groups for user %v: %v", users[0].Name, err) } haveGroup := groups != nil && len(groups) > 0 if groupArg != "" { if haveGroup { // if we found any group entries that matched our filter, let's take the first one as "correct" - gid = groups[0].Gid + user.Gid = groups[0].Gid } else { // we asked for a group but didn't find id... let's check to see if we wanted a numeric group - gid, err = strconv.Atoi(groupArg) + user.Gid, err = strconv.Atoi(groupArg) if err != nil { // not numeric - we have to bail - return 0, 0, nil, "", fmt.Errorf("Unable to find group %v", groupArg) + return nil, fmt.Errorf("Unable to find group %v", groupArg) } - if gid < minId || gid > maxId { - return 0, 0, nil, "", ErrRange + + // Ensure gid is inside gid range. + if user.Gid < minId || user.Gid > maxId { + return nil, ErrRange } // if groupArg couldn't be found in /etc/group but is numeric, just roll with it - this is legit } } else if haveGroup { - suppGids = make([]int, len(groups)) + // If implicit group format, fill supplementary gids. 
+ user.Sgids = make([]int, len(groups)) for i, group := range groups { - suppGids[i] = group.Gid + user.Sgids[i] = group.Gid } } } - return uid, gid, suppGids, home, nil + return user, nil } diff --git a/vendor/src/github.com/docker/libcontainer/user/user_test.go b/vendor/src/github.com/docker/libcontainer/user/user_test.go index 136632c27e..4fe008fb39 100644 --- a/vendor/src/github.com/docker/libcontainer/user/user_test.go +++ b/vendor/src/github.com/docker/libcontainer/user/user_test.go @@ -1,6 +1,8 @@ package user import ( + "io" + "reflect" "strings" "testing" ) @@ -54,7 +56,7 @@ func TestUserParseLine(t *testing.T) { } func TestUserParsePasswd(t *testing.T) { - users, err := parsePasswdFile(strings.NewReader(` + users, err := ParsePasswdFilter(strings.NewReader(` root:x:0:0:root:/root:/bin/bash adm:x:3:4:adm:/var/adm:/bin/false this is just some garbage data @@ -74,7 +76,7 @@ this is just some garbage data } func TestUserParseGroup(t *testing.T) { - groups, err := parseGroupFile(strings.NewReader(` + groups, err := ParseGroupFilter(strings.NewReader(` root:x:0:root adm:x:4:root,adm,daemon this is just some garbage data @@ -92,3 +94,259 @@ this is just some garbage data t.Fatalf("Expected groups[1] to be 4 - adm - 3 members, got %v - %v - %v", groups[1].Gid, groups[1].Name, len(groups[1].List)) } } + +func TestValidGetExecUser(t *testing.T) { + const passwdContent = ` +root:x:0:0:root user:/root:/bin/bash +adm:x:42:43:adm:/var/adm:/bin/false +this is just some garbage data +` + const groupContent = ` +root:x:0:root +adm:x:43: +grp:x:1234:root,adm +this is just some garbage data +` + defaultExecUser := ExecUser{ + Uid: 8888, + Gid: 8888, + Sgids: []int{8888}, + Home: "/8888", + } + + tests := []struct { + ref string + expected ExecUser + }{ + { + ref: "root", + expected: ExecUser{ + Uid: 0, + Gid: 0, + Sgids: []int{0, 1234}, + Home: "/root", + }, + }, + { + ref: "adm", + expected: ExecUser{ + Uid: 42, + Gid: 43, + Sgids: []int{1234}, + Home: "/var/adm", + }, + 
}, + { + ref: "root:adm", + expected: ExecUser{ + Uid: 0, + Gid: 43, + Sgids: defaultExecUser.Sgids, + Home: "/root", + }, + }, + { + ref: "adm:1234", + expected: ExecUser{ + Uid: 42, + Gid: 1234, + Sgids: defaultExecUser.Sgids, + Home: "/var/adm", + }, + }, + { + ref: "42:1234", + expected: ExecUser{ + Uid: 42, + Gid: 1234, + Sgids: defaultExecUser.Sgids, + Home: "/var/adm", + }, + }, + { + ref: "1337:1234", + expected: ExecUser{ + Uid: 1337, + Gid: 1234, + Sgids: defaultExecUser.Sgids, + Home: defaultExecUser.Home, + }, + }, + { + ref: "1337", + expected: ExecUser{ + Uid: 1337, + Gid: defaultExecUser.Gid, + Sgids: defaultExecUser.Sgids, + Home: defaultExecUser.Home, + }, + }, + { + ref: "", + expected: ExecUser{ + Uid: defaultExecUser.Uid, + Gid: defaultExecUser.Gid, + Sgids: defaultExecUser.Sgids, + Home: defaultExecUser.Home, + }, + }, + } + + for _, test := range tests { + passwd := strings.NewReader(passwdContent) + group := strings.NewReader(groupContent) + + execUser, err := GetExecUser(test.ref, &defaultExecUser, passwd, group) + if err != nil { + t.Logf("got unexpected error when parsing '%s': %s", test.ref, err.Error()) + t.Fail() + continue + } + + if !reflect.DeepEqual(test.expected, *execUser) { + t.Logf("got: %#v", execUser) + t.Logf("expected: %#v", test.expected) + t.Fail() + continue + } + } +} + +func TestInvalidGetExecUser(t *testing.T) { + const passwdContent = ` +root:x:0:0:root user:/root:/bin/bash +adm:x:42:43:adm:/var/adm:/bin/false +this is just some garbage data +` + const groupContent = ` +root:x:0:root +adm:x:43: +grp:x:1234:root,adm +this is just some garbage data +` + + tests := []string{ + // No such user/group. + "notuser", + "notuser:notgroup", + "root:notgroup", + "notuser:adm", + "8888:notgroup", + "notuser:8888", + + // Invalid user/group values. 
+ "-1:0", + "0:-3", + "-5:-2", + } + + for _, test := range tests { + passwd := strings.NewReader(passwdContent) + group := strings.NewReader(groupContent) + + execUser, err := GetExecUser(test, nil, passwd, group) + if err == nil { + t.Logf("got unexpected success when parsing '%s': %#v", test, execUser) + t.Fail() + continue + } + } +} + +func TestGetExecUserNilSources(t *testing.T) { + const passwdContent = ` +root:x:0:0:root user:/root:/bin/bash +adm:x:42:43:adm:/var/adm:/bin/false +this is just some garbage data +` + const groupContent = ` +root:x:0:root +adm:x:43: +grp:x:1234:root,adm +this is just some garbage data +` + + defaultExecUser := ExecUser{ + Uid: 8888, + Gid: 8888, + Sgids: []int{8888}, + Home: "/8888", + } + + tests := []struct { + ref string + passwd, group bool + expected ExecUser + }{ + { + ref: "", + passwd: false, + group: false, + expected: ExecUser{ + Uid: 8888, + Gid: 8888, + Sgids: []int{8888}, + Home: "/8888", + }, + }, + { + ref: "root", + passwd: true, + group: false, + expected: ExecUser{ + Uid: 0, + Gid: 0, + Sgids: []int{8888}, + Home: "/root", + }, + }, + { + ref: "0", + passwd: false, + group: false, + expected: ExecUser{ + Uid: 0, + Gid: 8888, + Sgids: []int{8888}, + Home: "/8888", + }, + }, + { + ref: "0:0", + passwd: false, + group: false, + expected: ExecUser{ + Uid: 0, + Gid: 0, + Sgids: []int{8888}, + Home: "/8888", + }, + }, + } + + for _, test := range tests { + var passwd, group io.Reader + + if test.passwd { + passwd = strings.NewReader(passwdContent) + } + + if test.group { + group = strings.NewReader(groupContent) + } + + execUser, err := GetExecUser(test.ref, &defaultExecUser, passwd, group) + if err != nil { + t.Logf("got unexpected error when parsing '%s': %s", test.ref, err.Error()) + t.Fail() + continue + } + + if !reflect.DeepEqual(test.expected, *execUser) { + t.Logf("got: %#v", execUser) + t.Logf("expected: %#v", test.expected) + t.Fail() + continue + } + } +} diff --git 
a/vendor/src/github.com/docker/libcontainer/utils/utils_test.go b/vendor/src/github.com/docker/libcontainer/utils/utils_test.go new file mode 100644 index 0000000000..41ef1aa3df --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/utils/utils_test.go @@ -0,0 +1,15 @@ +package utils + +import "testing" + +func TestGenerateName(t *testing.T) { + name, err := GenerateRandomName("veth", 5) + if err != nil { + t.Fatal(err) + } + + expected := 5 + len("veth") + if len(name) != 5+len("veth") { + t.Fatalf("expected name to be %d chars but received %d", expected, len(name)) + } +} diff --git a/vendor/src/github.com/docker/libcontainer/xattr/errors.go b/vendor/src/github.com/docker/libcontainer/xattr/errors.go new file mode 100644 index 0000000000..8cd77418cc --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/xattr/errors.go @@ -0,0 +1,8 @@ +package xattr + +import ( + "fmt" + "runtime" +) + +var ErrNotSupportedPlatform = fmt.Errorf("platform and architecture is not supported %s %s", runtime.GOOS, runtime.GOARCH) diff --git a/vendor/src/github.com/docker/libcontainer/xattr/xattr.go b/vendor/src/github.com/docker/libcontainer/xattr/xattr_linux.go similarity index 100% rename from vendor/src/github.com/docker/libcontainer/xattr/xattr.go rename to vendor/src/github.com/docker/libcontainer/xattr/xattr_linux.go diff --git a/vendor/src/github.com/docker/libcontainer/xattr/xattr_unsupported.go b/vendor/src/github.com/docker/libcontainer/xattr/xattr_unsupported.go new file mode 100644 index 0000000000..821dea3be1 --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/xattr/xattr_unsupported.go @@ -0,0 +1,15 @@ +// +build !linux + +package xattr + +func Listxattr(path string) ([]string, error) { + return nil, ErrNotSupportedPlatform +} + +func Getxattr(path, attr string) (string, error) { + return "", ErrNotSupportedPlatform +} + +func Setxattr(path, xattr, value string) error { + return ErrNotSupportedPlatform +} diff --git 
a/vendor/src/github.com/docker/libtrust/ec_key.go b/vendor/src/github.com/docker/libtrust/ec_key.go index c7ac6844cf..f642acbcfa 100644 --- a/vendor/src/github.com/docker/libtrust/ec_key.go +++ b/vendor/src/github.com/docker/libtrust/ec_key.go @@ -55,16 +55,7 @@ func (k *ecPublicKey) CurveName() string { // KeyID returns a distinct identifier which is unique to this Public Key. func (k *ecPublicKey) KeyID() string { - // Generate and return a libtrust fingerprint of the EC public key. - // For an EC key this should be: - // SHA256("EC"+curveName+bytes(X)+bytes(Y)) - // Then truncated to 240 bits and encoded into 12 base32 groups like so: - // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP - hasher := crypto.SHA256.New() - hasher.Write([]byte(k.KeyType() + k.CurveName())) - hasher.Write(k.X.Bytes()) - hasher.Write(k.Y.Bytes()) - return keyIDEncode(hasher.Sum(nil)[:30]) + return keyIDFromCryptoKey(k) } func (k *ecPublicKey) String() string { diff --git a/vendor/src/github.com/docker/libtrust/filter.go b/vendor/src/github.com/docker/libtrust/filter.go index 945852afc8..5b2b4fca6f 100644 --- a/vendor/src/github.com/docker/libtrust/filter.go +++ b/vendor/src/github.com/docker/libtrust/filter.go @@ -11,9 +11,21 @@ func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKe filtered := make([]PublicKey, 0, len(keys)) for _, pubKey := range keys { - hosts, ok := pubKey.GetExtendedField("hosts").([]interface{}) + var hosts []string + switch v := pubKey.GetExtendedField("hosts").(type) { + case []string: + hosts = v + case []interface{}: + for _, value := range v { + h, ok := value.(string) + if !ok { + continue + } + hosts = append(hosts, h) + } + } - if !ok || (ok && len(hosts) == 0) { + if len(hosts) == 0 { if includeEmpty { filtered = append(filtered, pubKey) } @@ -21,12 +33,7 @@ func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKe } // Check if any hosts match pattern - for _, hostVal := range hosts { - 
hostPattern, ok := hostVal.(string) - if !ok { - continue - } - + for _, hostPattern := range hosts { match, err := filepath.Match(hostPattern, host) if err != nil { return nil, err @@ -37,7 +44,6 @@ func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKe continue } } - } return filtered, nil diff --git a/vendor/src/github.com/docker/libtrust/filter_test.go b/vendor/src/github.com/docker/libtrust/filter_test.go index b24e3322e6..997e554c04 100644 --- a/vendor/src/github.com/docker/libtrust/filter_test.go +++ b/vendor/src/github.com/docker/libtrust/filter_test.go @@ -27,6 +27,8 @@ func TestFilter(t *testing.T) { t.Fatal(err) } + // we use both []interface{} and []string here because jwt uses + // []interface{} format, while PEM uses []string switch { case i == 0: // Don't add entries for this key, key 0. @@ -36,10 +38,10 @@ func TestFilter(t *testing.T) { key.AddExtendedField("hosts", []interface{}{"*.even.example.com"}) case i == 7: // Should catch only the last key, and make it match any hostname. - key.AddExtendedField("hosts", []interface{}{"*"}) + key.AddExtendedField("hosts", []string{"*"}) default: // should catch keys 1, 3, 5. 
- key.AddExtendedField("hosts", []interface{}{"*.example.com"}) + key.AddExtendedField("hosts", []string{"*.example.com"}) } keys = append(keys, key) diff --git a/vendor/src/github.com/docker/libtrust/key_files_test.go b/vendor/src/github.com/docker/libtrust/key_files_test.go index 66c71dd43f..57e691f2ed 100644 --- a/vendor/src/github.com/docker/libtrust/key_files_test.go +++ b/vendor/src/github.com/docker/libtrust/key_files_test.go @@ -138,7 +138,7 @@ func testTrustedHostKeysFile(t *testing.T, trustedHostKeysFilename string) { } for addr, hostKey := range trustedHostKeysMapping { - t.Logf("Host Address: %s\n", addr) + t.Logf("Host Address: %d\n", addr) t.Logf("Host Key: %s\n\n", hostKey) } @@ -160,7 +160,7 @@ func testTrustedHostKeysFile(t *testing.T, trustedHostKeysFilename string) { } for addr, hostKey := range trustedHostKeysMapping { - t.Logf("Host Address: %s\n", addr) + t.Logf("Host Address: %d\n", addr) t.Logf("Host Key: %s\n\n", hostKey) } diff --git a/vendor/src/github.com/docker/libtrust/rsa_key.go b/vendor/src/github.com/docker/libtrust/rsa_key.go index 45463039d2..ecb15b56f3 100644 --- a/vendor/src/github.com/docker/libtrust/rsa_key.go +++ b/vendor/src/github.com/docker/libtrust/rsa_key.go @@ -34,16 +34,7 @@ func (k *rsaPublicKey) KeyType() string { // KeyID returns a distinct identifier which is unique to this Public Key. func (k *rsaPublicKey) KeyID() string { - // Generate and return a 'libtrust' fingerprint of the RSA public key. 
- // For an RSA key this should be: - // SHA256("RSA"+bytes(N)+bytes(E)) - // Then truncated to 240 bits and encoded into 12 base32 groups like so: - // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP - hasher := crypto.SHA256.New() - hasher.Write([]byte(k.KeyType())) - hasher.Write(k.N.Bytes()) - hasher.Write(serializeRSAPublicExponentParam(k.E)) - return keyIDEncode(hasher.Sum(nil)[:30]) + return keyIDFromCryptoKey(k) } func (k *rsaPublicKey) String() string { diff --git a/vendor/src/github.com/docker/libtrust/trustgraph/statement_test.go b/vendor/src/github.com/docker/libtrust/trustgraph/statement_test.go index d9c3c1a1ea..e509468659 100644 --- a/vendor/src/github.com/docker/libtrust/trustgraph/statement_test.go +++ b/vendor/src/github.com/docker/libtrust/trustgraph/statement_test.go @@ -201,7 +201,7 @@ func TestCollapseGrants(t *testing.T) { collapsedGrants, expiration, err := CollapseStatements(statements, false) if len(collapsedGrants) != 12 { - t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %s", 12, len(collapsedGrants)) + t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %d", 12, len(collapsedGrants)) } if expiration.After(time.Now().Add(time.Hour*5)) || expiration.Before(time.Now()) { t.Fatalf("Unexpected expiration time: %s", expiration.String()) @@ -261,7 +261,7 @@ func TestCollapseGrants(t *testing.T) { collapsedGrants, expiration, err = CollapseStatements(statements, false) if len(collapsedGrants) != 12 { - t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %s", 12, len(collapsedGrants)) + t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %d", 12, len(collapsedGrants)) } if expiration.After(time.Now().Add(time.Hour*5)) || expiration.Before(time.Now()) { t.Fatalf("Unexpected expiration time: %s", expiration.String()) diff --git a/vendor/src/github.com/docker/libtrust/util.go b/vendor/src/github.com/docker/libtrust/util.go index 3b2fac95b1..4d5a6200a8 100644 --- 
a/vendor/src/github.com/docker/libtrust/util.go +++ b/vendor/src/github.com/docker/libtrust/util.go @@ -2,6 +2,7 @@ package libtrust import ( "bytes" + "crypto" "crypto/elliptic" "crypto/x509" "encoding/base32" @@ -52,6 +53,21 @@ func keyIDEncode(b []byte) string { return buf.String() } +func keyIDFromCryptoKey(pubKey PublicKey) string { + // Generate and return a 'libtrust' fingerprint of the public key. + // For an RSA key this should be: + // SHA256(DER encoded ASN1) + // Then truncated to 240 bits and encoded into 12 base32 groups like so: + // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP + derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey()) + if err != nil { + return "" + } + hasher := crypto.SHA256.New() + hasher.Write(derBytes) + return keyIDEncode(hasher.Sum(nil)[:30]) +} + func stringFromMap(m map[string]interface{}, key string) (string, error) { val, ok := m[key] if !ok { diff --git a/vendor/src/github.com/docker/libtrust/util_test.go b/vendor/src/github.com/docker/libtrust/util_test.go new file mode 100644 index 0000000000..ee54f5b8cc --- /dev/null +++ b/vendor/src/github.com/docker/libtrust/util_test.go @@ -0,0 +1,23 @@ +package libtrust + +import ( + "encoding/pem" + "reflect" + "testing" +) + +func TestAddPEMHeadersToKey(t *testing.T) { + pk := &rsaPublicKey{nil, map[string]interface{}{}} + blk := &pem.Block{Headers: map[string]string{"hosts": "localhost,127.0.0.1"}} + addPEMHeadersToKey(blk, pk) + + val := pk.GetExtendedField("hosts") + hosts, ok := val.([]string) + if !ok { + t.Fatalf("hosts type(%v), expected []string", reflect.TypeOf(val)) + } + expected := []string{"localhost", "127.0.0.1"} + if !reflect.DeepEqual(hosts, expected) { + t.Errorf("hosts(%v), expected %v", hosts, expected) + } +} diff --git a/volumes/repository.go b/volumes/repository.go index 2383f34a93..225148b60e 100644 --- a/volumes/repository.go +++ b/volumes/repository.go @@ -7,8 +7,8 @@ import ( "path/filepath" "sync" + log 
"github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/utils" ) @@ -166,9 +166,6 @@ func (r *Repository) Delete(path string) error { return fmt.Errorf("Volume %s does not exist", path) } - if volume.IsBindMount { - return fmt.Errorf("Volume %s is a bind-mount and cannot be removed", volume.Path) - } containers := volume.Containers() if len(containers) > 0 { return fmt.Errorf("Volume %s is being used and cannot be removed: used by containers %s", volume.Path, containers) @@ -178,6 +175,10 @@ func (r *Repository) Delete(path string) error { return err } + if volume.IsBindMount { + return nil + } + if err := r.driver.Remove(volume.ID); err != nil { if !os.IsNotExist(err) { return err diff --git a/volumes/volume.go b/volumes/volume.go index e2d7a726db..d718b07d70 100644 --- a/volumes/volume.go +++ b/volumes/volume.go @@ -2,11 +2,14 @@ package volumes import ( "encoding/json" + "io" "io/ioutil" "os" + "path" "path/filepath" "sync" + "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/symlink" ) @@ -21,6 +24,35 @@ type Volume struct { lock sync.Mutex } +func (v *Volume) Export(resource, name string) (io.ReadCloser, error) { + if v.IsBindMount && filepath.Base(resource) == name { + name = "" + } + + basePath, err := v.getResourcePath(resource) + if err != nil { + return nil, err + } + stat, err := os.Stat(basePath) + if err != nil { + return nil, err + } + var filter []string + if !stat.IsDir() { + d, f := path.Split(basePath) + basePath = d + filter = []string{f} + } else { + filter = []string{path.Base(basePath)} + basePath = path.Dir(basePath) + } + return archive.TarWithOptions(basePath, &archive.TarOptions{ + Compression: archive.Uncompressed, + Name: name, + Includes: filter, + }) +} + func (v *Volume) IsDir() (bool, error) { stat, err := os.Stat(v.Path) if err != nil { @@ -122,12 +154,15 @@ func (v *Volume) FromDisk() error { return err } - data, err := 
ioutil.ReadFile(pth) + jsonSource, err := os.Open(pth) if err != nil { return err } + defer jsonSource.Close() - return json.Unmarshal(data, v) + dec := json.NewDecoder(jsonSource) + + return dec.Decode(v) } func (v *Volume) jsonPath() (string, error) { @@ -137,3 +172,8 @@ func (v *Volume) getRootResourcePath(path string) (string, error) { cleanPath := filepath.Join("/", path) return symlink.FollowSymlinkInScope(filepath.Join(v.configPath, cleanPath), v.configPath) } + +func (v *Volume) getResourcePath(path string) (string, error) { + cleanPath := filepath.Join("/", path) + return symlink.FollowSymlinkInScope(filepath.Join(v.Path, cleanPath), v.Path) +}