From 6de5ca1e64407582debe01f17377cdb7bb8d70c5 Mon Sep 17 00:00:00 2001 From: Thatcher Peskens Date: Thu, 18 Apr 2013 16:00:18 -0700 Subject: [PATCH 1/7] Added redirect from old location of documentation (/documentation), these was the location when we were on github. --- docs/Makefile | 1 + docs/sources/nginx.conf | 4 ++++ 2 files changed, 5 insertions(+) create mode 100644 docs/sources/nginx.conf diff --git a/docs/Makefile b/docs/Makefile index f74bf5705e..77f14ee92f 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -51,6 +51,7 @@ docs: cp sources/dotcloud.yml $(BUILDDIR)/html/ cp sources/CNAME $(BUILDDIR)/html/ cp sources/.nojekyll $(BUILDDIR)/html/ + cp sources/nginx.conf $(BUILDDIR)/html/ @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." diff --git a/docs/sources/nginx.conf b/docs/sources/nginx.conf new file mode 100644 index 0000000000..cbc954318c --- /dev/null +++ b/docs/sources/nginx.conf @@ -0,0 +1,4 @@ + +# rule to redirect original links created when hosted on github pages +rewrite ^/documentation/(.*).html http://docs.docker.io/en/latest/$1/ permanent; + From 8ecde8f9a5dcad23afea013a62d373cef303fa84 Mon Sep 17 00:00:00 2001 From: Thatcher Peskens Date: Fri, 19 Apr 2013 20:57:50 -0700 Subject: [PATCH 2/7] Updated documentation and fixed Vagrantfile --- Vagrantfile | 51 +++++++++++++----- docs/sources/installation/binaries.rst | 56 ++++++++++++++++++++ docs/sources/installation/index.rst | 2 +- docs/sources/installation/vagrant.rst | 73 ++++++++++++++++++++++++++ docs/sources/installation/windows.rst | 4 +- docs/sources/nginx.conf | 2 + 6 files changed, 172 insertions(+), 16 deletions(-) create mode 100644 docs/sources/installation/binaries.rst create mode 100644 docs/sources/installation/vagrant.rst diff --git a/Vagrantfile b/Vagrantfile index 48b3ef567a..f49e781563 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -2,19 +2,13 @@ # vi: set ft=ruby : def v10(config) - config.vm.box = "quantal64_3.5.0-25" - config.vm.box_url = 
"http://get.docker.io/vbox/ubuntu/12.10/quantal64_3.5.0-25.box" + config.vm.box = 'precise64' + config.vm.box_url = 'http://files.vagrantup.com/precise64.box' - config.vm.share_folder "v-data", "/opt/go/src/github.com/dotcloud/docker", File.dirname(__FILE__) + # Install ubuntu packaging dependencies and create ubuntu packages + config.vm.provision :shell, :inline => "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' >>/etc/apt/sources.list" + config.vm.provision :shell, :inline => 'export DEBIAN_FRONTEND=noninteractive; apt-get -qq update; apt-get install -qq -y --force-yes lxc-docker' - # Ensure puppet is installed on the instance - config.vm.provision :shell, :inline => "apt-get -qq update; apt-get install -y puppet" - - config.vm.provision :puppet do |puppet| - puppet.manifests_path = "puppet/manifests" - puppet.manifest_file = "quantal64.pp" - puppet.module_path = "puppet/modules" - end end Vagrant::VERSION < "1.1.0" and Vagrant::Config.run do |config| @@ -30,11 +24,11 @@ Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config| config.vm.box = "dummy" config.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box" aws.access_key_id = ENV["AWS_ACCESS_KEY_ID"] - aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"] + aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"] aws.keypair_name = ENV["AWS_KEYPAIR_NAME"] aws.ssh_private_key_path = ENV["AWS_SSH_PRIVKEY"] aws.region = "us-east-1" - aws.ami = "ami-ae9806c7" + aws.ami = "ami-d0f89fb9" aws.ssh_username = "ubuntu" aws.instance_type = "t1.micro" end @@ -55,3 +49,34 @@ Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config| config.vm.box_url = "http://get.docker.io/vbox/ubuntu/12.10/quantal64_3.5.0-25.box" end end + +Vagrant::VERSION >= "1.2.0" and Vagrant.configure("2") do |config| + config.vm.provider :aws do |aws, override| + config.vm.box = "dummy" + config.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box" + 
aws.access_key_id = ENV["AWS_ACCESS_KEY_ID"] + aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"] + aws.keypair_name = ENV["AWS_KEYPAIR_NAME"] + override.ssh.private_key_path = ENV["AWS_SSH_PRIVKEY"] + override.ssh.username = "ubuntu" + aws.region = "us-east-1" + aws.ami = "ami-d0f89fb9" + aws.instance_type = "t1.micro" + end + + config.vm.provider :rackspace do |rs| + config.vm.box = "dummy" + config.vm.box_url = "https://github.com/mitchellh/vagrant-rackspace/raw/master/dummy.box" + config.ssh.private_key_path = ENV["RS_PRIVATE_KEY"] + rs.username = ENV["RS_USERNAME"] + rs.api_key = ENV["RS_API_KEY"] + rs.public_key_path = ENV["RS_PUBLIC_KEY"] + rs.flavor = /512MB/ + rs.image = /Ubuntu/ + end + + config.vm.provider :virtualbox do |vb| + config.vm.box = "quantal64_3.5.0-25" + config.vm.box_url = "http://get.docker.io/vbox/ubuntu/12.10/quantal64_3.5.0-25.box" + end +end diff --git a/docs/sources/installation/binaries.rst b/docs/sources/installation/binaries.rst new file mode 100644 index 0000000000..bf83a5bc88 --- /dev/null +++ b/docs/sources/installation/binaries.rst @@ -0,0 +1,56 @@ +.. _ubuntu_linux: + +Ubuntu Linux +============ + + **Please note this project is currently under heavy development. It should not be used in production.** + + + +Installing on Ubuntu 12.04 and 12.10 + +Right now, the officially supported distributions are: + +Ubuntu 12.04 (precise LTS) +Ubuntu 12.10 (quantal) +Docker probably works on other distributions featuring a recent kernel, the AUFS patch, and up-to-date lxc. However this has not been tested. + +Install dependencies: +--------------------- + +:: + + sudo apt-get install lxc wget bsdtar curl + sudo apt-get install linux-image-extra-`uname -r` + +The linux-image-extra package is needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module. 
+ +Install the latest docker binary: + +:: + + wget http://get.docker.io/builds/$(uname -s)/$(uname -m)/docker-master.tgz + tar -xf docker-master.tgz + +Run your first container! + +:: + + cd docker-master + +:: + + sudo ./docker run -i -t base /bin/bash + + +To run docker as a daemon, in the background, and allow non-root users to run ``docker`` start +docker -d + +:: + + sudo ./docker -d & + + +Consider adding docker to your PATH for simplicity. + +Continue with the :ref:`hello_world` example. \ No newline at end of file diff --git a/docs/sources/installation/index.rst b/docs/sources/installation/index.rst index b02e9c83ac..ae11258875 100644 --- a/docs/sources/installation/index.rst +++ b/docs/sources/installation/index.rst @@ -13,7 +13,7 @@ Contents: :maxdepth: 1 ubuntulinux - macos + vagrant windows amazon upgrading diff --git a/docs/sources/installation/vagrant.rst b/docs/sources/installation/vagrant.rst new file mode 100644 index 0000000000..5b57721425 --- /dev/null +++ b/docs/sources/installation/vagrant.rst @@ -0,0 +1,73 @@ + +.. _install_using_vagrant: + +Install using Vagrant +===================== + + Please note this is a community contributed installation path. The only 'official' installation is using the :ref:`ubuntu_linux` installation path. This version + may be out of date because it depends on some binaries to be updated and published + +**requirements** +This guide will setup a new virtual machine on your computer. This works on most operating systems, +including MacOX, Windows, Linux, FreeBSD and others. If you can +install these and have at least 400Mb RAM to spare you should be good. + + +Install Vagrant, Virtualbox and Git +----------------------------------- + +We currently rely on some Ubuntu-linux specific packages, this will change in the future, but for now we provide a +streamlined path to install Virtualbox with a Ubuntu 12.10 image using Vagrant. + +1. 
Install virtualbox from https://www.virtualbox.org/ (or use your package manager) +2. Install vagrant from http://www.vagrantup.com/ (or use your package manager) +3. Install git if you had not installed it before, check if it is installed by running + ``git`` in a terminal window + +We recommend having at least about 2Gb of free disk space and 2Gb RAM (or more). + +Spin up your machine +-------------------- + +1. Fetch the docker sources + +.. code-block:: bash + + git clone https://github.com/dotcloud/docker.git + +2. Run vagrant from the sources directory + +.. code-block:: bash + + vagrant up + +Vagrant will: + +* Download the Quantal64 base ubuntu virtual machine image from get.docker.io/ +* Boot this image in virtualbox + +Then it will use Puppet to perform an initial setup in this machine: + +* Download & untar the most recent docker binary tarball to vagrant homedir. +* Debootstrap to /var/lib/docker/images/ubuntu. +* Install & run dockerd as service. +* Put docker in /usr/local/bin. +* Put latest Go toolchain in /usr/local/go. + +You now have a Ubuntu Virtual Machine running with docker pre-installed. + +To access the VM and use Docker, Run ``vagrant ssh`` from the same directory as where you ran +``vagrant up``. Vagrant will make sure to connect you to the correct VM. + +.. code-block:: bash + + vagrant ssh + +Now you are in the VM, run docker + +.. code-block:: bash + + docker + + +Continue with the :ref:`hello_world` example. diff --git a/docs/sources/installation/windows.rst b/docs/sources/installation/windows.rst index 6091d6bac1..a89d3a9014 100644 --- a/docs/sources/installation/windows.rst +++ b/docs/sources/installation/windows.rst @@ -3,8 +3,8 @@ :keywords: Docker, Docker documentation, Windows, requirements, virtualbox, vagrant, git, ssh, putty, cygwin -Windows -========= +Windows (with Vagrant) +====================== Please note this is a community contributed installation path. 
The only 'official' installation is using the :ref:`ubuntu_linux` installation path. This version may be out of date because it depends on some binaries to be updated and published diff --git a/docs/sources/nginx.conf b/docs/sources/nginx.conf index cbc954318c..97ffd2c0e5 100644 --- a/docs/sources/nginx.conf +++ b/docs/sources/nginx.conf @@ -2,3 +2,5 @@ # rule to redirect original links created when hosted on github pages rewrite ^/documentation/(.*).html http://docs.docker.io/en/latest/$1/ permanent; +# rewrite the stuff which was on the current page +rewrite ^/gettingstarted.html$ /gettingstarted/ permanent; From 0731d1a582b44a18e9bfdf0764d232b46218346b Mon Sep 17 00:00:00 2001 From: Thatcher Peskens Date: Fri, 19 Apr 2013 20:59:43 -0700 Subject: [PATCH 3/7] Updated ubuntu install --- docs/sources/installation/ubuntulinux.rst | 88 ++++++++++------------- 1 file changed, 37 insertions(+), 51 deletions(-) diff --git a/docs/sources/installation/ubuntulinux.rst b/docs/sources/installation/ubuntulinux.rst index bf83a5bc88..a822242cee 100644 --- a/docs/sources/installation/ubuntulinux.rst +++ b/docs/sources/installation/ubuntulinux.rst @@ -1,56 +1,42 @@ -.. _ubuntu_linux: +Docker on Ubuntu +================ -Ubuntu Linux -============ +Docker is now available as a Ubuntu PPA (Personal Package Archive), which makes installing Docker on Ubuntu super easy! - **Please note this project is currently under heavy development. It should not be used in production.** +**The Requirements** + +* Ubuntu 12.04 (LTS) or Ubuntu 12.10 +* **64-bit Operating system** + + +Add the custom package sources to your apt sources list. Copy and paste both the following lines at once. + +.. code-block:: bash + + sudo sh -c "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' \ + >> /etc/apt/sources.list" + + +Update your sources. You will see a warning that GPG signatures cannot be verified + +.. 
code-block:: bash + + sudo apt-get update + + +Now install it, you will see another warning that the package cannot be authenticated. + +.. code-block:: bash + + sudo apt-get install lxc-docker + + +**Run!** + +.. code-block:: bash + + docker -Installing on Ubuntu 12.04 and 12.10 - -Right now, the officially supported distributions are: - -Ubuntu 12.04 (precise LTS) -Ubuntu 12.10 (quantal) -Docker probably works on other distributions featuring a recent kernel, the AUFS patch, and up-to-date lxc. However this has not been tested. - -Install dependencies: ---------------------- - -:: - - sudo apt-get install lxc wget bsdtar curl - sudo apt-get install linux-image-extra-`uname -r` - -The linux-image-extra package is needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module. - -Install the latest docker binary: - -:: - - wget http://get.docker.io/builds/$(uname -s)/$(uname -m)/docker-master.tgz - tar -xf docker-master.tgz - -Run your first container! - -:: - - cd docker-master - -:: - - sudo ./docker run -i -t base /bin/bash - - -To run docker as a daemon, in the background, and allow non-root users to run ``docker`` start -docker -d - -:: - - sudo ./docker -d & - - -Consider adding docker to your PATH for simplicity. - -Continue with the :ref:`hello_world` example. \ No newline at end of file +Probably you would like to continue with the :ref:`hello_world` example. \ No newline at end of file From 6c8dcd5cbbff2b33f878a32ae3b93abc0d7b9dae Mon Sep 17 00:00:00 2001 From: Thatcher Peskens Date: Mon, 22 Apr 2013 13:10:32 -0700 Subject: [PATCH 4/7] Updated Vagrantfile and documentation to reflect new installation path using Ubuntu's PPA, also switched everything to use Ubuntu 12.04 by default. 
--- Vagrantfile | 10 +++---- docs/sources/installation/ubuntulinux.rst | 8 +++-- docs/sources/installation/upgrading.rst | 3 +- docs/sources/installation/vagrant.rst | 36 ++++++++--------------- 4 files changed, 25 insertions(+), 32 deletions(-) diff --git a/Vagrantfile b/Vagrantfile index f49e781563..01cfd14272 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -8,7 +8,6 @@ def v10(config) # Install ubuntu packaging dependencies and create ubuntu packages config.vm.provision :shell, :inline => "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' >>/etc/apt/sources.list" config.vm.provision :shell, :inline => 'export DEBIAN_FRONTEND=noninteractive; apt-get -qq update; apt-get install -qq -y --force-yes lxc-docker' - end Vagrant::VERSION < "1.1.0" and Vagrant::Config.run do |config| @@ -45,8 +44,8 @@ Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config| end config.vm.provider :virtualbox do |vb| - config.vm.box = "quantal64_3.5.0-25" - config.vm.box_url = "http://get.docker.io/vbox/ubuntu/12.10/quantal64_3.5.0-25.box" + config.vm.box = 'precise64' + config.vm.box_url = 'http://files.vagrantup.com/precise64.box' end end @@ -76,7 +75,8 @@ Vagrant::VERSION >= "1.2.0" and Vagrant.configure("2") do |config| end config.vm.provider :virtualbox do |vb| - config.vm.box = "quantal64_3.5.0-25" - config.vm.box_url = "http://get.docker.io/vbox/ubuntu/12.10/quantal64_3.5.0-25.box" + config.vm.box = 'precise64' + config.vm.box_url = 'http://files.vagrantup.com/precise64.box' end + end diff --git a/docs/sources/installation/ubuntulinux.rst b/docs/sources/installation/ubuntulinux.rst index a822242cee..94786f95d3 100644 --- a/docs/sources/installation/ubuntulinux.rst +++ b/docs/sources/installation/ubuntulinux.rst @@ -1,7 +1,9 @@ Docker on Ubuntu ================ -Docker is now available as a Ubuntu PPA (Personal Package Archive), which makes installing Docker on Ubuntu super easy! 
+Docker is now available as a Ubuntu PPA (Personal Package Archive), +`hosted on launchpad `_ +which makes installing Docker on Ubuntu very easy. **The Requirements** @@ -17,14 +19,14 @@ Add the custom package sources to your apt sources list. Copy and paste both the >> /etc/apt/sources.list" -Update your sources. You will see a warning that GPG signatures cannot be verified +Update your sources. You will see a warning that GPG signatures cannot be verified. .. code-block:: bash sudo apt-get update -Now install it, you will see another warning that the package cannot be authenticated. +Now install it, you will see another warning that the package cannot be authenticated. Confirm install. .. code-block:: bash diff --git a/docs/sources/installation/upgrading.rst b/docs/sources/installation/upgrading.rst index 4a1de88a7c..66825ac643 100644 --- a/docs/sources/installation/upgrading.rst +++ b/docs/sources/installation/upgrading.rst @@ -3,7 +3,8 @@ Upgrading ============ - We assume you are upgrading from within the operating system which runs your docker daemon. +These instructions are for upgrading your Docker binary for when you had a custom (non package manager) installation. +If you istalled docker using apt-get, use that to upgrade. Get the latest docker binary: diff --git a/docs/sources/installation/vagrant.rst b/docs/sources/installation/vagrant.rst index 5b57721425..a8249961a7 100644 --- a/docs/sources/installation/vagrant.rst +++ b/docs/sources/installation/vagrant.rst @@ -4,32 +4,28 @@ Install using Vagrant ===================== - Please note this is a community contributed installation path. The only 'official' installation is using the :ref:`ubuntu_linux` installation path. This version - may be out of date because it depends on some binaries to be updated and published + Please note this is a community contributed installation path. The only 'official' installation is using the + :ref:`ubuntu_linux` installation path. 
This version may sometimes be out of date. **requirements** -This guide will setup a new virtual machine on your computer. This works on most operating systems, -including MacOX, Windows, Linux, FreeBSD and others. If you can -install these and have at least 400Mb RAM to spare you should be good. +This guide will setup a new virtual machine with docker installed on your computer. This works on most operating +systems, including MacOX, Windows, Linux, FreeBSD and others. If you can install these and have at least 400Mb RAM +to spare you should be good. -Install Vagrant, Virtualbox and Git ------------------------------------ - -We currently rely on some Ubuntu-linux specific packages, this will change in the future, but for now we provide a -streamlined path to install Virtualbox with a Ubuntu 12.10 image using Vagrant. +Install Vagrant and Virtualbox +------------------------------ 1. Install virtualbox from https://www.virtualbox.org/ (or use your package manager) 2. Install vagrant from http://www.vagrantup.com/ (or use your package manager) 3. Install git if you had not installed it before, check if it is installed by running ``git`` in a terminal window -We recommend having at least about 2Gb of free disk space and 2Gb RAM (or more). Spin up your machine -------------------- -1. Fetch the docker sources +1. Fetch the docker sources (this includes the instructions for machine setup). .. code-block:: bash @@ -43,21 +39,16 @@ Spin up your machine Vagrant will: -* Download the Quantal64 base ubuntu virtual machine image from get.docker.io/ +* Download the 'official' Precise64 base ubuntu virtual machine image from vagrantup.com * Boot this image in virtualbox - -Then it will use Puppet to perform an initial setup in this machine: - -* Download & untar the most recent docker binary tarball to vagrant homedir. -* Debootstrap to /var/lib/docker/images/ubuntu. -* Install & run dockerd as service. -* Put docker in /usr/local/bin. 
-* Put latest Go toolchain in /usr/local/go. +* Add the `Docker PPA sources `_ to /etc/apt/sources.lst +* Update your sources +* Install lxc-docker You now have a Ubuntu Virtual Machine running with docker pre-installed. To access the VM and use Docker, Run ``vagrant ssh`` from the same directory as where you ran -``vagrant up``. Vagrant will make sure to connect you to the correct VM. +``vagrant up``. Vagrant will connect you to the correct VM. .. code-block:: bash @@ -69,5 +60,4 @@ Now you are in the VM, run docker docker - Continue with the :ref:`hello_world` example. From 690e1186704eff65ff72404eb2a686b2b205ee7c Mon Sep 17 00:00:00 2001 From: Thatcher Peskens Date: Mon, 22 Apr 2013 13:36:00 -0700 Subject: [PATCH 5/7] Updated gettingstarted with quicker install. --- docs/sources/gettingstarted/index.html | 34 ++++++++++++++++---------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/docs/sources/gettingstarted/index.html b/docs/sources/gettingstarted/index.html index b86e9bbdd4..1022879071 100644 --- a/docs/sources/gettingstarted/index.html +++ b/docs/sources/gettingstarted/index.html @@ -71,34 +71,42 @@

Installing on Ubuntu

+ Requirements +
    +
  • Ubuntu 12.04 (LTS) or Ubuntu 12.10
  • +
  • 64-bit Operating system
  • +
  1. -

    Install dependencies:

    +

    Add the Ubuntu PPA (Personal Package Archive) sources to your apt sources list. Copy and + paste the following lines at once.

    -
    sudo apt-get install lxc wget bsdtar curl
    -
    sudo apt-get install linux-image-extra-`uname -r`
    +
    sudo sh -c "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' >> /etc/apt/sources.list"
    -

    The linux-image-extra package is needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module.

  2. -

    Install the latest docker binary:

    +

    Update your sources. You will see a warning that GPG signatures cannot be verified.

    -
    wget http://get.docker.io/builds/$(uname -s)/$(uname -m)/docker-master.tgz
    -
    tar -xf docker-master.tgz
    +
    sudo apt-get update
  3. -

    Run your first container!

    +

    Now install it; you will see another warning that the package cannot be authenticated. Confirm the install.

    -
    cd docker-master
    -
    sudo ./docker run -i -t base /bin/bash
    +
    +
    sudo apt-get install lxc-docker
    -

    Done!

    -

    Consider adding docker to your PATH for simplicity.

  4. +
  5. +

    Run!

    + +
    +
    docker
    +
    +
  6. Continue with the Hello world example.
@@ -117,7 +125,7 @@ vagrant and an Ubuntu virtual machine.

From 4031a01af1658225344af04be0dbd4225893c242 Mon Sep 17 00:00:00 2001 From: Thatcher Peskens Date: Mon, 22 Apr 2013 18:38:42 -0700 Subject: [PATCH 6/7] Merged changes --- .mailmap | 3 + AUTHORS | 7 + Makefile | 5 +- README.md | 181 +++++++------- SPECS/data-volumes.md | 71 ++++++ archive.go | 36 +++ buildbot/README.rst | 20 ++ buildbot/Vagrantfile | 28 +++ buildbot/buildbot-cfg/buildbot-cfg.sh | 43 ++++ buildbot/buildbot-cfg/buildbot.conf | 18 ++ buildbot/buildbot-cfg/master.cfg | 46 ++++ buildbot/buildbot-cfg/post-commit | 21 ++ buildbot/buildbot.pp | 32 +++ buildbot/requirements.txt | 6 + commands.go | 118 ++++++--- commands_test.go | 41 +++- container.go | 116 ++++++--- container_test.go | 60 ++++- contrib/crashTest.go | 96 ++++++++ contrib/docker-build/README | 68 ++++++ contrib/docker-build/docker-build | 104 ++++++++ contrib/docker-build/example.changefile | 11 + contrib/install.sh | 2 +- contrib/vagrant-docker/README.md | 3 + deb/Makefile | 1 - deb/Makefile.deb | 73 ------ deb/README.md | 1 - deb/debian/changelog | 5 - deb/debian/control | 20 -- deb/debian/copyright | 209 ---------------- deb/etc/docker-dev.upstart | 10 - docker/docker.go | 58 ++++- docs/sources/examples/running_examples.rst | 21 +- docs/sources/installation/amazon.rst | 7 +- docs/sources/installation/archlinux.rst | 64 +++++ docs/sources/installation/index.rst | 1 + docs/sources/installation/ubuntulinux.rst | 17 +- docs/sources/installation/vagrant.rst | 8 +- graph.go | 32 ++- graph_test.go | 39 ++- hack/README.md | 1 + hack/fmt-check.hook | 46 ++++ image.go | 26 +- lxc_template.go | 2 +- network.go | 59 ++++- packaging/README.rst | 8 + packaging/archlinux/README.archlinux | 25 ++ packaging/debian/Makefile | 35 +++ packaging/debian/README.debian | 31 +++ packaging/debian/Vagrantfile | 22 ++ packaging/debian/changelog | 14 ++ packaging/debian/compat | 1 + packaging/debian/control | 19 ++ packaging/debian/copyright | 237 +++++++++++++++++++ packaging/debian/docker.initd | 49 ++++ {deb => 
packaging}/debian/docs | 0 packaging/debian/lxc-docker.postinst | 13 + packaging/debian/maintainer.rst | 16 ++ packaging/debian/rules | 13 + {deb => packaging}/debian/source/format | 0 packaging/ubuntu/Makefile | 62 +++++ packaging/ubuntu/README.ubuntu | 37 +++ packaging/ubuntu/Vagrantfile | 12 + packaging/ubuntu/changelog | 30 +++ {deb/debian => packaging/ubuntu}/compat | 0 packaging/ubuntu/control | 19 ++ packaging/ubuntu/copyright | 237 +++++++++++++++++++ {deb/etc => packaging/ubuntu}/docker.upstart | 4 +- packaging/ubuntu/docs | 1 + packaging/ubuntu/lxc-docker.postinst | 4 + packaging/ubuntu/lxc-docker.prerm | 4 + packaging/ubuntu/maintainer.ubuntu | 35 +++ {deb/debian => packaging/ubuntu}/rules | 0 packaging/ubuntu/source/format | 1 + rcli/tcp.go | 6 +- registry.go | 43 ++-- runtime.go | 81 ++++++- runtime_test.go | 57 ++++- state.go | 4 + sysinit.go | 11 +- utils.go | 134 ++++++++++- utils_test.go | 33 +++ 82 files changed, 2525 insertions(+), 609 deletions(-) create mode 100644 SPECS/data-volumes.md create mode 100644 buildbot/README.rst create mode 100644 buildbot/Vagrantfile create mode 100755 buildbot/buildbot-cfg/buildbot-cfg.sh create mode 100644 buildbot/buildbot-cfg/buildbot.conf create mode 100644 buildbot/buildbot-cfg/master.cfg create mode 100755 buildbot/buildbot-cfg/post-commit create mode 100644 buildbot/buildbot.pp create mode 100644 buildbot/requirements.txt create mode 100644 contrib/crashTest.go create mode 100644 contrib/docker-build/README create mode 100755 contrib/docker-build/docker-build create mode 100644 contrib/docker-build/example.changefile create mode 100644 contrib/vagrant-docker/README.md delete mode 120000 deb/Makefile delete mode 100644 deb/Makefile.deb delete mode 120000 deb/README.md delete mode 100644 deb/debian/changelog delete mode 100644 deb/debian/control delete mode 100644 deb/debian/copyright delete mode 100644 deb/etc/docker-dev.upstart create mode 100644 docs/sources/installation/archlinux.rst create mode 100644 
hack/README.md create mode 100644 hack/fmt-check.hook create mode 100644 packaging/README.rst create mode 100644 packaging/archlinux/README.archlinux create mode 100644 packaging/debian/Makefile create mode 100644 packaging/debian/README.debian create mode 100644 packaging/debian/Vagrantfile create mode 100644 packaging/debian/changelog create mode 100644 packaging/debian/compat create mode 100644 packaging/debian/control create mode 100644 packaging/debian/copyright create mode 100644 packaging/debian/docker.initd rename {deb => packaging}/debian/docs (100%) create mode 100644 packaging/debian/lxc-docker.postinst create mode 100644 packaging/debian/maintainer.rst create mode 100755 packaging/debian/rules rename {deb => packaging}/debian/source/format (100%) create mode 100644 packaging/ubuntu/Makefile create mode 100644 packaging/ubuntu/README.ubuntu create mode 100644 packaging/ubuntu/Vagrantfile create mode 100644 packaging/ubuntu/changelog rename {deb/debian => packaging/ubuntu}/compat (100%) create mode 100644 packaging/ubuntu/control create mode 100644 packaging/ubuntu/copyright rename {deb/etc => packaging/ubuntu}/docker.upstart (50%) create mode 100644 packaging/ubuntu/docs create mode 100644 packaging/ubuntu/lxc-docker.postinst create mode 100644 packaging/ubuntu/lxc-docker.prerm create mode 100644 packaging/ubuntu/maintainer.ubuntu rename {deb/debian => packaging/ubuntu}/rules (100%) create mode 100644 packaging/ubuntu/source/format diff --git a/.mailmap b/.mailmap index 2570683f85..83c18fa29c 100644 --- a/.mailmap +++ b/.mailmap @@ -14,3 +14,6 @@ Joffrey F Tim Terhorst Andy Smith + + + diff --git a/AUTHORS b/AUTHORS index fefd748422..e8979aac6b 100644 --- a/AUTHORS +++ b/AUTHORS @@ -10,6 +10,8 @@ Daniel Robinson Dominik Honnef Don Spaulding ezbercih +Flavio Castelli +Francisco Souza Frederick F. Kautz IV Guillaume J. Charmes Hunter Blanks @@ -23,10 +25,13 @@ Jérôme Petazzoni Ken Cochrane Kevin J. 
Lynagh Louis Opter +Maxim Treskin Mikhail Sobolev Nelson Chen Niall O'Higgins +Paul Hammond Piotr Bogdan +Robert Obryk Sam Alba Shawn Siefkas Silas Sewell @@ -35,4 +40,6 @@ Sridhar Ratnakumar Thatcher Peskens Tim Terhorst Troy Howard +unclejack +Victor Vieux Vivek Agarwal diff --git a/Makefile b/Makefile index a6eb613830..c3e2f7820b 100644 --- a/Makefile +++ b/Makefile @@ -23,7 +23,7 @@ DOCKER_MAIN := $(DOCKER_DIR)/docker DOCKER_BIN_RELATIVE := bin/docker DOCKER_BIN := $(CURDIR)/$(DOCKER_BIN_RELATIVE) -.PHONY: all clean test +.PHONY: all clean test hack all: $(DOCKER_BIN) @@ -49,3 +49,6 @@ test: all fmt: @gofmt -s -l -w . + +hack: + cd $(CURDIR)/buildbot && vagrant up diff --git a/README.md b/README.md index c186d9a063..13ec817e2b 100644 --- a/README.md +++ b/README.md @@ -33,123 +33,85 @@ Notable features * Interactive shell: docker can allocate a pseudo-tty and attach to the standard input of any container, for example to run a throwaway interactive shell. - - -Under the hood --------------- - -Under the hood, Docker is built on the following components: - - -* The [cgroup](http://blog.dotcloud.com/kernel-secrets-from-the-paas-garage-part-24-c) and [namespacing](http://blog.dotcloud.com/under-the-hood-linux-kernels-on-dotcloud-part) capabilities of the Linux kernel; - -* [AUFS](http://aufs.sourceforge.net/aufs.html), a powerful union filesystem with copy-on-write capabilities; - -* The [Go](http://golang.org) programming language; - -* [lxc](http://lxc.sourceforge.net/), a set of convenience scripts to simplify the creation of linux containers. - - Install instructions ================== -Building from source --------------------- - -1. Make sure you have a [Go language](http://golang.org) compiler. - - On a Debian/wheezy or Ubuntu 12.10 install the package: - - ```bash - - $ sudo apt-get install golang-go - ``` - -2. Execute ``make`` - - This command will install all necessary dependencies and build the - executable that you can find in ``bin/docker`` - -3. 
Should you like to see what's happening, run ``make`` with ``VERBOSE=1`` parameter: - - ```bash - - $ make VERBOSE=1 - ``` - -Installing on Ubuntu 12.04 and 12.10 ------------------------------------- - -1. Install dependencies: - - ```bash - sudo apt-get install lxc wget bsdtar curl - sudo apt-get install linux-image-extra-`uname -r` - ``` - - The `linux-image-extra` package is needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module. - -2. Install the latest docker binary: - - ```bash - wget http://get.docker.io/builds/$(uname -s)/$(uname -m)/docker-master.tgz - tar -xf docker-master.tgz - ``` - -3. Run your first container! - - ```bash - cd docker-master - sudo ./docker pull base - sudo ./docker run -i -t base /bin/bash - ``` - - Consider adding docker to your `PATH` for simplicity. - -Installing on other Linux distributions +Quick install on Ubuntu 12.04 and 12.10 --------------------------------------- -Right now, the officially supported distributions are: +```bash +curl get.docker.io | sh -x +``` -* Ubuntu 12.04 (precise LTS) -* Ubuntu 12.10 (quantal) +Binary installs +---------------- -Docker probably works on other distributions featuring a recent kernel, the AUFS patch, and up-to-date lxc. However this has not been tested. +Docker supports the following binary installation methods. +Note that some methods are community contributions and not yet officially supported. 
-Some streamlined (but possibly outdated) installation paths' are available from the website: http://docker.io/documentation/ +* [Ubuntu 12.04 and 12.10 (officially supported)](http://docs.docker.io/en/latest/installation/ubuntulinux/) +* [Arch Linux](http://docs.docker.io/en/latest/installation/archlinux/) +* [MacOS X (with Vagrant)](http://docs.docker.io/en/latest/installation/macos/) +* [Windows (with Vagrant)](http://docs.docker.io/en/latest/installation/windows/) +* [Amazon EC2 (with Vagrant)](http://docs.docker.io/en/latest/installation/amazon/) +Installing from source +---------------------- + +1. Make sure you have a [Go language](http://golang.org/doc/install) compiler and [git](http://git-scm.com) installed. + +2. Checkout the source code + + ```bash + git clone http://github.com/dotcloud/docker + ``` + +3. Build the docker binary + + ```bash + cd docker + make VERBOSE=1 + sudo cp ./bin/docker /usr/local/bin/docker + ``` Usage examples ============== -Running an interactive shell ----------------------------- +First run the docker daemon +--------------------------- + +All the examples assume your machine is running the docker daemon. To run the docker daemon in the background, simply type: ```bash -# Download a base image -docker pull base - -# Run an interactive shell in the base image, -# allocate a tty, attach stdin and stdout -docker run -i -t base /bin/bash +# On a production system you want this running in an init script +sudo docker -d & ``` -Detaching from the interactive shell ------------------------------------- +Now you can run docker in client mode: all commands will be forwarded to the docker daemon, so the client can run from any account. + +```bash +# Now you can run docker commands from any account. +docker help ``` -# In order to detach without killing the shell, you can use the escape sequence Ctrl-p + Ctrl-q -# Note: this works only in tty mode (run with -t option). 
+ + +Throwaway shell in a base ubuntu image +-------------------------------------- + +```bash +docker pull ubuntu:12.10 + +# Run an interactive shell, allocate a tty, attach stdin and stdout +# To detach the tty without exiting the shell, use the escape sequence Ctrl-p + Ctrl-q +docker run -i -t ubuntu:12.10 /bin/bash ``` Starting a long-running worker process -------------------------------------- ```bash -# Run docker in daemon mode -(docker -d || echo "Docker daemon already running") & - # Start a very useful long-running process -JOB=$(docker run -d base /bin/sh -c "while true; do echo Hello world; sleep 1; done") +JOB=$(docker run -d ubuntu /bin/sh -c "while true; do echo Hello world; sleep 1; done") # Collect the output of the job so far docker logs $JOB @@ -158,25 +120,32 @@ docker logs $JOB docker kill $JOB ``` - -Listing all running containers ------------------------------- +Running an irc bouncer +---------------------- ```bash -docker ps +BOUNCER_ID=$(docker run -d -p 6667 -u irc shykes/znc $USER $PASSWORD) +echo "Configure your irc client to connect to port $(docker port $BOUNCER_ID 6667) of this machine" ``` +Running Redis +------------- + +```bash +REDIS_ID=$(docker run -d -p 6379 shykes/redis redis-server) +echo "Configure your redis client to connect to port $(docker port $REDIS_ID 6379) of this machine" +``` Share your own image! --------------------- ```bash -docker pull base -CONTAINER=$(docker run -d base apt-get install -y curl) +CONTAINER=$(docker run -d ubuntu:12.10 apt-get install -y curl) docker commit -m "Installed curl" $CONTAINER $USER/betterbase docker push $USER/betterbase ``` +A list of publicly available images is [available here](https://github.com/dotcloud/docker/wiki/Public-docker-images). 
Expose a service on a TCP port ------------------------------ @@ -197,6 +166,22 @@ echo hello world | nc $IP $PORT echo "Daemon received: $(docker logs $JOB)" ``` +Under the hood +-------------- + +Under the hood, Docker is built on the following components: + + +* The [cgroup](http://blog.dotcloud.com/kernel-secrets-from-the-paas-garage-part-24-c) and [namespacing](http://blog.dotcloud.com/under-the-hood-linux-kernels-on-dotcloud-part) capabilities of the Linux kernel; + +* [AUFS](http://aufs.sourceforge.net/aufs.html), a powerful union filesystem with copy-on-write capabilities; + +* The [Go](http://golang.org) programming language; + +* [lxc](http://lxc.sourceforge.net/), a set of convenience scripts to simplify the creation of linux containers. + + + Contributing to Docker ====================== diff --git a/SPECS/data-volumes.md b/SPECS/data-volumes.md new file mode 100644 index 0000000000..d800656afc --- /dev/null +++ b/SPECS/data-volumes.md @@ -0,0 +1,71 @@ + +## Spec for data volumes + +Spec owner: Solomon Hykes + +Data volumes (issue #111) are a much-requested feature which trigger much discussion and debate. Below is the current authoritative spec for implementing data volumes. +This spec will be deprecated once the feature is fully implemented. + +Discussion, requests, trolls, demands, offerings, threats and other forms of supplications concerning this spec should be addressed to Solomon here: https://github.com/dotcloud/docker/issues/111 + + +### 1. Creating data volumes + +At container creation, parts of a container's filesystem can be mounted as separate data volumes. Volumes are defined with the -v flag. + +For example: + +```bash +$ docker run -v /var/lib/postgres -v /var/log postgres /usr/bin/postgres +``` + +In this example, a new container is created from the 'postgres' image. At the same time, docker creates 2 new data volumes: one will be mapped to the container at /var/lib/postgres, the other at /var/log. 
+
+2 important notes:
+
+1) Volumes don't have top-level names. At no point does the user provide a name, or is a name given to him. Volumes are identified by the path at which they are mounted inside their container.
+
+2) The user doesn't choose the source of the volume. Docker only mounts volumes it created itself, in the same way that it only runs containers that it created itself. That is by design.
+
+
+### 2. Sharing data volumes
+
+Instead of creating its own volumes, a container can share another container's volumes. For example:
+
+```bash
+$ docker run --volumes-from $OTHER_CONTAINER_ID postgres /usr/local/bin/postgres-backup
+```
+
+In this example, a new container is created from the 'postgres' image. At the same time, docker will *re-use* the 2 data volumes created in the previous example. One volume will be mounted on the /var/lib/postgres of *both* containers, and the other will be mounted on the /var/log of both containers.
+
+### 3. Under the hood
+
+Docker stores volumes in /var/lib/docker/volumes. Each volume receives a globally unique ID at creation, and is stored at /var/lib/docker/volumes/ID.
+
+At creation, volumes are attached to a single container - the source of truth for this mapping will be the container's configuration.
+
+Mounting a volume consists of calling "mount --bind" from the volume's directory to the appropriate sub-directory of the container mountpoint. This may be done by Docker itself, or farmed out to lxc (which supports mount-binding) if possible.
+
+
+### 4. Backups, transfers and other volume operations
+
+Volumes sometimes need to be backed up, transferred between hosts, synchronized, etc. These operations typically are application-specific or site-specific, eg. rsync vs. S3 upload vs. replication vs...
+
+Rather than attempting to implement all these scenarios directly, Docker will allow for custom implementations using an extension mechanism.
+
+### 5. 
Custom volume handlers + +Docker allows for arbitrary code to be executed against a container's volumes, to implement any custom action: backup, transfer, synchronization across hosts, etc. + +Here's an example: + +```bash +$ DB=$(docker run -d -v /var/lib/postgres -v /var/log postgres /usr/bin/postgres) + +$ BACKUP_JOB=$(docker run -d --volumes-from $DB shykes/backuper /usr/local/bin/backup-postgres --s3creds=$S3CREDS) + +$ docker wait $BACKUP_JOB +``` + +Congratulations, you just implemented a custom volume handler, using Docker's built-in ability to 1) execute arbitrary code and 2) share volumes between containers. + diff --git a/archive.go b/archive.go index d09d3d6b97..8a011eb6e1 100644 --- a/archive.go +++ b/archive.go @@ -4,6 +4,7 @@ import ( "errors" "io" "io/ioutil" + "os" "os/exec" ) @@ -86,3 +87,38 @@ func CmdStream(cmd *exec.Cmd) (io.Reader, error) { } return pipeR, nil } + +// NewTempArchive reads the content of src into a temporary file, and returns the contents +// of that file as an archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. 
+func NewTempArchive(src Archive, dir string) (*TempArchive, error) { + f, err := ioutil.TempFile(dir, "") + if err != nil { + return nil, err + } + if _, err := io.Copy(f, src); err != nil { + return nil, err + } + if _, err := f.Seek(0, 0); err != nil { + return nil, err + } + st, err := f.Stat() + if err != nil { + return nil, err + } + size := st.Size() + return &TempArchive{f, size}, nil +} + +type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience +} + +func (archive *TempArchive) Read(data []byte) (int, error) { + n, err := archive.File.Read(data) + if err != nil { + os.Remove(archive.File.Name()) + } + return n, err +} diff --git a/buildbot/README.rst b/buildbot/README.rst new file mode 100644 index 0000000000..a52b9769ef --- /dev/null +++ b/buildbot/README.rst @@ -0,0 +1,20 @@ +Buildbot +======== + +Buildbot is a continuous integration system designed to automate the +build/test cycle. By automatically rebuilding and testing the tree each time +something has changed, build problems are pinpointed quickly, before other +developers are inconvenienced by the failure. + +When running 'make hack' at the docker root directory, it spawns a virtual +machine in the background running a buildbot instance and adds a git +post-commit hook that automatically run docker tests for you. 
+ +You can check your buildbot instance at http://192.168.33.21:8010/waterfall + + +Buildbot dependencies +--------------------- + +vagrant, virtualbox packages and python package requests + diff --git a/buildbot/Vagrantfile b/buildbot/Vagrantfile new file mode 100644 index 0000000000..ea027f0666 --- /dev/null +++ b/buildbot/Vagrantfile @@ -0,0 +1,28 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +$BUILDBOT_IP = '192.168.33.21' + +def v10(config) + config.vm.box = "quantal64_3.5.0-25" + config.vm.box_url = "http://get.docker.io/vbox/ubuntu/12.10/quantal64_3.5.0-25.box" + config.vm.share_folder 'v-data', '/data/docker', File.dirname(__FILE__) + '/..' + config.vm.network :hostonly, $BUILDBOT_IP + + # Ensure puppet is installed on the instance + config.vm.provision :shell, :inline => 'apt-get -qq update; apt-get install -y puppet' + + config.vm.provision :puppet do |puppet| + puppet.manifests_path = '.' + puppet.manifest_file = 'buildbot.pp' + puppet.options = ['--templatedir','.'] + end +end + +Vagrant::VERSION < '1.1.0' and Vagrant::Config.run do |config| + v10(config) +end + +Vagrant::VERSION >= '1.1.0' and Vagrant.configure('1') do |config| + v10(config) +end diff --git a/buildbot/buildbot-cfg/buildbot-cfg.sh b/buildbot/buildbot-cfg/buildbot-cfg.sh new file mode 100755 index 0000000000..5e4e7432fd --- /dev/null +++ b/buildbot/buildbot-cfg/buildbot-cfg.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +# Auto setup of buildbot configuration. 
Package installation is being done
+# on buildbot.pp
+# Dependencies: buildbot, buildbot-slave, supervisor
+
+SLAVE_NAME='buildworker'
+SLAVE_SOCKET='localhost:9989'
+BUILDBOT_PWD='pass-docker'
+USER='vagrant'
+ROOT_PATH='/data/buildbot'
+DOCKER_PATH='/data/docker'
+BUILDBOT_CFG="$DOCKER_PATH/buildbot/buildbot-cfg"
+IP=$(grep BUILDBOT_IP /data/docker/buildbot/Vagrantfile | awk -F "'" '{ print $2; }')
+
+function run { su $USER -c "$1"; }
+
+export PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin
+
+# Exit if buildbot has already been installed
+[ -d "$ROOT_PATH" ] && exit 0
+
+# Setup buildbot
+run "mkdir -p ${ROOT_PATH}"
+cd ${ROOT_PATH}
+run "buildbot create-master master"
+run "cp $BUILDBOT_CFG/master.cfg master"
+run "sed -i 's/localhost/$IP/' master/master.cfg"
+run "buildslave create-slave slave $SLAVE_SOCKET $SLAVE_NAME $BUILDBOT_PWD"
+
+# Allow buildbot subprocesses (docker tests) to properly run in containers,
+# in particular with docker -u
+run "sed -i 's/^umask = None/umask = 000/' ${ROOT_PATH}/slave/buildbot.tac"
+
+# Setup supervisor
+cp $BUILDBOT_CFG/buildbot.conf /etc/supervisor/conf.d/buildbot.conf
+sed -i "s/^chmod=0700.*0700./chmod=0770\nchown=root:$USER/" /etc/supervisor/supervisord.conf
+kill -HUP `pgrep -f "/usr/bin/python /usr/bin/supervisord"`
+
+# Add git hook
+cp $BUILDBOT_CFG/post-commit $DOCKER_PATH/.git/hooks
+sed -i "s/localhost/$IP/" $DOCKER_PATH/.git/hooks/post-commit
+
diff --git a/buildbot/buildbot-cfg/buildbot.conf b/buildbot/buildbot-cfg/buildbot.conf
new file mode 100644
index 0000000000..b162f4e7c7
--- /dev/null
+++ b/buildbot/buildbot-cfg/buildbot.conf
@@ -0,0 +1,18 @@
+[program:buildmaster]
+command=su vagrant -c "buildbot start master"
+directory=/data/buildbot
+chown= root:root
+redirect_stderr=true
+stdout_logfile=/var/log/supervisor/buildbot-master.log
+stderr_logfile=/var/log/supervisor/buildbot-master.log
+
+[program:buildworker]
+command=buildslave start slave
+directory=/data/buildbot
+chown= root:root
+redirect_stderr=true +stdout_logfile=/var/log/supervisor/buildbot-slave.log +stderr_logfile=/var/log/supervisor/buildbot-slave.log + +[group:buildbot] +programs=buildmaster,buildworker diff --git a/buildbot/buildbot-cfg/master.cfg b/buildbot/buildbot-cfg/master.cfg new file mode 100644 index 0000000000..c786e418ed --- /dev/null +++ b/buildbot/buildbot-cfg/master.cfg @@ -0,0 +1,46 @@ +import os +from buildbot.buildslave import BuildSlave +from buildbot.schedulers.forcesched import ForceScheduler +from buildbot.config import BuilderConfig +from buildbot.process.factory import BuildFactory +from buildbot.steps.shell import ShellCommand +from buildbot.status import html +from buildbot.status.web import authz, auth + +PORT_WEB = 8010 # Buildbot webserver port +PORT_MASTER = 9989 # Port where buildbot master listen buildworkers +TEST_USER = 'buildbot' # Credential to authenticate build triggers +TEST_PWD = 'docker' # Credential to authenticate build triggers +BUILDER_NAME = 'docker' +BUILDPASSWORD = 'pass-docker' # Credential to authenticate buildworkers +DOCKER_PATH = '/data/docker' + + +c = BuildmasterConfig = {} + +c['title'] = "Docker" +c['titleURL'] = "waterfall" +c['buildbotURL'] = "http://localhost:{0}/".format(PORT_WEB) +c['db'] = {'db_url':"sqlite:///state.sqlite"} +c['slaves'] = [BuildSlave('buildworker', BUILDPASSWORD)] +c['slavePortnum'] = PORT_MASTER + +c['schedulers'] = [ForceScheduler(name='trigger',builderNames=[BUILDER_NAME])] + +# Docker test command +test_cmd = """( + cd {0}/..; rm -rf docker-tmp; git clone docker docker-tmp; + cd docker-tmp; make test; exit_status=$?; + cd ..; rm -rf docker-tmp; exit $exit_status)""".format(DOCKER_PATH) + +# Builder +factory = BuildFactory() +factory.addStep(ShellCommand(description='Docker',logEnviron=False, + usePTY=True,command=test_cmd)) +c['builders'] = [BuilderConfig(name=BUILDER_NAME,slavenames=['buildworker'], + factory=factory)] + +# Status +authz_cfg=authz.Authz(auth=auth.BasicAuth([(TEST_USER,TEST_PWD)]), 
+ forceBuild='auth') +c['status'] = [html.WebStatus(http_port=PORT_WEB, authz=authz_cfg)] diff --git a/buildbot/buildbot-cfg/post-commit b/buildbot/buildbot-cfg/post-commit new file mode 100755 index 0000000000..0173fe504f --- /dev/null +++ b/buildbot/buildbot-cfg/post-commit @@ -0,0 +1,21 @@ +#!/usr/bin/env python + +'''Trigger buildbot docker test build + + post-commit git hook designed to automatically trigger buildbot on + the provided vagrant docker VM.''' + +import requests + +USERNAME = 'buildbot' +PASSWORD = 'docker' +BASE_URL = 'http://localhost:8010' +path = lambda s: BASE_URL + '/' + s + +try: + session = requests.session() + session.post(path('login'),data={'username':USERNAME,'passwd':PASSWORD}) + session.post(path('builders/docker/force'), + data={'forcescheduler':'trigger','reason':'Test commit'}) +except: + pass diff --git a/buildbot/buildbot.pp b/buildbot/buildbot.pp new file mode 100644 index 0000000000..8109cdc2a0 --- /dev/null +++ b/buildbot/buildbot.pp @@ -0,0 +1,32 @@ +node default { + $USER = 'vagrant' + $ROOT_PATH = '/data/buildbot' + $DOCKER_PATH = '/data/docker' + + exec {'apt_update': command => '/usr/bin/apt-get update' } + Package { require => Exec['apt_update'] } + group {'puppet': ensure => 'present'} + + # Install dependencies + Package { ensure => 'installed' } + package { ['python-dev','python-pip','supervisor','lxc','bsdtar','git','golang']: } + + file{[ '/data' ]: + owner => $USER, group => $USER, ensure => 'directory' } + + file {'/var/tmp/requirements.txt': + content => template('requirements.txt') } + + exec {'requirements': + require => [ Package['python-dev'], Package['python-pip'], + File['/var/tmp/requirements.txt'] ], + cwd => '/var/tmp', + command => "/bin/sh -c '(/usr/bin/pip install -r requirements.txt; + rm /var/tmp/requirements.txt)'" } + + exec {'buildbot-cfg-sh': + require => [ Package['supervisor'], Exec['requirements']], + path => '/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin', + cwd => '/data', + command => 
"$DOCKER_PATH/buildbot/buildbot-cfg/buildbot-cfg.sh" } +} diff --git a/buildbot/requirements.txt b/buildbot/requirements.txt new file mode 100644 index 0000000000..0e451b017d --- /dev/null +++ b/buildbot/requirements.txt @@ -0,0 +1,6 @@ +sqlalchemy<=0.7.9 +sqlalchemy-migrate>=0.7.2 +buildbot==0.8.7p1 +buildbot_slave==0.8.7p1 +nose==1.2.1 +requests==1.1.0 diff --git a/commands.go b/commands.go index 20d6b45c97..b0440a9766 100644 --- a/commands.go +++ b/commands.go @@ -18,9 +18,11 @@ import ( "unicode" ) -const VERSION = "0.1.4" +const VERSION = "0.1.7" -var GIT_COMMIT string +var ( + GIT_COMMIT string +) func (srv *Server) Name() string { return "docker" @@ -79,7 +81,7 @@ func (srv *Server) CmdLogin(stdin io.ReadCloser, stdout rcli.DockerConn, args .. n, err := stdin.Read(char) if n > 0 { if char[0] == '\r' || char[0] == '\n' { - stdout.Write([]byte{'\n'}) + stdout.Write([]byte{'\r', '\n'}) break } else if char[0] == 127 || char[0] == '\b' { if i > 0 { @@ -99,7 +101,7 @@ func (srv *Server) CmdLogin(stdin io.ReadCloser, stdout rcli.DockerConn, args .. } if err != nil { if err != io.EOF { - fmt.Fprintf(stdout, "Read error: %v\n", err) + fmt.Fprintf(stdout, "Read error: %v\r\n", err) } break } @@ -149,7 +151,7 @@ func (srv *Server) CmdLogin(stdin io.ReadCloser, stdout rcli.DockerConn, args .. newAuthConfig := auth.NewAuthConfig(username, password, email, srv.runtime.root) status, err := auth.Login(newAuthConfig) if err != nil { - fmt.Fprintln(stdout, "Error:", err) + fmt.Fprintf(stdout, "Error: %s\r\n", err) } else { srv.runtime.authConfig = newAuthConfig } @@ -161,7 +163,7 @@ func (srv *Server) CmdLogin(stdin io.ReadCloser, stdout rcli.DockerConn, args .. 
// 'docker wait': block until a container stops func (srv *Server) CmdWait(stdin io.ReadCloser, stdout io.Writer, args ...string) error { - cmd := rcli.Subcmd(stdout, "wait", "[OPTIONS] NAME", "Block until a container stops, then print its exit code.") + cmd := rcli.Subcmd(stdout, "wait", "CONTAINER [CONTAINER...]", "Block until a container stops, then print its exit code.") if err := cmd.Parse(args); err != nil { return nil } @@ -181,8 +183,15 @@ func (srv *Server) CmdWait(stdin io.ReadCloser, stdout io.Writer, args ...string // 'docker version': show version information func (srv *Server) CmdVersion(stdin io.ReadCloser, stdout io.Writer, args ...string) error { - fmt.Fprintf(stdout, "Version:%s\n", VERSION) - fmt.Fprintf(stdout, "Git Commit:%s\n", GIT_COMMIT) + fmt.Fprintf(stdout, "Version: %s\n", VERSION) + fmt.Fprintf(stdout, "Git Commit: %s\n", GIT_COMMIT) + fmt.Fprintf(stdout, "Kernel: %s\n", srv.runtime.kernelVersion) + if !srv.runtime.capabilities.MemoryLimit { + fmt.Fprintf(stdout, "WARNING: No memory limit support\n") + } + if !srv.runtime.capabilities.SwapLimit { + fmt.Fprintf(stdout, "WARNING: No swap limit support\n") + } return nil } @@ -217,7 +226,8 @@ func (srv *Server) CmdInfo(stdin io.ReadCloser, stdout io.Writer, args ...string } func (srv *Server) CmdStop(stdin io.ReadCloser, stdout io.Writer, args ...string) error { - cmd := rcli.Subcmd(stdout, "stop", "[OPTIONS] NAME", "Stop a running container") + cmd := rcli.Subcmd(stdout, "stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container") + nSeconds := cmd.Int("t", 10, "wait t seconds before killing the container") if err := cmd.Parse(args); err != nil { return nil } @@ -227,7 +237,7 @@ func (srv *Server) CmdStop(stdin io.ReadCloser, stdout io.Writer, args ...string } for _, name := range cmd.Args() { if container := srv.runtime.Get(name); container != nil { - if err := container.Stop(); err != nil { + if err := container.Stop(*nSeconds); err != nil { return err } fmt.Fprintln(stdout, 
container.ShortId()) @@ -239,7 +249,8 @@ func (srv *Server) CmdStop(stdin io.ReadCloser, stdout io.Writer, args ...string } func (srv *Server) CmdRestart(stdin io.ReadCloser, stdout io.Writer, args ...string) error { - cmd := rcli.Subcmd(stdout, "restart", "[OPTIONS] NAME", "Restart a running container") + cmd := rcli.Subcmd(stdout, "restart", "CONTAINER [CONTAINER...]", "Restart a running container") + nSeconds := cmd.Int("t", 10, "wait t seconds before killing the container") if err := cmd.Parse(args); err != nil { return nil } @@ -249,7 +260,7 @@ func (srv *Server) CmdRestart(stdin io.ReadCloser, stdout io.Writer, args ...str } for _, name := range cmd.Args() { if container := srv.runtime.Get(name); container != nil { - if err := container.Restart(); err != nil { + if err := container.Restart(*nSeconds); err != nil { return err } fmt.Fprintln(stdout, container.ShortId()) @@ -261,7 +272,7 @@ func (srv *Server) CmdRestart(stdin io.ReadCloser, stdout io.Writer, args ...str } func (srv *Server) CmdStart(stdin io.ReadCloser, stdout io.Writer, args ...string) error { - cmd := rcli.Subcmd(stdout, "start", "[OPTIONS] NAME", "Start a stopped container") + cmd := rcli.Subcmd(stdout, "start", "CONTAINER [CONTAINER...]", "Start a stopped container") if err := cmd.Parse(args); err != nil { return nil } @@ -283,7 +294,7 @@ func (srv *Server) CmdStart(stdin io.ReadCloser, stdout io.Writer, args ...strin } func (srv *Server) CmdInspect(stdin io.ReadCloser, stdout io.Writer, args ...string) error { - cmd := rcli.Subcmd(stdout, "inspect", "[OPTIONS] CONTAINER", "Return low-level information on a container") + cmd := rcli.Subcmd(stdout, "inspect", "CONTAINER", "Return low-level information on a container") if err := cmd.Parse(args); err != nil { return nil } @@ -318,7 +329,7 @@ func (srv *Server) CmdInspect(stdin io.ReadCloser, stdout io.Writer, args ...str } func (srv *Server) CmdPort(stdin io.ReadCloser, stdout io.Writer, args ...string) error { - cmd := rcli.Subcmd(stdout, 
"port", "[OPTIONS] CONTAINER PRIVATE_PORT", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT") + cmd := rcli.Subcmd(stdout, "port", "CONTAINER PRIVATE_PORT", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT") if err := cmd.Parse(args); err != nil { return nil } @@ -340,9 +351,9 @@ func (srv *Server) CmdPort(stdin io.ReadCloser, stdout io.Writer, args ...string return nil } -// 'docker rmi NAME' removes all images with the name NAME +// 'docker rmi IMAGE' removes all images with the name IMAGE func (srv *Server) CmdRmi(stdin io.ReadCloser, stdout io.Writer, args ...string) (err error) { - cmd := rcli.Subcmd(stdout, "rmimage", "[OPTIONS] IMAGE", "Remove an image") + cmd := rcli.Subcmd(stdout, "rmimage", "IMAGE [IMAGE...]", "Remove an image") if err := cmd.Parse(args); err != nil { return nil } @@ -351,7 +362,11 @@ func (srv *Server) CmdRmi(stdin io.ReadCloser, stdout io.Writer, args ...string) return nil } for _, name := range cmd.Args() { - if err := srv.runtime.graph.Delete(name); err != nil { + img, err := srv.runtime.repositories.LookupImage(name) + if err != nil { + return err + } + if err := srv.runtime.graph.Delete(img.Id); err != nil { return err } } @@ -359,7 +374,7 @@ func (srv *Server) CmdRmi(stdin io.ReadCloser, stdout io.Writer, args ...string) } func (srv *Server) CmdHistory(stdin io.ReadCloser, stdout io.Writer, args ...string) error { - cmd := rcli.Subcmd(stdout, "history", "[OPTIONS] IMAGE", "Show the history of an image") + cmd := rcli.Subcmd(stdout, "history", "IMAGE", "Show the history of an image") if err := cmd.Parse(args); err != nil { return nil } @@ -385,10 +400,14 @@ func (srv *Server) CmdHistory(stdin io.ReadCloser, stdout io.Writer, args ...str } func (srv *Server) CmdRm(stdin io.ReadCloser, stdout io.Writer, args ...string) error { - cmd := rcli.Subcmd(stdout, "rm", "[OPTIONS] CONTAINER", "Remove a container") + cmd := rcli.Subcmd(stdout, "rm", "CONTAINER [CONTAINER...]", "Remove a container") if err := 
cmd.Parse(args); err != nil { return nil } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } for _, name := range cmd.Args() { container := srv.runtime.Get(name) if container == nil { @@ -403,10 +422,14 @@ func (srv *Server) CmdRm(stdin io.ReadCloser, stdout io.Writer, args ...string) // 'docker kill NAME' kills a running container func (srv *Server) CmdKill(stdin io.ReadCloser, stdout io.Writer, args ...string) error { - cmd := rcli.Subcmd(stdout, "kill", "[OPTIONS] CONTAINER [CONTAINER...]", "Kill a running container") + cmd := rcli.Subcmd(stdout, "kill", "CONTAINER [CONTAINER...]", "Kill a running container") if err := cmd.Parse(args); err != nil { return nil } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } for _, name := range cmd.Args() { container := srv.runtime.Get(name) if container == nil { @@ -421,17 +444,19 @@ func (srv *Server) CmdKill(stdin io.ReadCloser, stdout io.Writer, args ...string func (srv *Server) CmdImport(stdin io.ReadCloser, stdout rcli.DockerConn, args ...string) error { stdout.Flush() - cmd := rcli.Subcmd(stdout, "import", "[OPTIONS] URL|- [REPOSITORY [TAG]]", "Create a new filesystem image from the contents of a tarball") + cmd := rcli.Subcmd(stdout, "import", "URL|- [REPOSITORY [TAG]]", "Create a new filesystem image from the contents of a tarball") var archive io.Reader var resp *http.Response if err := cmd.Parse(args); err != nil { return nil } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } src := cmd.Arg(0) - if src == "" { - return fmt.Errorf("Not enough arguments") - } else if src == "-" { + if src == "-" { archive = stdin } else { u, err := url.Parse(src) @@ -450,9 +475,9 @@ func (srv *Server) CmdImport(stdin io.ReadCloser, stdout rcli.DockerConn, args . 
if err != nil { return err } - archive = ProgressReader(resp.Body, int(resp.ContentLength), stdout) + archive = ProgressReader(resp.Body, int(resp.ContentLength), stdout, "Importing %v/%v (%v)") } - img, err := srv.runtime.graph.Create(archive, nil, "Imported from "+src) + img, err := srv.runtime.graph.Create(archive, nil, "Imported from "+src, "") if err != nil { return err } @@ -569,7 +594,7 @@ func (srv *Server) CmdImages(stdin io.ReadCloser, stdout io.Writer, args ...stri } w := tabwriter.NewWriter(stdout, 20, 1, 3, ' ', 0) if !*quiet { - fmt.Fprintln(w, "REPOSITORY\tTAG\tID\tCREATED\tPARENT") + fmt.Fprintln(w, "REPOSITORY\tTAG\tID\tCREATED") } var allImages map[string]*Image var err error @@ -598,7 +623,6 @@ func (srv *Server) CmdImages(stdin io.ReadCloser, stdout io.Writer, args ...stri /* TAG */ tag, /* ID */ TruncateId(id), /* CREATED */ HumanDuration(time.Now().Sub(image.Created)) + " ago", - /* PARENT */ srv.runtime.repositories.ImageName(image.Parent), } { if idx == 0 { w.Write([]byte(field)) @@ -621,7 +645,6 @@ func (srv *Server) CmdImages(stdin io.ReadCloser, stdout io.Writer, args ...stri /* TAG */ "", /* ID */ TruncateId(id), /* CREATED */ HumanDuration(time.Now().Sub(image.Created)) + " ago", - /* PARENT */ srv.runtime.repositories.ImageName(image.Parent), } { if idx == 0 { w.Write([]byte(field)) @@ -647,17 +670,25 @@ func (srv *Server) CmdPs(stdin io.ReadCloser, stdout io.Writer, args ...string) quiet := cmd.Bool("q", false, "Only display numeric IDs") flAll := cmd.Bool("a", false, "Show all containers. 
Only running containers are shown by default.") flFull := cmd.Bool("notrunc", false, "Don't truncate output") + latest := cmd.Bool("l", false, "Show only the latest created container, include non-running ones.") + nLast := cmd.Int("n", -1, "Show n last created containers, include non-running ones.") if err := cmd.Parse(args); err != nil { return nil } + if *nLast == -1 && *latest { + *nLast = 1 + } w := tabwriter.NewWriter(stdout, 12, 1, 3, ' ', 0) if !*quiet { - fmt.Fprintln(w, "ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tCOMMENT") + fmt.Fprintln(w, "ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tCOMMENT\tPORTS") } - for _, container := range srv.runtime.List() { - if !container.State.Running && !*flAll { + for i, container := range srv.runtime.List() { + if !container.State.Running && !*flAll && *nLast == -1 { continue } + if i == *nLast { + break + } if !*quiet { command := fmt.Sprintf("%s %s", container.Path, strings.Join(container.Args, " ")) if !*flFull { @@ -670,6 +701,7 @@ func (srv *Server) CmdPs(stdin io.ReadCloser, stdout io.Writer, args ...string) /* CREATED */ HumanDuration(time.Now().Sub(container.Created)) + " ago", /* STATUS */ container.State.String(), /* COMMENT */ "", + /* PORTS */ container.NetworkSettings.PortMappingHuman(), } { if idx == 0 { w.Write([]byte(field)) @@ -693,6 +725,7 @@ func (srv *Server) CmdCommit(stdin io.ReadCloser, stdout io.Writer, args ...stri "commit", "[OPTIONS] CONTAINER [REPOSITORY [TAG]]", "Create a new image from a container's changes") flComment := cmd.String("m", "", "Commit message") + flAuthor := cmd.String("author", "", "Author (eg. 
\"John Hannibal Smith \"") if err := cmd.Parse(args); err != nil { return nil } @@ -701,7 +734,7 @@ func (srv *Server) CmdCommit(stdin io.ReadCloser, stdout io.Writer, args ...stri cmd.Usage() return nil } - img, err := srv.runtime.Commit(containerName, repository, tag, *flComment) + img, err := srv.runtime.Commit(containerName, repository, tag, *flComment, *flAuthor) if err != nil { return err } @@ -733,13 +766,14 @@ func (srv *Server) CmdExport(stdin io.ReadCloser, stdout io.Writer, args ...stri func (srv *Server) CmdDiff(stdin io.ReadCloser, stdout io.Writer, args ...string) error { cmd := rcli.Subcmd(stdout, - "diff", "CONTAINER [OPTIONS]", + "diff", "CONTAINER", "Inspect changes on a container's filesystem") if err := cmd.Parse(args); err != nil { return nil } if cmd.NArg() < 1 { - return fmt.Errorf("Not enough arguments") + cmd.Usage() + return nil } if container := srv.runtime.Get(cmd.Arg(0)); container == nil { return fmt.Errorf("No such container") @@ -756,7 +790,7 @@ func (srv *Server) CmdDiff(stdin io.ReadCloser, stdout io.Writer, args ...string } func (srv *Server) CmdLogs(stdin io.ReadCloser, stdout io.Writer, args ...string) error { - cmd := rcli.Subcmd(stdout, "logs", "[OPTIONS] CONTAINER", "Fetch the logs of a container") + cmd := rcli.Subcmd(stdout, "logs", "CONTAINER", "Fetch the logs of a container") if err := cmd.Parse(args); err != nil { return nil } @@ -879,7 +913,7 @@ func (srv *Server) CmdTag(stdin io.ReadCloser, stdout io.Writer, args ...string) } func (srv *Server) CmdRun(stdin io.ReadCloser, stdout rcli.DockerConn, args ...string) error { - config, err := ParseRun(args, stdout) + config, err := ParseRun(args, stdout, srv.runtime.capabilities) if err != nil { return err } @@ -904,7 +938,7 @@ func (srv *Server) CmdRun(stdin io.ReadCloser, stdout rcli.DockerConn, args ...s if err != nil { // If container not found, try to pull it if srv.runtime.graph.IsNotExist(err) { - fmt.Fprintf(stdout, "Image %s not found, trying to pull it from 
registry.\n", config.Image) + fmt.Fprintf(stdout, "Image %s not found, trying to pull it from registry.\r\n", config.Image) if err = srv.CmdPull(stdin, stdout, config.Image); err != nil { return err } @@ -946,6 +980,12 @@ func (srv *Server) CmdRun(stdin io.ReadCloser, stdout rcli.DockerConn, args ...s Debugf("Waiting for attach to return\n") <-attachErr // Expecting I/O pipe error, discarding + + // If we are in stdinonce mode, wait for the process to end + // otherwise, simply return + if config.StdinOnce && !config.Tty { + container.Wait() + } return nil } diff --git a/commands_test.go b/commands_test.go index 30e2579d20..a64b4f4dc7 100644 --- a/commands_test.go +++ b/commands_test.go @@ -59,6 +59,20 @@ func assertPipe(input, output string, r io.Reader, w io.Writer, count int) error return nil } +func cmdWait(srv *Server, container *Container) error { + stdout, stdoutPipe := io.Pipe() + + go func() { + srv.CmdWait(nil, stdoutPipe, container.Id) + }() + + if _, err := bufio.NewReader(stdout).ReadString('\n'); err != nil { + return err + } + // Cleanup pipes + return closeWrap(stdout, stdoutPipe) +} + // TestRunHostname checks that 'docker run -h' correctly sets a custom hostname func TestRunHostname(t *testing.T) { runtime, err := newTestRuntime() @@ -89,7 +103,9 @@ func TestRunHostname(t *testing.T) { setTimeout(t, "CmdRun timed out", 2*time.Second, func() { <-c + cmdWait(srv, srv.runtime.List()[0]) }) + } func TestRunExit(t *testing.T) { @@ -129,6 +145,7 @@ func TestRunExit(t *testing.T) { // as the process exited, CmdRun must finish and unblock. 
Wait for it setTimeout(t, "Waiting for CmdRun timed out", 2*time.Second, func() { <-c1 + cmdWait(srv, container) }) // Make sure that the client has been disconnected @@ -211,6 +228,21 @@ func TestRunDisconnectTty(t *testing.T) { close(c1) }() + setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() { + for { + // Client disconnect after run -i should keep stdin out in TTY mode + l := runtime.List() + if len(l) == 1 && l[0].State.Running { + break + } + + time.Sleep(10 * time.Millisecond) + } + }) + + // Client disconnect after run -i should keep stdin out in TTY mode + container := runtime.List()[0] + setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() { if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil { t.Fatal(err) @@ -222,14 +254,9 @@ func TestRunDisconnectTty(t *testing.T) { t.Fatal(err) } - // as the pipes are close, we expect the process to die, - // therefore CmdRun to unblock. Wait for CmdRun - setTimeout(t, "Waiting for CmdRun timed out", 2*time.Second, func() { - <-c1 - }) + // In tty mode, we expect the process to stay alive even after client's stdin closes. 
+ // Do not wait for run to finish - // Client disconnect after run -i should keep stdin out in TTY mode - container := runtime.List()[0] // Give some time to monitor to do his thing container.WaitTimeout(500 * time.Millisecond) if !container.State.Running { diff --git a/container.go b/container.go index f180c7559b..c2c6fddd40 100644 --- a/container.go +++ b/container.go @@ -11,7 +11,9 @@ import ( "os" "os/exec" "path" + "sort" "strconv" + "strings" "syscall" "time" ) @@ -33,13 +35,14 @@ type Container struct { network *NetworkInterface NetworkSettings *NetworkSettings - SysInitPath string - cmd *exec.Cmd - stdout *writeBroadcaster - stderr *writeBroadcaster - stdin io.ReadCloser - stdinPipe io.WriteCloser + SysInitPath string + ResolvConfPath string + cmd *exec.Cmd + stdout *writeBroadcaster + stderr *writeBroadcaster + stdin io.ReadCloser + stdinPipe io.WriteCloser ptyMaster io.Closer runtime *Runtime @@ -61,10 +64,11 @@ type Config struct { StdinOnce bool // If true, close stdin after the 1 attached client disconnects. Env []string Cmd []string + Dns []string Image string // Name of the image as it was passed by the operator (eg. could be symbolic) } -func ParseRun(args []string, stdout io.Writer) (*Config, error) { +func ParseRun(args []string, stdout io.Writer, capabilities *Capabilities) (*Config, error) { cmd := rcli.Subcmd(stdout, "run", "[OPTIONS] IMAGE COMMAND [ARG...]", "Run a command in a new container") if len(args) > 0 && args[0] != "--help" { cmd.SetOutput(ioutil.Discard) @@ -79,12 +83,20 @@ func ParseRun(args []string, stdout io.Writer) (*Config, error) { flTty := cmd.Bool("t", false, "Allocate a pseudo-tty") flMemory := cmd.Int64("m", 0, "Memory limit (in bytes)") + if *flMemory > 0 && !capabilities.MemoryLimit { + fmt.Fprintf(stdout, "WARNING: Your kernel does not support memory limit capabilities. 
Limitation discarded.\n") + *flMemory = 0 + } + var flPorts ListOpts cmd.Var(&flPorts, "p", "Expose a container's port to the host (use 'docker port' to see the actual mapping)") var flEnv ListOpts cmd.Var(&flEnv, "e", "Set environment variables") + var flDns ListOpts + cmd.Var(&flDns, "dns", "Set custom dns servers") + if err := cmd.Parse(args); err != nil { return nil, err } @@ -122,8 +134,15 @@ func ParseRun(args []string, stdout io.Writer) (*Config, error) { AttachStderr: flAttach.Get("stderr"), Env: flEnv, Cmd: runCmd, + Dns: flDns, Image: image, } + + if *flMemory > 0 && !capabilities.SwapLimit { + fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n") + config.MemorySwap = -1 + } + // When allocating stdin in attached mode, close stdin at client disconnect if config.OpenStdin && config.AttachStdin { config.StdinOnce = true @@ -139,6 +158,16 @@ type NetworkSettings struct { PortMapping map[string]string } +// String returns a human-readable description of the port mapping defined in the settings +func (settings *NetworkSettings) PortMappingHuman() string { + var mapping []string + for private, public := range settings.PortMapping { + mapping = append(mapping, fmt.Sprintf("%s->%s", public, private)) + } + sort.Strings(mapping) + return strings.Join(mapping, ", ") +} + func (container *Container) Cmd() *exec.Cmd { return container.cmd } @@ -355,6 +384,17 @@ func (container *Container) Start() error { if err := container.allocateNetwork(); err != nil { return err } + + // Make sure the config is compatible with the current kernel + if container.Config.Memory > 0 && !container.runtime.capabilities.MemoryLimit { + log.Printf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n") + container.Config.Memory = 0 + } + if container.Config.Memory > 0 && !container.runtime.capabilities.SwapLimit { + log.Printf("WARNING: Your kernel does not support swap limit capabilities. 
Limitation discarded.\n") + container.Config.MemorySwap = -1 + } + if err := container.generateLXCConfig(); err != nil { return err } @@ -373,21 +413,26 @@ func (container *Container) Start() error { params = append(params, "-u", container.Config.User) } + if container.Config.Tty { + params = append(params, "-e", "TERM=xterm") + } + + // Setup environment + params = append(params, + "-e", "HOME=/", + "-e", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + ) + + for _, elem := range container.Config.Env { + params = append(params, "-e", elem) + } + // Program params = append(params, "--", container.Path) params = append(params, container.Args...) container.cmd = exec.Command("lxc-start", params...) - // Setup environment - container.cmd.Env = append( - []string{ - "HOME=/", - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - }, - container.Config.Env..., - ) - // Setup logging of stdout and stderr to disk if err := container.runtime.LogToDisk(container.stdout, container.logPath("stdout")); err != nil { return err @@ -398,10 +443,6 @@ func (container *Container) Start() error { var err error if container.Config.Tty { - container.cmd.Env = append( - []string{"TERM=xterm"}, - container.cmd.Env..., - ) err = container.startPty() } else { err = container.start() @@ -550,9 +591,21 @@ func (container *Container) kill() error { if !container.State.Running || container.cmd == nil { return nil } - if err := container.cmd.Process.Kill(); err != nil { - return err + + // Sending SIGKILL to the process via lxc + output, err := exec.Command("lxc-kill", "-n", container.Id, "9").CombinedOutput() + if err != nil { + log.Printf("error killing container %s (%s, %s)", container.Id, output, err) } + + // 2. 
Wait for the process to die, in last resort, try to kill the process directly + if err := container.WaitTimeout(10 * time.Second); err != nil { + log.Printf("Container %s failed to exit within 10 seconds of lxc SIGKILL - trying direct SIGKILL", container.Id) + if err := container.cmd.Process.Kill(); err != nil { + return err + } + } + // Wait for the container to be actually stopped container.Wait() return nil @@ -561,15 +614,24 @@ func (container *Container) kill() error { func (container *Container) Kill() error { container.State.lock() defer container.State.unlock() + if !container.State.Running { + return nil + } + if container.State.Ghost { + return fmt.Errorf("Can't kill ghost container") + } return container.kill() } -func (container *Container) Stop() error { +func (container *Container) Stop(seconds int) error { container.State.lock() defer container.State.unlock() if !container.State.Running { return nil } + if container.State.Ghost { + return fmt.Errorf("Can't stop ghost container") + } // 1. Send a SIGTERM if output, err := exec.Command("lxc-kill", "-n", container.Id, "15").CombinedOutput(); err != nil { @@ -581,8 +643,8 @@ func (container *Container) Stop() error { } // 2. 
Wait for the process to exit on its own - if err := container.WaitTimeout(10 * time.Second); err != nil { - log.Printf("Container %v failed to exit within 10 seconds of SIGTERM - using the force", container.Id) + if err := container.WaitTimeout(time.Duration(seconds) * time.Second); err != nil { + log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.Id, seconds) if err := container.kill(); err != nil { return err } @@ -590,8 +652,8 @@ func (container *Container) Stop() error { return nil } -func (container *Container) Restart() error { - if err := container.Stop(); err != nil { +func (container *Container) Restart(seconds int) error { + if err := container.Stop(seconds); err != nil { return err } if err := container.Start(); err != nil { diff --git a/container_test.go b/container_test.go index ac47f84bf0..e6525f0a79 100644 --- a/container_test.go +++ b/container_test.go @@ -97,7 +97,7 @@ func TestMultipleAttachRestart(t *testing.T) { t.Fatalf("Unexpected output. 
Expected [%s], received [%s]", "hello", l3) } - if err := container.Stop(); err != nil { + if err := container.Stop(10); err != nil { t.Fatal(err) } @@ -182,7 +182,7 @@ func TestCommitRun(t *testing.T) { if err != nil { t.Error(err) } - img, err := runtime.graph.Create(rwTar, container1, "unit test commited image") + img, err := runtime.graph.Create(rwTar, container1, "unit test commited image", "") if err != nil { t.Error(err) } @@ -324,6 +324,54 @@ func TestOutput(t *testing.T) { } } +func TestKillDifferentUser(t *testing.T) { + runtime, err := newTestRuntime() + if err != nil { + t.Fatal(err) + } + defer nuke(runtime) + container, err := runtime.Create(&Config{ + Image: GetTestImage(runtime).Id, + Cmd: []string{"tail", "-f", "/etc/resolv.conf"}, + User: "daemon", + }, + ) + if err != nil { + t.Fatal(err) + } + defer runtime.Destroy(container) + + if container.State.Running { + t.Errorf("Container shouldn't be running") + } + if err := container.Start(); err != nil { + t.Fatal(err) + } + + // Give some time to lxc to spawn the process (setuid might take some time) + container.WaitTimeout(500 * time.Millisecond) + + if !container.State.Running { + t.Errorf("Container should be running") + } + + if err := container.Kill(); err != nil { + t.Fatal(err) + } + + if container.State.Running { + t.Errorf("Container shouldn't be running") + } + container.Wait() + if container.State.Running { + t.Errorf("Container shouldn't be running") + } + // Try stopping twice + if err := container.Kill(); err != nil { + t.Fatal(err) + } +} + func TestKill(t *testing.T) { runtime, err := newTestRuntime() if err != nil { @@ -346,6 +394,10 @@ func TestKill(t *testing.T) { if err := container.Start(); err != nil { t.Fatal(err) } + + // Give some time to lxc to spawn the process + container.WaitTimeout(500 * time.Millisecond) + if !container.State.Running { t.Errorf("Container should be running") } @@ -657,6 +709,10 @@ func TestMultipleContainers(t *testing.T) { t.Fatal(err) } + // Make 
sure they are running before trying to kill them + container1.WaitTimeout(250 * time.Millisecond) + container2.WaitTimeout(250 * time.Millisecond) + // If we are here, both containers should be running if !container1.State.Running { t.Fatal("Container not running") diff --git a/contrib/crashTest.go b/contrib/crashTest.go new file mode 100644 index 0000000000..fa9cda6056 --- /dev/null +++ b/contrib/crashTest.go @@ -0,0 +1,96 @@ +package main + +import ( + "io" + "log" + "os" + "os/exec" + "time" +) + +const DOCKER_PATH = "/home/creack/dotcloud/docker/docker/docker" + +func runDaemon() (*exec.Cmd, error) { + os.Remove("/var/run/docker.pid") + cmd := exec.Command(DOCKER_PATH, "-d") + outPipe, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + errPipe, err := cmd.StderrPipe() + if err != nil { + return nil, err + } + if err := cmd.Start(); err != nil { + return nil, err + } + go func() { + io.Copy(os.Stdout, outPipe) + }() + go func() { + io.Copy(os.Stderr, errPipe) + }() + return cmd, nil +} + +func crashTest() error { + if err := exec.Command("/bin/bash", "-c", "while true; do true; done").Start(); err != nil { + return err + } + + for { + daemon, err := runDaemon() + if err != nil { + return err + } + // time.Sleep(5000 * time.Millisecond) + var stop bool + go func() error { + stop = false + for i := 0; i < 100 && !stop; i++ { + func() error { + cmd := exec.Command(DOCKER_PATH, "run", "base", "echo", "hello", "world") + log.Printf("%d", i) + outPipe, err := cmd.StdoutPipe() + if err != nil { + return err + } + inPipe, err := cmd.StdinPipe() + if err != nil { + return err + } + if err := cmd.Start(); err != nil { + return err + } + go func() { + io.Copy(os.Stdout, outPipe) + }() + // Expecting error, do not check + inPipe.Write([]byte("hello world!!!!!\n")) + go inPipe.Write([]byte("hello world!!!!!\n")) + go inPipe.Write([]byte("hello world!!!!!\n")) + inPipe.Close() + + if err := cmd.Wait(); err != nil { + return err + } + outPipe.Close() + return 
nil + }() + } + return nil + }() + time.Sleep(20 * time.Second) + stop = true + if err := daemon.Process.Kill(); err != nil { + return err + } + } + return nil +} + +func main() { + if err := crashTest(); err != nil { + log.Println(err) + } +} diff --git a/contrib/docker-build/README b/contrib/docker-build/README new file mode 100644 index 0000000000..f648753b90 --- /dev/null +++ b/contrib/docker-build/README @@ -0,0 +1,68 @@ +# docker-build: build your software with docker + +## Description + +docker-build is a script to build docker images from source. It will be deprecated once the 'build' feature is incorporated into docker itself (See https://github.com/dotcloud/docker/issues/278) + +Author: Solomon Hykes + + +## Install + +docker-builder requires: + +1) A reasonably recent Python setup (tested on 2.7.2). + +2) A running docker daemon at version 0.1.4 or more recent (http://www.docker.io/gettingstarted) + + +## Usage + +First create a valid Changefile, which defines a sequence of changes to apply to a base image. + + $ cat Changefile + # Start build from a know base image + from base:ubuntu-12.10 + # Update ubuntu sources + run echo 'deb http://archive.ubuntu.com/ubuntu quantal main universe multiverse' > /etc/apt/sources.list + run apt-get update + # Install system packages + run DEBIAN_FRONTEND=noninteractive apt-get install -y -q git + run DEBIAN_FRONTEND=noninteractive apt-get install -y -q curl + run DEBIAN_FRONTEND=noninteractive apt-get install -y -q golang + # Insert files from the host (./myscript must be present in the current directory) + copy myscript /usr/local/bin/myscript + + +Run docker-build, and pass the contents of your Changefile as standard input. + + $ IMG=$(./docker-build < Changefile) + +This will take a while: for each line of the changefile, docker-build will: + +1. Create a new container to execute the given command or insert the given file +2. Wait for the container to complete execution +3. 
Commit the resulting changes as a new image +4. Use the resulting image as the input of the next step + + +If all the steps succeed, the result will be an image containing the combined results of each build step. +You can trace back those build steps by inspecting the image's history: + + $ docker history $IMG + ID CREATED CREATED BY + 1e9e2045de86 A few seconds ago /bin/sh -c cat > /usr/local/bin/myscript; chmod +x /usr/local/bin/git + 77db140aa62a A few seconds ago /bin/sh -c DEBIAN_FRONTEND=noninteractive apt-get install -y -q golang + 77db140aa62a A few seconds ago /bin/sh -c DEBIAN_FRONTEND=noninteractive apt-get install -y -q curl + 77db140aa62a A few seconds ago /bin/sh -c DEBIAN_FRONTEND=noninteractive apt-get install -y -q git + 83e85d155451 A few seconds ago /bin/sh -c apt-get update + bfd53b36d9d3 A few seconds ago /bin/sh -c echo 'deb http://archive.ubuntu.com/ubuntu quantal main universe multiverse' > /etc/apt/sources.list + base 2 weeks ago /bin/bash + 27cf78414709 2 weeks ago + + +Note that your build started from 'base', as instructed by your Changefile. But that base image itself seems to have been built in 2 steps - hence the extra step in the history. + + +You can use this build technique to create any image you want: a database, a web application, or anything else that can be build by a sequence of unix commands - in other words, anything else. + diff --git a/contrib/docker-build/docker-build b/contrib/docker-build/docker-build new file mode 100755 index 0000000000..f2fc340680 --- /dev/null +++ b/contrib/docker-build/docker-build @@ -0,0 +1,104 @@ +#!/usr/bin/env python + +# docker-build is a script to build docker images from source. +# It will be deprecated once the 'build' feature is incorporated into docker itself. +# (See https://github.com/dotcloud/docker/issues/278) +# +# Author: Solomon Hykes + + + +# First create a valid Changefile, which defines a sequence of changes to apply to a base image. 
+# +# $ cat Changefile +# # Start build from a know base image +# from base:ubuntu-12.10 +# # Update ubuntu sources +# run echo 'deb http://archive.ubuntu.com/ubuntu quantal main universe multiverse' > /etc/apt/sources.list +# run apt-get update +# # Install system packages +# run DEBIAN_FRONTEND=noninteractive apt-get install -y -q git +# run DEBIAN_FRONTEND=noninteractive apt-get install -y -q curl +# run DEBIAN_FRONTEND=noninteractive apt-get install -y -q golang +# # Insert files from the host (./myscript must be present in the current directory) +# copy myscript /usr/local/bin/myscript +# +# +# Run docker-build, and pass the contents of your Changefile as standard input. +# +# $ IMG=$(./docker-build < Changefile) +# +# This will take a while: for each line of the changefile, docker-build will: +# +# 1. Create a new container to execute the given command or insert the given file +# 2. Wait for the container to complete execution +# 3. Commit the resulting changes as a new image +# 4. Use the resulting image as the input of the next step + + +import sys +import subprocess +import json +import hashlib + +def docker(args, stdin=None): + print "# docker " + " ".join(args) + p = subprocess.Popen(["docker"] + list(args), stdin=stdin, stdout=subprocess.PIPE) + return p.stdout + +def image_exists(img): + return docker(["inspect", img]).read().strip() != "" + +def run_and_commit(img_in, cmd, stdin=None): + run_id = docker(["run"] + (["-i", "-a", "stdin"] if stdin else ["-d"]) + [img_in, "/bin/sh", "-c", cmd], stdin=stdin).read().rstrip() + print "---> Waiting for " + run_id + result=int(docker(["wait", run_id]).read().rstrip()) + if result != 0: + print "!!! '{}' return non-zero exit code '{}'. 
Aborting.".format(cmd, result) + sys.exit(1) + return docker(["commit", run_id]).read().rstrip() + +def insert(base, src, dst): + print "COPY {} to {} in {}".format(src, dst, base) + if dst == "": + raise Exception("Missing destination path") + stdin = file(src) + stdin.seek(0) + return run_and_commit(base, "cat > {0}; chmod +x {0}".format(dst), stdin=stdin) + + +def main(): + base="" + steps = [] + try: + for line in sys.stdin.readlines(): + line = line.strip() + # Skip comments and empty lines + if line == "" or line[0] == "#": + continue + op, param = line.split(" ", 1) + if op == "from": + print "FROM " + param + base = param + steps.append(base) + elif op == "run": + print "RUN " + param + result = run_and_commit(base, param) + steps.append(result) + base = result + print "===> " + base + elif op == "copy": + src, dst = param.split(" ", 1) + result = insert(base, src, dst) + steps.append(result) + base = result + print "===> " + base + else: + print "Skipping uknown op " + op + except: + docker(["rmi"] + steps[1:]) + raise + print base + +if __name__ == "__main__": + main() diff --git a/contrib/docker-build/example.changefile b/contrib/docker-build/example.changefile new file mode 100644 index 0000000000..19261de82b --- /dev/null +++ b/contrib/docker-build/example.changefile @@ -0,0 +1,11 @@ +# Start build from a know base image +from base:ubuntu-12.10 +# Update ubuntu sources +run echo 'deb http://archive.ubuntu.com/ubuntu quantal main universe multiverse' > /etc/apt/sources.list +run apt-get update +# Install system packages +run DEBIAN_FRONTEND=noninteractive apt-get install -y -q git +run DEBIAN_FRONTEND=noninteractive apt-get install -y -q curl +run DEBIAN_FRONTEND=noninteractive apt-get install -y -q golang +# Insert files from the host (./myscript must be present in the current directory) +copy myscript /usr/local/bin/myscript diff --git a/contrib/install.sh b/contrib/install.sh index b0a998332e..d7c6e66466 100755 --- a/contrib/install.sh +++ 
b/contrib/install.sh @@ -45,7 +45,7 @@ then echo "Upstart script already exists." else echo "Creating /etc/init/dockerd.conf..." - echo "exec /usr/local/bin/docker -d" > /etc/init/dockerd.conf + echo "exec env LANG=\"en_US.UTF-8\" /usr/local/bin/docker -d" > /etc/init/dockerd.conf fi echo "Starting dockerd..." diff --git a/contrib/vagrant-docker/README.md b/contrib/vagrant-docker/README.md new file mode 100644 index 0000000000..5852ea1927 --- /dev/null +++ b/contrib/vagrant-docker/README.md @@ -0,0 +1,3 @@ +# Vagrant-docker + +This is a placeholder for the official vagrant-docker, a plugin for Vagrant (http://vagrantup.com) which exposes Docker as a provider. diff --git a/deb/Makefile b/deb/Makefile deleted file mode 120000 index d0b0e8e008..0000000000 --- a/deb/Makefile +++ /dev/null @@ -1 +0,0 @@ -../Makefile \ No newline at end of file diff --git a/deb/Makefile.deb b/deb/Makefile.deb deleted file mode 100644 index c954b0f5b5..0000000000 --- a/deb/Makefile.deb +++ /dev/null @@ -1,73 +0,0 @@ -PKG_NAME=dotcloud-docker -PKG_ARCH=amd64 -PKG_VERSION=1 -ROOT_PATH:=$(PWD) -BUILD_PATH=build # Do not change, decided by dpkg-buildpackage -BUILD_SRC=build_src -GITHUB_PATH=src/github.com/dotcloud/docker -INSDIR=usr/bin -SOURCE_PACKAGE=$(PKG_NAME)_$(PKG_VERSION).orig.tar.gz -DEB_PACKAGE=$(PKG_NAME)_$(PKG_VERSION)_$(PKG_ARCH).deb -EXTRA_GO_PKG=./auth - -TMPDIR=$(shell mktemp -d -t XXXXXX) - - -# Build a debian source package -all: clean build_in_deb - -build_in_deb: - echo "GOPATH = " $(ROOT_PATH) - mkdir bin - cd $(GITHUB_PATH)/docker; GOPATH=$(ROOT_PATH) go build -o $(ROOT_PATH)/bin/docker - -# DESTDIR provided by Debian packaging -install: - # Call this from a go environment (as packaged for deb source package) - mkdir -p $(DESTDIR)/$(INSDIR) - mkdir -p $(DESTDIR)/etc/init - install -m 0755 bin/docker $(DESTDIR)/$(INSDIR) - install -o root -m 0755 etc/docker.upstart $(DESTDIR)/etc/init/docker.conf - -$(BUILD_SRC): clean - # Copy ourselves into $BUILD_SRC to comply with 
unusual golang constraints - tar --exclude=*.tar.gz --exclude=checkout.tgz -f checkout.tgz -cz * - mkdir -p $(BUILD_SRC)/$(GITHUB_PATH) - tar -f checkout.tgz -C $(BUILD_SRC)/$(GITHUB_PATH) -xz - cd $(BUILD_SRC)/$(GITHUB_PATH)/docker; GOPATH=$(ROOT_PATH)/$(BUILD_SRC) go get -d - for d in `find $(BUILD_SRC) -name '.git*'`; do rm -rf $$d; done - # Populate source build with debian stuff - cp -R -L ./deb/* $(BUILD_SRC) - -$(SOURCE_PACKAGE): $(BUILD_SRC) - rm -f $(SOURCE_PACKAGE) - # Create the debian source package - tar -f $(SOURCE_PACKAGE) -C ${ROOT_PATH}/${BUILD_SRC} -cz . - -# Build deb package fetching go dependencies and cleaning up git repositories -deb: $(DEB_PACKAGE) - -$(DEB_PACKAGE): $(SOURCE_PACKAGE) - # dpkg-buildpackage looks for source package tarball in ../ - cd $(BUILD_SRC); dpkg-buildpackage - rm -rf $(BUILD_PATH) debian/$(PKG_NAME)* debian/files - -debsrc: $(SOURCE_PACKAGE) - -# Build local sources -#$(PKG_NAME): build_local - -build_local: - -@mkdir -p bin - cd docker && go build -o ../bin/docker - -gotest: - @echo "\033[36m[Testing]\033[00m docker..." - @sudo -E GOPATH=$(ROOT_PATH)/$(BUILD_SRC) go test -v . 
$(EXTRA_GO_PKG) && \ - echo -n "\033[32m[OK]\033[00m" || \ - echo -n "\033[31m[FAIL]\033[00m"; \ - echo " docker" - @sudo rm -rf /tmp/docker-* - -clean: - rm -rf $(BUILD_PATH) debian/$(PKG_NAME)* debian/files $(BUILD_SRC) checkout.tgz bin diff --git a/deb/README.md b/deb/README.md deleted file mode 120000 index 32d46ee883..0000000000 --- a/deb/README.md +++ /dev/null @@ -1 +0,0 @@ -../README.md \ No newline at end of file diff --git a/deb/debian/changelog b/deb/debian/changelog deleted file mode 100644 index 76cc04bee2..0000000000 --- a/deb/debian/changelog +++ /dev/null @@ -1,5 +0,0 @@ -dotcloud-docker (1) precise; urgency=low - - * Initial release - - -- dotCloud Mon, 14 Mar 2013 04:43:21 -0700 diff --git a/deb/debian/control b/deb/debian/control deleted file mode 100644 index 5245d7e238..0000000000 --- a/deb/debian/control +++ /dev/null @@ -1,20 +0,0 @@ -Source: dotcloud-docker -Section: misc -Priority: extra -Homepage: https://github.com/dotcloud/docker -Maintainer: Daniel Mizyrycki -Build-Depends: debhelper (>= 8.0.0), git, golang -Vcs-Git: https://github.com/dotcloud/docker.git -Standards-Version: 3.9.2 - -Package: dotcloud-docker -Architecture: amd64 -Provides: dotcloud-docker -Depends: lxc, wget, bsdtar, curl -Conflicts: docker -Description: A process manager with superpowers - It encapsulates heterogeneous payloads in Standard Containers, and runs - them on any server with strong guarantees of isolation and repeatability. - Is is a great building block for automating distributed systems: - large-scale web deployments, database clusters, continuous deployment - systems, private PaaS, service-oriented architectures, etc. 
diff --git a/deb/debian/copyright b/deb/debian/copyright deleted file mode 100644 index 6f3a66bbce..0000000000 --- a/deb/debian/copyright +++ /dev/null @@ -1,209 +0,0 @@ -Format: http://dep.debian.net/deps/dep5 -Upstream-Name: dotcloud-docker -Source: https://github.com/dotcloud/docker - -Files: * -Copyright: 2012 DotCloud Inc (opensource@dotcloud.com) -License: Apache License Version 2.0 - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright 2012 DotCloud Inc (opensource@dotcloud.com) - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/deb/etc/docker-dev.upstart b/deb/etc/docker-dev.upstart deleted file mode 100644 index 6cfe9d2616..0000000000 --- a/deb/etc/docker-dev.upstart +++ /dev/null @@ -1,10 +0,0 @@ -description "Run docker" - -start on runlevel [2345] -stop on starting rc RUNLEVEL=[016] -respawn - -script - test -f /etc/default/locale && . /etc/default/locale || true - LANG=$LANG LC_ALL=$LANG /usr/bin/docker -d -end script diff --git a/docker/docker.go b/docker/docker.go index 1b1c21990d..411e4d0c96 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -2,15 +2,20 @@ package main import ( "flag" + "fmt" "github.com/dotcloud/docker" "github.com/dotcloud/docker/rcli" "github.com/dotcloud/docker/term" "io" "log" "os" + "os/signal" + "syscall" ) -var GIT_COMMIT string +var ( + GIT_COMMIT string +) func main() { if docker.SelfPath() == "/sbin/init" { @@ -22,6 +27,7 @@ func main() { flDaemon := flag.Bool("d", false, "Daemon mode") flDebug := flag.Bool("D", false, "Debug mode") bridgeName := flag.String("b", "", "Attach containers to a pre-existing network bridge") + pidfile := flag.String("p", "/var/run/docker.pid", "File containing process PID") flag.Parse() if *bridgeName != "" { docker.NetworkBridgeIface = *bridgeName @@ -37,7 +43,7 @@ func main() { flag.Usage() return } - if err := daemon(); err != nil { + if err := daemon(*pidfile); err != nil { log.Fatal(err) } } else { @@ -47,7 +53,43 
@@ func main() { } } -func daemon() error { +func createPidFile(pidfile string) error { + if _, err := os.Stat(pidfile); err == nil { + return fmt.Errorf("pid file found, ensure docker is not running or delete %s", pidfile) + } + + file, err := os.Create(pidfile) + if err != nil { + return err + } + + defer file.Close() + + _, err = fmt.Fprintf(file, "%d", os.Getpid()) + return err +} + +func removePidFile(pidfile string) { + if err := os.Remove(pidfile); err != nil { + log.Printf("Error removing %s: %s", pidfile, err) + } +} + +func daemon(pidfile string) error { + if err := createPidFile(pidfile); err != nil { + log.Fatal(err) + } + defer removePidFile(pidfile) + + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, os.Kill, os.Signal(syscall.SIGTERM)) + go func() { + sig := <-c + log.Printf("Received signal '%v', exiting\n", sig) + removePidFile(pidfile) + os.Exit(0) + }() + service, err := docker.NewServer() if err != nil { return err @@ -91,15 +133,7 @@ func runCommand(args []string) error { } } } else { - service, err := docker.NewServer() - if err != nil { - return err - } - dockerConn := rcli.NewDockerLocalConn(os.Stdout) - defer dockerConn.Close() - if err := rcli.LocalCall(service, os.Stdin, dockerConn, args...); err != nil { - return err - } + return fmt.Errorf("Can't connect to docker daemon. Is 'docker -d' running on this host?") } return nil } diff --git a/docs/sources/examples/running_examples.rst b/docs/sources/examples/running_examples.rst index 4042add487..3d2593c710 100644 --- a/docs/sources/examples/running_examples.rst +++ b/docs/sources/examples/running_examples.rst @@ -7,27 +7,16 @@ Running The Examples -------------------- -There are two ways to run docker, daemon mode and standalone mode. - -When you run the docker command it will first check if there is a docker daemon running in the background it can connect to. - -* If it exists it will use that daemon to run all of the commands. 
-* If it does not exist docker will run in standalone mode (docker will exit after each command). - -Docker needs to be run from a privileged account (root). - -1. The most common (and recommended) way is to run a docker daemon as root in the background, and then connect to it from the docker client from any account. +All the examples assume your machine is running the docker daemon. To run the docker daemon in the background, simply type: .. code-block:: bash - # starting docker daemon in the background sudo docker -d & - # now you can run docker commands from any account. - docker - -2. Standalone: You need to run every command as root, or using sudo +Now you can run docker in client mode: all commands will be forwarded to the docker daemon, so the client +can run from any account. .. code-block:: bash - sudo docker + # now you can run docker commands from any account. + docker help diff --git a/docs/sources/installation/amazon.rst b/docs/sources/installation/amazon.rst index 5260b992bd..012c78f401 100644 --- a/docs/sources/installation/amazon.rst +++ b/docs/sources/installation/amazon.rst @@ -1,8 +1,9 @@ Amazon EC2 ========== - Please note this is a community contributed installation path. The only 'official' installation is using the :ref:`ubuntu_linux` installation path. This version - may be out of date because it depends on some binaries to be updated and published + Please note this is a community contributed installation path. The only 'official' installation is using the + :ref:`ubuntu_linux` installation path. This version may sometimes be out of date. + Installation ------------ @@ -17,7 +18,7 @@ Docker can now be installed on Amazon EC2 with a single vagrant command. Vagrant vagrant plugin install vagrant-aws -3. Get the docker sources, this will give you the latest Vagrantfile and puppet manifests. +3. Get the docker sources, this will give you the latest Vagrantfile. 
:: diff --git a/docs/sources/installation/archlinux.rst b/docs/sources/installation/archlinux.rst new file mode 100644 index 0000000000..ad9ab255eb --- /dev/null +++ b/docs/sources/installation/archlinux.rst @@ -0,0 +1,64 @@ +.. _arch_linux: + +Arch Linux +========== + + Please note this is a community contributed installation path. The only 'official' installation is using the + :ref:`ubuntu_linux` installation path. This version may sometimes be out of date. + + +Installing on Arch Linux is not officially supported but can be handled via +either of the following AUR packages: + +* `lxc-docker `_ +* `lxc-docker-git `_ + +The lxc-docker package will install the latest tagged version of docker. +The lxc-docker-git package will build from the current master branch. + +Dependencies +------------ + +Docker depends on several packages which are specified as dependencies in +either AUR package. + +* aufs3 +* bridge-utils +* go +* iproute2 +* linux-aufs_friendly +* lxc + +Installation +------------ + +The instructions here assume **yaourt** is installed. See +`Arch User Repository `_ +for information on building and installing packages from the AUR if you have not +done so before. + +Keep in mind that if **linux-aufs_friendly** is not already installed that a +new kernel will be compiled and this can take quite a while. + +:: + + yaourt -S lxc-docker-git + +Starting Docker +--------------- + +Prior to starting docker modify your bootloader to use the +**linux-aufs_friendly** kernel and reboot your system. + +There is a systemd service unit created for docker. 
To start the docker service: + +:: + + sudo systemctl start docker + + +To start on system boot: + +:: + + sudo systemctl enable docker diff --git a/docs/sources/installation/index.rst b/docs/sources/installation/index.rst index ae11258875..f9a59b0ad6 100644 --- a/docs/sources/installation/index.rst +++ b/docs/sources/installation/index.rst @@ -13,6 +13,7 @@ Contents: :maxdepth: 1 ubuntulinux + archlinux vagrant windows amazon diff --git a/docs/sources/installation/ubuntulinux.rst b/docs/sources/installation/ubuntulinux.rst index 94786f95d3..5f1ab39229 100644 --- a/docs/sources/installation/ubuntulinux.rst +++ b/docs/sources/installation/ubuntulinux.rst @@ -1,5 +1,10 @@ -Docker on Ubuntu -================ +.. _ubuntu_linux: + +Ubuntu Linux +============ + + **Please note this project is currently under heavy development. It should not be used in production.** + Docker is now available as a Ubuntu PPA (Personal Package Archive), `hosted on launchpad `_ @@ -15,8 +20,7 @@ Add the custom package sources to your apt sources list. Copy and paste both the .. code-block:: bash - sudo sh -c "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' \ - >> /etc/apt/sources.list" + sudo sh -c "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' >> /etc/apt/sources.list" Update your sources. You will see a warning that GPG signatures cannot be verified. @@ -33,12 +37,11 @@ Now install it, you will see another warning that the package cannot be authenti sudo apt-get install lxc-docker -**Run!** +Verify it worked .. code-block:: bash docker - -Probably you would like to continue with the :ref:`hello_world` example. \ No newline at end of file +**Done!**, now continue with the :ref:`hello_world` example. 
diff --git a/docs/sources/installation/vagrant.rst b/docs/sources/installation/vagrant.rst index a8249961a7..67d1f22813 100644 --- a/docs/sources/installation/vagrant.rst +++ b/docs/sources/installation/vagrant.rst @@ -7,7 +7,7 @@ Install using Vagrant Please note this is a community contributed installation path. The only 'official' installation is using the :ref:`ubuntu_linux` installation path. This version may sometimes be out of date. -**requirements** +**Requirements:** This guide will setup a new virtual machine with docker installed on your computer. This works on most operating systems, including MacOX, Windows, Linux, FreeBSD and others. If you can install these and have at least 400Mb RAM to spare you should be good. @@ -22,10 +22,10 @@ Install Vagrant and Virtualbox ``git`` in a terminal window -Spin up your machine --------------------- +Spin it up +---------- -1. Fetch the docker sources (this includes the instructions for machine setup). +1. Fetch the docker sources (this includes the Vagrantfile for machine setup). .. code-block:: bash diff --git a/graph.go b/graph.go index e7044c25a0..c0e5000913 100644 --- a/graph.go +++ b/graph.go @@ -2,6 +2,7 @@ package docker import ( "fmt" + "io" "io/ioutil" "os" "path" @@ -83,12 +84,13 @@ func (graph *Graph) Get(name string) (*Image, error) { } // Create creates a new image and registers it in the graph. 
-func (graph *Graph) Create(layerData Archive, container *Container, comment string) (*Image, error) { +func (graph *Graph) Create(layerData Archive, container *Container, comment, author string) (*Image, error) { img := &Image{ Id: GenerateId(), Comment: comment, Created: time.Now(), DockerVersion: VERSION, + Author: author, } if container != nil { img.Parent = container.Image @@ -111,7 +113,7 @@ func (graph *Graph) Register(layerData Archive, img *Image) error { if graph.Exists(img.Id) { return fmt.Errorf("Image %s already exists", img.Id) } - tmp, err := graph.Mktemp(img.Id) + tmp, err := graph.Mktemp("") defer os.RemoveAll(tmp) if err != nil { return fmt.Errorf("Mktemp failed: %s", err) @@ -128,12 +130,32 @@ func (graph *Graph) Register(layerData Archive, img *Image) error { return nil } +// TempLayerArchive creates a temporary archive of the given image's filesystem layer. +// The archive is stored on disk and will be automatically deleted as soon as has been read. +// If output is not nil, a human-readable progress bar will be written to it. +// FIXME: does this belong in Graph? How about MktempFile, let the caller use it for archives? +func (graph *Graph) TempLayerArchive(id string, compression Compression, output io.Writer) (*TempArchive, error) { + image, err := graph.Get(id) + if err != nil { + return nil, err + } + tmp, err := graph.tmp() + if err != nil { + return nil, err + } + archive, err := image.TarLayer(compression) + if err != nil { + return nil, err + } + return NewTempArchive(ProgressReader(ioutil.NopCloser(archive), 0, output, "Buffering to disk %v/%v (%v)"), tmp.Root) +} + // Mktemp creates a temporary sub-directory inside the graph's filesystem. 
func (graph *Graph) Mktemp(id string) (string, error) { if id == "" { id = GenerateId() } - tmp, err := NewGraph(path.Join(graph.Root, ":tmp:")) + tmp, err := graph.tmp() if err != nil { return "", fmt.Errorf("Couldn't create temp: %s", err) } @@ -143,6 +165,10 @@ func (graph *Graph) Mktemp(id string) (string, error) { return tmp.imageRoot(id), nil } +func (graph *Graph) tmp() (*Graph, error) { + return NewGraph(path.Join(graph.Root, ":tmp:")) +} + // Check if given error is "not empty". // Note: this is the way golang does it internally with os.IsNotExists. func isNotEmpty(err error) bool { diff --git a/graph_test.go b/graph_test.go index 7c40330aa4..1bd05aaa97 100644 --- a/graph_test.go +++ b/graph_test.go @@ -3,6 +3,7 @@ package docker import ( "archive/tar" "bytes" + "errors" "io" "io/ioutil" "os" @@ -26,6 +27,32 @@ func TestInit(t *testing.T) { } } +// Test that Register can be interrupted cleanly without side effects +func TestInterruptedRegister(t *testing.T) { + graph := tempGraph(t) + defer os.RemoveAll(graph.Root) + badArchive, w := io.Pipe() // Use a pipe reader as a fake archive which never yields data + image := &Image{ + Id: GenerateId(), + Comment: "testing", + Created: time.Now(), + } + go graph.Register(badArchive, image) + time.Sleep(200 * time.Millisecond) + w.CloseWithError(errors.New("But I'm not a tarball!")) // (Nobody's perfect, darling) + if _, err := graph.Get(image.Id); err == nil { + t.Fatal("Image should not exist after Register is interrupted") + } + // Registering the same image again should succeed if the first register was interrupted + goodArchive, err := fakeTar() + if err != nil { + t.Fatal(err) + } + if err := graph.Register(goodArchive, image); err != nil { + t.Fatal(err) + } +} + // FIXME: Do more extensive tests (ex: create multiple, delete, recreate; // create multiple, check the amount of images and paths, etc..) 
func TestGraphCreate(t *testing.T) { @@ -35,7 +62,7 @@ func TestGraphCreate(t *testing.T) { if err != nil { t.Fatal(err) } - image, err := graph.Create(archive, nil, "Testing") + image, err := graph.Create(archive, nil, "Testing", "") if err != nil { t.Fatal(err) } @@ -95,7 +122,7 @@ func TestMount(t *testing.T) { if err != nil { t.Fatal(err) } - image, err := graph.Create(archive, nil, "Testing") + image, err := graph.Create(archive, nil, "Testing", "") if err != nil { t.Fatal(err) } @@ -139,7 +166,7 @@ func createTestImage(graph *Graph, t *testing.T) *Image { if err != nil { t.Fatal(err) } - img, err := graph.Create(archive, nil, "Test image") + img, err := graph.Create(archive, nil, "Test image", "") if err != nil { t.Fatal(err) } @@ -154,7 +181,7 @@ func TestDelete(t *testing.T) { t.Fatal(err) } assertNImages(graph, t, 0) - img, err := graph.Create(archive, nil, "Bla bla") + img, err := graph.Create(archive, nil, "Bla bla", "") if err != nil { t.Fatal(err) } @@ -165,11 +192,11 @@ func TestDelete(t *testing.T) { assertNImages(graph, t, 0) // Test 2 create (same name) / 1 delete - img1, err := graph.Create(archive, nil, "Testing") + img1, err := graph.Create(archive, nil, "Testing", "") if err != nil { t.Fatal(err) } - if _, err = graph.Create(archive, nil, "Testing"); err != nil { + if _, err = graph.Create(archive, nil, "Testing", ""); err != nil { t.Fatal(err) } assertNImages(graph, t, 2) diff --git a/hack/README.md b/hack/README.md new file mode 100644 index 0000000000..06cdd50854 --- /dev/null +++ b/hack/README.md @@ -0,0 +1 @@ +This directory contains material helpful for hacking on docker. diff --git a/hack/fmt-check.hook b/hack/fmt-check.hook new file mode 100644 index 0000000000..cd18a18bcb --- /dev/null +++ b/hack/fmt-check.hook @@ -0,0 +1,46 @@ +#!/bin/sh + +# This pre-commit hook will abort if a committed file doesn't pass gofmt. 
+# By Even Shaw +# http://github.com/edsrzf/gofmt-git-hook + +test_fmt() { + hash gofmt 2>&- || { echo >&2 "gofmt not in PATH."; exit 1; } + IFS=' +' + for file in `git diff --cached --name-only --diff-filter=ACM | grep '\.go$'` + do + output=`git cat-file -p :$file | gofmt -l 2>&1` + if test $? -ne 0 + then + output=`echo "$output" | sed "s,,$file,"` + syntaxerrors="${list}${output}\n" + elif test -n "$output" + then + list="${list}${file}\n" + fi + done + exitcode=0 + if test -n "$syntaxerrors" + then + echo >&2 "gofmt found syntax errors:" + printf "$syntaxerrors" + exitcode=1 + fi + if test -n "$list" + then + echo >&2 "gofmt needs to format these files (run gofmt -w and git add):" + printf "$list" + exitcode=1 + fi + exit $exitcode +} + +case "$1" in + --about ) + echo "Check Go code formatting" + ;; + * ) + test_fmt + ;; +esac diff --git a/image.go b/image.go index 83bf9481ae..403731d6e9 100644 --- a/image.go +++ b/image.go @@ -7,7 +7,9 @@ import ( "fmt" "io" "io/ioutil" + "log" "os" + "os/exec" "path" "strings" "time" @@ -21,6 +23,7 @@ type Image struct { Container string `json:"container,omitempty"` ContainerConfig Config `json:"container_config,omitempty"` DockerVersion string `json:"docker_version,omitempty"` + Author string `json:"author,omitempty"` graph *Graph } @@ -92,7 +95,28 @@ func MountAUFS(ro []string, rw string, target string) error { roBranches += fmt.Sprintf("%v=ro:", layer) } branches := fmt.Sprintf("br:%v:%v", rwBranch, roBranches) - return mount("none", target, "aufs", 0, branches) + + //if error, try to load aufs kernel module + if err := mount("none", target, "aufs", 0, branches); err != nil { + log.Printf("Kernel does not support AUFS, trying to load the AUFS module with modprobe...") + if err := exec.Command("modprobe", "aufs").Run(); err != nil { + return fmt.Errorf("Unable to load the AUFS module") + } + log.Printf("...module loaded.") + if err := mount("none", target, "aufs", 0, branches); err != nil { + return fmt.Errorf("Unable to 
mount using aufs") + } + } + return nil +} + +// TarLayer returns a tar archive of the image's filesystem layer. +func (image *Image) TarLayer(compression Compression) (Archive, error) { + layerPath, err := image.layer() + if err != nil { + return nil, err + } + return Tar(layerPath, compression) } func (image *Image) Mount(root, rw string) error { diff --git a/lxc_template.go b/lxc_template.go index c6849cb0df..5ac62f52af 100644 --- a/lxc_template.go +++ b/lxc_template.go @@ -78,7 +78,7 @@ lxc.mount.entry = devpts {{$ROOTFS}}/dev/pts devpts newinstance,ptmxmode=0666,no lxc.mount.entry = {{.SysInitPath}} {{$ROOTFS}}/sbin/init none bind,ro 0 0 # In order to get a working DNS environment, mount bind (ro) the host's /etc/resolv.conf into the container -lxc.mount.entry = /etc/resolv.conf {{$ROOTFS}}/etc/resolv.conf none bind,ro 0 0 +lxc.mount.entry = {{.ResolvConfPath}} {{$ROOTFS}}/etc/resolv.conf none bind,ro 0 0 # drop linux capabilities (apply mainly to the user root in the container) diff --git a/network.go b/network.go index 9164c1d72e..373625d59c 100644 --- a/network.go +++ b/network.go @@ -4,6 +4,7 @@ import ( "encoding/binary" "errors" "fmt" + "io" "log" "net" "os/exec" @@ -183,18 +184,21 @@ func getIfaceAddr(name string) (net.Addr, error) { // It keeps track of all mappings and is able to unmap at will type PortMapper struct { mapping map[int]net.TCPAddr + proxies map[int]net.Listener } func (mapper *PortMapper) cleanup() error { // Ignore errors - This could mean the chains were never set up iptables("-t", "nat", "-D", "PREROUTING", "-m", "addrtype", "--dst-type", "LOCAL", "-j", "DOCKER") - iptables("-t", "nat", "-D", "OUTPUT", "-m", "addrtype", "--dst-type", "LOCAL", "-j", "DOCKER") + iptables("-t", "nat", "-D", "OUTPUT", "-m", "addrtype", "--dst-type", "LOCAL", "!", "--dst", "127.0.0.0/8", "-j", "DOCKER") + iptables("-t", "nat", "-D", "OUTPUT", "-m", "addrtype", "--dst-type", "LOCAL", "-j", "DOCKER") // Created in versions <= 0.1.6 // Also cleanup rules 
created by older versions, or -X might fail. iptables("-t", "nat", "-D", "PREROUTING", "-j", "DOCKER") iptables("-t", "nat", "-D", "OUTPUT", "-j", "DOCKER") iptables("-t", "nat", "-F", "DOCKER") iptables("-t", "nat", "-X", "DOCKER") mapper.mapping = make(map[int]net.TCPAddr) + mapper.proxies = make(map[int]net.Listener) return nil } @@ -205,7 +209,7 @@ func (mapper *PortMapper) setup() error { if err := iptables("-t", "nat", "-A", "PREROUTING", "-m", "addrtype", "--dst-type", "LOCAL", "-j", "DOCKER"); err != nil { return fmt.Errorf("Failed to inject docker in PREROUTING chain: %s", err) } - if err := iptables("-t", "nat", "-A", "OUTPUT", "-m", "addrtype", "--dst-type", "LOCAL", "-j", "DOCKER"); err != nil { + if err := iptables("-t", "nat", "-A", "OUTPUT", "-m", "addrtype", "--dst-type", "LOCAL", "!", "--dst", "127.0.0.0/8", "-j", "DOCKER"); err != nil { return fmt.Errorf("Failed to inject docker in OUTPUT chain: %s", err) } return nil @@ -220,15 +224,64 @@ func (mapper *PortMapper) Map(port int, dest net.TCPAddr) error { if err := mapper.iptablesForward("-A", port, dest); err != nil { return err } + mapper.mapping[port] = dest + listener, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", port)) + if err != nil { + mapper.Unmap(port) + return err + } + mapper.proxies[port] = listener + go proxy(listener, "tcp", dest.String()) return nil } +// proxy listens for socket connections on `listener`, and forwards them unmodified +// to `proto:address` +func proxy(listener net.Listener, proto, address string) error { + Debugf("proxying to %s:%s", proto, address) + defer Debugf("Done proxying to %s:%s", proto, address) + for { + Debugf("Listening on %s", listener) + src, err := listener.Accept() + if err != nil { + return err + } + Debugf("Connecting to %s:%s", proto, address) + dst, err := net.Dial(proto, address) + if err != nil { + log.Printf("Error connecting to %s:%s: %s", proto, address, err) + src.Close() + continue + } + Debugf("Connected to backend, splicing") + 
splice(src, dst) + } + return nil +} + +func halfSplice(dst, src net.Conn) error { + _, err := io.Copy(dst, src) + // FIXME: on EOF from a tcp connection, pass WriteClose() + dst.Close() + src.Close() + return err +} + +func splice(a, b net.Conn) { + go halfSplice(a, b) + go halfSplice(b, a) +} + func (mapper *PortMapper) Unmap(port int) error { dest, ok := mapper.mapping[port] if !ok { return errors.New("Port is not mapped") } + if proxy, exists := mapper.proxies[port]; exists { + proxy.Close() + delete(mapper.proxies, port) + } if err := mapper.iptablesForward("-D", port, dest); err != nil { return err } @@ -293,7 +346,7 @@ func (alloc *PortAllocator) Acquire(port int) (int, error) { func newPortAllocator() (*PortAllocator, error) { allocator := &PortAllocator{ - inUse: make(map[int]struct{}), + inUse: make(map[int]struct{}), fountain: make(chan int), } go allocator.runFountain() diff --git a/packaging/README.rst b/packaging/README.rst new file mode 100644 index 0000000000..7e927ccffe --- /dev/null +++ b/packaging/README.rst @@ -0,0 +1,8 @@ +Docker packaging +================ + +This directory has one subdirectory per packaging distribution. +At minimum, each of these subdirectories should contain a +README.$DISTRIBUTION explaining how to create the native +docker package and how to install it. + diff --git a/packaging/archlinux/README.archlinux b/packaging/archlinux/README.archlinux new file mode 100644 index 0000000000..f20d2d25bc --- /dev/null +++ b/packaging/archlinux/README.archlinux @@ -0,0 +1,25 @@ +Docker on Arch +============== + +The AUR lxc-docker and lxc-docker-git packages handle building docker on Arch +linux. The PKGBUILD specifies all dependencies, build, and packaging steps. + +Dependencies +============ + +The only buildtime dependencies are git and go which are available via pacman. +The -s flag can be used on makepkg commands below to automatically install +these dependencies. 
+ +Building Package +================ + +Download the tarball for either AUR packaged to a local directory. In that +directory makepkg can be run to build the package. + +# Build the binary package +makepkg + +# Build an updated source tarball +makepkg --source + diff --git a/packaging/debian/Makefile b/packaging/debian/Makefile new file mode 100644 index 0000000000..75ff8f34f5 --- /dev/null +++ b/packaging/debian/Makefile @@ -0,0 +1,35 @@ +PKG_NAME=lxc-docker +DOCKER_VERSION=$(shell head -1 changelog | awk 'match($$0, /\(.+\)/) {print substr($$0, RSTART+1, RLENGTH-4)}') +GITHUB_PATH=github.com/dotcloud/docker +SOURCE_PKG=$(PKG_NAME)_$(DOCKER_VERSION).orig.tar.gz +BUILD_SRC=${CURDIR}/../../build_src + +all: + # Compile docker. Used by debian dpkg-buildpackage. + cd src/${GITHUB_PATH}/docker; GOPATH=${CURDIR} go build + +install: + # Used by debian dpkg-buildpackage + mkdir -p $(DESTDIR)/usr/bin + mkdir -p $(DESTDIR)/etc/init.d + install -m 0755 src/${GITHUB_PATH}/docker/docker $(DESTDIR)/usr/bin + install -o root -m 0755 debian/docker.initd $(DESTDIR)/etc/init.d/docker + +debian: + # This Makefile will compile the github master branch of dotcloud/docker + # Retrieve docker project and its go structure from internet + rm -rf ${BUILD_SRC} + GOPATH=${BUILD_SRC} go get ${GITHUB_PATH} + # Add debianization + mkdir ${BUILD_SRC}/debian + cp Makefile ${BUILD_SRC} + cp -r * ${BUILD_SRC}/debian + cp ../../README.md ${BUILD_SRC} + # Cleanup + for d in `find ${BUILD_SRC} -name '.git*'`; do rm -rf $$d; done + rm -rf ${BUILD_SRC}/../${SOURCE_PKG} + rm -rf ${BUILD_SRC}/pkg + # Create docker debian files + cd ${BUILD_SRC}; tar czf ../${SOURCE_PKG} . + cd ${BUILD_SRC}; dpkg-buildpackage + rm -rf ${BUILD_SRC} diff --git a/packaging/debian/README.debian b/packaging/debian/README.debian new file mode 100644 index 0000000000..83dc42268b --- /dev/null +++ b/packaging/debian/README.debian @@ -0,0 +1,31 @@ +Docker on Debian +================ + +Docker has been built and tested on Wheezy. 
All docker functionality works +out of the box, except for memory limitation as the stock debian kernel +does not support it yet. + + +Building docker package +~~~~~~~~~~~~~~~~~~~~~~~ + +Building Dependencies: debhelper, autotools-dev and golang + + +Assuming you have a wheezy system up and running + +# Download a fresh copy of the docker project +git clone https://github.com/dotcloud/docker.git +cd docker + +# Get building dependencies +sudo apt-get update ; sudo apt-get install -y debhelper autotools-dev golang + +# Make the debian package, with no memory limitation support +(cd packaging/debian; make debian NO_MEMORY_LIMIT=1) + + +Install docker package +~~~~~~~~~~~~~~~~~~~~~~ + +sudo dpkg -i lxc-docker_0.1.4-1_amd64.deb; sudo apt-get install -f -y diff --git a/packaging/debian/Vagrantfile b/packaging/debian/Vagrantfile new file mode 100644 index 0000000000..2da2900605 --- /dev/null +++ b/packaging/debian/Vagrantfile @@ -0,0 +1,22 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +$BUILDBOT_IP = '192.168.33.31' + +def v10(config) + config.vm.box = 'debian' + config.vm.share_folder 'v-data', '/data/docker', File.dirname(__FILE__) + '/../..' 
+ config.vm.network :hostonly, $BUILDBOT_IP + + # Install debian packaging dependencies and create debian packages + config.vm.provision :shell, :inline => 'apt-get -qq update; apt-get install -y debhelper autotools-dev golang' + config.vm.provision :shell, :inline => 'cd /data/docker/packaging/debian; make debian' +end + +Vagrant::VERSION < '1.1.0' and Vagrant::Config.run do |config| + v10(config) +end + +Vagrant::VERSION >= '1.1.0' and Vagrant.configure('1') do |config| + v10(config) +end diff --git a/packaging/debian/changelog b/packaging/debian/changelog new file mode 100644 index 0000000000..761a879e8a --- /dev/null +++ b/packaging/debian/changelog @@ -0,0 +1,14 @@ +lxc-docker (0.1.4-1) unstable; urgency=low + + Improvements [+], Updates [*], Bug fixes [-]: + * Changed default bridge interface do 'docker0' + - Fix a race condition when running the port allocator + + -- Daniel Mizyrycki Wed, 10 Apr 2013 18:06:21 -0700 + + +lxc-docker (0.1.0-1) unstable; urgency=low + + * Initial release + + -- Daniel Mizyrycki Mon, 29 Mar 2013 18:09:55 -0700 diff --git a/packaging/debian/compat b/packaging/debian/compat new file mode 100644 index 0000000000..ec635144f6 --- /dev/null +++ b/packaging/debian/compat @@ -0,0 +1 @@ +9 diff --git a/packaging/debian/control b/packaging/debian/control new file mode 100644 index 0000000000..a09e9aee56 --- /dev/null +++ b/packaging/debian/control @@ -0,0 +1,19 @@ +Source: lxc-docker +Section: admin +Priority: optional +Maintainer: Daniel Mizyrycki +Build-Depends: debhelper (>= 9),autotools-dev,golang +Standards-Version: 3.9.3 +Homepage: http://github.com/dotcloud/docker + +Package: lxc-docker +Architecture: linux-any +Depends: ${misc:Depends},${shlibs:Depends},lxc,bsdtar +Conflicts: docker +Description: lxc-docker is a Linux container runtime + Docker complements LXC with a high-level API which operates at the process + level. It runs unix processes with strong guarantees of isolation and + repeatability across servers. 
+ Docker is a great building block for automating distributed systems: + large-scale web deployments, database clusters, continuous deployment systems, + private PaaS, service-oriented architectures, etc. diff --git a/packaging/debian/copyright b/packaging/debian/copyright new file mode 100644 index 0000000000..668c8635e4 --- /dev/null +++ b/packaging/debian/copyright @@ -0,0 +1,237 @@ +Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: docker +Upstream-Contact: DotCloud Inc +Source: http://github.com/dotcloud/docker + +Files: * +Copyright: 2012, DotCloud Inc +License: Apache-2.0 + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2012 DotCloud Inc + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +Files: src/github.com/kr/pty/* +Copyright: Copyright (c) 2011 Keith Rarick +License: Expat + Copyright (c) 2011 Keith Rarick + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated + documentation files (the "Software"), to deal in the + Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, + sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, + subject to the following conditions: + + The above copyright notice and this permission notice shall + be included in all copies or substantial portions of the + Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY + KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE + WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS + OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/packaging/debian/docker.initd b/packaging/debian/docker.initd new file mode 100644 index 0000000000..2b6a3c0979 --- /dev/null +++ b/packaging/debian/docker.initd @@ -0,0 +1,49 @@ +#!/bin/sh + +### BEGIN INIT INFO +# Provides: docker +# Required-Start: $local_fs +# Required-Stop: $local_fs +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: docker +# Description: docker daemon +### END INIT INFO + +DOCKER=/usr/bin/docker +PIDFILE=/var/run/docker.pid + +# Check docker is present +[ -x $DOCKER ] || log_success_msg "Docker not present" + +# Get lsb functions +. /lib/lsb/init-functions + + +case "$1" in + start) + log_begin_msg "Starting docker..." + start-stop-daemon --start --background --exec "$DOCKER" -- -d + log_end_msg $? + ;; + stop) + log_begin_msg "Stopping docker..." + docker_pid=`pgrep -f "$DOCKER -d"` + [ -n "$docker_pid" ] && kill $docker_pid + log_end_msg $? + ;; + status) + docker_pid=`pgrep -f "$DOCKER -d"` + if [ -z "$docker_pid" ] ; then + echo "docker not running" + else + echo "docker running (pid $docker_pid)" + fi + ;; + *) + echo "Usage: /etc/init.d/docker {start|stop|status}" + exit 1 + ;; +esac + +exit 0 diff --git a/deb/debian/docs b/packaging/debian/docs similarity index 100% rename from deb/debian/docs rename to packaging/debian/docs diff --git a/packaging/debian/lxc-docker.postinst b/packaging/debian/lxc-docker.postinst new file mode 100644 index 0000000000..91e251dc8d --- /dev/null +++ b/packaging/debian/lxc-docker.postinst @@ -0,0 +1,13 @@ +#!/bin/sh + +# Ensure cgroup is mounted +if [ -z "`/bin/egrep -e '^cgroup' /etc/fstab`" ]; then + /bin/echo 'cgroup /sys/fs/cgroup cgroup defaults 0 0' >>/etc/fstab +fi +if [ -z "`/bin/mount | /bin/egrep -e '^cgroup'`" ]; then + /bin/mount /sys/fs/cgroup +fi + +# Start docker +/usr/sbin/update-rc.d docker defaults +/etc/init.d/docker start diff --git a/packaging/debian/maintainer.rst b/packaging/debian/maintainer.rst new file mode 100644 index 0000000000..111d4fcc3e --- 
/dev/null +++ b/packaging/debian/maintainer.rst @@ -0,0 +1,16 @@ +Maintainer duty +=============== + +The Debian project specifies the role of a 'maintainer' which is the person +making the Debian package of the program. This role requires an 'sponsor' to +upload the package. As a maintainer you should follow the guide +http://www.debian.org/doc/manuals/maint-guide . Your sponsor will be there +helping you succeed. + +The most relevant information to update is the changelog file: +Each new release should create a new first paragraph with new release version, +changes, and the maintainer information. + +After this is done, follow README.debian to generate the actual source +packages and talk with your sponsor to upload them into the official Debian +package archive. diff --git a/packaging/debian/rules b/packaging/debian/rules new file mode 100755 index 0000000000..25f16f9c61 --- /dev/null +++ b/packaging/debian/rules @@ -0,0 +1,13 @@ +#!/usr/bin/make -f +# -*- makefile -*- +# Sample debian/rules that uses debhelper. +# This file was originally written by Joey Hess and Craig Small. +# As a special exception, when this file is copied by dh-make into a +# dh-make output file, you may use that output file without restriction. +# This special exception was added by Craig Small in version 0.37 of dh-make. + +# Uncomment this to turn on verbose mode. 
+#export DH_VERBOSE=1 + +%: + dh ${@} --with autotools_dev diff --git a/deb/debian/source/format b/packaging/debian/source/format similarity index 100% rename from deb/debian/source/format rename to packaging/debian/source/format diff --git a/packaging/ubuntu/Makefile b/packaging/ubuntu/Makefile new file mode 100644 index 0000000000..dbdf1af7a9 --- /dev/null +++ b/packaging/ubuntu/Makefile @@ -0,0 +1,62 @@ +# Ubuntu package Makefile +# +# Dependencies: debhelper autotools-dev devscripts golang +# Notes: +# Use 'make ubuntu' to create the ubuntu package +# GPG_KEY environment variable needs to contain a GPG private key for package to be signed +# and uploaded to docker PPA. +# If GPG_KEY is not defined, make ubuntu will create docker package and exit with +# status code 2 + +PKG_NAME=lxc-docker +VERSION=$(shell head -1 changelog | sed 's/^.\+(\(.\+\)..).\+$$/\1/') +GITHUB_PATH=github.com/dotcloud/docker +DOCKER_VERSION=${PKG_NAME}_${VERSION} +DOCKER_FVERSION=${PKG_NAME}_$(shell head -1 changelog | sed 's/^.\+(\(.\+\)).\+$$/\1/') +BUILD_SRC=${CURDIR}/../../build_src +VERSION_TAG=v$(shell head -1 changelog | sed 's/^.\+(\(.\+\)-[0-9]\+).\+$$/\1/') + +all: + # Compile docker. Used by dpkg-buildpackage. 
+ cd src/${GITHUB_PATH}/docker; GOPATH=${CURDIR} go build + +install: + # Used by dpkg-buildpackage + mkdir -p ${DESTDIR}/usr/bin + mkdir -p ${DESTDIR}/etc/init + mkdir -p ${DESTDIR}/DEBIAN + install -m 0755 src/${GITHUB_PATH}/docker/docker ${DESTDIR}/usr/bin + install -o root -m 0755 debian/docker.upstart ${DESTDIR}/etc/init/docker.conf + install debian/lxc-docker.prerm ${DESTDIR}/DEBIAN/prerm + install debian/lxc-docker.postinst ${DESTDIR}/DEBIAN/postinst + +ubuntu: + # This Makefile will compile the github master branch of dotcloud/docker + # Retrieve docker project and its go structure from internet + rm -rf ${BUILD_SRC} + git clone $(shell git rev-parse --show-toplevel) ${BUILD_SRC}/${GITHUB_PATH} + cd ${BUILD_SRC}/${GITHUB_PATH}; git checkout ${VERSION_TAG} && GOPATH=${BUILD_SRC} go get -d + # Add debianization + mkdir ${BUILD_SRC}/debian + cp Makefile ${BUILD_SRC} + cp -r * ${BUILD_SRC}/debian + cp ../../README.md ${BUILD_SRC} + # Cleanup + for d in `find ${BUILD_SRC} -name '.git*'`; do rm -rf $$d; done + rm -rf ${BUILD_SRC}/../${DOCKER_VERSION}.orig.tar.gz + rm -rf ${BUILD_SRC}/pkg + # Create docker debian files + cd ${BUILD_SRC}; tar czf ../${DOCKER_VERSION}.orig.tar.gz . 
+ cd ${BUILD_SRC}; dpkg-buildpackage -us -uc + rm -rf ${BUILD_SRC} + # Sign package and upload it to PPA if GPG_KEY environment variable + # holds a private GPG KEY + if /usr/bin/test "$${GPG_KEY}" == ""; then exit 2; fi + mkdir ${BUILD_SRC} + # Import gpg signing key + echo "$${GPG_KEY}" | gpg --allow-secret-key-import --import + # Sign the package + cd ${BUILD_SRC}; dpkg-source -x ${BUILD_SRC}/../${DOCKER_FVERSION}.dsc + cd ${BUILD_SRC}/${PKG_NAME}-${VERSION}; debuild -S -sa + cd ${BUILD_SRC};dput ppa:dotcloud/lxc-docker ${DOCKER_FVERSION}_source.changes + rm -rf ${BUILD_SRC} diff --git a/packaging/ubuntu/README.ubuntu b/packaging/ubuntu/README.ubuntu new file mode 100644 index 0000000000..286a6f8d52 --- /dev/null +++ b/packaging/ubuntu/README.ubuntu @@ -0,0 +1,37 @@ +Docker on Ubuntu +================ + +The easiest way to get docker up and running natively on Ubuntu is installing +it from its official PPA:: + + sudo sh -c "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' >>/etc/apt/sources.list" + sudo apt-get update + sudo apt-get install lxc-docker + + +Building docker package +~~~~~~~~~~~~~~~~~~~~~~~ + +The building process is shared by both, developers and maintainers. If you are +a developer, the Makefile will stop with exit status 2 right before signing +the built packages. 
+ +Assuming you are working on an Ubuntu 12.04 TLS system :: + + # Download a fresh copy of the docker project + git clone https://github.com/dotcloud/docker.git + cd docker + + # Get building dependencies + sudo apt-get update; sudo apt-get install -y debhelper autotools-dev devscripts golang + + # Make the ubuntu package + (cd packaging/ubuntu; make ubuntu) + + +Install docker built package +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + sudo dpkg -i lxc-docker_*_amd64.deb; sudo apt-get install -f -y diff --git a/packaging/ubuntu/Vagrantfile b/packaging/ubuntu/Vagrantfile new file mode 100644 index 0000000000..0689eea1c2 --- /dev/null +++ b/packaging/ubuntu/Vagrantfile @@ -0,0 +1,12 @@ +BUILDBOT_IP = '192.168.33.32' + +Vagrant::Config.run do |config| + config.vm.box = 'precise64' + config.vm.box_url = 'http://files.vagrantup.com/precise64.box' + config.vm.share_folder 'v-data', '/data/docker', "#{File.dirname(__FILE__)}/../.." + config.vm.network :hostonly,BUILDBOT_IP + + # Install ubuntu packaging dependencies and create ubuntu packages + config.vm.provision :shell, :inline => 'export DEBIAN_FRONTEND=noninteractive; apt-get -qq update; apt-get install -qq -y git debhelper autotools-dev devscripts golang' + config.vm.provision :shell, :inline => "export GPG_KEY='#{ENV['GPG_KEY']}'; cd /data/docker/packaging/ubuntu; make ubuntu" +end diff --git a/packaging/ubuntu/changelog b/packaging/ubuntu/changelog new file mode 100644 index 0000000000..aa5ea6cc87 --- /dev/null +++ b/packaging/ubuntu/changelog @@ -0,0 +1,30 @@ +lxc-docker (0.1.6-1) precise; urgency=low + + Improvements [+], Updates [*], Bug fixes [-]: + + Multiple improvements, updates and bug fixes + + -- dotCloud Wed, 17 Apr 2013 20:43:43 -0700 + + +lxc-docker (0.1.4.1-1) precise; urgency=low + + Improvements [+], Updates [*], Bug fixes [-]: + * Test PPA + + -- dotCloud Mon, 15 Apr 2013 12:14:50 -0700 + + +lxc-docker (0.1.4-1) precise; urgency=low + + Improvements [+], Updates [*], Bug fixes [-]: + * Changed default 
bridge interface do 'docker0' + - Fix a race condition when running the port allocator + + -- dotCloud Fri, 12 Apr 2013 12:20:06 -0700 + + +lxc-docker (0.1.0-1) unstable; urgency=low + + * Initial release + + -- dotCloud Mon, 25 Mar 2013 05:51:12 -0700 diff --git a/deb/debian/compat b/packaging/ubuntu/compat similarity index 100% rename from deb/debian/compat rename to packaging/ubuntu/compat diff --git a/packaging/ubuntu/control b/packaging/ubuntu/control new file mode 100644 index 0000000000..c52303a88b --- /dev/null +++ b/packaging/ubuntu/control @@ -0,0 +1,19 @@ +Source: lxc-docker +Section: misc +Priority: extra +Maintainer: Daniel Mizyrycki +Build-Depends: debhelper,autotools-dev,devscripts,golang +Standards-Version: 3.9.3 +Homepage: http://github.com/dotcloud/docker + +Package: lxc-docker +Architecture: linux-any +Depends: ${misc:Depends},${shlibs:Depends},lxc,bsdtar +Conflicts: docker +Description: lxc-docker is a Linux container runtime + Docker complements LXC with a high-level API which operates at the process + level. It runs unix processes with strong guarantees of isolation and + repeatability across servers. + Docker is a great building block for automating distributed systems: + large-scale web deployments, database clusters, continuous deployment systems, + private PaaS, service-oriented architectures, etc. diff --git a/packaging/ubuntu/copyright b/packaging/ubuntu/copyright new file mode 100644 index 0000000000..668c8635e4 --- /dev/null +++ b/packaging/ubuntu/copyright @@ -0,0 +1,237 @@ +Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: docker +Upstream-Contact: DotCloud Inc +Source: http://github.com/dotcloud/docker + +Files: * +Copyright: 2012, DotCloud Inc +License: Apache-2.0 + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2012 DotCloud Inc + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +Files: src/github.com/kr/pty/* +Copyright: Copyright (c) 2011 Keith Rarick +License: Expat + Copyright (c) 2011 Keith Rarick + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated + documentation files (the "Software"), to deal in the + Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, + sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, + subject to the following conditions: + + The above copyright notice and this permission notice shall + be included in all copies or substantial portions of the + Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY + KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE + WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS + OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/deb/etc/docker.upstart b/packaging/ubuntu/docker.upstart similarity index 50% rename from deb/etc/docker.upstart rename to packaging/ubuntu/docker.upstart index 6cfe9d2616..07e7e8a890 100644 --- a/deb/etc/docker.upstart +++ b/packaging/ubuntu/docker.upstart @@ -5,6 +5,6 @@ stop on starting rc RUNLEVEL=[016] respawn script - test -f /etc/default/locale && . /etc/default/locale || true - LANG=$LANG LC_ALL=$LANG /usr/bin/docker -d + # FIXME: docker should not depend on the system having en_US.UTF-8 + LC_ALL='en_US.UTF-8' /usr/bin/docker -d end script diff --git a/packaging/ubuntu/docs b/packaging/ubuntu/docs new file mode 100644 index 0000000000..b43bf86b50 --- /dev/null +++ b/packaging/ubuntu/docs @@ -0,0 +1 @@ +README.md diff --git a/packaging/ubuntu/lxc-docker.postinst b/packaging/ubuntu/lxc-docker.postinst new file mode 100644 index 0000000000..5d04c5b55d --- /dev/null +++ b/packaging/ubuntu/lxc-docker.postinst @@ -0,0 +1,4 @@ +#!/bin/sh + +# Start docker +/sbin/start docker diff --git a/packaging/ubuntu/lxc-docker.prerm b/packaging/ubuntu/lxc-docker.prerm new file mode 100644 index 0000000000..824f15cff0 --- /dev/null +++ b/packaging/ubuntu/lxc-docker.prerm @@ -0,0 +1,4 @@ +#!/bin/sh + +# Stop docker +/sbin/stop docker diff --git a/packaging/ubuntu/maintainer.ubuntu b/packaging/ubuntu/maintainer.ubuntu new file mode 100644 index 0000000000..406498ebad --- /dev/null +++ b/packaging/ubuntu/maintainer.ubuntu @@ -0,0 +1,35 @@ +Maintainer duty +=============== + +Ubuntu allows 
developers to use their PPA (Personal Package Archive) +repository. This is very convenient for the users as they just need to add +the PPA address, update their package database and use the apt-get tool. + +For now, the official lxc-docker package is located on launchpad and can be +accessed adding the following line to /etc/apt/sources.list :: + + + deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main + + +Releasing a new package +~~~~~~~~~~~~~~~~~~~~~~~ + +The most relevant information to update is the changelog file: +Each new release should create a new first paragraph with new release version, +changes, and the maintainer information. + +Assuming your PPA GPG signing key is on /media/usbdrive/docker.key, load it +into the GPG_KEY environment variable with:: + + export GPG_KEY=`cat /media/usbdrive/docker.key` + + +After this is done and you are ready to upload the package to the PPA, you have +a couple of choices: + +* Follow README.debian to generate the actual source packages and upload them + to the PPA +* Let vagrant do all the work for you:: + + ( cd docker/packaging/ubuntu; vagrant up ) diff --git a/deb/debian/rules b/packaging/ubuntu/rules similarity index 100% rename from deb/debian/rules rename to packaging/ubuntu/rules diff --git a/packaging/ubuntu/source/format b/packaging/ubuntu/source/format new file mode 100644 index 0000000000..163aaf8d82 --- /dev/null +++ b/packaging/ubuntu/source/format @@ -0,0 +1 @@ +3.0 (quilt) diff --git a/rcli/tcp.go b/rcli/tcp.go index e9dba7f319..cf111cdf71 100644 --- a/rcli/tcp.go +++ b/rcli/tcp.go @@ -138,7 +138,8 @@ func ListenAndServe(proto, addr string, service Service) error { if err != nil { return err } - go func() { + go func(conn DockerConn) { + defer conn.Close() if DEBUG_FLAG { CLIENT_SOCKET = conn } @@ -146,8 +147,7 @@ func ListenAndServe(proto, addr string, service Service) error { log.Println("Error:", err.Error()) fmt.Fprintln(conn, "Error:", err.Error()) } - conn.Close() - }() + }(conn) } 
} return nil diff --git a/registry.go b/registry.go index 761fc335d3..74b166906f 100644 --- a/registry.go +++ b/registry.go @@ -7,6 +7,7 @@ import ( "io" "io/ioutil" "net/http" + "os" "path" "strings" ) @@ -97,7 +98,7 @@ func (graph *Graph) LookupRemoteImage(imgId string, authConfig *auth.AuthConfig) func (graph *Graph) getRemoteImage(stdout io.Writer, imgId string, authConfig *auth.AuthConfig) (*Image, Archive, error) { client := &http.Client{} - fmt.Fprintf(stdout, "Pulling %s metadata\n", imgId) + fmt.Fprintf(stdout, "Pulling %s metadata\r\n", imgId) // Get the Json req, err := http.NewRequest("GET", REGISTRY_ENDPOINT+"/images/"+imgId+"/json", nil) if err != nil { @@ -125,7 +126,7 @@ func (graph *Graph) getRemoteImage(stdout io.Writer, imgId string, authConfig *a img.Id = imgId // Get the layer - fmt.Fprintf(stdout, "Pulling %s fs layer\n", imgId) + fmt.Fprintf(stdout, "Pulling %s fs layer\r\n", imgId) req, err = http.NewRequest("GET", REGISTRY_ENDPOINT+"/images/"+imgId+"/layer", nil) if err != nil { return nil, nil, fmt.Errorf("Error while getting from the server: %s\n", err) @@ -135,7 +136,7 @@ func (graph *Graph) getRemoteImage(stdout io.Writer, imgId string, authConfig *a if err != nil { return nil, nil, err } - return img, ProgressReader(res.Body, int(res.ContentLength), stdout), nil + return img, ProgressReader(res.Body, int(res.ContentLength), stdout, "Downloading %v/%v (%v)"), nil } func (graph *Graph) PullImage(stdout io.Writer, imgId string, authConfig *auth.AuthConfig) error { @@ -164,7 +165,7 @@ func (graph *Graph) PullImage(stdout io.Writer, imgId string, authConfig *auth.A func (graph *Graph) PullRepository(stdout io.Writer, remote, askedTag string, repositories *TagStore, authConfig *auth.AuthConfig) error { client := &http.Client{} - fmt.Fprintf(stdout, "Pulling repository %s\n", remote) + fmt.Fprintf(stdout, "Pulling repository %s\r\n", remote) var repositoryTarget string // If we are asking for 'root' repository, lookup on the Library's 
registry @@ -196,7 +197,7 @@ func (graph *Graph) PullRepository(stdout io.Writer, remote, askedTag string, re return err } for tag, rev := range t { - fmt.Fprintf(stdout, "Pulling tag %s:%s\n", remote, tag) + fmt.Fprintf(stdout, "Pulling tag %s:%s\r\n", remote, tag) if err = graph.PullImage(stdout, rev, authConfig); err != nil { return err } @@ -223,7 +224,7 @@ func (graph *Graph) PushImage(stdout io.Writer, imgOrig *Image, authConfig *auth return fmt.Errorf("Error while retreiving the path for {%s}: %s", img.Id, err) } - fmt.Fprintf(stdout, "Pushing %s metadata\n", img.Id) + fmt.Fprintf(stdout, "Pushing %s metadata\r\n", img.Id) // FIXME: try json with UTF8 jsonData := strings.NewReader(string(jsonRaw)) @@ -253,7 +254,7 @@ func (graph *Graph) PushImage(stdout io.Writer, imgOrig *Image, authConfig *auth } } - fmt.Fprintf(stdout, "Pushing %s fs layer\n", img.Id) + fmt.Fprintf(stdout, "Pushing %s fs layer\r\n", img.Id) req2, err := http.NewRequest("PUT", REGISTRY_ENDPOINT+"/images/"+img.Id+"/layer", nil) req2.SetBasicAuth(authConfig.Username, authConfig.Password) res2, err := client.Do(req2) @@ -269,24 +270,20 @@ func (graph *Graph) PushImage(stdout io.Writer, imgOrig *Image, authConfig *auth return fmt.Errorf("Failed to retrieve layer upload location: %s", err) } - // FIXME: Don't do this :D. Check the S3 requierement and implement chunks of 5MB - // FIXME2: I won't stress it enough, DON'T DO THIS! very high priority - layerData2, err := Tar(path.Join(graph.Root, img.Id, "layer"), Xz) - tmp, err := ioutil.ReadAll(layerData2) + // FIXME: stream the archive directly to the registry instead of buffering it on disk. This requires either: + // a) Implementing S3's proprietary streaming logic, or + // b) Stream directly to the registry instead of S3. + // I prefer option b. because it doesn't lock us into a proprietary cloud service. 
+ tmpLayer, err := graph.TempLayerArchive(img.Id, Xz, stdout) if err != nil { return err } - layerLength := len(tmp) - - layerData, err := Tar(path.Join(graph.Root, img.Id, "layer"), Xz) - if err != nil { - return fmt.Errorf("Failed to generate layer archive: %s", err) - } - req3, err := http.NewRequest("PUT", url.String(), ProgressReader(layerData.(io.ReadCloser), layerLength, stdout)) + defer os.Remove(tmpLayer.Name()) + req3, err := http.NewRequest("PUT", url.String(), ProgressReader(tmpLayer, int(tmpLayer.Size), stdout, "Uploading %v/%v (%v)")) if err != nil { return err } - req3.ContentLength = int64(layerLength) + req3.ContentLength = int64(tmpLayer.Size) req3.TransferEncoding = []string{"none"} res3, err := client.Do(req3) @@ -375,15 +372,15 @@ func (graph *Graph) pushPrimitive(stdout io.Writer, remote, tag, imgId string, a // Check if the local impage exists img, err := graph.Get(imgId) if err != nil { - fmt.Fprintf(stdout, "Skipping tag %s:%s: %s does not exist\n", remote, tag, imgId) + fmt.Fprintf(stdout, "Skipping tag %s:%s: %s does not exist\r\n", remote, tag, imgId) return nil } - fmt.Fprintf(stdout, "Pushing tag %s:%s\n", remote, tag) + fmt.Fprintf(stdout, "Pushing tag %s:%s\r\n", remote, tag) // Push the image if err = graph.PushImage(stdout, img, authConfig); err != nil { return err } - fmt.Fprintf(stdout, "Registering tag %s:%s\n", remote, tag) + fmt.Fprintf(stdout, "Registering tag %s:%s\r\n", remote, tag) // And then the tag if err = graph.pushTag(remote, imgId, tag, authConfig); err != nil { return err @@ -399,7 +396,7 @@ func (graph *Graph) PushRepository(stdout io.Writer, remote string, localRepo Re return fmt.Errorf("Permission denied on repository %s\n", remote) } - fmt.Fprintf(stdout, "Pushing repository %s (%d tags)\n", remote, len(localRepo)) + fmt.Fprintf(stdout, "Pushing repository %s (%d tags)\r\n", remote, len(localRepo)) // For each image within the repo, push them for tag, imgId := range localRepo { if err := 
graph.pushPrimitive(stdout, remote, tag, imgId, authConfig); err != nil { diff --git a/runtime.go b/runtime.go index 7971fe4f48..b894a2cdaf 100644 --- a/runtime.go +++ b/runtime.go @@ -6,6 +6,7 @@ import ( "github.com/dotcloud/docker/auth" "io" "io/ioutil" + "log" "os" "os/exec" "path" @@ -14,6 +15,11 @@ import ( "time" ) +type Capabilities struct { + MemoryLimit bool + SwapLimit bool +} + type Runtime struct { root string repository string @@ -23,6 +29,8 @@ type Runtime struct { repositories *TagStore authConfig *auth.AuthConfig idIndex *TruncIndex + capabilities *Capabilities + kernelVersion *KernelVersionInfo } var sysInitPath string @@ -82,6 +90,7 @@ func (runtime *Runtime) Create(config *Config) (*Container, error) { if config.Hostname == "" { config.Hostname = id[:12] } + container := &Container{ // FIXME: we should generate the ID here instead of receiving it as an argument Id: id, @@ -100,6 +109,24 @@ func (runtime *Runtime) Create(config *Config) (*Container, error) { if err := os.Mkdir(container.root, 0700); err != nil { return nil, err } + + // If custom dns exists, then create a resolv.conf for the container + if len(config.Dns) > 0 { + container.ResolvConfPath = path.Join(container.root, "resolv.conf") + f, err := os.Create(container.ResolvConfPath) + if err != nil { + return nil, err + } + defer f.Close() + for _, dns := range config.Dns { + if _, err := f.Write([]byte("nameserver " + dns + "\n")); err != nil { + return nil, err + } + } + } else { + container.ResolvConfPath = "/etc/resolv.conf" + } + // Step 2: save the container json if err := container.ToDisk(); err != nil { return nil, err @@ -119,6 +146,9 @@ func (runtime *Runtime) Load(id string) (*Container, error) { if container.Id != id { return container, fmt.Errorf("Container %s is stored at %s", container.Id, id) } + if container.State.Running { + container.State.Ghost = true + } if err := runtime.Register(container); err != nil { return nil, err } @@ -134,6 +164,9 @@ func (runtime 
*Runtime) Register(container *Container) error { return err } + // init the wait lock + container.waitLock = make(chan struct{}) + // FIXME: if the container is supposed to be running but is not, auto restart it? // if so, then we need to restart monitor and init a new lock // If the container is supposed to be running, make sure of it @@ -150,6 +183,14 @@ func (runtime *Runtime) Register(container *Container) error { } } } + + // If the container is not running or just has been flagged not running + // then close the wait lock chan (will be reset upon start) + if !container.State.Running { + close(container.waitLock) + } + + // Even if not running, we init the lock (prevents races in start/stop/kill) container.State.initLock() container.runtime = runtime @@ -184,7 +225,7 @@ func (runtime *Runtime) Destroy(container *Container) error { return fmt.Errorf("Container %v not found - maybe it was already destroyed?", container.Id) } - if err := container.Stop(); err != nil { + if err := container.Stop(10); err != nil { return err } if mounted, err := container.Mounted(); err != nil { @@ -205,7 +246,7 @@ func (runtime *Runtime) Destroy(container *Container) error { // Commit creates a new filesystem image from the current state of a container. 
// The image can optionally be tagged into a repository -func (runtime *Runtime) Commit(id, repository, tag, comment string) (*Image, error) { +func (runtime *Runtime) Commit(id, repository, tag, comment, author string) (*Image, error) { container := runtime.Get(id) if container == nil { return nil, fmt.Errorf("No such container: %s", id) @@ -217,7 +258,7 @@ func (runtime *Runtime) Commit(id, repository, tag, comment string) (*Image, err return nil, err } // Create a new image from the container's base layers + a new layer from container changes - img, err := runtime.graph.Create(rwTar, container, comment) + img, err := runtime.graph.Create(rwTar, container, comment, author) if err != nil { return nil, err } @@ -249,7 +290,38 @@ func (runtime *Runtime) restore() error { // FIXME: harmonize with NewGraph() func NewRuntime() (*Runtime, error) { - return NewRuntimeFromDirectory("/var/lib/docker") + runtime, err := NewRuntimeFromDirectory("/var/lib/docker") + if err != nil { + return nil, err + } + + k, err := GetKernelVersion() + if err != nil { + return nil, err + } + runtime.kernelVersion = k + + if CompareKernelVersion(k, &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 { + log.Printf("WARNING: You are running linux kernel version %s, which might be unstable running docker. 
Please upgrade your kernel to 3.8.0.", k.String()) + } + + if cgroupMemoryMountpoint, err := FindCgroupMountpoint("memory"); err != nil { + log.Printf("WARNING: %s\n", err) + } else { + _, err1 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.limit_in_bytes")) + _, err2 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.soft_limit_in_bytes")) + runtime.capabilities.MemoryLimit = err1 == nil && err2 == nil + if !runtime.capabilities.MemoryLimit { + log.Printf("WARNING: Your kernel does not support cgroup memory limit.") + } + + _, err = ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.memsw.limit_in_bytes")) + runtime.capabilities.SwapLimit = err == nil + if !runtime.capabilities.SwapLimit { + log.Printf("WARNING: Your kernel does not support cgroup swap limit.") + } + } + return runtime, nil } func NewRuntimeFromDirectory(root string) (*Runtime, error) { @@ -288,6 +360,7 @@ func NewRuntimeFromDirectory(root string) (*Runtime, error) { repositories: repositories, authConfig: authConfig, idIndex: NewTruncIndex(), + capabilities: &Capabilities{}, } if err := runtime.restore(); err != nil { diff --git a/runtime_test.go b/runtime_test.go index 9ab8b9b1e7..c43e8641ea 100644 --- a/runtime_test.go +++ b/runtime_test.go @@ -1,9 +1,11 @@ package docker import ( + "fmt" "github.com/dotcloud/docker/rcli" "io" "io/ioutil" + "net" "os" "os/exec" "os/user" @@ -12,12 +14,9 @@ import ( "time" ) -// FIXME: this is no longer needed -const testLayerPath string = "/var/lib/docker/docker-ut.tar" const unitTestImageName string = "docker-ut" -var unitTestStoreBase string -var srv *Server +const unitTestStoreBase string = "/var/lib/docker/unit-tests" func nuke(runtime *Runtime) error { var wg sync.WaitGroup @@ -61,15 +60,8 @@ func init() { panic("docker tests needs to be run as root") } - // Create a temp directory - root, err := ioutil.TempDir("", "docker-test") - if err != nil { - panic(err) - } - unitTestStoreBase = root - // Make it our Store root - 
runtime, err := NewRuntimeFromDirectory(root) + runtime, err := NewRuntimeFromDirectory(unitTestStoreBase) if err != nil { panic(err) } @@ -262,6 +254,47 @@ func TestGet(t *testing.T) { } +// Run a container with a TCP port allocated, and test that it can receive connections on localhost +func TestAllocatePortLocalhost(t *testing.T) { + runtime, err := newTestRuntime() + if err != nil { + t.Fatal(err) + } + container, err := runtime.Create(&Config{ + Image: GetTestImage(runtime).Id, + Cmd: []string{"sh", "-c", "echo well hello there | nc -l -p 5555"}, + PortSpecs: []string{"5555"}, + }, + ) + if err != nil { + t.Fatal(err) + } + if err := container.Start(); err != nil { + t.Fatal(err) + } + defer container.Kill() + time.Sleep(300 * time.Millisecond) // Wait for the container to run + conn, err := net.Dial("tcp", + fmt.Sprintf( + "localhost:%s", container.NetworkSettings.PortMapping["5555"], + ), + ) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + output, err := ioutil.ReadAll(conn) + if err != nil { + t.Fatal(err) + } + if string(output) != "well hello there\n" { + t.Fatalf("Received wrong output from network connection: should be '%s', not '%s'", + "well hello there\n", + string(output), + ) + } +} + func TestRestore(t *testing.T) { root, err := ioutil.TempDir("", "docker-test") diff --git a/state.go b/state.go index 2ca7130921..f51a06b01a 100644 --- a/state.go +++ b/state.go @@ -12,11 +12,15 @@ type State struct { ExitCode int StartedAt time.Time l *sync.Mutex + Ghost bool } // String returns a human-readable description of the state func (s *State) String() string { if s.Running { + if s.Ghost { + return fmt.Sprintf("Ghost") + } return fmt.Sprintf("Up %s", HumanDuration(time.Now().Sub(s.StartedAt))) } return fmt.Sprintf("Exit %d", s.ExitCode) diff --git a/sysinit.go b/sysinit.go index a2c06239e9..4b2d6c3032 100644 --- a/sysinit.go +++ b/sysinit.go @@ -17,8 +17,7 @@ func setupNetworking(gw string) { if gw == "" { return } - cmd := 
exec.Command("/sbin/route", "add", "default", "gw", gw) - if err := cmd.Run(); err != nil { + if _, err := ip("route", "add", "default", "via", gw); err != nil { log.Fatalf("Unable to set up networking: %v", err) } } @@ -54,8 +53,7 @@ func changeUser(u string) { } // Clear environment pollution introduced by lxc-start -func cleanupEnv() { - env := os.Environ() +func cleanupEnv(env ListOpts) { os.Clearenv() for _, kv := range env { parts := strings.SplitN(kv, "=", 2) @@ -92,10 +90,13 @@ func SysInit() { var u = flag.String("u", "", "username or uid") var gw = flag.String("g", "", "gateway address") + var flEnv ListOpts + flag.Var(&flEnv, "e", "Set environment variables") + flag.Parse() + cleanupEnv(flEnv) setupNetworking(*gw) - cleanupEnv() changeUser(*u) executeProgram(flag.Arg(0), flag.Args()) } diff --git a/utils.go b/utils.go index 68e12b20bd..a039ca6eb6 100644 --- a/utils.go +++ b/utils.go @@ -12,9 +12,12 @@ import ( "os" "os/exec" "path/filepath" + "regexp" "runtime" + "strconv" "strings" "sync" + "syscall" "time" ) @@ -69,23 +72,30 @@ type progressReader struct { readTotal int // Expected stream length (bytes) readProgress int // How much has been read so far (bytes) lastUpdate int // How many bytes read at least update + template string // Template to print. 
Default "%v/%v (%v)" } func (r *progressReader) Read(p []byte) (n int, err error) { read, err := io.ReadCloser(r.reader).Read(p) r.readProgress += read - // Only update progress for every 1% read - updateEvery := int(0.01 * float64(r.readTotal)) - if r.readProgress-r.lastUpdate > updateEvery || r.readProgress == r.readTotal { - fmt.Fprintf(r.output, "%d/%d (%.0f%%)\r", - r.readProgress, - r.readTotal, - float64(r.readProgress)/float64(r.readTotal)*100) + updateEvery := 4096 + if r.readTotal > 0 { + // Only update progress for every 1% read + if increment := int(0.01 * float64(r.readTotal)); increment > updateEvery { + updateEvery = increment + } + } + if r.readProgress-r.lastUpdate > updateEvery || err != nil { + if r.readTotal > 0 { + fmt.Fprintf(r.output, r.template+"\r", r.readProgress, r.readTotal, fmt.Sprintf("%.0f%%", float64(r.readProgress)/float64(r.readTotal)*100)) + } else { + fmt.Fprintf(r.output, r.template+"\r", r.readProgress, "?", "n/a") + } r.lastUpdate = r.readProgress } // Send newline when complete - if err == io.EOF { + if err != nil { fmt.Fprintf(r.output, "\n") } @@ -94,8 +104,11 @@ func (r *progressReader) Read(p []byte) (n int, err error) { func (r *progressReader) Close() error { return io.ReadCloser(r.reader).Close() } -func ProgressReader(r io.ReadCloser, size int, output io.Writer) *progressReader { - return &progressReader{r, output, size, 0, 0} +func ProgressReader(r io.ReadCloser, size int, output io.Writer, template string) *progressReader { + if template == "" { + template = "%v/%v (%v)" + } + return &progressReader{r, output, size, 0, 0, template} } // HumanDuration returns a human-readable approximation of a duration @@ -384,3 +397,104 @@ func CopyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) } return written, err } + +type KernelVersionInfo struct { + Kernel int + Major int + Minor int + Flavor string +} + +// FIXME: this doens't build on Darwin +func GetKernelVersion() (*KernelVersionInfo, error) { + var 
uts syscall.Utsname + + if err := syscall.Uname(&uts); err != nil { + return nil, err + } + + release := make([]byte, len(uts.Release)) + + i := 0 + for _, c := range uts.Release { + release[i] = byte(c) + i++ + } + + tmp := strings.SplitN(string(release), "-", 2) + if len(tmp) != 2 { + return nil, fmt.Errorf("Unrecognized kernel version") + } + tmp2 := strings.SplitN(tmp[0], ".", 3) + if len(tmp2) != 3 { + return nil, fmt.Errorf("Unrecognized kernel version") + } + + kernel, err := strconv.Atoi(tmp2[0]) + if err != nil { + return nil, err + } + + major, err := strconv.Atoi(tmp2[1]) + if err != nil { + return nil, err + } + + minor, err := strconv.Atoi(tmp2[2]) + if err != nil { + return nil, err + } + + flavor := tmp[1] + + return &KernelVersionInfo{ + Kernel: kernel, + Major: major, + Minor: minor, + Flavor: flavor, + }, nil +} + +func (k *KernelVersionInfo) String() string { + return fmt.Sprintf("%d.%d.%d-%s", k.Kernel, k.Major, k.Minor, k.Flavor) +} + +// Compare two KernelVersionInfo struct. 
+// Returns -1 if a < b, = if a == b, 1 it a > b +func CompareKernelVersion(a, b *KernelVersionInfo) int { + if a.Kernel < b.Kernel { + return -1 + } else if a.Kernel > b.Kernel { + return 1 + } + + if a.Major < b.Major { + return -1 + } else if a.Major > b.Major { + return 1 + } + + if a.Minor < b.Minor { + return -1 + } else if a.Minor > b.Minor { + return 1 + } + + return 0 +} + +func FindCgroupMountpoint(cgroupType string) (string, error) { + output, err := exec.Command("mount").CombinedOutput() + if err != nil { + return "", err + } + + reg := regexp.MustCompile(`^cgroup on (.*) type cgroup \(.*` + cgroupType + `[,\)]`) + for _, line := range strings.Split(string(output), "\n") { + r := reg.FindStringSubmatch(line) + if len(r) == 2 { + return r[1], nil + } + } + return "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType) +} diff --git a/utils_test.go b/utils_test.go index c15084f61e..aa2a1b9682 100644 --- a/utils_test.go +++ b/utils_test.go @@ -228,3 +228,36 @@ func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult strin t.Fatalf("Getting '%s' returned '%s' instead of '%s'", input, result, expectedResult) } } + +func assertKernelVersion(t *testing.T, a, b *KernelVersionInfo, result int) { + if r := CompareKernelVersion(a, b); r != result { + t.Fatalf("Unepected kernel version comparaison result. 
Found %d, expected %d", r, result) + } +} + +func TestCompareKernelVersion(t *testing.T) { + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 0) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + &KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0}, + 1) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "0"}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "16"}, + 0) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 5}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 1) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 0, Minor: 20, Flavor: "25"}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "0"}, + -1) +} From f744cfd5a75e3b1565740c2f266dbfdf7be8727a Mon Sep 17 00:00:00 2001 From: Daniel Mizyrycki Date: Tue, 23 Apr 2013 13:51:03 -0700 Subject: [PATCH 7/7] packaging-ubuntu: update maintainer documentation for changelog file --- packaging/ubuntu/changelog | 108 +++++++++++++++++++++++++---- packaging/ubuntu/maintainer.ubuntu | 12 ++-- 2 files changed, 102 insertions(+), 18 deletions(-) diff --git a/packaging/ubuntu/changelog b/packaging/ubuntu/changelog index aa5ea6cc87..b0c691366d 100644 --- a/packaging/ubuntu/changelog +++ b/packaging/ubuntu/changelog @@ -1,30 +1,110 @@ +lxc-docker (0.1.8-1) precise; urgency=low + + - Dynamically detect cgroup capabilities + - Issue stability warning on kernels <3.8 + - 'docker push' buffers on disk instead of memory + - Fix 'docker diff' for removed files + - Fix 'docker stop' for ghost containers + - Fix handling of pidfile + - Various bugfixes and stability improvements + + -- dotCloud Mon, 22 Apr 2013 00:00:00 -0700 + + +lxc-docker (0.1.7-1) precise; urgency=low + + - Container 
ports are available on localhost + - 'docker ps' shows allocated TCP ports + - Contributors can run 'make hack' to start a continuous integration VM + - Streamline ubuntu packaging & uploading + - Various bugfixes and stability improvements + + -- dotCloud Thu, 18 Apr 2013 00:00:00 -0700 + + lxc-docker (0.1.6-1) precise; urgency=low - Improvements [+], Updates [*], Bug fixes [-]: - + Multiple improvements, updates and bug fixes + - Record the author an image with 'docker commit -author' - -- dotCloud Wed, 17 Apr 2013 20:43:43 -0700 + -- dotCloud Wed, 17 Apr 2013 00:00:00 -0700 -lxc-docker (0.1.4.1-1) precise; urgency=low +lxc-docker (0.1.5-1) precise; urgency=low - Improvements [+], Updates [*], Bug fixes [-]: - * Test PPA + - Disable standalone mode + - Use a custom DNS resolver with 'docker -d -dns' + - Detect ghost containers + - Improve diagnosis of missing system capabilities + - Allow disabling memory limits at compile time + - Add debian packaging + - Documentation: installing on Arch Linux + - Documentation: running Redis on docker + - Fixed lxc 0.9 compatibility + - Automatically load aufs module + - Various bugfixes and stability improvements - -- dotCloud Mon, 15 Apr 2013 12:14:50 -0700 + -- dotCloud Wed, 17 Apr 2013 00:00:00 -0700 lxc-docker (0.1.4-1) precise; urgency=low - Improvements [+], Updates [*], Bug fixes [-]: - * Changed default bridge interface do 'docker0' - - Fix a race condition when running the port allocator + - Full support for TTY emulation + - Detach from a TTY session with the escape sequence `C-p C-q` + - Various bugfixes and stability improvements + - Minor UI improvements + - Automatically create our own bridge interface 'docker0' - -- dotCloud Fri, 12 Apr 2013 12:20:06 -0700 + -- dotCloud Tue, 9 Apr 2013 00:00:00 -0700 -lxc-docker (0.1.0-1) unstable; urgency=low +lxc-docker (0.1.3-1) precise; urgency=low - * Initial release + - Choose TCP frontend port with '-p :PORT' + - Layer format is versioned + - Major reliability 
improvements to the process manager + - Various bugfixes and stability improvements - -- dotCloud Mon, 25 Mar 2013 05:51:12 -0700 + -- dotCloud Thu, 4 Apr 2013 00:00:00 -0700 + + +lxc-docker (0.1.2-1) precise; urgency=low + + - Set container hostname with 'docker run -h' + - Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]' + - Various bugfixes and stability improvements + - UI polish + - Progress bar on push/pull + - Use XZ compression by default + - Make IP allocator lazy + + -- dotCloud Wed, 3 Apr 2013 00:00:00 -0700 + + +lxc-docker (0.1.1-1) precise; urgency=low + + - Display shorthand IDs for convenience + - Stabilize process management + - Layers can include a commit message + - Simplified 'docker attach' + - Fixed support for re-attaching + - Various bugfixes and stability improvements + - Auto-download at run + - Auto-login on push + - Beefed up documentation + + -- dotCloud Sun, 31 Mar 2013 00:00:00 -0700 + + +lxc-docker (0.1.0-1) precise; urgency=low + + - First release + - Implement registry in order to push/pull images + - TCP port allocation + - Fix termcaps on Linux + - Add documentation + - Add Vagrant support with Vagrantfile + - Add unit tests + - Add repository/tags to ease image management + - Improve the layer implementation + + -- dotCloud Sat, 23 Mar 2013 00:00:00 -0700 diff --git a/packaging/ubuntu/maintainer.ubuntu b/packaging/ubuntu/maintainer.ubuntu index 406498ebad..07ab0a1f0e 100644 --- a/packaging/ubuntu/maintainer.ubuntu +++ b/packaging/ubuntu/maintainer.ubuntu @@ -15,9 +15,12 @@ accessed adding the following line to /etc/apt/sources.list :: Releasing a new package ~~~~~~~~~~~~~~~~~~~~~~~ -The most relevant information to update is the changelog file: +The most relevant information to update is the packaging/ubuntu/changelog file: Each new release should create a new first paragraph with new release version, -changes, and the maintainer information. +changes, and the maintainer information. 
The core of this paragraph is +located on CHANGELOG.md. Make sure to transcribe it and translate the formats +(eg: packaging/ubuntu/changelog uses 2 spaces for body change descriptions +instead of 1 space from CHANGELOG.md) Assuming your PPA GPG signing key is on /media/usbdrive/docker.key, load it into the GPG_KEY environment variable with:: @@ -28,8 +31,9 @@ into the GPG_KEY environment variable with:: After this is done and you are ready to upload the package to the PPA, you have a couple of choices: -* Follow README.debian to generate the actual source packages and upload them - to the PPA +* Follow packaging/ubuntu/README.ubuntu to generate the actual source packages + and upload them to the PPA + * Let vagrant do all the work for you:: ( cd docker/packaging/ubuntu; vagrant up )