diff --git a/.dockerignore b/.dockerignore index a03616e534..055ae7ed19 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,4 +1,9 @@ +*.egg-info +.coverage .git +.tox build -dist +coverage-html +docs/_site venv +.tox diff --git a/.gitignore b/.gitignore index da7fe7fa47..83a08a0e69 100644 --- a/.gitignore +++ b/.gitignore @@ -1,8 +1,10 @@ *.egg-info *.pyc -.tox +/.coverage +/.tox /build +/coverage-html /dist /docs/_site /venv -docker-compose.spec +README.rst diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..3fad8ddcbe --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,20 @@ +- repo: git://github.com/pre-commit/pre-commit-hooks + sha: 'v0.4.2' + hooks: + - id: check-added-large-files + - id: check-docstring-first + - id: check-merge-conflict + - id: check-yaml + - id: check-json + - id: debug-statements + - id: end-of-file-fixer + - id: flake8 + - id: name-tests-test + exclude: 'tests/integration/testcases.py' + - id: requirements-txt-fixer + - id: trailing-whitespace +- repo: git://github.com/asottile/reorder_python_imports + sha: 3d86483455ab5bd06cc1069fdd5ac57be5463f10 + hooks: + - id: reorder-python-imports + language_version: 'python2.7' diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000000..3310e2ad9f --- /dev/null +++ b/.travis.yml @@ -0,0 +1,29 @@ +sudo: required + +language: python + +services: + - docker + +matrix: + include: + - os: linux + - os: osx + language: generic + + +install: ./script/travis/install + +script: + - ./script/travis/ci + - ./script/travis/build-binary + +before_deploy: + - "./script/travis/render-bintray-config.py < ./script/travis/bintray.json.tmpl > ./bintray.json" + +deploy: + provider: bintray + user: docker-compose-roleuser + key: '$BINTRAY_API_KEY' + file: ./bintray.json + skip_cleanup: true diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000000..a123c2a44d --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,536 @@ +Change log 
+========== + +1.5.0 (2015-11-03) +------------------ + +**Breaking changes:** + +With the introduction of variable substitution support in the Compose file, any +Compose file that uses an environment variable (`$VAR` or `${VAR}`) in the `command:` +or `entrypoint:` field will break. + +Previously these values were interpolated inside the container, with a value +from the container environment. In Compose 1.5.0, the values will be +interpolated on the host, with a value from the host environment. + +To migrate a Compose file to 1.5.0, escape the variables with an extra `$` +(ex: `$$VAR` or `$${VAR}`). See +https://github.com/docker/compose/blob/8cc8e61/docs/compose-file.md#variable-substitution + +Major features: + +- Compose is now available for Windows. + +- Environment variables can be used in the Compose file. See + https://github.com/docker/compose/blob/8cc8e61/docs/compose-file.md#variable-substitution + +- Multiple compose files can be specified, allowing you to override + settings in the default Compose file. See + https://github.com/docker/compose/blob/8cc8e61/docs/reference/docker-compose.md + for more details. + +- Compose now produces better error messages when a file contains + invalid configuration. + +- `up` now waits for all services to exit before shutting down, + rather than shutting down as soon as one container exits. + +- Experimental support for the new docker networking system can be + enabled with the `--x-networking` flag. Read more here: + https://github.com/docker/docker/blob/8fee1c20/docs/userguide/dockernetworks.md + +New features: + +- You can now optionally pass a mode to `volumes_from`, e.g. + `volumes_from: ["servicename:ro"]`. + +- Since Docker now lets you create volumes with names, you can refer to those + volumes by name in `docker-compose.yml`. For example, + `volumes: ["mydatavolume:/data"]` will mount the volume named + `mydatavolume` at the path `/data` inside the container. 
+ + If the first component of an entry in `volumes` starts with a `.`, `/` or + `~`, it is treated as a path and expansion of relative paths is performed as + necessary. Otherwise, it is treated as a volume name and passed straight + through to Docker. + + Read more on named volumes and volume drivers here: + https://github.com/docker/docker/blob/244d9c33/docs/userguide/dockervolumes.md + +- `docker-compose build --pull` instructs Compose to pull the base image for + each Dockerfile before building. + +- `docker-compose pull --ignore-pull-failures` instructs Compose to continue + if it fails to pull a single service's image, rather than aborting. + +- You can now specify an IPC namespace in `docker-compose.yml` with the `ipc` + option. + +- Containers created by `docker-compose run` can now be named with the + `--name` flag. + +- If you install Compose with pip or use it as a library, it now works with + Python 3. + +- `image` now supports image digests (in addition to ids and tags), e.g. + `image: "busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d"` + +- `ports` now supports ranges of ports, e.g. + + ports: + - "3000-3005" + - "9000-9001:8000-8001" + +- `docker-compose run` now supports a `-p|--publish` parameter, much like + `docker run -p`, for publishing specific ports to the host. + +- `docker-compose pause` and `docker-compose unpause` have been implemented, + analogous to `docker pause` and `docker unpause`. + +- When using `extends` to copy configuration from another service in the same + Compose file, you can omit the `file` option. + +- Compose can be installed and run as a Docker image. This is an experimental + feature. + +Bug fixes: + +- All values for the `log_driver` option which are supported by the Docker + daemon are now supported by Compose. + +- `docker-compose build` can now be run successfully against a Swarm cluster. 
+ + + +1.4.2 (2015-09-22) +------------------ + +- Fixed a regression in the 1.4.1 release that would cause `docker-compose up` + without the `-d` option to exit immediately. + +1.4.1 (2015-09-10) +------------------ + +The following bugs have been fixed: + +- Some configuration changes (notably changes to `links`, `volumes_from`, and + `net`) were not properly triggering a container recreate as part of + `docker-compose up`. +- `docker-compose up ` was showing logs for all services instead of + just the specified services. +- Containers with custom container names were showing up in logs as + `service_number` instead of their custom container name. +- When scaling a service sometimes containers would be recreated even when + the configuration had not changed. + + +1.4.0 (2015-08-04) +------------------ + +- By default, `docker-compose up` now only recreates containers for services whose configuration has changed since they were created. This should result in a dramatic speed-up for many applications. + + The experimental `--x-smart-recreate` flag which introduced this feature in Compose 1.3.0 has been removed, and a `--force-recreate` flag has been added for when you want to recreate everything. + +- Several of Compose's commands - `scale`, `stop`, `kill` and `rm` - now perform actions on multiple containers in parallel, rather than in sequence, which will run much faster on larger applications. + +- You can now specify a custom name for a service's container with `container_name`. Because Docker container names must be unique, this means you can't scale the service beyond one container. + +- You no longer have to specify a `file` option when using `extends` - it will default to the current file. + +- Service names can now contain dots, dashes and underscores. + +- Compose can now read YAML configuration from standard input, rather than from a file, by specifying `-` as the filename. 
This makes it easier to generate configuration dynamically: + + $ echo 'redis: {"image": "redis"}' | docker-compose --file - up + +- There's a new `docker-compose version` command which prints extended information about Compose's bundled dependencies. + +- `docker-compose.yml` now supports `log_opt` as well as `log_driver`, allowing you to pass extra configuration to a service's logging driver. + +- `docker-compose.yml` now supports `memswap_limit`, similar to `docker run --memory-swap`. + +- When mounting volumes with the `volumes` option, you can now pass in any mode supported by the daemon, not just `:ro` or `:rw`. For example, SELinux users can pass `:z` or `:Z`. + +- You can now specify a custom volume driver with the `volume_driver` option in `docker-compose.yml`, much like `docker run --volume-driver`. + +- A bug has been fixed where Compose would fail to pull images from private registries serving plain (unsecured) HTTP. The `--allow-insecure-ssl` flag, which was previously used to work around this issue, has been deprecated and now has no effect. + +- A bug has been fixed where `docker-compose build` would fail if the build depended on a private Hub image or an image from a private registry. + +- A bug has been fixed where Compose would crash if there were containers which the Docker daemon had not finished removing. + +- Two bugs have been fixed where Compose would sometimes fail with a "Duplicate bind mount" error, or fail to attach volumes to a container, if there was a volume path specified in `docker-compose.yml` with a trailing slash. + +Thanks @mnowster, @dnephin, @ekristen, @funkyfuture, @jeffk and @lukemarsden! + +1.3.3 (2015-07-15) +------------------ + +Two regressions have been fixed: + +- When stopping containers gracefully, Compose was setting the timeout to 0, effectively forcing a SIGKILL every time. +- Compose would sometimes crash depending on the formatting of container data returned from the Docker API. 
+ +1.3.2 (2015-07-14) +------------------ + +The following bugs have been fixed: + +- When there were one-off containers created by running `docker-compose run` on an older version of Compose, `docker-compose run` would fail with a name collision. Compose now shows an error if you have leftover containers of this type lying around, and tells you how to remove them. +- Compose was not reading Docker authentication config files created in the new location, `~/docker/config.json`, and authentication against private registries would therefore fail. +- When a container had a pseudo-TTY attached, its output in `docker-compose up` would be truncated. +- `docker-compose up --x-smart-recreate` would sometimes fail when an image tag was updated. +- `docker-compose up` would sometimes create two containers with the same numeric suffix. +- `docker-compose rm` and `docker-compose ps` would sometimes list services that aren't part of the current project (though no containers were erroneously removed). +- Some `docker-compose` commands would not show an error if invalid service names were passed in. + +Thanks @dano, @josephpage, @kevinsimper, @lieryan, @phemmer, @soulrebel and @sschepens! + +1.3.1 (2015-06-21) +------------------ + +The following bugs have been fixed: + +- `docker-compose build` would always attempt to pull the base image before building. +- `docker-compose help migrate-to-labels` failed with an error. +- If no network mode was specified, Compose would set it to "bridge", rather than allowing the Docker daemon to use its configured default network mode. + +1.3.0 (2015-06-18) +------------------ + +Firstly, two important notes: + +- **This release contains breaking changes, and you will need to either remove or migrate your existing containers before running your app** - see the [upgrading section of the install docs](https://github.com/docker/compose/blob/1.3.0rc1/docs/install.md#upgrading) for details. + +- Compose now requires Docker 1.6.0 or later. 
+ +We've done a lot of work in this release to remove hacks and make Compose more stable: + +- Compose now uses container labels, rather than names, to keep track of containers. This makes Compose both faster and easier to integrate with your own tools. + +- Compose no longer uses "intermediate containers" when recreating containers for a service. This makes `docker-compose up` less complex and more resilient to failure. + +There are some new features: + +- `docker-compose up` has an **experimental** new behaviour: it will only recreate containers for services whose configuration has changed in `docker-compose.yml`. This will eventually become the default, but for now you can take it for a spin: + + $ docker-compose up --x-smart-recreate + +- When invoked in a subdirectory of a project, `docker-compose` will now climb up through parent directories until it finds a `docker-compose.yml`. + +Several new configuration keys have been added to `docker-compose.yml`: + +- `dockerfile`, like `docker build --file`, lets you specify an alternate Dockerfile to use with `build`. +- `labels`, like `docker run --labels`, lets you add custom metadata to containers. +- `extra_hosts`, like `docker run --add-host`, lets you add entries to a container's `/etc/hosts` file. +- `pid: host`, like `docker run --pid=host`, lets you reuse the same PID namespace as the host machine. +- `cpuset`, like `docker run --cpuset-cpus`, lets you specify which CPUs to allow execution in. +- `read_only`, like `docker run --read-only`, lets you mount a container's filesystem as read-only. +- `security_opt`, like `docker run --security-opt`, lets you specify [security options](https://docs.docker.com/reference/run/#security-configuration). +- `log_driver`, like `docker run --log-driver`, lets you specify a [log driver](https://docs.docker.com/reference/run/#logging-drivers-log-driver). 
+ +Many bugs have been fixed, including the following: + +- The output of `docker-compose run` was sometimes truncated, especially when running under Jenkins. +- A service's volumes would sometimes not update after volume configuration was changed in `docker-compose.yml`. +- Authenticating against third-party registries would sometimes fail. +- `docker-compose run --rm` would fail to remove the container if the service had a `restart` policy in place. +- `docker-compose scale` would refuse to scale a service beyond 1 container if it exposed a specific port number on the host. +- Compose would refuse to create multiple volume entries with the same host path. + +Thanks @ahromis, @albers, @aleksandr-vin, @antoineco, @ccverak, @chernjie, @dnephin, @edmorley, @fordhurley, @josephpage, @KyleJamesWalker, @lsowen, @mchasal, @noironetworks, @sdake, @sdurrheimer, @sherter, @stephenlawrence, @thaJeztah, @thieman, @turtlemonvh, @twhiteman, @vdemeester, @xuxinkun and @zwily! + +1.2.0 (2015-04-16) +------------------ + +- `docker-compose.yml` now supports an `extends` option, which enables a service to inherit configuration from another service in another configuration file. This is really good for sharing common configuration between apps, or for configuring the same app for different environments. Here's the [documentation](https://github.com/docker/compose/blob/master/docs/yml.md#extends). + +- When using Compose with a Swarm cluster, containers that depend on one another will be co-scheduled on the same node. This means that most Compose apps will now work out of the box, as long as they don't use `build`. + +- Repeated invocations of `docker-compose up` when using Compose with a Swarm cluster now work reliably. + +- Directories passed to `build`, filenames passed to `env_file` and volume host paths passed to `volumes` are now treated as relative to the *directory of the configuration file*, not the directory that `docker-compose` is being run in. 
In the majority of cases, those are the same, but if you use the `-f|--file` argument to specify a configuration file in another directory, **this is a breaking change**. + +- A service can now share another service's network namespace with `net: container:`. + +- `volumes_from` and `net: container:` entries are taken into account when resolving dependencies, so `docker-compose up ` will correctly start all dependencies of ``. + +- `docker-compose run` now accepts a `--user` argument to specify a user to run the command as, just like `docker run`. + +- The `up`, `stop` and `restart` commands now accept a `--timeout` (or `-t`) argument to specify how long to wait when attempting to gracefully stop containers, just like `docker stop`. + +- `docker-compose rm` now accepts `-f` as a shorthand for `--force`, just like `docker rm`. + +Thanks, @abesto, @albers, @alunduil, @dnephin, @funkyfuture, @gilclark, @IanVS, @KingsleyKelly, @knutwalker, @thaJeztah and @vmalloc! + +1.1.0 (2015-02-25) +------------------ + +Fig has been renamed to Docker Compose, or just Compose for short. This has several implications for you: + +- The command you type is now `docker-compose`, not `fig`. +- You should rename your fig.yml to docker-compose.yml. +- If you’re installing via PyPi, the package is now `docker-compose`, so install it with `pip install docker-compose`. + +Besides that, there’s a lot of new stuff in this release: + +- We’ve made a few small changes to ensure that Compose will work with Swarm, Docker’s new clustering tool (https://github.com/docker/swarm). Eventually you'll be able to point Compose at a Swarm cluster instead of a standalone Docker host and it’ll run your containers on the cluster with no extra work from you. As Swarm is still developing, integration is rough and lots of Compose features don't work yet. + +- `docker-compose run` now has a `--service-ports` flag for exposing ports on the given service. This is useful for e.g. 
running your webapp with an interactive debugger. + +- You can now link to containers outside your app with the `external_links` option in docker-compose.yml. + +- You can now prevent `docker-compose up` from automatically building images with the `--no-build` option. This will make fewer API calls and run faster. + +- If you don’t specify a tag when using the `image` key, Compose will default to the `latest` tag, rather than pulling all tags. + +- `docker-compose kill` now supports the `-s` flag, allowing you to specify the exact signal you want to send to a service’s containers. + +- docker-compose.yml now has an `env_file` key, analogous to `docker run --env-file`, letting you specify multiple environment variables in a separate file. This is great if you have a lot of them, or if you want to keep sensitive information out of version control. + +- docker-compose.yml now supports the `dns_search`, `cap_add`, `cap_drop`, `cpu_shares` and `restart` options, analogous to `docker run`’s `--dns-search`, `--cap-add`, `--cap-drop`, `--cpu-shares` and `--restart` options. + +- Compose now ships with Bash tab completion - see the installation and usage docs at https://github.com/docker/compose/blob/1.1.0/docs/completion.md + +- A number of bugs have been fixed - see the milestone for details: https://github.com/docker/compose/issues?q=milestone%3A1.1.0+ + +Thanks @dnephin, @squebe, @jbalonso, @raulcd, @benlangfield, @albers, @ggtools, @bersace, @dtenenba, @petercv, @drewkett, @TFenby, @paulRbr, @Aigeruth and @salehe! + +1.0.1 (2014-11-04) +------------------ + + - Added an `--allow-insecure-ssl` option to allow `fig up`, `fig run` and `fig pull` to pull from insecure registries. + - Fixed `fig run` not showing output in Jenkins. + - Fixed a bug where Fig couldn't build Dockerfiles with ADD statements pointing at URLs. 
+ +1.0.0 (2014-10-16) +------------------ + +The highlights: + + - [Fig has joined Docker.](https://www.orchardup.com/blog/orchard-is-joining-docker) Fig will continue to be maintained, but we'll also be incorporating the best bits of Fig into Docker itself. + + This means the GitHub repository has moved to [https://github.com/docker/fig](https://github.com/docker/fig) and our IRC channel is now #docker-fig on Freenode. + + - Fig can be used with the [official Docker OS X installer](https://docs.docker.com/installation/mac/). Boot2Docker will mount the home directory from your host machine so volumes work as expected. + + - Fig supports Docker 1.3. + + - It is now possible to connect to the Docker daemon using TLS by using the `DOCKER_CERT_PATH` and `DOCKER_TLS_VERIFY` environment variables. + + - There is a new `fig port` command which outputs the host port binding of a service, in a similar way to `docker port`. + + - There is a new `fig pull` command which pulls the latest images for a service. + + - There is a new `fig restart` command which restarts a service's containers. + + - Fig creates multiple containers in service by appending a number to the service name (e.g. `db_1`, `db_2`, etc). As a convenience, Fig will now give the first container an alias of the service name (e.g. `db`). + + This link alias is also a valid hostname and added to `/etc/hosts` so you can connect to linked services using their hostname. For example, instead of resolving the environment variables `DB_PORT_5432_TCP_ADDR` and `DB_PORT_5432_TCP_PORT`, you could just use the hostname `db` and port `5432` directly. + + - Volume definitions now support `ro` mode, expanding `~` and expanding environment variables. + + - `.dockerignore` is supported when building. + + - The project name can be set with the `FIG_PROJECT_NAME` environment variable. + + - The `--env` and `--entrypoint` options have been added to `fig run`. 
+ + - The Fig binary for Linux is now linked against an older version of glibc so it works on CentOS 6 and Debian Wheezy. + +Other things: + + - `fig ps` now works on Jenkins and makes fewer API calls to the Docker daemon. + - `--verbose` displays more useful debugging output. + - When starting a service where `volumes_from` points to a service without any containers running, that service will now be started. + - Lots of docs improvements. Notably, environment variables are documented and official repositories are used throughout. + +Thanks @dnephin, @d11wtq, @marksteve, @rubbish, @jbalonso, @timfreund, @alunduil, @mieciu, @shuron, @moss, @suzaku and @chmouel! Whew. + +0.5.2 (2014-07-28) +------------------ + + - Added a `--no-cache` option to `fig build`, which bypasses the cache just like `docker build --no-cache`. + - Fixed the `dns:` fig.yml option, which was causing fig to error out. + - Fixed a bug where fig couldn't start under Python 2.6. + - Fixed a log-streaming bug that occasionally caused fig to exit. + +Thanks @dnephin and @marksteve! + + +0.5.1 (2014-07-11) +------------------ + + - If a service has a command defined, `fig run [service]` with no further arguments will run it. + - The project name now defaults to the directory containing fig.yml, not the current working directory (if they're different) + - `volumes_from` now works properly with containers as well as services + - Fixed a race condition when recreating containers in `fig up` + +Thanks @ryanbrainard and @d11wtq! + + +0.5.0 (2014-07-11) +------------------ + + - Fig now starts links when you run `fig run` or `fig up`. + + For example, if you have a `web` service which depends on a `db` service, `fig run web ...` will start the `db` service. + + - Environment variables can now be resolved from the environment that Fig is running in. 
Just specify it as a blank variable in your `fig.yml` and, if set, it'll be resolved: + ``` + environment: + RACK_ENV: development + SESSION_SECRET: + ``` + + - `volumes_from` is now supported in `fig.yml`. All of the volumes from the specified services and containers will be mounted: + + ``` + volumes_from: + - service_name + - container_name + ``` + + - A host address can now be specified in `ports`: + + ``` + ports: + - "0.0.0.0:8000:8000" + - "127.0.0.1:8001:8001" + ``` + + - The `net` and `workdir` options are now supported in `fig.yml`. + - The `hostname` option now works in the same way as the Docker CLI, splitting out into a `domainname` option. + - TTY behaviour is far more robust, and resizes are supported correctly. + - Load YAML files safely. + +Thanks to @d11wtq, @ryanbrainard, @rail44, @j0hnsmith, @binarin, @Elemecca, @mozz100 and @marksteve for their help with this release! + + +0.4.2 (2014-06-18) +------------------ + + - Fix various encoding errors when using `fig run`, `fig up` and `fig build`. + +0.4.1 (2014-05-08) +------------------ + + - Add support for Docker 0.11.0. (Thanks @marksteve!) + - Make project name configurable. (Thanks @jefmathiot!) + - Return correct exit code from `fig run`. + +0.4.0 (2014-04-29) +------------------ + + - Support Docker 0.9 and 0.10 + - Display progress bars correctly when pulling images (no more ski slopes) + - `fig up` now stops all services when any container exits + - Added support for the `privileged` config option in fig.yml (thanks @kvz!) + - Shortened and aligned log prefixes in `fig up` output + - Only containers started with `fig run` link back to their own service + - Handle UTF-8 correctly when streaming `fig build/run/up` output (thanks @mauvm and @shanejonas!) + - Error message improvements + +0.3.2 (2014-03-05) +------------------ + + - Added an `--rm` option to `fig run`. (Thanks @marksteve!) + - Added an `expose` option to `fig.yml`. 
+ +0.3.1 (2014-03-04) +------------------ + + - Added contribution instructions. (Thanks @kvz!) + - Fixed `fig rm` throwing an error. + - Fixed a bug in `fig ps` on Docker 0.8.1 when there is a container with no command. + +0.3.0 (2014-03-03) +------------------ + + - We now ship binaries for OS X and Linux. No more having to install with Pip! + - Add `-f` flag to specify alternate `fig.yml` files + - Add support for custom link names + - Fix a bug where recreating would sometimes hang + - Update docker-py to support Docker 0.8.0. + - Various documentation improvements + - Various error message improvements + +Thanks @marksteve, @Gazler and @teozkr! + +0.2.2 (2014-02-17) +------------------ + + - Resolve dependencies using Cormen/Tarjan topological sort + - Fix `fig up` not printing log output + - Stop containers in reverse order to starting + - Fix scale command not binding ports + +Thanks to @barnybug and @dustinlacewell for their work on this release. + +0.2.1 (2014-02-04) +------------------ + + - General improvements to error reporting (#77, #79) + +0.2.0 (2014-01-31) +------------------ + + - Link services to themselves so run commands can access the running service. (#67) + - Much better documentation. + - Make service dependency resolution more reliable. (#48) + - Load Fig configurations with a `.yaml` extension. (#58) + +Big thanks to @cameronmaske, @mrchrisadams and @damianmoore for their help with this release. + +0.1.4 (2014-01-27) +------------------ + + - Add a link alias without the project name. This makes the environment variables a little shorter: `REDIS_1_PORT_6379_TCP_ADDR`. (#54) + +0.1.3 (2014-01-23) +------------------ + + - Fix ports sometimes being configured incorrectly. (#46) + - Fix log output sometimes not displaying. (#47) + +0.1.2 (2014-01-22) +------------------ + + - Add `-T` option to `fig run` to disable pseudo-TTY. (#34) + - Fix `fig up` requiring the ubuntu image to be pulled to recreate containers. (#33) Thanks @cameronmaske! 
+ - Improve reliability, fix arrow keys and fix a race condition in `fig run`. (#34, #39, #40) + +0.1.1 (2014-01-17) +------------------ + + - Fix bug where ports were not exposed correctly (#29). Thanks @dustinlacewell! + +0.1.0 (2014-01-16) +------------------ + + - Containers are recreated on each `fig up`, ensuring config is up-to-date with `fig.yml` (#2) + - Add `fig scale` command (#9) + - Use `DOCKER_HOST` environment variable to find Docker daemon, for consistency with the official Docker client (was previously `DOCKER_URL`) (#19) + - Truncate long commands in `fig ps` (#18) + - Fill out CLI help banners for commands (#15, #16) + - Show a friendlier error when `fig.yml` is missing (#4) + - Fix bug with `fig build` logging (#3) + - Fix bug where builds would time out if a step took a long time without generating output (#6) + - Fix bug where streaming container output over the Unix socket raised an error (#7) + +Big thanks to @tomstuart, @EnTeQuAk, @schickling, @aronasorman and @GeoffreyPlitt. + +0.0.2 (2014-01-02) +------------------ + + - Improve documentation + - Try to connect to Docker on `tcp://localdocker:4243` and a UNIX socket in addition to `localhost`. + - Improve `fig up` behaviour + - Add confirmation prompt to `fig rm` + - Add `fig build` command + +0.0.1 (2013-12-20) +------------------ + +Initial release. diff --git a/CHANGES.md b/CHANGES.md deleted file mode 100644 index 0353edc65b..0000000000 --- a/CHANGES.md +++ /dev/null @@ -1,437 +0,0 @@ -Change log -========== - -1.4.2 (2015-09-22) ------------------- - -Fixes a regression in the 1.4.1 release that would cause `docker-compose up` -without the `-d` option to exit immediately. - - -1.4.1 (2015-09-10) ------------------- - -The following bugs have been fixed: - -- Some configuration changes (notably changes to `links`, `volumes_from`, and - `net`) were not properly triggering a container recreate as part of - `docker-compose up`. 
-- `docker-compose up ` was showing logs for all services instead of - just the specified services. -- Containers with custom container names were showing up in logs as - `service_number` instead of their custom container name. -- When scaling a service sometimes containers would be recreated even when - the configuration had not changed. - - -1.4.0 (2015-08-04) ------------------- - -- By default, `docker-compose up` now only recreates containers for services whose configuration has changed since they were created. This should result in a dramatic speed-up for many applications. - - The experimental `--x-smart-recreate` flag which introduced this feature in Compose 1.3.0 has been removed, and a `--force-recreate` flag has been added for when you want to recreate everything. - -- Several of Compose's commands - `scale`, `stop`, `kill` and `rm` - now perform actions on multiple containers in parallel, rather than in sequence, which will run much faster on larger applications. - -- You can now specify a custom name for a service's container with `container_name`. Because Docker container names must be unique, this means you can't scale the service beyond one container. - -- You no longer have to specify a `file` option when using `extends` - it will default to the current file. - -- Service names can now contain dots, dashes and underscores. - -- Compose can now read YAML configuration from standard input, rather than from a file, by specifying `-` as the filename. This makes it easier to generate configuration dynamically: - - $ echo 'redis: {"image": "redis"}' | docker-compose --file - up - -- There's a new `docker-compose version` command which prints extended information about Compose's bundled dependencies. - -- `docker-compose.yml` now supports `log_opt` as well as `log_driver`, allowing you to pass extra configuration to a service's logging driver. - -- `docker-compose.yml` now supports `memswap_limit`, similar to `docker run --memory-swap`. 
- -- When mounting volumes with the `volumes` option, you can now pass in any mode supported by the daemon, not just `:ro` or `:rw`. For example, SELinux users can pass `:z` or `:Z`. - -- You can now specify a custom volume driver with the `volume_driver` option in `docker-compose.yml`, much like `docker run --volume-driver`. - -- A bug has been fixed where Compose would fail to pull images from private registries serving plain (unsecured) HTTP. The `--allow-insecure-ssl` flag, which was previously used to work around this issue, has been deprecated and now has no effect. - -- A bug has been fixed where `docker-compose build` would fail if the build depended on a private Hub image or an image from a private registry. - -- A bug has been fixed where Compose would crash if there were containers which the Docker daemon had not finished removing. - -- Two bugs have been fixed where Compose would sometimes fail with a "Duplicate bind mount" error, or fail to attach volumes to a container, if there was a volume path specified in `docker-compose.yml` with a trailing slash. - -Thanks @mnowster, @dnephin, @ekristen, @funkyfuture, @jeffk and @lukemarsden! - -1.3.3 (2015-07-15) ------------------- - -Two regressions have been fixed: - -- When stopping containers gracefully, Compose was setting the timeout to 0, effectively forcing a SIGKILL every time. -- Compose would sometimes crash depending on the formatting of container data returned from the Docker API. - -1.3.2 (2015-07-14) ------------------- - -The following bugs have been fixed: - -- When there were one-off containers created by running `docker-compose run` on an older version of Compose, `docker-compose run` would fail with a name collision. Compose now shows an error if you have leftover containers of this type lying around, and tells you how to remove them. 
-- Compose was not reading Docker authentication config files created in the new location, `~/docker/config.json`, and authentication against private registries would therefore fail. -- When a container had a pseudo-TTY attached, its output in `docker-compose up` would be truncated. -- `docker-compose up --x-smart-recreate` would sometimes fail when an image tag was updated. -- `docker-compose up` would sometimes create two containers with the same numeric suffix. -- `docker-compose rm` and `docker-compose ps` would sometimes list services that aren't part of the current project (though no containers were erroneously removed). -- Some `docker-compose` commands would not show an error if invalid service names were passed in. - -Thanks @dano, @josephpage, @kevinsimper, @lieryan, @phemmer, @soulrebel and @sschepens! - -1.3.1 (2015-06-21) ------------------- - -The following bugs have been fixed: - -- `docker-compose build` would always attempt to pull the base image before building. -- `docker-compose help migrate-to-labels` failed with an error. -- If no network mode was specified, Compose would set it to "bridge", rather than allowing the Docker daemon to use its configured default network mode. - -1.3.0 (2015-06-18) ------------------- - -Firstly, two important notes: - -- **This release contains breaking changes, and you will need to either remove or migrate your existing containers before running your app** - see the [upgrading section of the install docs](https://github.com/docker/compose/blob/1.3.0rc1/docs/install.md#upgrading) for details. - -- Compose now requires Docker 1.6.0 or later. - -We've done a lot of work in this release to remove hacks and make Compose more stable: - -- Compose now uses container labels, rather than names, to keep track of containers. This makes Compose both faster and easier to integrate with your own tools. - -- Compose no longer uses "intermediate containers" when recreating containers for a service. 
This makes `docker-compose up` less complex and more resilient to failure. - -There are some new features: - -- `docker-compose up` has an **experimental** new behaviour: it will only recreate containers for services whose configuration has changed in `docker-compose.yml`. This will eventually become the default, but for now you can take it for a spin: - - $ docker-compose up --x-smart-recreate - -- When invoked in a subdirectory of a project, `docker-compose` will now climb up through parent directories until it finds a `docker-compose.yml`. - -Several new configuration keys have been added to `docker-compose.yml`: - -- `dockerfile`, like `docker build --file`, lets you specify an alternate Dockerfile to use with `build`. -- `labels`, like `docker run --labels`, lets you add custom metadata to containers. -- `extra_hosts`, like `docker run --add-host`, lets you add entries to a container's `/etc/hosts` file. -- `pid: host`, like `docker run --pid=host`, lets you reuse the same PID namespace as the host machine. -- `cpuset`, like `docker run --cpuset-cpus`, lets you specify which CPUs to allow execution in. -- `read_only`, like `docker run --read-only`, lets you mount a container's filesystem as read-only. -- `security_opt`, like `docker run --security-opt`, lets you specify [security options](https://docs.docker.com/reference/run/#security-configuration). -- `log_driver`, like `docker run --log-driver`, lets you specify a [log driver](https://docs.docker.com/reference/run/#logging-drivers-log-driver). - -Many bugs have been fixed, including the following: - -- The output of `docker-compose run` was sometimes truncated, especially when running under Jenkins. -- A service's volumes would sometimes not update after volume configuration was changed in `docker-compose.yml`. -- Authenticating against third-party registries would sometimes fail. -- `docker-compose run --rm` would fail to remove the container if the service had a `restart` policy in place. 
-- `docker-compose scale` would refuse to scale a service beyond 1 container if it exposed a specific port number on the host. -- Compose would refuse to create multiple volume entries with the same host path. - -Thanks @ahromis, @albers, @aleksandr-vin, @antoineco, @ccverak, @chernjie, @dnephin, @edmorley, @fordhurley, @josephpage, @KyleJamesWalker, @lsowen, @mchasal, @noironetworks, @sdake, @sdurrheimer, @sherter, @stephenlawrence, @thaJeztah, @thieman, @turtlemonvh, @twhiteman, @vdemeester, @xuxinkun and @zwily! - -1.2.0 (2015-04-16) ------------------ - -- `docker-compose.yml` now supports an `extends` option, which enables a service to inherit configuration from another service in another configuration file. This is really good for sharing common configuration between apps, or for configuring the same app for different environments. Here's the [documentation](https://github.com/docker/compose/blob/master/docs/yml.md#extends). - -- When using Compose with a Swarm cluster, containers that depend on one another will be co-scheduled on the same node. This means that most Compose apps will now work out of the box, as long as they don't use `build`. - -- Repeated invocations of `docker-compose up` when using Compose with a Swarm cluster now work reliably. - -- Directories passed to `build`, filenames passed to `env_file` and volume host paths passed to `volumes` are now treated as relative to the *directory of the configuration file*, not the directory that `docker-compose` is being run in. In the majority of cases, those are the same, but if you use the `-f|--file` argument to specify a configuration file in another directory, **this is a breaking change**. - -- A service can now share another service's network namespace with `net: container:<name>`. - -- `volumes_from` and `net: container:<name>` entries are taken into account when resolving dependencies, so `docker-compose up <service>` will correctly start all dependencies of `<service>`. 
- -- `docker-compose run` now accepts a `--user` argument to specify a user to run the command as, just like `docker run`. - -- The `up`, `stop` and `restart` commands now accept a `--timeout` (or `-t`) argument to specify how long to wait when attempting to gracefully stop containers, just like `docker stop`. - -- `docker-compose rm` now accepts `-f` as a shorthand for `--force`, just like `docker rm`. - -Thanks, @abesto, @albers, @alunduil, @dnephin, @funkyfuture, @gilclark, @IanVS, @KingsleyKelly, @knutwalker, @thaJeztah and @vmalloc! - -1.1.0 (2015-02-25) ------------------- - -Fig has been renamed to Docker Compose, or just Compose for short. This has several implications for you: - -- The command you type is now `docker-compose`, not `fig`. -- You should rename your fig.yml to docker-compose.yml. -- If you’re installing via PyPi, the package is now `docker-compose`, so install it with `pip install docker-compose`. - -Besides that, there’s a lot of new stuff in this release: - -- We’ve made a few small changes to ensure that Compose will work with Swarm, Docker’s new clustering tool (https://github.com/docker/swarm). Eventually you'll be able to point Compose at a Swarm cluster instead of a standalone Docker host and it’ll run your containers on the cluster with no extra work from you. As Swarm is still developing, integration is rough and lots of Compose features don't work yet. - -- `docker-compose run` now has a `--service-ports` flag for exposing ports on the given service. This is useful for e.g. running your webapp with an interactive debugger. - -- You can now link to containers outside your app with the `external_links` option in docker-compose.yml. - -- You can now prevent `docker-compose up` from automatically building images with the `--no-build` option. This will make fewer API calls and run faster. - -- If you don’t specify a tag when using the `image` key, Compose will default to the `latest` tag, rather than pulling all tags. 
- -- `docker-compose kill` now supports the `-s` flag, allowing you to specify the exact signal you want to send to a service’s containers. - -- docker-compose.yml now has an `env_file` key, analogous to `docker run --env-file`, letting you specify multiple environment variables in a separate file. This is great if you have a lot of them, or if you want to keep sensitive information out of version control. - -- docker-compose.yml now supports the `dns_search`, `cap_add`, `cap_drop`, `cpu_shares` and `restart` options, analogous to `docker run`’s `--dns-search`, `--cap-add`, `--cap-drop`, `--cpu-shares` and `--restart` options. - -- Compose now ships with Bash tab completion - see the installation and usage docs at https://github.com/docker/compose/blob/1.1.0/docs/completion.md - -- A number of bugs have been fixed - see the milestone for details: https://github.com/docker/compose/issues?q=milestone%3A1.1.0+ - -Thanks @dnephin, @squebe, @jbalonso, @raulcd, @benlangfield, @albers, @ggtools, @bersace, @dtenenba, @petercv, @drewkett, @TFenby, @paulRbr, @Aigeruth and @salehe! - -1.0.1 (2014-11-04) ------------------- - - - Added an `--allow-insecure-ssl` option to allow `fig up`, `fig run` and `fig pull` to pull from insecure registries. - - Fixed `fig run` not showing output in Jenkins. - - Fixed a bug where Fig couldn't build Dockerfiles with ADD statements pointing at URLs. - -1.0.0 (2014-10-16) ------------------- - -The highlights: - - - [Fig has joined Docker.](https://www.orchardup.com/blog/orchard-is-joining-docker) Fig will continue to be maintained, but we'll also be incorporating the best bits of Fig into Docker itself. - - This means the GitHub repository has moved to [https://github.com/docker/fig](https://github.com/docker/fig) and our IRC channel is now #docker-fig on Freenode. - - - Fig can be used with the [official Docker OS X installer](https://docs.docker.com/installation/mac/). 
Boot2Docker will mount the home directory from your host machine so volumes work as expected. - - - Fig supports Docker 1.3. - - - It is now possible to connect to the Docker daemon using TLS by using the `DOCKER_CERT_PATH` and `DOCKER_TLS_VERIFY` environment variables. - - - There is a new `fig port` command which outputs the host port binding of a service, in a similar way to `docker port`. - - - There is a new `fig pull` command which pulls the latest images for a service. - - - There is a new `fig restart` command which restarts a service's containers. - - - Fig creates multiple containers in service by appending a number to the service name (e.g. `db_1`, `db_2`, etc). As a convenience, Fig will now give the first container an alias of the service name (e.g. `db`). - - This link alias is also a valid hostname and added to `/etc/hosts` so you can connect to linked services using their hostname. For example, instead of resolving the environment variables `DB_PORT_5432_TCP_ADDR` and `DB_PORT_5432_TCP_PORT`, you could just use the hostname `db` and port `5432` directly. - - - Volume definitions now support `ro` mode, expanding `~` and expanding environment variables. - - - `.dockerignore` is supported when building. - - - The project name can be set with the `FIG_PROJECT_NAME` environment variable. - - - The `--env` and `--entrypoint` options have been added to `fig run`. - - - The Fig binary for Linux is now linked against an older version of glibc so it works on CentOS 6 and Debian Wheezy. - -Other things: - - - `fig ps` now works on Jenkins and makes fewer API calls to the Docker daemon. - - `--verbose` displays more useful debugging output. - - When starting a service where `volumes_from` points to a service without any containers running, that service will now be started. - - Lots of docs improvements. Notably, environment variables are documented and official repositories are used throughout. 
- -Thanks @dnephin, @d11wtq, @marksteve, @rubbish, @jbalonso, @timfreund, @alunduil, @mieciu, @shuron, @moss, @suzaku and @chmouel! Whew. - -0.5.2 (2014-07-28) ------------------- - - - Added a `--no-cache` option to `fig build`, which bypasses the cache just like `docker build --no-cache`. - - Fixed the `dns:` fig.yml option, which was causing fig to error out. - - Fixed a bug where fig couldn't start under Python 2.6. - - Fixed a log-streaming bug that occasionally caused fig to exit. - -Thanks @dnephin and @marksteve! - - -0.5.1 (2014-07-11) ------------------- - - - If a service has a command defined, `fig run [service]` with no further arguments will run it. - - The project name now defaults to the directory containing fig.yml, not the current working directory (if they're different) - - `volumes_from` now works properly with containers as well as services - - Fixed a race condition when recreating containers in `fig up` - -Thanks @ryanbrainard and @d11wtq! - - -0.5.0 (2014-07-11) ------------------- - - - Fig now starts links when you run `fig run` or `fig up`. - - For example, if you have a `web` service which depends on a `db` service, `fig run web ...` will start the `db` service. - - - Environment variables can now be resolved from the environment that Fig is running in. Just specify it as a blank variable in your `fig.yml` and, if set, it'll be resolved: - ``` - environment: - RACK_ENV: development - SESSION_SECRET: - ``` - - - `volumes_from` is now supported in `fig.yml`. All of the volumes from the specified services and containers will be mounted: - - ``` - volumes_from: - - service_name - - container_name - ``` - - - A host address can now be specified in `ports`: - - ``` - ports: - - "0.0.0.0:8000:8000" - - "127.0.0.1:8001:8001" - ``` - - - The `net` and `workdir` options are now supported in `fig.yml`. - - The `hostname` option now works in the same way as the Docker CLI, splitting out into a `domainname` option. 
- - TTY behaviour is far more robust, and resizes are supported correctly. - - Load YAML files safely. - -Thanks to @d11wtq, @ryanbrainard, @rail44, @j0hnsmith, @binarin, @Elemecca, @mozz100 and @marksteve for their help with this release! - - -0.4.2 (2014-06-18) ------------------- - - - Fix various encoding errors when using `fig run`, `fig up` and `fig build`. - -0.4.1 (2014-05-08) ------------------- - - - Add support for Docker 0.11.0. (Thanks @marksteve!) - - Make project name configurable. (Thanks @jefmathiot!) - - Return correct exit code from `fig run`. - -0.4.0 (2014-04-29) ------------------- - - - Support Docker 0.9 and 0.10 - - Display progress bars correctly when pulling images (no more ski slopes) - - `fig up` now stops all services when any container exits - - Added support for the `privileged` config option in fig.yml (thanks @kvz!) - - Shortened and aligned log prefixes in `fig up` output - - Only containers started with `fig run` link back to their own service - - Handle UTF-8 correctly when streaming `fig build/run/up` output (thanks @mauvm and @shanejonas!) - - Error message improvements - -0.3.2 (2014-03-05) ------------------- - - - Added an `--rm` option to `fig run`. (Thanks @marksteve!) - - Added an `expose` option to `fig.yml`. - -0.3.1 (2014-03-04) ------------------- - - - Added contribution instructions. (Thanks @kvz!) - - Fixed `fig rm` throwing an error. - - Fixed a bug in `fig ps` on Docker 0.8.1 when there is a container with no command. - -0.3.0 (2014-03-03) ------------------- - - - We now ship binaries for OS X and Linux. No more having to install with Pip! - - Add `-f` flag to specify alternate `fig.yml` files - - Add support for custom link names - - Fix a bug where recreating would sometimes hang - - Update docker-py to support Docker 0.8.0. - - Various documentation improvements - - Various error message improvements - -Thanks @marksteve, @Gazler and @teozkr! 
- -0.2.2 (2014-02-17) ------------------- - - - Resolve dependencies using Cormen/Tarjan topological sort - - Fix `fig up` not printing log output - - Stop containers in reverse order to starting - - Fix scale command not binding ports - -Thanks to @barnybug and @dustinlacewell for their work on this release. - -0.2.1 (2014-02-04) ------------------- - - - General improvements to error reporting (#77, #79) - -0.2.0 (2014-01-31) ------------------- - - - Link services to themselves so run commands can access the running service. (#67) - - Much better documentation. - - Make service dependency resolution more reliable. (#48) - - Load Fig configurations with a `.yaml` extension. (#58) - -Big thanks to @cameronmaske, @mrchrisadams and @damianmoore for their help with this release. - -0.1.4 (2014-01-27) ------------------- - - - Add a link alias without the project name. This makes the environment variables a little shorter: `REDIS_1_PORT_6379_TCP_ADDR`. (#54) - -0.1.3 (2014-01-23) ------------------- - - - Fix ports sometimes being configured incorrectly. (#46) - - Fix log output sometimes not displaying. (#47) - -0.1.2 (2014-01-22) ------------------- - - - Add `-T` option to `fig run` to disable pseudo-TTY. (#34) - - Fix `fig up` requiring the ubuntu image to be pulled to recreate containers. (#33) Thanks @cameronmaske! - - Improve reliability, fix arrow keys and fix a race condition in `fig run`. (#34, #39, #40) - -0.1.1 (2014-01-17) ------------------- - - - Fix bug where ports were not exposed correctly (#29). Thanks @dustinlacewell! 
- -0.1.0 (2014-01-16) ------------------- - - - Containers are recreated on each `fig up`, ensuring config is up-to-date with `fig.yml` (#2) - - Add `fig scale` command (#9) - - Use `DOCKER_HOST` environment variable to find Docker daemon, for consistency with the official Docker client (was previously `DOCKER_URL`) (#19) - - Truncate long commands in `fig ps` (#18) - - Fill out CLI help banners for commands (#15, #16) - - Show a friendlier error when `fig.yml` is missing (#4) - - Fix bug with `fig build` logging (#3) - - Fix bug where builds would time out if a step took a long time without generating output (#6) - - Fix bug where streaming container output over the Unix socket raised an error (#7) - -Big thanks to @tomstuart, @EnTeQuAk, @schickling, @aronasorman and @GeoffreyPlitt. - -0.0.2 (2014-01-02) ------------------- - - - Improve documentation - - Try to connect to Docker on `tcp://localdocker:4243` and a UNIX socket in addition to `localhost`. - - Improve `fig up` behaviour - - Add confirmation prompt to `fig rm` - - Add `fig build` command - -0.0.1 (2013-12-20) ------------------- - -Initial release. - - diff --git a/CHANGES.md b/CHANGES.md new file mode 120000 index 0000000000..83b694704b --- /dev/null +++ b/CHANGES.md @@ -0,0 +1 @@ +CHANGELOG.md \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c9188ac98a..62bf415c7e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -30,6 +30,17 @@ that should get you started. `docker-compose` from anywhere on your machine, it will run your development version of Compose. +## Install pre-commit hooks + +This step is optional, but recommended. Pre-commit hooks will run style checks +and in some cases fix style issues for you, when you commit code. + +Install the git pre-commit hooks using [tox](https://tox.readthedocs.org) by +running `tox -e pre-commit` or by following the +[pre-commit install guide](http://pre-commit.com/#install). 
+ +To run the style checks at any time run `tox -e pre-commit`. + ## Submitting a pull request See Docker's [basic contribution workflow](https://docs.docker.com/project/make-a-contribution/#the-basic-contribution-workflow) for a guide on how to submit a pull request for code or documentation. @@ -53,11 +64,11 @@ you can specify a test directory, file, module, class or method: $ script/test tests/unit $ script/test tests/unit/cli_test.py - $ script/test tests.integration.service_test - $ script/test tests.integration.service_test:ServiceTest.test_containers + $ script/test tests/unit/config_test.py::ConfigTest + $ script/test tests/unit/config_test.py::ConfigTest::test_load ## Finding things to work on -We use a [Waffle.io board](https://waffle.io/docker/compose) to keep track of specific things we are working on and planning to work on. If you're looking for things to work on, stuff in the backlog is a great place to start. +We use a [ZenHub board](https://www.zenhub.io/) to keep track of specific things we are working on and planning to work on. If you're looking for things to work on, stuff in the backlog is a great place to start. For more information about our project planning, take a look at our [GitHub wiki](https://github.com/docker/compose/wiki). 
diff --git a/Dockerfile b/Dockerfile index 7c0482323b..c6dbdefd66 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,20 +3,23 @@ FROM debian:wheezy RUN set -ex; \ apt-get update -qq; \ apt-get install -y \ + locales \ gcc \ make \ zlib1g \ zlib1g-dev \ libssl-dev \ git \ - apt-transport-https \ ca-certificates \ curl \ - lxc \ - iptables \ + libsqlite3-dev \ ; \ rm -rf /var/lib/apt/lists/* +RUN curl https://get.docker.com/builds/Linux/x86_64/docker-latest \ + -o /usr/local/bin/docker && \ + chmod +x /usr/local/bin/docker + # Build Python 2.7.9 from source RUN set -ex; \ curl -LO https://www.python.org/ftp/python/2.7.9/Python-2.7.9.tgz; \ @@ -29,6 +32,18 @@ RUN set -ex; \ rm -rf /Python-2.7.9; \ rm Python-2.7.9.tgz +# Build python 3.4 from source +RUN set -ex; \ + curl -LO https://www.python.org/ftp/python/3.4.3/Python-3.4.3.tgz; \ + tar -xzf Python-3.4.3.tgz; \ + cd Python-3.4.3; \ + ./configure --enable-shared; \ + make; \ + make install; \ + cd ..; \ + rm -rf /Python-3.4.3; \ + rm Python-3.4.3.tgz + # Make libpython findable ENV LD_LIBRARY_PATH /usr/local/lib @@ -48,29 +63,24 @@ RUN set -ex; \ rm -rf pip-7.0.1; \ rm pip-7.0.1.tar.gz -ENV ALL_DOCKER_VERSIONS 1.7.1 1.8.0-rc3 - -RUN set -ex; \ - curl https://get.docker.com/builds/Linux/x86_64/docker-1.7.1 -o /usr/local/bin/docker-1.7.1; \ - chmod +x /usr/local/bin/docker-1.7.1; \ - curl https://test.docker.com/builds/Linux/x86_64/docker-1.8.0-rc3 -o /usr/local/bin/docker-1.8.0-rc3; \ - chmod +x /usr/local/bin/docker-1.8.0-rc3 - -# Set the default Docker to be run -RUN ln -s /usr/local/bin/docker-1.7.1 /usr/local/bin/docker +# Python3 requires a valid locale +RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen +ENV LANG en_US.UTF-8 RUN useradd -d /home/user -m -s /bin/bash user WORKDIR /code/ -ADD requirements.txt /code/ -RUN pip install -r requirements.txt +RUN pip install tox==2.1.1 +ADD requirements.txt /code/ ADD requirements-dev.txt /code/ -RUN pip install -r requirements-dev.txt +ADD 
.pre-commit-config.yaml /code/ +ADD setup.py /code/ +ADD tox.ini /code/ +ADD compose /code/compose/ +RUN tox --notest ADD . /code/ -RUN python setup.py install - RUN chown -R user /code/ -ENTRYPOINT ["/usr/local/bin/docker-compose"] +ENTRYPOINT ["/code/.tox/py27/bin/docker-compose"] diff --git a/Dockerfile.run b/Dockerfile.run new file mode 100644 index 0000000000..9f3745fefc --- /dev/null +++ b/Dockerfile.run @@ -0,0 +1,13 @@ + +FROM alpine:edge +RUN apk -U add \ + python \ + py-pip + +COPY requirements.txt /code/requirements.txt +RUN pip install -r /code/requirements.txt + +ADD dist/docker-compose-release.tar.gz /code/docker-compose +RUN pip install /code/docker-compose/docker-compose-* + +ENTRYPOINT ["/usr/bin/docker-compose"] diff --git a/MANIFEST.in b/MANIFEST.in index 6c756417e0..0342e35bea 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -4,6 +4,9 @@ include requirements.txt include requirements-dev.txt include tox.ini include *.md +exclude README.md +include README.rst +include compose/config/*.json recursive-include contrib/completion * recursive-include tests * global-exclude *.pyc diff --git a/README.md b/README.md index 7121f6a2d9..4c967aebcc 100644 --- a/README.md +++ b/README.md @@ -1,14 +1,16 @@ Docker Compose ============== -*(Previously known as Fig)* +![Docker Compose](logo.png?raw=true "Docker Compose Logo") -Compose is a tool for defining and running multi-container applications with -Docker. With Compose, you define a multi-container application in a single -file, then spin your application up in a single command which does everything -that needs to be done to get it running. +Compose is a tool for defining and running multi-container Docker applications. +With Compose, you use a Compose file to configure your application's services. +Then, using a single command, you create and start all the services +from your configuration. To learn more about all the features of Compose +see [the list of features](docs/index.md#features). 
-Compose is great for development environments, staging servers, and CI. We don't -recommend that you use it in production yet. +Compose is great for development, testing, and staging environments, as well as +CI workflows. You can learn more about each case in +[Common Use Cases](#common-use-cases). Using Compose is basically a three-step process. @@ -31,6 +33,9 @@ A `docker-compose.yml` looks like this: redis: image: redis +For more information about the Compose file, see the +[Compose file reference](docs/compose-file.md) + Compose has commands for managing the whole lifecycle of your application: * Start, stop and rebuild services @@ -54,4 +59,4 @@ Want to help build Compose? Check out our [contributing documentation](https://g Releasing --------- -Releases are built by maintainers, following an outline of the [release process](https://github.com/docker/compose/blob/master/RELEASE_PROCESS.md). \ No newline at end of file +Releases are built by maintainers, following an outline of the [release process](https://github.com/docker/compose/blob/master/project/RELEASE-PROCESS.md). diff --git a/RELEASE_PROCESS.md b/RELEASE_PROCESS.md deleted file mode 100644 index 86522faaf3..0000000000 --- a/RELEASE_PROCESS.md +++ /dev/null @@ -1,36 +0,0 @@ -# Building a Compose release - -## Building binaries - -`script/build-linux` builds the Linux binary inside a Docker container: - - $ script/build-linux - -`script/build-osx` builds the Mac OS X binary inside a virtualenv: - - $ script/build-osx - -For official releases, you should build inside a Mountain Lion VM for proper -compatibility. Run the this script first to prepare the environment before -building - it will use Homebrew to make sure Python is installed and -up-to-date. - - $ script/prepare-osx - -## Release process - -1. Open pull request that: - - Updates the version in `compose/__init__.py` - - Updates the binary URL in `docs/install.md` - - Adds release notes to `CHANGES.md` -2. 
Create unpublished GitHub release with release notes -3. Build Linux version on any Docker host with `script/build-linux` and attach - to release -4. Build OS X version on Mountain Lion with `script/build-osx` and attach to - release as `docker-compose-Darwin-x86_64` and `docker-compose-Linux-x86_64`. -5. Publish GitHub release, creating tag -6. Update website with `script/deploy-docs` -7. Upload PyPi package - - $ git checkout $VERSION - $ python setup.py sdist upload diff --git a/appveyor.yml b/appveyor.yml new file mode 100644 index 0000000000..b162db1e3a --- /dev/null +++ b/appveyor.yml @@ -0,0 +1,28 @@ + +version: '{branch}-{build}' + +install: + - "SET PATH=C:\\Python27-x64;C:\\Python27-x64\\Scripts;%PATH%" + - "python --version" + - "pip install tox==2.1.1 virtualenv==13.1.2" + +# Build the binary after tests +build: false + +environment: + BINTRAY_USER: "docker-compose-roleuser" + BINTRAY_PATH: "docker-compose/master/windows/master/docker-compose-Windows-x86_64.exe" + +test_script: + - "tox -e py27,py34 -- tests/unit" + - ps: ".\\script\\build-windows.ps1" + +deploy_script: + - "curl -sS + -u \"%BINTRAY_USER%:%BINTRAY_API_KEY%\" + -X PUT \"https://api.bintray.com/content/%BINTRAY_PATH%?override=1&publish=1\" + --data-binary @dist\\docker-compose-Windows-x86_64.exe" + +artifacts: + - path: .\dist\docker-compose-Windows-x86_64.exe + name: "Compose Windows binary" diff --git a/compose/__init__.py b/compose/__init__.py index af2bdbf242..2b8d5e72b2 100644 --- a/compose/__init__.py +++ b/compose/__init__.py @@ -1,3 +1,3 @@ from __future__ import unicode_literals -__version__ = '1.4.2' +__version__ = '1.5.0' diff --git a/compose/cli/command.py b/compose/cli/command.py index 204ed52710..525217ee75 100644 --- a/compose/cli/command.py +++ b/compose/cli/command.py @@ -1,102 +1,118 @@ -from __future__ import unicode_literals from __future__ import absolute_import -from requests.exceptions import ConnectionError, SSLError +from __future__ import unicode_literals + 
+import contextlib import logging import os import re -import six +import six +from requests.exceptions import ConnectionError +from requests.exceptions import SSLError + +from . import errors +from . import verbose_proxy +from .. import __version__ from .. import config from ..project import Project from ..service import ConfigError -from .docopt_command import DocoptCommand -from .utils import call_silently, is_mac, is_ubuntu from .docker_client import docker_client -from . import verbose_proxy -from . import errors -from .. import __version__ +from .utils import call_silently +from .utils import is_mac +from .utils import is_ubuntu log = logging.getLogger(__name__) -class Command(DocoptCommand): - base_dir = '.' - - def dispatch(self, *args, **kwargs): - try: - super(Command, self).dispatch(*args, **kwargs) - except SSLError as e: - raise errors.UserError('SSL error: %s' % e) - except ConnectionError: - if call_silently(['which', 'docker']) != 0: - if is_mac(): - raise errors.DockerNotFoundMac() - elif is_ubuntu(): - raise errors.DockerNotFoundUbuntu() - else: - raise errors.DockerNotFoundGeneric() - elif call_silently(['which', 'boot2docker']) == 0: - raise errors.ConnectionErrorBoot2Docker() +@contextlib.contextmanager +def friendly_error_message(): + try: + yield + except SSLError as e: + raise errors.UserError('SSL error: %s' % e) + except ConnectionError: + if call_silently(['which', 'docker']) != 0: + if is_mac(): + raise errors.DockerNotFoundMac() + elif is_ubuntu(): + raise errors.DockerNotFoundUbuntu() else: - raise errors.ConnectionErrorGeneric(self.get_client().base_url) + raise errors.DockerNotFoundGeneric() + elif call_silently(['which', 'docker-machine']) == 0: + raise errors.ConnectionErrorDockerMachine() + else: + raise errors.ConnectionErrorGeneric(get_client().base_url) - def perform_command(self, options, handler, command_options): - if options['COMMAND'] in ('help', 'version'): - # Skip looking up the compose file. 
- handler(None, command_options) - return - if 'FIG_FILE' in os.environ: - log.warn('The FIG_FILE environment variable is deprecated.') - log.warn('Please use COMPOSE_FILE instead.') +def project_from_options(base_dir, options): + return get_project( + base_dir, + get_config_path(options.get('--file')), + project_name=options.get('--project-name'), + verbose=options.get('--verbose'), + use_networking=options.get('--x-networking'), + network_driver=options.get('--x-network-driver'), + ) - explicit_config_path = options.get('--file') or os.environ.get('COMPOSE_FILE') or os.environ.get('FIG_FILE') - project = self.get_project( - explicit_config_path, - project_name=options.get('--project-name'), - verbose=options.get('--verbose')) - handler(project, command_options) +def get_config_path(file_option): + if file_option: + return file_option - def get_client(self, verbose=False): - client = docker_client() - if verbose: - version_info = six.iteritems(client.version()) - log.info("Compose version %s", __version__) - log.info("Docker base_url: %s", client.base_url) - log.info("Docker version: %s", - ", ".join("%s=%s" % item for item in version_info)) - return verbose_proxy.VerboseProxy('docker', client) - return client + if 'FIG_FILE' in os.environ: + log.warn('The FIG_FILE environment variable is deprecated.') + log.warn('Please use COMPOSE_FILE instead.') - def get_project(self, config_path=None, project_name=None, verbose=False): - config_details = config.find(self.base_dir, config_path) + config_file = os.environ.get('COMPOSE_FILE') or os.environ.get('FIG_FILE') + return [config_file] if config_file else None - try: - return Project.from_dicts( - self.get_project_name(config_details.working_dir, project_name), - config.load(config_details), - self.get_client(verbose=verbose)) - except ConfigError as e: - raise errors.UserError(six.text_type(e)) - def get_project_name(self, working_dir, project_name=None): - def normalize_name(name): - return re.sub(r'[^a-z0-9]', '', 
name.lower()) +def get_client(verbose=False, version=None): + client = docker_client(version=version) + if verbose: + version_info = six.iteritems(client.version()) + log.info("Compose version %s", __version__) + log.info("Docker base_url: %s", client.base_url) + log.info("Docker version: %s", + ", ".join("%s=%s" % item for item in version_info)) + return verbose_proxy.VerboseProxy('docker', client) + return client - if 'FIG_PROJECT_NAME' in os.environ: - log.warn('The FIG_PROJECT_NAME environment variable is deprecated.') - log.warn('Please use COMPOSE_PROJECT_NAME instead.') - project_name = ( - project_name or - os.environ.get('COMPOSE_PROJECT_NAME') or - os.environ.get('FIG_PROJECT_NAME')) - if project_name is not None: - return normalize_name(project_name) +def get_project(base_dir, config_path=None, project_name=None, verbose=False, + use_networking=False, network_driver=None): + config_details = config.find(base_dir, config_path) - project = os.path.basename(os.path.abspath(working_dir)) - if project: - return normalize_name(project) + api_version = '1.21' if use_networking else None + try: + return Project.from_dicts( + get_project_name(config_details.working_dir, project_name), + config.load(config_details), + get_client(verbose=verbose, version=api_version), + use_networking=use_networking, + network_driver=network_driver, + ) + except ConfigError as e: + raise errors.UserError(six.text_type(e)) - return 'default' + +def get_project_name(working_dir, project_name=None): + def normalize_name(name): + return re.sub(r'[^a-z0-9]', '', name.lower()) + + if 'FIG_PROJECT_NAME' in os.environ: + log.warn('The FIG_PROJECT_NAME environment variable is deprecated.') + log.warn('Please use COMPOSE_PROJECT_NAME instead.') + + project_name = ( + project_name or + os.environ.get('COMPOSE_PROJECT_NAME') or + os.environ.get('FIG_PROJECT_NAME')) + if project_name is not None: + return normalize_name(project_name) + + project = os.path.basename(os.path.abspath(working_dir)) 
+ if project: + return normalize_name(project) + + return 'default' diff --git a/compose/cli/docker_client.py b/compose/cli/docker_client.py index 244bcbef2f..734f4237b0 100644 --- a/compose/cli/docker_client.py +++ b/compose/cli/docker_client.py @@ -1,37 +1,28 @@ -from docker import Client -from docker import tls -import ssl +import logging import os +from docker import Client +from docker.utils import kwargs_from_env -def docker_client(): +from ..const import HTTP_TIMEOUT + +log = logging.getLogger(__name__) + + +DEFAULT_API_VERSION = '1.19' + + +def docker_client(version=None): """ Returns a docker-py client configured using environment variables according to the same logic as the official Docker client. """ - cert_path = os.environ.get('DOCKER_CERT_PATH', '') - if cert_path == '': - cert_path = os.path.join(os.environ.get('HOME', ''), '.docker') + if 'DOCKER_CLIENT_TIMEOUT' in os.environ: + log.warn('The DOCKER_CLIENT_TIMEOUT environment variable is deprecated. Please use COMPOSE_HTTP_TIMEOUT instead.') - base_url = os.environ.get('DOCKER_HOST') - api_version = os.environ.get('COMPOSE_API_VERSION', '1.19') - - tls_config = None - - if os.environ.get('DOCKER_TLS_VERIFY', '') != '': - parts = base_url.split('://', 1) - base_url = '%s://%s' % ('https', parts[1]) - - client_cert = (os.path.join(cert_path, 'cert.pem'), os.path.join(cert_path, 'key.pem')) - ca_cert = os.path.join(cert_path, 'ca.pem') - - tls_config = tls.TLSConfig( - ssl_version=ssl.PROTOCOL_TLSv1, - verify=True, - assert_hostname=False, - client_cert=client_cert, - ca_cert=ca_cert, - ) - - timeout = int(os.environ.get('DOCKER_CLIENT_TIMEOUT', 60)) - return Client(base_url=base_url, tls=tls_config, version=api_version, timeout=timeout) + kwargs = kwargs_from_env(assert_hostname=False) + kwargs['version'] = version or os.environ.get( + 'COMPOSE_API_VERSION', + DEFAULT_API_VERSION) + kwargs['timeout'] = HTTP_TIMEOUT + return Client(**kwargs) diff --git a/compose/cli/docopt_command.py 
b/compose/cli/docopt_command.py index 6eeb33a317..e3f4aa9e5b 100644 --- a/compose/cli/docopt_command.py +++ b/compose/cli/docopt_command.py @@ -1,9 +1,11 @@ -from __future__ import unicode_literals from __future__ import absolute_import -import sys +from __future__ import unicode_literals +import sys from inspect import getdoc -from docopt import docopt, DocoptExit + +from docopt import docopt +from docopt import DocoptExit def docopt_full_help(docstring, *args, **kwargs): @@ -23,9 +25,6 @@ class DocoptCommand(object): def dispatch(self, argv, global_options): self.perform_command(*self.parse(argv, global_options)) - def perform_command(self, options, handler, command_options): - handler(command_options) - def parse(self, argv, global_options): options = docopt_full_help(getdoc(self), argv, **self.docopt_options()) command = options['COMMAND'] diff --git a/compose/cli/errors.py b/compose/cli/errors.py index 135710d434..244897f8ab 100644 --- a/compose/cli/errors.py +++ b/compose/cli/errors.py @@ -1,4 +1,5 @@ from __future__ import absolute_import + from textwrap import dedent @@ -39,10 +40,10 @@ class DockerNotFoundGeneric(UserError): """) -class ConnectionErrorBoot2Docker(UserError): +class ConnectionErrorDockerMachine(UserError): def __init__(self): - super(ConnectionErrorBoot2Docker, self).__init__(""" - Couldn't connect to Docker daemon - you might need to run `boot2docker up`. + super(ConnectionErrorDockerMachine, self).__init__(""" + Couldn't connect to Docker daemon - you might need to run `docker-machine start default`. 
""") diff --git a/compose/cli/formatter.py b/compose/cli/formatter.py index b5b0b3c03d..d0ed0f87eb 100644 --- a/compose/cli/formatter.py +++ b/compose/cli/formatter.py @@ -1,8 +1,13 @@ -from __future__ import unicode_literals from __future__ import absolute_import +from __future__ import unicode_literals + +import logging import os + import texttable +from compose.cli import colors + def get_tty_width(): tty_size = os.popen('stty size', 'r').read().split() @@ -13,6 +18,7 @@ def get_tty_width(): class Formatter(object): + """Format tabular data for printing.""" def table(self, headers, rows): table = texttable.Texttable(max_width=get_tty_width()) table.set_cols_dtype(['t' for h in headers]) @@ -21,3 +27,22 @@ class Formatter(object): table.set_chars(['-', '|', '+', '-']) return table.draw() + + +class ConsoleWarningFormatter(logging.Formatter): + """A logging.Formatter which prints WARNING and ERROR messages with + a prefix of the log level colored appropriate for the log level. + """ + + def get_level_message(self, record): + separator = ': ' + if record.levelno == logging.WARNING: + return colors.yellow(record.levelname) + separator + if record.levelno == logging.ERROR: + return colors.red(record.levelname) + separator + + return '' + + def format(self, record): + message = super(ConsoleWarningFormatter, self).format(record) + return self.get_level_message(record) + message diff --git a/compose/cli/log_printer.py b/compose/cli/log_printer.py index ce7e106533..66920726ce 100644 --- a/compose/cli/log_printer.py +++ b/compose/cli/log_printer.py @@ -1,82 +1,91 @@ -from __future__ import unicode_literals from __future__ import absolute_import -import sys +from __future__ import unicode_literals +import sys from itertools import cycle -from .multiplexer import Multiplexer, STOP from . 
import colors -from .utils import split_buffer +from .multiplexer import Multiplexer +from compose import utils +from compose.utils import split_buffer class LogPrinter(object): - def __init__(self, containers, attach_params=None, output=sys.stdout, monochrome=False): + """Print logs from many containers to a single output stream.""" + + def __init__(self, containers, output=sys.stdout, monochrome=False): self.containers = containers - self.attach_params = attach_params or {} - self.prefix_width = self._calculate_prefix_width(containers) - self.generators = self._make_log_generators(monochrome) - self.output = output + self.output = utils.get_output_stream(output) + self.monochrome = monochrome def run(self): - mux = Multiplexer(self.generators) - for line in mux.loop(): + if not self.containers: + return + + prefix_width = max_name_width(self.containers) + generators = list(self._make_log_generators(self.monochrome, prefix_width)) + for line in Multiplexer(generators).loop(): self.output.write(line) - def _calculate_prefix_width(self, containers): - """ - Calculate the maximum width of container names so we can make the log - prefixes line up like so: - - db_1 | Listening - web_1 | Listening - """ - prefix_width = 0 - for container in containers: - prefix_width = max(prefix_width, len(container.name_without_project)) - return prefix_width - - def _make_log_generators(self, monochrome): - color_fns = cycle(colors.rainbow()) - generators = [] - + def _make_log_generators(self, monochrome, prefix_width): def no_color(text): return text - for container in self.containers: - if monochrome: - color_fn = no_color - else: - color_fn = next(color_fns) - generators.append(self._make_log_generator(container, color_fn)) + if monochrome: + color_funcs = cycle([no_color]) + else: + color_funcs = cycle(colors.rainbow()) - return generators + for color_func, container in zip(color_funcs, self.containers): + generator_func = get_log_generator(container) + prefix = 
color_func(build_log_prefix(container, prefix_width)) + yield generator_func(container, prefix, color_func) - def _make_log_generator(self, container, color_fn): - prefix = color_fn(self._generate_prefix(container)).encode('utf-8') - # Attach to container before log printer starts running - line_generator = split_buffer(self._attach(container), '\n') - for line in line_generator: - yield prefix + line +def build_log_prefix(container, prefix_width): + return container.name_without_project.ljust(prefix_width) + ' | ' - exit_code = container.wait() - yield color_fn("%s exited with code %s\n" % (container.name, exit_code)) - yield STOP - def _generate_prefix(self, container): - """ - Generate the prefix for a log line without colour - """ - name = container.name_without_project - padding = ' ' * (self.prefix_width - len(name)) - return ''.join([name, padding, ' | ']) +def max_name_width(containers): + """Calculate the maximum width of container names so we can make the log + prefixes line up like so: - def _attach(self, container): - params = { - 'stdout': True, - 'stderr': True, - 'stream': True, - } - params.update(self.attach_params) - params = dict((name, 1 if value else 0) for (name, value) in list(params.items())) - return container.attach(**params) + db_1 | Listening + web_1 | Listening + """ + return max(len(container.name_without_project) for container in containers) + + +def get_log_generator(container): + if container.has_api_logs: + return build_log_generator + return build_no_log_generator + + +def build_no_log_generator(container, prefix, color_func): + """Return a generator that prints a warning about logs and waits for + container to exit. 
+ """ + yield "{} WARNING: no logs are available with the '{}' log driver\n".format( + prefix, + container.log_driver) + yield color_func(wait_on_exit(container)) + + +def build_log_generator(container, prefix, color_func): + # if the container doesn't have a log_stream we need to attach to container + # before log printer starts running + if container.log_stream is None: + stream = container.attach(stdout=True, stderr=True, stream=True, logs=True) + line_generator = split_buffer(stream) + else: + line_generator = split_buffer(container.log_stream) + + for line in line_generator: + yield prefix + line + yield color_func(wait_on_exit(container)) + + +def wait_on_exit(container): + exit_code = container.wait() + return "%s exited with code %s\n" % (container.name, exit_code) diff --git a/compose/cli/main.py b/compose/cli/main.py index 3504c24167..b54b307ef2 100644 --- a/compose/cli/main.py +++ b/compose/cli/main.py @@ -1,33 +1,48 @@ from __future__ import print_function from __future__ import unicode_literals -from inspect import getdoc -from operator import attrgetter + import logging import re import signal import sys +from inspect import getdoc +from operator import attrgetter from docker.errors import APIError -import dockerpty +from requests.exceptions import ReadTimeout from .. import __version__ from .. 
import legacy -from ..const import DEFAULT_TIMEOUT -from ..project import NoSuchService, ConfigurationError -from ..service import BuildError, NeedsBuildError from ..config import parse_environment +from ..const import DEFAULT_TIMEOUT +from ..const import HTTP_TIMEOUT +from ..const import IS_WINDOWS_PLATFORM from ..progress_stream import StreamOutputError -from .command import Command +from ..project import ConfigurationError +from ..project import NoSuchService +from ..service import BuildError +from ..service import ConvergenceStrategy +from ..service import NeedsBuildError +from .command import friendly_error_message +from .command import project_from_options +from .docopt_command import DocoptCommand from .docopt_command import NoSuchCommand from .errors import UserError +from .formatter import ConsoleWarningFormatter from .formatter import Formatter from .log_printer import LogPrinter -from .utils import yesno, get_version_info +from .utils import get_version_info +from .utils import yesno + + +if not IS_WINDOWS_PLATFORM: + import dockerpty log = logging.getLogger(__name__) +console_handler = logging.StreamHandler(sys.stderr) INSECURE_SSL_WARNING = """ -Warning: --allow-insecure-ssl is deprecated and has no effect. +--allow-insecure-ssl is deprecated and has no effect. It will be removed in a future version of Compose. """ @@ -44,9 +59,8 @@ def main(): log.error(e.msg) sys.exit(1) except NoSuchCommand as e: - log.error("No such command: %s", e.command) - log.error("") - log.error("\n".join(parse_doc_section("commands:", getdoc(e.supercommand)))) + commands = "\n".join(parse_doc_section("commands:", getdoc(e.supercommand))) + log.error("No such command: %s\n\n%s", e.command, commands) sys.exit(1) except APIError as e: log.error(e.explanation) @@ -60,12 +74,15 @@ def main(): except NeedsBuildError as e: log.error("Service '%s' needs to be built, but --no-build was passed." 
% e.service.name) sys.exit(1) + except ReadTimeout as e: + log.error( + "An HTTP request took too long to complete. Retry with --verbose to obtain debug information.\n" + "If you encounter this issue regularly because of slow network conditions, consider setting " + "COMPOSE_HTTP_TIMEOUT to a higher value (current value: %s)." % HTTP_TIMEOUT + ) def setup_logging(): - console_handler = logging.StreamHandler(sys.stderr) - console_handler.setFormatter(logging.Formatter()) - console_handler.setLevel(logging.INFO) root_logger = logging.getLogger() root_logger.addHandler(console_handler) root_logger.setLevel(logging.DEBUG) @@ -74,6 +91,20 @@ def setup_logging(): logging.getLogger("requests").propagate = False +def setup_console_handler(handler, verbose): + if handler.stream.isatty(): + format_class = ConsoleWarningFormatter + else: + format_class = logging.Formatter + + if verbose: + handler.setFormatter(format_class('%(name)s.%(funcName)s: %(message)s')) + handler.setLevel(logging.DEBUG) + else: + handler.setFormatter(format_class()) + handler.setLevel(logging.INFO) + + # stolen from docopt master def parse_doc_section(name, source): pattern = re.compile('^([^\n]*' + name + '[^\n]*\n?(?:[ \t].*?(?:\n|$))*)', @@ -81,16 +112,20 @@ def parse_doc_section(name, source): return [s.strip() for s in pattern.findall(source)] -class TopLevelCommand(Command): +class TopLevelCommand(DocoptCommand): """Define and run multi-container applications with Docker. Usage: - docker-compose [options] [COMMAND] [ARGS...] + docker-compose [-f=...] [options] [COMMAND] [ARGS...] docker-compose -h|--help Options: -f, --file FILE Specify an alternate compose file (default: docker-compose.yml) -p, --project-name NAME Specify an alternate project name (default: directory name) + --x-networking (EXPERIMENTAL) Use new Docker networking functionality. + Requires Docker 1.9 or later. + --x-network-driver DRIVER (EXPERIMENTAL) Specify a network driver (default: "bridge"). + Requires Docker 1.9 or later. 
--verbose Show more output -v, --version Print version and exit @@ -99,6 +134,7 @@ class TopLevelCommand(Command): help Get help on a command kill Kill containers logs View output from containers + pause Pause services port Print the public port for a port binding ps List containers pull Pulls service images @@ -108,16 +144,31 @@ class TopLevelCommand(Command): scale Set number of containers for a service start Start services stop Stop services + unpause Unpause services up Create and start containers migrate-to-labels Recreate containers to add labels version Show the Docker-Compose version information """ + base_dir = '.' + def docopt_options(self): options = super(TopLevelCommand, self).docopt_options() options['version'] = get_version_info('compose') return options + def perform_command(self, options, handler, command_options): + setup_console_handler(console_handler, options.get('--verbose')) + + if options['COMMAND'] in ('help', 'version'): + # Skip looking up the compose file. + handler(None, command_options) + return + + project = project_from_options(self.base_dir, options) + with friendly_error_message(): + handler(project, command_options) + def build(self, project, options): """ Build or rebuild services. @@ -130,9 +181,11 @@ class TopLevelCommand(Command): Options: --no-cache Do not use cache when building the image. + --pull Always attempt to pull a newer version of the image. 
""" no_cache = bool(options.get('--no-cache', False)) - project.build(service_names=options['SERVICE'], no_cache=no_cache) + pull = bool(options.get('--pull', False)) + project.build(service_names=options['SERVICE'], no_cache=no_cache, pull=pull) def help(self, project, options): """ @@ -170,7 +223,15 @@ class TopLevelCommand(Command): monochrome = options['--no-color'] print("Attaching to", list_containers(containers)) - LogPrinter(containers, attach_params={'logs': True}, monochrome=monochrome).run() + LogPrinter(containers, monochrome=monochrome).run() + + def pause(self, project, options): + """ + Pause services. + + Usage: pause [SERVICE...] + """ + project.pause(service_names=options['SERVICE']) def port(self, project, options): """ @@ -237,6 +298,7 @@ class TopLevelCommand(Command): Usage: pull [options] [SERVICE...] Options: + --ignore-pull-failures Pull what it can and ignores images with pull failures. --allow-insecure-ssl Deprecated - no effect. """ if options['--allow-insecure-ssl']: @@ -244,6 +306,7 @@ class TopLevelCommand(Command): project.pull( service_names=options['SERVICE'], + ignore_pull_failures=options.get('--ignore-pull-failures') ) def rm(self, project, options): @@ -282,17 +345,19 @@ class TopLevelCommand(Command): running. If you do not want to start linked services, use `docker-compose run --no-deps SERVICE COMMAND [ARGS...]`. - Usage: run [options] [-e KEY=VAL...] SERVICE [COMMAND] [ARGS...] + Usage: run [options] [-p PORT...] [-e KEY=VAL...] SERVICE [COMMAND] [ARGS...] Options: --allow-insecure-ssl Deprecated - no effect. -d Detached mode: Run container in the background, print new container name. + --name NAME Assign a name to the container --entrypoint CMD Override the entrypoint of the image. -e KEY=VAL Set an environment variable (can be used multiple times) -u, --user="" Run as specified username or uid --no-deps Don't start linked services. --rm Remove container after run. Ignored in detached mode. 
+ -p, --publish=[] Publish a container's port(s) to the host --service-ports Run command with the service's ports enabled and mapped to the host. -T Disable pseudo-tty allocation. By default `docker-compose run` @@ -300,6 +365,14 @@ class TopLevelCommand(Command): """ service = project.get_service(options['SERVICE']) + detach = options['-d'] + + if IS_WINDOWS_PLATFORM and not detach: + raise UserError( + "Interactive mode is not yet supported on Windows.\n" + "Please pass the -d flag when using `docker-compose run`." + ) + if options['--allow-insecure-ssl']: log.warn(INSECURE_SSL_WARNING) @@ -310,11 +383,13 @@ class TopLevelCommand(Command): project.up( service_names=deps, start_deps=True, - allow_recreate=False, + strategy=ConvergenceStrategy.never, ) + elif project.use_networking: + project.ensure_network_exists() tty = True - if options['-d'] or options['-T'] or not sys.stdin.isatty(): + if detach or options['-T'] or not sys.stdin.isatty(): tty = False if options['COMMAND']: @@ -325,8 +400,8 @@ class TopLevelCommand(Command): container_options = { 'command': command, 'tty': tty, - 'stdin_open': not options['-d'], - 'detach': options['-d'], + 'stdin_open': not detach, + 'detach': detach, } if options['-e']: @@ -344,6 +419,18 @@ class TopLevelCommand(Command): if not options['--service-ports']: container_options['ports'] = [] + if options['--publish']: + container_options['ports'] = options.get('--publish') + + if options['--publish'] and options['--service-ports']: + raise UserError( + 'Service port mapping and manual port mapping ' + 'can not be used togather' + ) + + if options['--name']: + container_options['name'] = options['--name'] + try: container = service.create_container( quiet=True, @@ -360,7 +447,7 @@ class TopLevelCommand(Command): raise e - if options['-d']: + if detach: service.start_container(container) print(container.name) else: @@ -434,6 +521,14 @@ class TopLevelCommand(Command): timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT) 
project.restart(service_names=options['SERVICE'], timeout=timeout) + def unpause(self, project, options): + """ + Unpause services. + + Usage: unpause [SERVICE...] + """ + project.unpause(service_names=options['SERVICE']) + def up(self, project, options): """ Builds, (re)creates, starts, and attaches to containers for a service. @@ -473,26 +568,19 @@ class TopLevelCommand(Command): if options['--allow-insecure-ssl']: log.warn(INSECURE_SSL_WARNING) - detached = options['-d'] - monochrome = options['--no-color'] - start_deps = not options['--no-deps'] - allow_recreate = not options['--no-recreate'] - force_recreate = options['--force-recreate'] service_names = options['SERVICE'] timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT) - - if force_recreate and not allow_recreate: - raise UserError("--force-recreate and --no-recreate cannot be combined.") + detached = options.get('-d') to_attach = project.up( service_names=service_names, start_deps=start_deps, - allow_recreate=allow_recreate, - force_recreate=force_recreate, + strategy=convergence_strategy_from_opts(options), do_build=not options['--no-build'], - timeout=timeout + timeout=timeout, + detached=detached ) if not detached: @@ -540,14 +628,28 @@ class TopLevelCommand(Command): print(get_version_info('full')) +def convergence_strategy_from_opts(options): + no_recreate = options['--no-recreate'] + force_recreate = options['--force-recreate'] + if force_recreate and no_recreate: + raise UserError("--force-recreate and --no-recreate cannot be combined.") + + if force_recreate: + return ConvergenceStrategy.always + + if no_recreate: + return ConvergenceStrategy.never + + return ConvergenceStrategy.changed + + def build_log_printer(containers, service_names, monochrome): if service_names: - containers = [c for c in containers if c.service in service_names] - - return LogPrinter( - containers, - attach_params={"logs": True}, - monochrome=monochrome) + containers = [ + container + for container in containers if 
container.service in service_names + ] + return LogPrinter(containers, monochrome=monochrome) def attach_to_logs(project, log_printer, service_names, timeout): diff --git a/compose/cli/multiplexer.py b/compose/cli/multiplexer.py index 849dbd66a2..4c73c6cdc6 100644 --- a/compose/cli/multiplexer.py +++ b/compose/cli/multiplexer.py @@ -1,42 +1,60 @@ from __future__ import absolute_import + from threading import Thread +from six.moves import _thread as thread + try: from Queue import Queue, Empty except ImportError: from queue import Queue, Empty # Python 3.x -# Yield STOP from an input generator to stop the -# top-level loop without processing any more input. STOP = object() class Multiplexer(object): - def __init__(self, generators): - self.generators = generators + """ + Create a single iterator from several iterators by running all of them in + parallel and yielding results as they come in. + """ + + def __init__(self, iterators): + self.iterators = iterators + self._num_running = len(iterators) self.queue = Queue() def loop(self): self._init_readers() - while True: + while self._num_running > 0: try: - item = self.queue.get(timeout=0.1) + item, exception = self.queue.get(timeout=0.1) + + if exception: + raise exception + if item is STOP: - break + self._num_running -= 1 else: yield item except Empty: pass + # See https://github.com/docker/compose/issues/189 + except thread.error: + raise KeyboardInterrupt() def _init_readers(self): - for generator in self.generators: - t = Thread(target=_enqueue_output, args=(generator, self.queue)) + for iterator in self.iterators: + t = Thread(target=_enqueue_output, args=(iterator, self.queue)) t.daemon = True t.start() -def _enqueue_output(generator, queue): - for item in generator: - queue.put(item) +def _enqueue_output(iterator, queue): + try: + for item in iterator: + queue.put((item, None)) + queue.put((STOP, None)) + except Exception as e: + queue.put((None, e)) diff --git a/compose/cli/utils.py b/compose/cli/utils.py 
index 7f2ba2e0dd..07510e2f31 100644 --- a/compose/cli/utils.py +++ b/compose/cli/utils.py @@ -1,14 +1,16 @@ -from __future__ import unicode_literals from __future__ import absolute_import from __future__ import division +from __future__ import unicode_literals -from .. import __version__ -import datetime -from docker import version as docker_py_version import os import platform -import subprocess import ssl +import subprocess + +from docker import version as docker_py_version +from six.moves import input + +from .. import __version__ def yesno(prompt, default=None): @@ -21,7 +23,7 @@ def yesno(prompt, default=None): Unrecognised input (anything other than "y", "n", "yes", "no" or "") will return None. """ - answer = raw_input(prompt).strip().lower() + answer = input(prompt).strip().lower() if answer == "y" or answer == "yes": return True @@ -33,89 +35,17 @@ def yesno(prompt, default=None): return None -# http://stackoverflow.com/a/5164027 -def prettydate(d): - diff = datetime.datetime.utcnow() - d - s = diff.seconds - if diff.days > 7 or diff.days < 0: - return d.strftime('%d %b %y') - elif diff.days == 1: - return '1 day ago' - elif diff.days > 1: - return '{0} days ago'.format(diff.days) - elif s <= 1: - return 'just now' - elif s < 60: - return '{0} seconds ago'.format(s) - elif s < 120: - return '1 minute ago' - elif s < 3600: - return '{0} minutes ago'.format(s / 60) - elif s < 7200: - return '1 hour ago' - else: - return '{0} hours ago'.format(s / 3600) - - -def mkdir(path, permissions=0o700): - if not os.path.exists(path): - os.mkdir(path) - - os.chmod(path, permissions) - - return path - - -def find_candidates_in_parent_dirs(filenames, path): - """ - Given a directory path to start, looks for filenames in the - directory, and then each parent directory successively, - until found. - - Returns tuple (candidates, path). 
- """ - candidates = [filename for filename in filenames - if os.path.exists(os.path.join(path, filename))] - - if len(candidates) == 0: - parent_dir = os.path.join(path, '..') - if os.path.abspath(parent_dir) != os.path.abspath(path): - return find_candidates_in_parent_dirs(filenames, parent_dir) - - return (candidates, path) - - -def split_buffer(reader, separator): - """ - Given a generator which yields strings and a separator string, - joins all input, splits on the separator and yields each chunk. - - Unlike string.split(), each chunk includes the trailing - separator, except for the last one if none was found on the end - of the input. - """ - buffered = str('') - separator = str(separator) - - for data in reader: - buffered += data - while True: - index = buffered.find(separator) - if index == -1: - break - yield buffered[:index + 1] - buffered = buffered[index + 1:] - - if len(buffered) > 0: - yield buffered - - def call_silently(*args, **kwargs): """ Like subprocess.call(), but redirects stdout and stderr to /dev/null. """ with open(os.devnull, 'w') as shutup: - return subprocess.call(*args, stdout=shutup, stderr=shutup, **kwargs) + try: + return subprocess.call(*args, stdout=shutup, stderr=shutup, **kwargs) + except WindowsError: + # On Windows, subprocess.call() can still raise exceptions. Normalize + # to POSIXy behaviour by returning a nonzero exit code. 
+ return 1 def is_mac(): diff --git a/compose/cli/verbose_proxy.py b/compose/cli/verbose_proxy.py index a548983e1c..68dfabe521 100644 --- a/compose/cli/verbose_proxy.py +++ b/compose/cli/verbose_proxy.py @@ -1,8 +1,7 @@ - import functools -from itertools import chain import logging import pprint +from itertools import chain import six diff --git a/compose/config/__init__.py b/compose/config/__init__.py new file mode 100644 index 0000000000..de6f10c949 --- /dev/null +++ b/compose/config/__init__.py @@ -0,0 +1,9 @@ +# flake8: noqa +from .config import ConfigDetails +from .config import ConfigurationError +from .config import DOCKER_CONFIG_KEYS +from .config import find +from .config import get_service_name_from_net +from .config import load +from .config import merge_environment +from .config import parse_environment diff --git a/compose/config.py b/compose/config/config.py similarity index 51% rename from compose/config.py rename to compose/config/config.py index 6bb0fea6ac..21549e9b34 100644 --- a/compose/config.py +++ b/compose/config/config.py @@ -1,17 +1,27 @@ +import codecs import logging import os import sys -import yaml from collections import namedtuple import six +import yaml -from compose.cli.utils import find_candidates_in_parent_dirs +from .errors import CircularReference +from .errors import ComposeFileNotFound +from .errors import ConfigurationError +from .interpolation import interpolate_environment_variables +from .validation import validate_against_fields_schema +from .validation import validate_against_service_schema +from .validation import validate_extended_service_exists +from .validation import validate_extends_file_path +from .validation import validate_top_level_object DOCKER_CONFIG_KEYS = [ 'cap_add', 'cap_drop', + 'cgroup_parent', 'command', 'cpu_shares', 'cpuset', @@ -26,6 +36,7 @@ DOCKER_CONFIG_KEYS = [ 'extra_hosts', 'hostname', 'image', + 'ipc', 'labels', 'links', 'log_driver', @@ -58,22 +69,6 @@ ALLOWED_KEYS = DOCKER_CONFIG_KEYS + [ 
'name', ] -DOCKER_CONFIG_HINTS = { - 'cpu_share': 'cpu_shares', - 'add_host': 'extra_hosts', - 'hosts': 'extra_hosts', - 'extra_host': 'extra_hosts', - 'device': 'devices', - 'link': 'links', - 'memory_swap': 'memswap_limit', - 'port': 'ports', - 'privilege': 'privileged', - 'priviliged': 'privileged', - 'privilige': 'privileged', - 'volume': 'volumes', - 'workdir': 'working_dir', -} - SUPPORTED_FILENAMES = [ 'docker-compose.yml', @@ -82,35 +77,50 @@ SUPPORTED_FILENAMES = [ 'fig.yaml', ] - -PATH_START_CHARS = [ - '/', - '.', - '~', -] - +DEFAULT_OVERRIDE_FILENAME = 'docker-compose.override.yml' log = logging.getLogger(__name__) -ConfigDetails = namedtuple('ConfigDetails', 'config working_dir filename') +class ConfigDetails(namedtuple('_ConfigDetails', 'working_dir config_files')): + """ + :param working_dir: the directory to use for relative paths in the config + :type working_dir: string + :param config_files: list of configuration files to load + :type config_files: list of :class:`ConfigFile` + """ -def find(base_dir, filename): - if filename == '-': - return ConfigDetails(yaml.safe_load(sys.stdin), os.getcwd(), None) +class ConfigFile(namedtuple('_ConfigFile', 'filename config')): + """ + :param filename: filename of the config file + :type filename: string + :param config: contents of the config file + :type config: :class:`dict` + """ - if filename: - filename = os.path.join(base_dir, filename) + +def find(base_dir, filenames): + if filenames == ['-']: + return ConfigDetails( + os.getcwd(), + [ConfigFile(None, yaml.safe_load(sys.stdin))]) + + if filenames: + filenames = [os.path.join(base_dir, f) for f in filenames] else: - filename = get_config_path(base_dir) - return ConfigDetails(load_yaml(filename), os.path.dirname(filename), filename) + filenames = get_default_config_files(base_dir) + + log.debug("Using configuration files: {}".format(",".join(filenames))) + return ConfigDetails( + os.path.dirname(filenames[0]), + [ConfigFile(f, load_yaml(f)) for f in 
filenames]) -def get_config_path(base_dir): +def get_default_config_files(base_dir): (candidates, path) = find_candidates_in_parent_dirs(SUPPORTED_FILENAMES, base_dir) - if len(candidates) == 0: + if not candidates: raise ComposeFileNotFound(SUPPORTED_FILENAMES) winner = candidates[0] @@ -128,109 +138,194 @@ def get_config_path(base_dir): log.warn("%s is deprecated and will not be supported in future. " "Please rename your config file to docker-compose.yml\n" % winner) - return os.path.join(path, winner) + return [os.path.join(path, winner)] + get_default_override_file(path) + + +def get_default_override_file(path): + override_filename = os.path.join(path, DEFAULT_OVERRIDE_FILENAME) + return [override_filename] if os.path.exists(override_filename) else [] + + +def find_candidates_in_parent_dirs(filenames, path): + """ + Given a directory path to start, looks for filenames in the + directory, and then each parent directory successively, + until found. + + Returns tuple (candidates, path). + """ + candidates = [filename for filename in filenames + if os.path.exists(os.path.join(path, filename))] + + if not candidates: + parent_dir = os.path.join(path, '..') + if os.path.abspath(parent_dir) != os.path.abspath(path): + return find_candidates_in_parent_dirs(filenames, parent_dir) + + return (candidates, path) def load(config_details): - dictionary, working_dir, filename = config_details - service_dicts = [] + """Load the configuration from a working directory and a list of + configuration files. Files are loaded in order, and merged on top + of each other to create the final configuration. - for service_name, service_dict in list(dictionary.items()): - if not isinstance(service_dict, dict): - raise ConfigurationError('Service "%s" doesn\'t have any configuration options. All top level keys in your docker-compose.yml must map to a dictionary of configuration options.' 
% service_name) - loader = ServiceLoader(working_dir=working_dir, filename=filename) - service_dict = loader.make_service_dict(service_name, service_dict) + Return a fully interpolated, extended and validated configuration. + """ + + def build_service(filename, service_name, service_dict): + loader = ServiceLoader( + config_details.working_dir, + filename, + service_name, + service_dict) + service_dict = loader.make_service_dict() validate_paths(service_dict) - service_dicts.append(service_dict) + return service_dict - return service_dicts + def load_file(filename, config): + processed_config = interpolate_environment_variables(config) + validate_against_fields_schema(processed_config) + return [ + build_service(filename, name, service_config) + for name, service_config in processed_config.items() + ] + + def merge_services(base, override): + all_service_names = set(base) | set(override) + return { + name: merge_service_dicts_from_files( + base.get(name, {}), + override.get(name, {})) + for name in all_service_names + } + + config_file = config_details.config_files[0] + validate_top_level_object(config_file.config) + for next_file in config_details.config_files[1:]: + validate_top_level_object(next_file.config) + + config_file = ConfigFile( + config_file.filename, + merge_services(config_file.config, next_file.config)) + + return load_file(config_file.filename, config_file.config) class ServiceLoader(object): - def __init__(self, working_dir, filename=None, already_seen=None): + def __init__(self, working_dir, filename, service_name, service_dict, already_seen=None): + if working_dir is None: + raise Exception("No working_dir passed to ServiceLoader()") + self.working_dir = os.path.abspath(working_dir) + if filename: self.filename = os.path.abspath(filename) else: self.filename = filename self.already_seen = already_seen or [] + self.service_dict = service_dict.copy() + self.service_name = service_name + self.service_dict['name'] = service_name def 
detect_cycle(self, name): if self.signature(name) in self.already_seen: raise CircularReference(self.already_seen + [self.signature(name)]) - def make_service_dict(self, name, service_dict): - service_dict = service_dict.copy() - service_dict['name'] = name - service_dict = resolve_environment(service_dict, working_dir=self.working_dir) - service_dict = self.resolve_extends(service_dict) - return process_container_options(service_dict, working_dir=self.working_dir) + def make_service_dict(self): + self.resolve_environment() + if 'extends' in self.service_dict: + self.validate_and_construct_extends() + self.service_dict = self.resolve_extends() - def resolve_extends(self, service_dict): - if 'extends' not in service_dict: - return service_dict + if not self.already_seen: + validate_against_service_schema(self.service_dict, self.service_name) - extends_options = self.validate_extends_options(service_dict['name'], service_dict['extends']) + return process_container_options(self.service_dict, working_dir=self.working_dir) - if self.working_dir is None: - raise Exception("No working_dir passed to ServiceLoader()") + def resolve_environment(self): + """ + Unpack any environment variables from an env_file, if set. + Interpolate environment values if set. 
+ """ + if 'environment' not in self.service_dict and 'env_file' not in self.service_dict: + return - if 'file' in extends_options: - extends_from_filename = extends_options['file'] - other_config_path = expand_path(self.working_dir, extends_from_filename) - else: - other_config_path = self.filename + env = {} + + if 'env_file' in self.service_dict: + for f in get_env_files(self.service_dict, working_dir=self.working_dir): + env.update(env_vars_from_file(f)) + del self.service_dict['env_file'] + + env.update(parse_environment(self.service_dict.get('environment'))) + env = dict(resolve_env_var(k, v) for k, v in six.iteritems(env)) + + self.service_dict['environment'] = env + + def validate_and_construct_extends(self): + extends = self.service_dict['extends'] + if not isinstance(extends, dict): + extends = {'service': extends} + + validate_extends_file_path( + self.service_name, + extends, + self.filename + ) + self.extended_config_path = self.get_extended_config_path(extends) + self.extended_service_name = extends['service'] + + config = load_yaml(self.extended_config_path) + validate_top_level_object(config) + full_extended_config = interpolate_environment_variables(config) + + validate_extended_service_exists( + self.extended_service_name, + full_extended_config, + self.extended_config_path + ) + validate_against_fields_schema(full_extended_config) + + self.extended_config = full_extended_config[self.extended_service_name] + + def resolve_extends(self): + other_working_dir = os.path.dirname(self.extended_config_path) + other_already_seen = self.already_seen + [self.signature(self.service_name)] - other_working_dir = os.path.dirname(other_config_path) - other_already_seen = self.already_seen + [self.signature(service_dict['name'])] other_loader = ServiceLoader( working_dir=other_working_dir, - filename=other_config_path, + filename=self.extended_config_path, + service_name=self.service_name, + service_dict=self.extended_config, already_seen=other_already_seen, ) - 
other_config = load_yaml(other_config_path) - other_service_dict = other_config[extends_options['service']] - other_loader.detect_cycle(extends_options['service']) - other_service_dict = other_loader.make_service_dict( - service_dict['name'], - other_service_dict, - ) + other_loader.detect_cycle(self.extended_service_name) + other_service_dict = other_loader.make_service_dict() validate_extended_service_dict( other_service_dict, - filename=other_config_path, - service=extends_options['service'], + filename=self.extended_config_path, + service=self.extended_service_name, ) - return merge_service_dicts(other_service_dict, service_dict) + return merge_service_dicts(other_service_dict, self.service_dict) + + def get_extended_config_path(self, extends_options): + """ + Service we are extending either has a value for 'file' set, which we + need to obtain a full path too or we are extending from a service + defined in our own file. + """ + if 'file' in extends_options: + extends_from_filename = extends_options['file'] + return expand_path(self.working_dir, extends_from_filename) + + return self.filename def signature(self, name): return (self.filename, name) - def validate_extends_options(self, service_name, extends_options): - error_prefix = "Invalid 'extends' configuration for %s:" % service_name - - if not isinstance(extends_options, dict): - raise ConfigurationError("%s must be a dictionary" % error_prefix) - - if 'service' not in extends_options: - raise ConfigurationError( - "%s you need to specify a service, e.g. 'service: web'" % error_prefix - ) - - if 'file' not in extends_options and self.filename is None: - raise ConfigurationError( - "%s you need to specify a 'file', e.g. 
'file: something.yml'" % error_prefix - ) - - for k, _ in extends_options.items(): - if k not in ['file', 'service']: - raise ConfigurationError( - "%s unsupported configuration option '%s'" % (error_prefix, k) - ) - - return extends_options - def validate_extended_service_dict(service_dict, filename, service): error_prefix = "Cannot extend service '%s' in %s:" % (service, filename) @@ -247,18 +342,8 @@ def validate_extended_service_dict(service_dict, filename, service): def process_container_options(service_dict, working_dir=None): - for k in service_dict: - if k not in ALLOWED_KEYS: - msg = "Unsupported config option for %s service: '%s'" % (service_dict['name'], k) - if k in DOCKER_CONFIG_HINTS: - msg += " (did you mean '%s'?)" % DOCKER_CONFIG_HINTS[k] - raise ConfigurationError(msg) - service_dict = service_dict.copy() - if 'memswap_limit' in service_dict and 'mem_limit' not in service_dict: - raise ConfigurationError("Invalid 'memswap_limit' configuration for %s service: when defining 'memswap_limit' you must set 'mem_limit' as well" % service_dict['name']) - if 'volumes' in service_dict and service_dict.get('volume_driver') is None: service_dict['volumes'] = resolve_volume_paths(service_dict, working_dir=working_dir) @@ -271,6 +356,17 @@ def process_container_options(service_dict, working_dir=None): return service_dict +def merge_service_dicts_from_files(base, override): + """When merging services from multiple files we need to merge the `extends` + field. This is not handled by `merge_service_dicts()` which is used to + perform the `extends`. 
+ """ + new_service = merge_service_dicts(base, override) + if 'extends' in override: + new_service['extends'] = override['extends'] + return new_service + + def merge_service_dicts(base, override): d = base.copy() @@ -328,25 +424,10 @@ def merge_environment(base, override): return env -def parse_links(links): - return dict(parse_link(l) for l in links) - - -def parse_link(link): - if ':' in link: - source, alias = link.split(':', 1) - return (alias, source) - else: - return (link, link) - - def get_env_files(options, working_dir=None): if 'env_file' not in options: return {} - if working_dir is None: - raise Exception("No working_dir passed to get_env_files()") - env_files = options.get('env_file', []) if not isinstance(env_files, list): env_files = [env_files] @@ -354,26 +435,6 @@ def get_env_files(options, working_dir=None): return [expand_path(working_dir, path) for path in env_files] -def resolve_environment(service_dict, working_dir=None): - service_dict = service_dict.copy() - - if 'environment' not in service_dict and 'env_file' not in service_dict: - return service_dict - - env = {} - - if 'env_file' in service_dict: - for f in get_env_files(service_dict, working_dir=working_dir): - env.update(env_vars_from_file(f)) - del service_dict['env_file'] - - env.update(parse_environment(service_dict.get('environment'))) - env = dict(resolve_env_var(k, v) for k, v in six.iteritems(env)) - - service_dict['environment'] = env - return service_dict - - def parse_environment(environment): if not environment: return {} @@ -391,6 +452,8 @@ def parse_environment(environment): def split_env(env): + if isinstance(env, six.binary_type): + env = env.decode('utf-8') if '=' in env: return env.split('=', 1) else: @@ -413,7 +476,7 @@ def env_vars_from_file(filename): if not os.path.exists(filename): raise ConfigurationError("Couldn't find env file: %s" % filename) env = {} - for line in open(filename, 'r'): + for line in codecs.open(filename, 'r', 'utf-8'): line = line.strip() if 
line and not line.startswith('#'): k, v = split_env(line) @@ -433,22 +496,12 @@ def resolve_volume_paths(service_dict, working_dir=None): def resolve_volume_path(volume, working_dir, service_name): container_path, host_path = split_path_mapping(volume) - container_path = os.path.expanduser(os.path.expandvars(container_path)) if host_path is not None: - host_path = os.path.expanduser(os.path.expandvars(host_path)) - - if not any(host_path.startswith(c) for c in PATH_START_CHARS): - log.warn( - 'Warning: the mapping "{0}:{1}" in the volumes config for ' - 'service "{2}" is ambiguous. In a future version of Docker, ' - 'it will designate a "named" volume ' - '(see https://github.com/docker/docker/pull/14242). ' - 'To prevent unexpected behaviour, change it to "./{0}:{1}"' - .format(host_path, container_path, service_name) - ) - - return "%s:%s" % (expand_path(working_dir, host_path), container_path) + if host_path.startswith('.'): + host_path = expand_path(working_dir, host_path) + host_path = os.path.expanduser(host_path) + return "{}:{}".format(host_path, container_path) else: return container_path @@ -483,12 +536,24 @@ def path_mappings_from_dict(d): return [join_path_mapping(v) for v in d.items()] -def split_path_mapping(string): - if ':' in string: - (host, container) = string.split(':', 1) - return (container, host) +def split_path_mapping(volume_path): + """ + Ascertain if the volume_path contains a host path as well as a container + path. Using splitdrive so windows absolute paths won't cause issues with + splitting on ':'. 
+ """ + # splitdrive has limitations when it comes to relative paths, so when it's + # relative, handle special case to set the drive to '' + if volume_path.startswith('.') or volume_path.startswith('~'): + drive, volume_config = '', volume_path else: - return (string, None) + drive, volume_config = os.path.splitdrive(volume_path) + + if ':' in volume_config: + (host, container) = volume_config.split(':', 1) + return (container, drive + host) + else: + return (volume_path, None) def join_path_mapping(pair): @@ -515,11 +580,6 @@ def parse_labels(labels): if isinstance(labels, dict): return labels - raise ConfigurationError( - "labels \"%s\" must be a list or mapping" % - labels - ) - def split_label(label): if '=' in label: @@ -529,7 +589,7 @@ def split_label(label): def expand_path(working_dir, path): - return os.path.abspath(os.path.join(working_dir, path)) + return os.path.abspath(os.path.join(working_dir, os.path.expanduser(path))) def to_list(value): @@ -556,35 +616,6 @@ def load_yaml(filename): try: with open(filename, 'r') as fh: return yaml.safe_load(fh) - except IOError as e: - raise ConfigurationError(six.text_type(e)) - - -class ConfigurationError(Exception): - def __init__(self, msg): - self.msg = msg - - def __str__(self): - return self.msg - - -class CircularReference(ConfigurationError): - def __init__(self, trail): - self.trail = trail - - @property - def msg(self): - lines = [ - "{} in {}".format(service_name, filename) - for (filename, service_name) in self.trail - ] - return "Circular reference:\n {}".format("\n extends ".join(lines)) - - -class ComposeFileNotFound(ConfigurationError): - def __init__(self, supported_filenames): - super(ComposeFileNotFound, self).__init__(""" - Can't find a suitable configuration file in this directory or any parent. Are you in the right directory? - - Supported filenames: %s - """ % ", ".join(supported_filenames)) + except (IOError, yaml.YAMLError) as e: + error_name = getattr(e, '__module__', '') + '.' 
+ e.__class__.__name__ + raise ConfigurationError(u"{}: {}".format(error_name, e)) diff --git a/compose/config/errors.py b/compose/config/errors.py new file mode 100644 index 0000000000..037b7ec84d --- /dev/null +++ b/compose/config/errors.py @@ -0,0 +1,28 @@ +class ConfigurationError(Exception): + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return self.msg + + +class CircularReference(ConfigurationError): + def __init__(self, trail): + self.trail = trail + + @property + def msg(self): + lines = [ + "{} in {}".format(service_name, filename) + for (filename, service_name) in self.trail + ] + return "Circular reference:\n {}".format("\n extends ".join(lines)) + + +class ComposeFileNotFound(ConfigurationError): + def __init__(self, supported_filenames): + super(ComposeFileNotFound, self).__init__(""" + Can't find a suitable configuration file in this directory or any parent. Are you in the right directory? + + Supported filenames: %s + """ % ", ".join(supported_filenames)) diff --git a/compose/config/fields_schema.json b/compose/config/fields_schema.json new file mode 100644 index 0000000000..e254e3539f --- /dev/null +++ b/compose/config/fields_schema.json @@ -0,0 +1,154 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "type": "object", + + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/service" + } + }, + + "definitions": { + "service": { + "type": "object", + + "properties": { + "build": {"type": "string"}, + "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "cgroup_parent": {"type": "string"}, + "command": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "container_name": {"type": "string"}, + "cpu_shares": {"type": ["number", "string"]}, + "cpuset": {"type": "string"}, + "devices": {"type": "array", "items": {"type": "string"}, 
"uniqueItems": true}, + "dns": {"$ref": "#/definitions/string_or_list"}, + "dns_search": {"$ref": "#/definitions/string_or_list"}, + "dockerfile": {"type": "string"}, + "domainname": {"type": "string"}, + "entrypoint": {"$ref": "#/definitions/string_or_list"}, + "env_file": {"$ref": "#/definitions/string_or_list"}, + + "environment": { + "oneOf": [ + { + "type": "object", + "patternProperties": { + "^[^-]+$": { + "type": ["string", "number", "boolean", "null"], + "format": "environment" + } + }, + "additionalProperties": false + }, + {"type": "array", "items": {"type": "string"}, "uniqueItems": true} + ] + }, + + "expose": { + "type": "array", + "items": {"type": ["string", "number"]}, + "uniqueItems": true + }, + + "extends": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "object", + + "properties": { + "service": {"type": "string"}, + "file": {"type": "string"} + }, + "required": ["service"], + "additionalProperties": false + } + ] + }, + + "extra_hosts": {"$ref": "#/definitions/list_or_dict"}, + "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "hostname": {"type": "string"}, + "image": {"type": "string"}, + "ipc": {"type": "string"}, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + + "log_driver": {"type": "string"}, + "log_opt": {"type": "object"}, + + "mac_address": {"type": "string"}, + "mem_limit": {"type": ["number", "string"]}, + "memswap_limit": {"type": ["number", "string"]}, + "name": {"type": "string"}, + "net": {"type": "string"}, + "pid": {"type": ["string", "null"]}, + + "ports": { + "type": "array", + "items": { + "oneOf": [ + { + "type": "string", + "format": "ports" + }, + { + "type": "number", + "format": "ports" + } + ] + }, + "uniqueItems": true + }, + + "privileged": {"type": "boolean"}, + "read_only": {"type": "boolean"}, + "restart": {"type": "string"}, + "security_opt": {"type": "array", "items": {"type": 
"string"}, "uniqueItems": true}, + "stdin_open": {"type": "boolean"}, + "tty": {"type": "boolean"}, + "user": {"type": "string"}, + "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "volume_driver": {"type": "string"}, + "volumes_from": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "working_dir": {"type": "string"} + }, + + "dependencies": { + "memswap_limit": ["mem_limit"] + }, + "additionalProperties": false + }, + + "string_or_list": { + "oneOf": [ + {"type": "string"}, + {"$ref": "#/definitions/list_of_strings"} + ] + }, + + "list_of_strings": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": true + }, + + "list_or_dict": { + "oneOf": [ + {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + {"type": "object"} + ] + } + + }, + "additionalProperties": false +} diff --git a/compose/config/interpolation.py b/compose/config/interpolation.py new file mode 100644 index 0000000000..f8e1da610d --- /dev/null +++ b/compose/config/interpolation.py @@ -0,0 +1,91 @@ +import logging +import os +from string import Template + +import six + +from .errors import ConfigurationError +log = logging.getLogger(__name__) + + +def interpolate_environment_variables(config): + mapping = BlankDefaultDict(os.environ) + + return dict( + (service_name, process_service(service_name, service_dict, mapping)) + for (service_name, service_dict) in config.items() + ) + + +def process_service(service_name, service_dict, mapping): + if not isinstance(service_dict, dict): + raise ConfigurationError( + 'Service "%s" doesn\'t have any configuration options. ' + 'All top level keys in your docker-compose.yml must map ' + 'to a dictionary of configuration options.' 
% service_name + ) + + return dict( + (key, interpolate_value(service_name, key, val, mapping)) + for (key, val) in service_dict.items() + ) + + +def interpolate_value(service_name, config_key, value, mapping): + try: + return recursive_interpolate(value, mapping) + except InvalidInterpolation as e: + raise ConfigurationError( + 'Invalid interpolation format for "{config_key}" option ' + 'in service "{service_name}": "{string}"' + .format( + config_key=config_key, + service_name=service_name, + string=e.string, + ) + ) + + +def recursive_interpolate(obj, mapping): + if isinstance(obj, six.string_types): + return interpolate(obj, mapping) + elif isinstance(obj, dict): + return dict( + (key, recursive_interpolate(val, mapping)) + for (key, val) in obj.items() + ) + elif isinstance(obj, list): + return [recursive_interpolate(val, mapping) for val in obj] + else: + return obj + + +def interpolate(string, mapping): + try: + return Template(string).substitute(mapping) + except ValueError: + raise InvalidInterpolation(string) + + +class BlankDefaultDict(dict): + def __init__(self, *args, **kwargs): + super(BlankDefaultDict, self).__init__(*args, **kwargs) + self.missing_keys = [] + + def __getitem__(self, key): + try: + return super(BlankDefaultDict, self).__getitem__(key) + except KeyError: + if key not in self.missing_keys: + log.warn( + "The {} variable is not set. Defaulting to a blank string." 
+ .format(key) + ) + self.missing_keys.append(key) + + return "" + + +class InvalidInterpolation(Exception): + def __init__(self, string): + self.string = string diff --git a/compose/config/service_schema.json b/compose/config/service_schema.json new file mode 100644 index 0000000000..5cb5d6d070 --- /dev/null +++ b/compose/config/service_schema.json @@ -0,0 +1,39 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "type": "object", + + "properties": { + "name": {"type": "string"} + }, + + "required": ["name"], + + "allOf": [ + {"$ref": "fields_schema.json#/definitions/service"}, + {"$ref": "#/definitions/service_constraints"} + ], + + "definitions": { + "service_constraints": { + "anyOf": [ + { + "required": ["build"], + "not": {"required": ["image"]} + }, + { + "required": ["image"], + "not": {"anyOf": [ + {"required": ["build"]}, + {"required": ["dockerfile"]} + ]} + }, + { + "required": ["extends"], + "not": {"required": ["build", "image"]} + } + ] + } + } + +} diff --git a/compose/config/validation.py b/compose/config/validation.py new file mode 100644 index 0000000000..542081d526 --- /dev/null +++ b/compose/config/validation.py @@ -0,0 +1,323 @@ +import json +import logging +import os +import sys + +import six +from docker.utils.ports import split_port +from jsonschema import Draft4Validator +from jsonschema import FormatChecker +from jsonschema import RefResolver +from jsonschema import ValidationError + +from .errors import ConfigurationError + + +log = logging.getLogger(__name__) + + +DOCKER_CONFIG_HINTS = { + 'cpu_share': 'cpu_shares', + 'add_host': 'extra_hosts', + 'hosts': 'extra_hosts', + 'extra_host': 'extra_hosts', + 'device': 'devices', + 'link': 'links', + 'memory_swap': 'memswap_limit', + 'port': 'ports', + 'privilege': 'privileged', + 'priviliged': 'privileged', + 'privilige': 'privileged', + 'volume': 'volumes', + 'workdir': 'working_dir', +} + + +VALID_NAME_CHARS = '[a-zA-Z0-9\._\-]' + + +@FormatChecker.cls_checks( + 
format="ports", + raises=ValidationError( + "Invalid port formatting, it should be " + "'[[remote_ip:]remote_port:]port[/protocol]'")) +def format_ports(instance): + try: + split_port(instance) + except ValueError: + return False + return True + + +@FormatChecker.cls_checks(format="environment") +def format_boolean_in_environment(instance): + """ + Check if there is a boolean in the environment and display a warning. + Always return True here so the validation won't raise an error. + """ + if isinstance(instance, bool): + log.warn( + "There is a boolean value in the 'environment' key.\n" + "Environment variables can only be strings.\n" + "Please add quotes to any boolean values to make them string " + "(eg, 'True', 'yes', 'N').\n" + "This warning will become an error in a future release. \r\n" + ) + return True + + +def validate_service_names(config): + for service_name in config.keys(): + if not isinstance(service_name, six.string_types): + raise ConfigurationError( + "Service name: {} needs to be a string, eg '{}'".format( + service_name, + service_name)) + + +def validate_top_level_object(config): + if not isinstance(config, dict): + raise ConfigurationError( + "Top level object needs to be a dictionary. Check your .yml file " + "that you have defined a service at the top level.") + validate_service_names(config) + + +def validate_extends_file_path(service_name, extends_options, filename): + """ + The service to be extended must either be defined in the config key 'file', + or within 'filename'. + """ + error_prefix = "Invalid 'extends' configuration for %s:" % service_name + + if 'file' not in extends_options and filename is None: + raise ConfigurationError( + "%s you need to specify a 'file', e.g. 
'file: something.yml'" % error_prefix + ) + + +def validate_extended_service_exists(extended_service_name, full_extended_config, extended_config_path): + if extended_service_name not in full_extended_config: + msg = ( + "Cannot extend service '%s' in %s: Service not found" + ) % (extended_service_name, extended_config_path) + raise ConfigurationError(msg) + + +def get_unsupported_config_msg(service_name, error_key): + msg = "Unsupported config option for '{}' service: '{}'".format(service_name, error_key) + if error_key in DOCKER_CONFIG_HINTS: + msg += " (did you mean '{}'?)".format(DOCKER_CONFIG_HINTS[error_key]) + return msg + + +def anglicize_validator(validator): + if validator in ["array", "object"]: + return 'an ' + validator + return 'a ' + validator + + +def process_errors(errors, service_name=None): + """ + jsonschema gives us an error tree full of information to explain what has + gone wrong. Process each error and pull out relevant information and re-write + helpful error messages that are relevant. + """ + def _parse_key_from_error_msg(error): + return error.message.split("'")[1] + + def _clean_error_message(message): + return message.replace("u'", "'") + + def _parse_valid_types_from_validator(validator): + """ + A validator value can be either an array of valid types or a string of + a valid type. Parse the valid types and prefix with the correct article. 
+ """ + if isinstance(validator, list): + if len(validator) >= 2: + first_type = anglicize_validator(validator[0]) + last_type = anglicize_validator(validator[-1]) + types_from_validator = ", ".join([first_type] + validator[1:-1]) + + msg = "{} or {}".format( + types_from_validator, + last_type + ) + else: + msg = "{}".format(anglicize_validator(validator[0])) + else: + msg = "{}".format(anglicize_validator(validator)) + + return msg + + def _parse_oneof_validator(error): + """ + oneOf has multiple schemas, so we need to reason about which schema, sub + schema or constraint the validation is failing on. + Inspecting the context value of a ValidationError gives us information about + which sub schema failed and which kind of error it is. + """ + required = [context for context in error.context if context.validator == 'required'] + if required: + return required[0].message + + additionalProperties = [context for context in error.context if context.validator == 'additionalProperties'] + if additionalProperties: + invalid_config_key = _parse_key_from_error_msg(additionalProperties[0]) + return "contains unsupported option: '{}'".format(invalid_config_key) + + constraint = [context for context in error.context if len(context.path) > 0] + if constraint: + valid_types = _parse_valid_types_from_validator(constraint[0].validator_value) + invalid_config_key = "".join( + "'{}' ".format(fragment) for fragment in constraint[0].path + if isinstance(fragment, six.string_types) + ) + msg = "{}contains {}, which is an invalid type, it should be {}".format( + invalid_config_key, + constraint[0].instance, + valid_types + ) + return msg + + uniqueness = [context for context in error.context if context.validator == 'uniqueItems'] + if uniqueness: + msg = "contains non unique items, please remove duplicates from {}".format( + uniqueness[0].instance + ) + return msg + + types = [context.validator_value for context in error.context if context.validator == 'type'] + valid_types = 
_parse_valid_types_from_validator(types) + + msg = "contains an invalid type, it should be {}".format(valid_types) + + return msg + + root_msgs = [] + invalid_keys = [] + required = [] + type_errors = [] + other_errors = [] + + for error in errors: + # handle root level errors + if len(error.path) == 0 and not error.instance.get('name'): + if error.validator == 'type': + msg = "Top level object needs to be a dictionary. Check your .yml file that you have defined a service at the top level." + root_msgs.append(msg) + elif error.validator == 'additionalProperties': + invalid_service_name = _parse_key_from_error_msg(error) + msg = "Invalid service name '{}' - only {} characters are allowed".format(invalid_service_name, VALID_NAME_CHARS) + root_msgs.append(msg) + else: + root_msgs.append(_clean_error_message(error.message)) + + else: + if not service_name: + # field_schema errors will have service name on the path + service_name = error.path[0] + error.path.popleft() + else: + # service_schema errors have the service name passed in, as that + # is not available on error.path or necessarily error.instance + service_name = service_name + + if error.validator == 'additionalProperties': + invalid_config_key = _parse_key_from_error_msg(error) + invalid_keys.append(get_unsupported_config_msg(service_name, invalid_config_key)) + elif error.validator == 'anyOf': + if 'image' in error.instance and 'build' in error.instance: + required.append( + "Service '{}' has both an image and build path specified. " + "A service can either be built to image or use an existing " + "image, not both.".format(service_name)) + elif 'image' not in error.instance and 'build' not in error.instance: + required.append( + "Service '{}' has neither an image nor a build path " + "specified. Exactly one must be provided.".format(service_name)) + elif 'image' in error.instance and 'dockerfile' in error.instance: + required.append( + "Service '{}' has both an image and alternate Dockerfile. 
" + "A service can either be built to image or use an existing " + "image, not both.".format(service_name)) + else: + required.append(_clean_error_message(error.message)) + elif error.validator == 'oneOf': + config_key = error.path[0] + msg = _parse_oneof_validator(error) + + type_errors.append("Service '{}' configuration key '{}' {}".format( + service_name, config_key, msg) + ) + elif error.validator == 'type': + msg = _parse_valid_types_from_validator(error.validator_value) + + if len(error.path) > 0: + config_key = " ".join(["'%s'" % k for k in error.path]) + type_errors.append( + "Service '{}' configuration key {} contains an invalid " + "type, it should be {}".format( + service_name, + config_key, + msg)) + else: + root_msgs.append( + "Service '{}' doesn\'t have any configuration options. " + "All top level keys in your docker-compose.yml must map " + "to a dictionary of configuration options.'".format(service_name)) + elif error.validator == 'required': + config_key = error.path[0] + required.append( + "Service '{}' option '{}' is invalid, {}".format( + service_name, + config_key, + _clean_error_message(error.message))) + elif error.validator == 'dependencies': + dependency_key = list(error.validator_value.keys())[0] + required_keys = ",".join(error.validator_value[dependency_key]) + required.append("Invalid '{}' configuration for '{}' service: when defining '{}' you must set '{}' as well".format( + dependency_key, service_name, dependency_key, required_keys)) + else: + config_key = " ".join(["'%s'" % k for k in error.path]) + err_msg = "Service '{}' configuration key {} value {}".format(service_name, config_key, error.message) + other_errors.append(err_msg) + + return "\n".join(root_msgs + invalid_keys + required + type_errors + other_errors) + + +def validate_against_fields_schema(config): + schema_filename = "fields_schema.json" + format_checkers = ["ports", "environment"] + return _validate_against_schema(config, schema_filename, format_checkers) + + +def 
validate_against_service_schema(config, service_name): + schema_filename = "service_schema.json" + format_checkers = ["ports"] + return _validate_against_schema(config, schema_filename, format_checkers, service_name) + + +def _validate_against_schema(config, schema_filename, format_checker=[], service_name=None): + config_source_dir = os.path.dirname(os.path.abspath(__file__)) + + if sys.platform == "win32": + file_pre_fix = "///" + config_source_dir = config_source_dir.replace('\\', '/') + else: + file_pre_fix = "//" + + resolver_full_path = "file:{}{}/".format(file_pre_fix, config_source_dir) + schema_file = os.path.join(config_source_dir, schema_filename) + + with open(schema_file, "r") as schema_fh: + schema = json.load(schema_fh) + + resolver = RefResolver(resolver_full_path, schema) + validation_output = Draft4Validator(schema, resolver=resolver, format_checker=FormatChecker(format_checker)) + + errors = [error for error in sorted(validation_output.iter_errors(config), key=str)] + if errors: + error_msg = process_errors(errors, service_name) + raise ConfigurationError("Validation failed, reason(s):\n{}".format(error_msg)) diff --git a/compose/const.py b/compose/const.py index 709c3a10d7..1b6894189e 100644 --- a/compose/const.py +++ b/compose/const.py @@ -1,5 +1,9 @@ +import os +import sys DEFAULT_TIMEOUT = 10 +HTTP_TIMEOUT = int(os.environ.get('COMPOSE_HTTP_TIMEOUT', os.environ.get('DOCKER_CLIENT_TIMEOUT', 60))) +IS_WINDOWS_PLATFORM = (sys.platform == "win32") LABEL_CONTAINER_NUMBER = 'com.docker.compose.container-number' LABEL_ONE_OFF = 'com.docker.compose.oneoff' LABEL_PROJECT = 'com.docker.compose.project' diff --git a/compose/container.py b/compose/container.py index 6f88b41dc6..1ca483809a 100644 --- a/compose/container.py +++ b/compose/container.py @@ -1,10 +1,13 @@ -from __future__ import unicode_literals from __future__ import absolute_import +from __future__ import unicode_literals -import six from functools import reduce -from .const import 
LABEL_CONTAINER_NUMBER, LABEL_SERVICE +import six + +from .const import LABEL_CONTAINER_NUMBER +from .const import LABEL_PROJECT +from .const import LABEL_SERVICE class Container(object): @@ -16,6 +19,7 @@ class Container(object): self.client = client self.dictionary = dictionary self.has_been_inspected = has_been_inspected + self.log_stream = None @classmethod def from_ps(cls, client, dictionary, **kwargs): @@ -68,7 +72,12 @@ class Container(object): @property def name_without_project(self): - return '{0}_{1}'.format(self.service, self.number) + project = self.labels.get(LABEL_PROJECT) + + if self.name.startswith('{0}_{1}'.format(project, self.service)): + return '{0}_{1}'.format(self.service, self.number) + else: + return self.name @property def number(self): @@ -104,6 +113,8 @@ class Container(object): @property def human_readable_state(self): + if self.is_paused: + return 'Paused' if self.is_running: return 'Ghost' if self.get('State.Ghost') else 'Up' else: @@ -123,6 +134,26 @@ class Container(object): def is_running(self): return self.get('State.Running') + @property + def is_paused(self): + return self.get('State.Paused') + + @property + def log_driver(self): + return self.get('HostConfig.LogConfig.Type') + + @property + def has_api_logs(self): + log_type = self.log_driver + return not log_type or log_type != 'none' + + def attach_log_stream(self): + """A log stream can only be attached if the container uses a json-file + log driver. + """ + if self.has_api_logs: + self.log_stream = self.attach(stdout=True, stderr=True, stream=True) + def get(self, key): """Return a value from the container or None if the value is not set. 
@@ -146,6 +177,12 @@ class Container(object): def stop(self, **options): return self.client.stop(self.id, **options) + def pause(self, **options): + return self.client.pause(self.id, **options) + + def unpause(self, **options): + return self.client.unpause(self.id, **options) + def kill(self, **options): return self.client.kill(self.id, **options) @@ -155,6 +192,15 @@ class Container(object): def remove(self, **options): return self.client.remove_container(self.id, **options) + def rename_to_tmp_name(self): + """Rename the container to a hopefully unique temporary container name + by prepending the short id. + """ + self.client.rename( + self.id, + '%s_%s' % (self.short_id, self.name) + ) + def inspect_if_not_inspected(self): if not self.has_been_inspected: self.inspect() @@ -183,9 +229,6 @@ class Container(object): def attach(self, *args, **kwargs): return self.client.attach(self.id, *args, **kwargs) - def attach_socket(self, **kwargs): - return self.client.attach_socket(self.id, **kwargs) - def __repr__(self): return '' % (self.name, self.id[:6]) diff --git a/compose/legacy.py b/compose/legacy.py index 6fbf74d692..5416241789 100644 --- a/compose/legacy.py +++ b/compose/legacy.py @@ -2,7 +2,8 @@ import logging import re from .const import LABEL_VERSION -from .container import get_container_name, Container +from .container import Container +from .container import get_container_name log = logging.getLogger(__name__) @@ -16,7 +17,8 @@ Compose found the following containers without labels: {names_list} -As of Compose 1.3.0, containers are identified with labels instead of naming convention. If you want to continue using these containers, run: +As of Compose 1.3.0, containers are identified with labels instead of naming +convention. 
If you want to continue using these containers, run: $ docker-compose migrate-to-labels diff --git a/compose/progress_stream.py b/compose/progress_stream.py index 317c6e8157..ac8e4b410f 100644 --- a/compose/progress_stream.py +++ b/compose/progress_stream.py @@ -1,6 +1,4 @@ -import json -import os -import codecs +from compose import utils class StreamOutputError(Exception): @@ -8,14 +6,13 @@ class StreamOutputError(Exception): def stream_output(output, stream): - is_terminal = hasattr(stream, 'fileno') and os.isatty(stream.fileno()) - stream = codecs.getwriter('utf-8')(stream) + is_terminal = hasattr(stream, 'isatty') and stream.isatty() + stream = utils.get_output_stream(stream) all_events = [] lines = {} diff = 0 - for chunk in output: - event = json.loads(chunk) + for event in utils.json_stream(output): all_events.append(event) if 'progress' in event or 'progressDetail' in event: @@ -55,7 +52,6 @@ def print_output_event(event, stream, is_terminal): # erase current line stream.write("%c[2K\r" % 27) terminator = "\r" - pass elif 'progressDetail' in event: return diff --git a/compose/project.py b/compose/project.py index abc3132a27..1e01eaf6d2 100644 --- a/compose/project.py +++ b/compose/project.py @@ -1,20 +1,30 @@ -from __future__ import unicode_literals from __future__ import absolute_import -from functools import reduce +from __future__ import unicode_literals + import logging +from functools import reduce from docker.errors import APIError +from docker.errors import NotFound -from .config import get_service_name_from_net, ConfigurationError -from .const import DEFAULT_TIMEOUT, LABEL_PROJECT, LABEL_SERVICE, LABEL_ONE_OFF +from .config import ConfigurationError +from .config import get_service_name_from_net +from .const import DEFAULT_TIMEOUT +from .const import LABEL_ONE_OFF +from .const import LABEL_PROJECT +from .const import LABEL_SERVICE from .container import Container from .legacy import check_for_legacy_containers from .service import ContainerNet +from 
.service import ConvergenceStrategy from .service import Net +from .service import parse_volume_from_spec from .service import Service from .service import ServiceNet +from .service import VolumeFromSpec from .utils import parallel_execute + log = logging.getLogger(__name__) @@ -27,12 +37,18 @@ def sort_service_dicts(services): def get_service_names(links): return [link.split(':')[0] for link in links] + def get_service_names_from_volumes_from(volumes_from): + return [ + parse_volume_from_spec(volume_from).source + for volume_from in volumes_from + ] + def get_service_dependents(service_dict, services): name = service_dict['name'] return [ service for service in services if (name in get_service_names(service.get('links', [])) or - name in service.get('volumes_from', []) or + name in get_service_names_from_volumes_from(service.get('volumes_from', [])) or name == get_service_name_from_net(service.get('net'))) ] @@ -62,10 +78,12 @@ class Project(object): """ A collection of services. """ - def __init__(self, name, services, client): + def __init__(self, name, services, client, use_networking=False, network_driver=None): self.name = name self.services = services self.client = client + self.use_networking = use_networking + self.network_driver = network_driver def labels(self, one_off=False): return [ @@ -74,11 +92,15 @@ class Project(object): ] @classmethod - def from_dicts(cls, name, service_dicts, client): + def from_dicts(cls, name, service_dicts, client, use_networking=False, network_driver=None): """ Construct a ServiceCollection from a list of dicts representing services. 
""" - project = cls(name, [], client) + project = cls(name, [], client, use_networking=use_networking, network_driver=network_driver) + + if use_networking: + remove_links(service_dicts) + for service_dict in sort_service_dicts(service_dicts): links = project.get_links(service_dict) volumes_from = project.get_volumes_from(service_dict) @@ -88,6 +110,7 @@ class Project(object): Service( client=client, project=name, + use_networking=use_networking, links=links, net=net, volumes_from=volumes_from, @@ -160,29 +183,40 @@ class Project(object): try: links.append((self.get_service(service_name), link_name)) except NoSuchService: - raise ConfigurationError('Service "%s" has a link to service "%s" which does not exist.' % (service_dict['name'], service_name)) + raise ConfigurationError( + 'Service "%s" has a link to service "%s" which does not ' + 'exist.' % (service_dict['name'], service_name)) del service_dict['links'] return links def get_volumes_from(self, service_dict): volumes_from = [] if 'volumes_from' in service_dict: - for volume_name in service_dict.get('volumes_from', []): + for volume_from_config in service_dict.get('volumes_from', []): + volume_from_spec = parse_volume_from_spec(volume_from_config) + # Get service try: - service = self.get_service(volume_name) - volumes_from.append(service) + service_name = self.get_service(volume_from_spec.source) + volume_from_spec = VolumeFromSpec(service_name, volume_from_spec.mode) except NoSuchService: try: - container = Container.from_id(self.client, volume_name) - volumes_from.append(container) + container_name = Container.from_id(self.client, volume_from_spec.source) + volume_from_spec = VolumeFromSpec(container_name, volume_from_spec.mode) except APIError: - raise ConfigurationError('Service "%s" mounts volumes from "%s", which is not the name of a service or container.' 
% (service_dict['name'], volume_name)) + raise ConfigurationError( + 'Service "%s" mounts volumes from "%s", which is ' + 'not the name of a service or container.' % ( + service_dict['name'], + volume_from_spec.source)) + volumes_from.append(volume_from_spec) del service_dict['volumes_from'] return volumes_from def get_net(self, service_dict): net = service_dict.pop('net', None) if not net: + if self.use_networking: + return Net(self.name) return Net(None) net_name = get_service_name_from_net(net) @@ -214,6 +248,14 @@ class Project(object): msg="Stopping" ) + def pause(self, service_names=None, **options): + for service in reversed(self.get_services(service_names)): + service.pause(**options) + + def unpause(self, service_names=None, **options): + for service in self.get_services(service_names): + service.unpause(**options) + def kill(self, service_names=None, **options): parallel_execute( objects=self.containers(service_names), @@ -236,34 +278,30 @@ class Project(object): for service in self.get_services(service_names): service.restart(**options) - def build(self, service_names=None, no_cache=False): + def build(self, service_names=None, no_cache=False, pull=False): for service in self.get_services(service_names): if service.can_be_built(): - service.build(no_cache) + service.build(no_cache, pull) else: log.info('%s uses an image, skipping' % service.name) def up(self, service_names=None, start_deps=True, - allow_recreate=True, - force_recreate=False, + strategy=ConvergenceStrategy.changed, do_build=True, - timeout=DEFAULT_TIMEOUT): - - if force_recreate and not allow_recreate: - raise ValueError("force_recreate and allow_recreate are in conflict") + timeout=DEFAULT_TIMEOUT, + detached=False): services = self.get_services(service_names, include_deps=start_deps) for service in services: service.remove_duplicate_containers() - plans = self._get_convergence_plans( - services, - allow_recreate=allow_recreate, - force_recreate=force_recreate, - ) + plans = 
self._get_convergence_plans(services, strategy) + + if self.use_networking: + self.ensure_network_exists() return [ container @@ -271,15 +309,12 @@ class Project(object): for container in service.execute_convergence_plan( plans[service.name], do_build=do_build, - timeout=timeout + timeout=timeout, + detached=detached ) ] - def _get_convergence_plans(self, - services, - allow_recreate=True, - force_recreate=False): - + def _get_convergence_plans(self, services, strategy): plans = {} for service in services: @@ -290,28 +325,21 @@ class Project(object): and plans[name].action == 'recreate' ] - if updated_dependencies and allow_recreate: - log.debug( - '%s has upstream changes (%s)', - service.name, ", ".join(updated_dependencies), - ) - plan = service.convergence_plan( - allow_recreate=allow_recreate, - force_recreate=True, - ) + if updated_dependencies and strategy.allows_recreate: + log.debug('%s has upstream changes (%s)', + service.name, + ", ".join(updated_dependencies)) + plan = service.convergence_plan(ConvergenceStrategy.always) else: - plan = service.convergence_plan( - allow_recreate=allow_recreate, - force_recreate=force_recreate, - ) + plan = service.convergence_plan(strategy) plans[service.name] = plan return plans - def pull(self, service_names=None): - for service in self.get_services(service_names, include_deps=True): - service.pull() + def pull(self, service_names=None, ignore_pull_failures=False): + for service in self.get_services(service_names, include_deps=False): + service.pull(ignore_pull_failures) def containers(self, service_names=None, stopped=False, one_off=False): if service_names: @@ -319,11 +347,11 @@ class Project(object): else: service_names = self.service_names - containers = filter(None, [ + containers = list(filter(None, [ Container.from_ps(self.client, container) for container in self.client.containers( all=stopped, - filters={'label': self.labels(one_off=one_off)})]) + filters={'label': self.labels(one_off=one_off)})])) def 
matches_service_names(container): return container.labels.get(LABEL_SERVICE) in service_names @@ -335,7 +363,27 @@ class Project(object): self.service_names, ) - return filter(matches_service_names, containers) + return [c for c in containers if matches_service_names(c)] + + def get_network(self): + try: + return self.client.inspect_network(self.name) + except NotFound: + return None + + def ensure_network_exists(self): + # TODO: recreate network if driver has changed? + if self.get_network() is None: + log.info( + 'Creating network "{}" with driver "{}"' + .format(self.name, self.network_driver) + ) + self.client.create_network(self.name, driver=self.network_driver) + + def remove_network(self): + network = self.get_network() + if network: + self.client.remove_network(network['id']) def _inject_deps(self, acc, service): dep_names = service.get_dependency_names() @@ -352,6 +400,26 @@ class Project(object): return acc + dep_services +def remove_links(service_dicts): + services_with_links = [s for s in service_dicts if 'links' in s] + if not services_with_links: + return + + if len(services_with_links) == 1: + prefix = '"{}" defines'.format(services_with_links[0]['name']) + else: + prefix = 'Some services ({}) define'.format( + ", ".join('"{}"'.format(s['name']) for s in services_with_links)) + + log.warn( + '\n{} links, which are not compatible with Docker networking and will be ignored.\n' + 'Future versions of Docker will not support links - you should remove them for ' + 'forwards-compatibility.\n'.format(prefix)) + + for s in services_with_links: + del s['links'] + + class NoSuchService(Exception): def __init__(self, name): self.name = name diff --git a/compose/service.py b/compose/service.py index b3c68735c5..66c90b0e03 100644 --- a/compose/service.py +++ b/compose/service.py @@ -1,31 +1,39 @@ -from __future__ import unicode_literals from __future__ import absolute_import -from collections import namedtuple +from __future__ import unicode_literals + import 
logging -import re import os +import re import sys +from collections import namedtuple from operator import attrgetter +import enum import six from docker.errors import APIError -from docker.utils import create_host_config, LogConfig +from docker.utils import LogConfig +from docker.utils.ports import build_port_bindings +from docker.utils.ports import split_port from . import __version__ -from .config import DOCKER_CONFIG_KEYS, merge_environment -from .const import ( - DEFAULT_TIMEOUT, - LABEL_CONTAINER_NUMBER, - LABEL_ONE_OFF, - LABEL_PROJECT, - LABEL_SERVICE, - LABEL_VERSION, - LABEL_CONFIG_HASH, -) +from .config import DOCKER_CONFIG_KEYS +from .config import merge_environment +from .config.validation import VALID_NAME_CHARS +from .const import DEFAULT_TIMEOUT +from .const import IS_WINDOWS_PLATFORM +from .const import LABEL_CONFIG_HASH +from .const import LABEL_CONTAINER_NUMBER +from .const import LABEL_ONE_OFF +from .const import LABEL_PROJECT +from .const import LABEL_SERVICE +from .const import LABEL_VERSION from .container import Container from .legacy import check_for_legacy_containers -from .progress_stream import stream_output, StreamOutputError -from .utils import json_hash, parallel_execute +from .progress_stream import stream_output +from .progress_stream import StreamOutputError +from .utils import json_hash +from .utils import parallel_execute + log = logging.getLogger(__name__) @@ -33,11 +41,13 @@ log = logging.getLogger(__name__) DOCKER_START_KEYS = [ 'cap_add', 'cap_drop', + 'cgroup_parent', 'devices', 'dns', 'dns_search', 'env_file', 'extra_hosts', + 'ipc', 'read_only', 'net', 'log_driver', @@ -51,8 +61,6 @@ DOCKER_START_KEYS = [ 'security_opt', ] -VALID_NAME_CHARS = '[a-zA-Z0-9\._\-]' - class BuildError(Exception): def __init__(self, service, reason): @@ -76,46 +84,61 @@ class NoSuchImageError(Exception): VolumeSpec = namedtuple('VolumeSpec', 'external internal mode') +VolumeFromSpec = namedtuple('VolumeFromSpec', 'source mode') + + ServiceName 
= namedtuple('ServiceName', 'project service number') ConvergencePlan = namedtuple('ConvergencePlan', 'action containers') +@enum.unique +class ConvergenceStrategy(enum.Enum): + """Enumeration for all possible convergence strategies. Values refer to + when containers should be recreated. + """ + changed = 1 + always = 2 + never = 3 + + @property + def allows_recreate(self): + return self is not type(self).never + + class Service(object): def __init__( self, name, client=None, project='default', + use_networking=False, links=None, volumes_from=None, net=None, **options ): - if not re.match('^%s+$' % VALID_NAME_CHARS, name): - raise ConfigError('Invalid service name "%s" - only %s are allowed' % (name, VALID_NAME_CHARS)) if not re.match('^%s+$' % VALID_NAME_CHARS, project): raise ConfigError('Invalid project name "%s" - only %s are allowed' % (project, VALID_NAME_CHARS)) - if 'image' in options and 'build' in options: - raise ConfigError('Service %s has both an image and build path specified. A service can either be built to image or use an existing image, not both.' % name) - if 'image' not in options and 'build' not in options: - raise ConfigError('Service %s has neither an image nor a build path specified. Exactly one must be provided.' 
% name) self.name = name self.client = client self.project = project + self.use_networking = use_networking self.links = links or [] self.volumes_from = volumes_from or [] self.net = net or Net(None) self.options = options - def containers(self, stopped=False, one_off=False): - containers = filter(None, [ + def containers(self, stopped=False, one_off=False, filters={}): + filters.update({'label': self.labels(one_off=one_off)}) + + containers = list(filter(None, [ Container.from_ps(self.client, container) for container in self.client.containers( all=stopped, - filters={'label': self.labels(one_off=one_off)})]) + filters=filters)])) if not containers: check_for_legacy_containers( @@ -143,17 +166,27 @@ class Service(object): # TODO: remove these functions, project takes care of starting/stopping, def stop(self, **options): for c in self.containers(): - log.info("Stopping %s..." % c.name) + log.info("Stopping %s" % c.name) c.stop(**options) + def pause(self, **options): + for c in self.containers(filters={'status': 'running'}): + log.info("Pausing %s" % c.name) + c.pause(**options) + + def unpause(self, **options): + for c in self.containers(filters={'status': 'paused'}): + log.info("Unpausing %s" % c.name) + c.unpause() + def kill(self, **options): for c in self.containers(): - log.info("Killing %s..." % c.name) + log.info("Killing %s" % c.name) c.kill(**options) def restart(self, **options): for c in self.containers(): - log.info("Restarting %s..." % c.name) + log.info("Restarting %s" % c.name) c.restart(**options) # end TODO @@ -279,7 +312,7 @@ class Service(object): ) if 'name' in container_options and not quiet: - log.info("Creating %s..." 
% container_options['name']) + log.info("Creating %s" % container_options['name']) return Container.create(self.client, **container_options) @@ -316,22 +349,19 @@ class Service(object): else: return self.options['image'] - def convergence_plan(self, - allow_recreate=True, - force_recreate=False): - - if force_recreate and not allow_recreate: - raise ValueError("force_recreate and allow_recreate are in conflict") - + def convergence_plan(self, strategy=ConvergenceStrategy.changed): containers = self.containers(stopped=True) if not containers: return ConvergencePlan('create', []) - if not allow_recreate: + if strategy is ConvergenceStrategy.never: return ConvergencePlan('start', containers) - if force_recreate or self._containers_have_diverged(containers): + if ( + strategy is ConvergenceStrategy.always or + self._containers_have_diverged(containers) + ): return ConvergencePlan('recreate', containers) stopped = [c for c in containers if not c.is_running] @@ -345,7 +375,7 @@ class Service(object): config_hash = None try: - config_hash = self.config_hash() + config_hash = self.config_hash except NoSuchImageError as e: log.debug( 'Service %s has diverged: %s', @@ -369,13 +399,17 @@ class Service(object): def execute_convergence_plan(self, plan, do_build=True, - timeout=DEFAULT_TIMEOUT): + timeout=DEFAULT_TIMEOUT, + detached=False): (action, containers) = plan + should_attach_logs = not detached if action == 'create': - container = self.create_container( - do_build=do_build, - ) + container = self.create_container(do_build=do_build) + + if should_attach_logs: + container.attach_log_stream() + self.start_container(container) return [container] @@ -383,15 +417,16 @@ class Service(object): elif action == 'recreate': return [ self.recreate_container( - c, - timeout=timeout + container, + timeout=timeout, + attach_logs=should_attach_logs ) - for c in containers + for container in containers ] elif action == 'start': - for c in containers: - self.start_container_if_stopped(c) 
+ for container in containers: + self.start_container_if_stopped(container, attach_logs=should_attach_logs) return containers @@ -406,44 +441,37 @@ class Service(object): def recreate_container(self, container, - timeout=DEFAULT_TIMEOUT): + timeout=DEFAULT_TIMEOUT, + attach_logs=False): """Recreate a container. The original container is renamed to a temporary name so that data volumes can be copied to the new container, before the original container is removed. """ - log.info("Recreating %s..." % container.name) - try: - container.stop(timeout=timeout) - except APIError as e: - if (e.response.status_code == 500 - and e.explanation - and 'no such process' in str(e.explanation)): - pass - else: - raise - - # Use a hopefully unique container name by prepending the short id - self.client.rename( - container.id, - '%s_%s' % (container.short_id, container.name)) + log.info("Recreating %s" % container.name) + container.stop(timeout=timeout) + container.rename_to_tmp_name() new_container = self.create_container( do_build=False, previous_container=container, number=container.labels.get(LABEL_CONTAINER_NUMBER), quiet=True, ) + if attach_logs: + new_container.attach_log_stream() self.start_container(new_container) container.remove() return new_container - def start_container_if_stopped(self, container): + def start_container_if_stopped(self, container, attach_logs=False): if container.is_running: return container else: - log.info("Starting %s..." % container.name) + log.info("Starting %s" % container.name) + if attach_logs: + container.attach_log_stream() return self.start_container(container) def start_container(self, container): @@ -452,7 +480,7 @@ class Service(object): def remove_duplicate_containers(self, timeout=DEFAULT_TIMEOUT): for c in self.duplicate_containers(): - log.info('Removing %s...' 
% c.name) + log.info('Removing %s' % c.name) c.stop(timeout=timeout) c.remove() @@ -470,6 +498,7 @@ class Service(object): else: numbers.add(c.number) + @property def config_hash(self): return json_hash(self.config_dict()) @@ -495,7 +524,7 @@ class Service(object): return [(service.name, alias) for service, alias in self.links] def get_volumes_from_names(self): - return [s.name for s in self.volumes_from if isinstance(s, Service)] + return [s.source.name for s in self.volumes_from if isinstance(s.source, Service)] def get_container_name(self, number, one_off=False): # TODO: Implement issue #652 here @@ -514,6 +543,9 @@ class Service(object): return 1 if not numbers else max(numbers) + 1 def _get_links(self, link_to_self): + if self.use_networking: + return [] + links = [] for service, link_name in self.links: for container in service.containers(): @@ -535,16 +567,9 @@ class Service(object): def _get_volumes_from(self): volumes_from = [] - for volume_source in self.volumes_from: - if isinstance(volume_source, Service): - containers = volume_source.containers(stopped=True) - if not containers: - volumes_from.append(volume_source.create_container().id) - else: - volumes_from.extend(map(attrgetter('id'), containers)) - - elif isinstance(volume_source, Container): - volumes_from.append(volume_source.id) + for volume_from_spec in self.volumes_from: + volumes = build_volume_from(volume_from_spec) + volumes_from.extend(volumes) return volumes_from @@ -563,7 +588,7 @@ class Service(object): if self.custom_container_name() and not one_off: container_options['name'] = self.custom_container_name() - else: + elif not container_options.get('name'): container_options['name'] = self.get_container_name(number, one_off) if 'detach' not in container_options: @@ -580,16 +605,19 @@ class Service(object): container_options['hostname'] = parts[0] container_options['domainname'] = parts[2] + if 'hostname' not in container_options and self.use_networking: + container_options['hostname'] = 
self.name + if 'ports' in container_options or 'expose' in self.options: ports = [] all_ports = container_options.get('ports', []) + self.options.get('expose', []) - for port in all_ports: - port = str(port) - if ':' in port: - port = port.split(':')[-1] - if '/' in port: - port = tuple(port.split('/')) - ports.append(port) + for port_range in all_ports: + internal_range, _ = split_port(port_range) + for port in internal_range: + port = str(port) + if '/' in port: + port = tuple(port.split('/')) + ports.append(port) container_options['ports'] = ports override_options['binds'] = merge_volume_bindings( @@ -614,7 +642,7 @@ class Service(object): container_options.get('labels', {}), self.labels(one_off=one_off), number, - self.config_hash() if add_config_hash else None) + self.config_hash if add_config_hash else None) # Delete options which are only used when starting for key in DOCKER_START_KEYS: @@ -634,7 +662,7 @@ class Service(object): cap_add = options.get('cap_add', None) cap_drop = options.get('cap_drop', None) log_config = LogConfig( - type=options.get('log_driver', 'json-file'), + type=options.get('log_driver', ""), config=options.get('log_opt', None) ) pid = options.get('pid', None) @@ -654,8 +682,9 @@ class Service(object): read_only = options.get('read_only', None) devices = options.get('devices', None) + cgroup_parent = options.get('cgroup_parent', None) - return create_host_config( + return self.client.create_host_config( links=self._get_links(link_to_self=one_off), port_bindings=port_bindings, binds=options.get('binds'), @@ -674,20 +703,26 @@ class Service(object): extra_hosts=extra_hosts, read_only=read_only, pid_mode=pid, - security_opt=security_opt + security_opt=security_opt, + ipc_mode=options.get('ipc'), + cgroup_parent=cgroup_parent ) - def build(self, no_cache=False): - log.info('Building %s...' 
% self.name) + def build(self, no_cache=False, pull=False): + log.info('Building %s' % self.name) - path = six.binary_type(self.options['build']) + path = self.options['build'] + # python2 os.path() doesn't support unicode, so we need to encode it to + # a byte string + if not six.PY3: + path = path.encode('utf8') build_output = self.client.build( path=path, tag=self.image_name, stream=True, rm=True, - pull=False, + pull=pull, nocache=no_cache, dockerfile=self.options.get('dockerfile', None), ) @@ -695,7 +730,7 @@ class Service(object): try: all_events = stream_output(build_output, sys.stdout) except StreamOutputError as e: - raise BuildError(self, unicode(e)) + raise BuildError(self, six.text_type(e)) # Ensure the HTTP connection is not reused for another # streaming command, as the Docker daemon can sometimes @@ -741,19 +776,26 @@ class Service(object): return True return False - def pull(self): + def pull(self, ignore_pull_failures=False): if 'image' not in self.options: return - repo, tag = parse_repository_tag(self.options['image']) + repo, tag, separator = parse_repository_tag(self.options['image']) tag = tag or 'latest' - log.info('Pulling %s (%s:%s)...' % (self.name, repo, tag)) + log.info('Pulling %s (%s%s%s)...' % (self.name, repo, separator, tag)) output = self.client.pull( repo, tag=tag, stream=True, ) - stream_output(output, sys.stdout) + + try: + stream_output(output, sys.stdout) + except StreamOutputError as e: + if not ignore_pull_failures: + raise + else: + log.error(six.text_type(e)) class Net(object): @@ -806,7 +848,7 @@ class ServiceNet(object): if containers: return 'container:' + containers[0].id - log.warn("Warning: Service %s is trying to use reuse the network stack " + log.warn("Service %s is trying to use reuse the network stack " "of another service that is not running." 
% (self.id)) return None @@ -823,14 +865,31 @@ def build_container_name(project, service, number, one_off=False): # Images +def parse_repository_tag(repo_path): + """Splits image identification into base image path, tag/digest + and it's separator. -def parse_repository_tag(s): - if ":" not in s: - return s, "" - repo, tag = s.rsplit(":", 1) - if "/" in tag: - return s, "" - return repo, tag + Example: + + >>> parse_repository_tag('user/repo@sha256:digest') + ('user/repo', 'sha256:digest', '@') + >>> parse_repository_tag('user/repo:v1') + ('user/repo', 'v1', ':') + """ + tag_separator = ":" + digest_separator = "@" + + if digest_separator in repo_path: + repo, tag = repo_path.rsplit(digest_separator, 1) + return repo, tag, digest_separator + + repo, tag = repo_path, "" + if tag_separator in repo_path: + repo, tag = repo_path.rsplit(tag_separator, 1) + if "/" in tag: + repo, tag = repo_path, "" + + return repo, tag, tag_separator # Volumes @@ -849,7 +908,7 @@ def merge_volume_bindings(volumes_option, previous_container): volume_bindings.update( get_container_data_volumes(previous_container, volumes_option)) - return volume_bindings.values() + return list(volume_bindings.values()) def get_container_data_volumes(container, volumes_option): @@ -862,7 +921,7 @@ def get_container_data_volumes(container, volumes_option): container_volumes = container.get('Volumes') or {} image_volumes = container.image_config['ContainerConfig'].get('Volumes') or {} - for volume in set(volumes_option + image_volumes.keys()): + for volume in set(volumes_option + list(image_volumes)): volume = parse_volume_spec(volume) # No need to preserve host volumes if volume.external: @@ -884,53 +943,85 @@ def build_volume_binding(volume_spec): return volume_spec.internal, "{}:{}:{}".format(*volume_spec) +def normalize_paths_for_engine(external_path, internal_path): + """Windows paths, c:\my\path\shiny, need to be changed to be compatible with + the Engine. 
Volume paths are expected to be linux style /c/my/path/shiny/ + """ + if not IS_WINDOWS_PLATFORM: + return external_path, internal_path + + if external_path: + drive, tail = os.path.splitdrive(external_path) + + if drive: + external_path = '/' + drive.lower().rstrip(':') + tail + + external_path = external_path.replace('\\', '/') + + return external_path, internal_path.replace('\\', '/') + + def parse_volume_spec(volume_config): - parts = volume_config.split(':') + """ + Parse a volume_config path and split it into external:internal[:mode] + parts to be returned as a valid VolumeSpec. + """ + if IS_WINDOWS_PLATFORM: + # relative paths in windows expand to include the drive, eg C:\ + # so we join the first 2 parts back together to count as one + drive, tail = os.path.splitdrive(volume_config) + parts = tail.split(":") + + if drive: + parts[0] = drive + parts[0] + else: + parts = volume_config.split(':') + if len(parts) > 3: raise ConfigError("Volume %s has incorrect format, should be " "external:internal[:mode]" % volume_config) if len(parts) == 1: - external = None - internal = os.path.normpath(parts[0]) + external, internal = normalize_paths_for_engine(None, os.path.normpath(parts[0])) else: - external = os.path.normpath(parts[0]) - internal = os.path.normpath(parts[1]) + external, internal = normalize_paths_for_engine(os.path.normpath(parts[0]), os.path.normpath(parts[1])) - mode = parts[2] if len(parts) == 3 else 'rw' + mode = 'rw' + if len(parts) == 3: + mode = parts[2] return VolumeSpec(external, internal, mode) -# Ports +def build_volume_from(volume_from_spec): + """ + volume_from can be either a service or a container. We want to return the + container.id and format it into a string complete with the mode. 
+ """ + if isinstance(volume_from_spec.source, Service): + containers = volume_from_spec.source.containers(stopped=True) + if not containers: + return ["{}:{}".format(volume_from_spec.source.create_container().id, volume_from_spec.mode)] + + container = containers[0] + return ["{}:{}".format(container.id, volume_from_spec.mode)] + elif isinstance(volume_from_spec.source, Container): + return ["{}:{}".format(volume_from_spec.source.id, volume_from_spec.mode)] -def build_port_bindings(ports): - port_bindings = {} - for port in ports: - internal_port, external = split_port(port) - if internal_port in port_bindings: - port_bindings[internal_port].append(external) - else: - port_bindings[internal_port] = [external] - return port_bindings - - -def split_port(port): - parts = str(port).split(':') - if not 1 <= len(parts) <= 3: - raise ConfigError('Invalid port "%s", should be ' - '[[remote_ip:]remote_port:]port[/protocol]' % port) +def parse_volume_from_spec(volume_from_config): + parts = volume_from_config.split(':') + if len(parts) > 2: + raise ConfigError("Volume %s has incorrect format, should be " + "external:internal[:mode]" % volume_from_config) if len(parts) == 1: - internal_port, = parts - return internal_port, None - if len(parts) == 2: - external_port, internal_port = parts - return internal_port, external_port + source = parts[0] + mode = 'rw' + else: + source, mode = parts - external_ip, external_port, internal_port = parts - return internal_port, (external_ip, external_port or None) + return VolumeFromSpec(source, mode) # Labels diff --git a/compose/utils.py b/compose/utils.py index 61d6d80243..c8fddc5f16 100644 --- a/compose/utils.py +++ b/compose/utils.py @@ -1,25 +1,29 @@ import codecs import hashlib import json +import json.decoder import logging import sys - -from docker.errors import APIError -from Queue import Queue, Empty from threading import Thread +import six +from docker.errors import APIError +from six.moves.queue import Empty +from 
six.moves.queue import Queue + log = logging.getLogger(__name__) +json_decoder = json.JSONDecoder() + def parallel_execute(objects, obj_callable, msg_index, msg): """ For a given list of objects, call the callable passing in the first object we give it. """ - stream = codecs.getwriter('utf-8')(sys.stdout) + stream = get_output_stream(sys.stdout) lines = [] - errors = {} for obj in objects: write_out_msg(stream, lines, msg_index(obj), msg) @@ -27,16 +31,17 @@ def parallel_execute(objects, obj_callable, msg_index, msg): q = Queue() def inner_execute_function(an_callable, parameter, msg_index): + error = None try: result = an_callable(parameter) except APIError as e: - errors[msg_index] = e.explanation + error = e.explanation result = "error" except Exception as e: - errors[msg_index] = e + error = e result = 'unexpected_exception' - q.put((msg_index, result)) + q.put((msg_index, result, error)) for an_object in objects: t = Thread( @@ -47,15 +52,17 @@ def parallel_execute(objects, obj_callable, msg_index, msg): t.start() done = 0 + errors = {} total_to_execute = len(objects) while done < total_to_execute: try: - msg_index, result = q.get(timeout=1) + msg_index, result, error = q.get(timeout=1) if result == 'unexpected_exception': - raise errors[msg_index] + errors[msg_index] = result, error if result == 'error': + errors[msg_index] = result, error write_out_msg(stream, lines, msg_index, msg, status='error') else: write_out_msg(stream, lines, msg_index, msg) @@ -63,10 +70,85 @@ def parallel_execute(objects, obj_callable, msg_index, msg): except Empty: pass - if errors: - stream.write("\n") - for error in errors: - stream.write("ERROR: for {} {} \n".format(error, errors[error])) + if not errors: + return + + stream.write("\n") + for msg_index, (result, error) in errors.items(): + stream.write("ERROR: for {} {} \n".format(msg_index, error)) + if result == 'unexpected_exception': + raise error + + +def get_output_stream(stream): + if six.PY3: + return stream + return 
codecs.getwriter('utf-8')(stream) + + +def stream_as_text(stream): + """Given a stream of bytes or text, if any of the items in the stream + are bytes convert them to text. + + This function can be removed once docker-py returns text streams instead + of byte streams. + """ + for data in stream: + if not isinstance(data, six.text_type): + data = data.decode('utf-8') + yield data + + +def line_splitter(buffer, separator=u'\n'): + index = buffer.find(six.text_type(separator)) + if index == -1: + return None, None + return buffer[:index + 1], buffer[index + 1:] + + +def split_buffer(stream, splitter=None, decoder=lambda a: a): + """Given a generator which yields strings and a splitter function, + joins all input, splits on the separator and yields each chunk. + + Unlike string.split(), each chunk includes the trailing + separator, except for the last one if none was found on the end + of the input. + """ + splitter = splitter or line_splitter + buffered = six.text_type('') + + for data in stream_as_text(stream): + buffered += data + while True: + item, rest = splitter(buffered) + if not item: + break + + buffered = rest + yield item + + if buffered: + yield decoder(buffered) + + +def json_splitter(buffer): + """Attempt to parse a json object from a buffer. If there is at least one + object, return it and the rest of the buffer, otherwise return None. + """ + try: + obj, index = json_decoder.raw_decode(buffer) + rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end():] + return obj, rest + except ValueError: + return None, None + + +def json_stream(stream): + """Given a stream of text, return a stream of json objects. + This handles streams which are inconsistently buffered (some entries may + be newline delimited, and others are not). 
+ """ + return split_buffer(stream_as_text(stream), json_splitter, json_decoder.decode) def write_out_msg(stream, lines, msg_index, msg, status="done"): @@ -82,13 +164,13 @@ def write_out_msg(stream, lines, msg_index, msg, status="done"): stream.write("%c[%dA" % (27, diff)) # erase stream.write("%c[2K\r" % 27) - stream.write("{} {}... {}\n".format(msg, obj_index, status)) + stream.write("{} {} ... {}\n".format(msg, obj_index, status)) # move back down stream.write("%c[%dB" % (27, diff)) else: diff = 0 lines.append(obj_index) - stream.write("{} {}... \r\n".format(msg, obj_index)) + stream.write("{} {} ... \r\n".format(msg, obj_index)) stream.flush() @@ -96,5 +178,5 @@ def write_out_msg(stream, lines, msg_index, msg, status="done"): def json_hash(obj): dump = json.dumps(obj, sort_keys=True, separators=(',', ':')) h = hashlib.sha256() - h.update(dump) + h.update(dump.encode('utf8')) return h.hexdigest() diff --git a/contrib/completion/bash/docker-compose b/contrib/completion/bash/docker-compose index e7d8cb3f8e..0eed1f18b7 100644 --- a/contrib/completion/bash/docker-compose +++ b/contrib/completion/bash/docker-compose @@ -20,7 +20,7 @@ # For compatibility reasons, Compose and therefore its completion supports several # stack compositon files as listed here, in descending priority. # Support for these filenames might be dropped in some future version. -__docker-compose_compose_file() { +__docker_compose_compose_file() { local file for file in docker-compose.y{,a}ml fig.y{,a}ml ; do [ -e $file ] && { @@ -32,34 +32,34 @@ __docker-compose_compose_file() { } # Extracts all service names from the compose file. 
-___docker-compose_all_services_in_compose_file() { - awk -F: '/^[a-zA-Z0-9]/{print $1}' "${compose_file:-$(__docker-compose_compose_file)}" 2>/dev/null +___docker_compose_all_services_in_compose_file() { + awk -F: '/^[a-zA-Z0-9]/{print $1}' "${compose_file:-$(__docker_compose_compose_file)}" 2>/dev/null } # All services, even those without an existing container -__docker-compose_services_all() { - COMPREPLY=( $(compgen -W "$(___docker-compose_all_services_in_compose_file)" -- "$cur") ) +__docker_compose_services_all() { + COMPREPLY=( $(compgen -W "$(___docker_compose_all_services_in_compose_file)" -- "$cur") ) } # All services that have an entry with the given key in their compose_file section -___docker-compose_services_with_key() { +___docker_compose_services_with_key() { # flatten sections to one line, then filter lines containing the key and return section name. - awk '/^[a-zA-Z0-9]/{printf "\n"};{printf $0;next;}' "${compose_file:-$(__docker-compose_compose_file)}" | awk -F: -v key=": +$1:" '$0 ~ key {print $1}' + awk '/^[a-zA-Z0-9]/{printf "\n"};{printf $0;next;}' "${compose_file:-$(__docker_compose_compose_file)}" 2>/dev/null | awk -F: -v key=": +$1:" '$0 ~ key {print $1}' } # All services that are defined by a Dockerfile reference -__docker-compose_services_from_build() { - COMPREPLY=( $(compgen -W "$(___docker-compose_services_with_key build)" -- "$cur") ) +__docker_compose_services_from_build() { + COMPREPLY=( $(compgen -W "$(___docker_compose_services_with_key build)" -- "$cur") ) } # All services that are defined by an image -__docker-compose_services_from_image() { - COMPREPLY=( $(compgen -W "$(___docker-compose_services_with_key image)" -- "$cur") ) +__docker_compose_services_from_image() { + COMPREPLY=( $(compgen -W "$(___docker_compose_services_with_key image)" -- "$cur") ) } # The services for which containers have been created, optionally filtered # by a boolean expression passed in as argument. 
-__docker-compose_services_with() { +__docker_compose_services_with() { local containers names containers="$(docker-compose 2>/dev/null ${compose_file:+-f $compose_file} ${compose_project:+-p $compose_project} ps -q)" names=( $(docker 2>/dev/null inspect --format "{{if ${1:-true}}} {{ .Name }} {{end}}" $containers) ) @@ -68,30 +68,35 @@ __docker-compose_services_with() { COMPREPLY=( $(compgen -W "${names[*]}" -- "$cur") ) } +# The services for which at least one paused container exists +__docker_compose_services_paused() { + __docker_compose_services_with '.State.Paused' +} + # The services for which at least one running container exists -__docker-compose_services_running() { - __docker-compose_services_with '.State.Running' +__docker_compose_services_running() { + __docker_compose_services_with '.State.Running' } # The services for which at least one stopped container exists -__docker-compose_services_stopped() { - __docker-compose_services_with 'not .State.Running' +__docker_compose_services_stopped() { + __docker_compose_services_with 'not .State.Running' } -_docker-compose_build() { +_docker_compose_build() { case "$cur" in -*) - COMPREPLY=( $( compgen -W "--help --no-cache" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "--help --no-cache --pull" -- "$cur" ) ) ;; *) - __docker-compose_services_from_build + __docker_compose_services_from_build ;; esac } -_docker-compose_docker-compose() { +_docker_compose_docker_compose() { case "$prev" in --file|-f) _filedir "y?(a)ml" @@ -100,11 +105,15 @@ _docker-compose_docker-compose() { --project-name|-p) return ;; + --x-network-driver) + COMPREPLY=( $( compgen -W "bridge host none overlay" -- "$cur" ) ) + return + ;; esac case "$cur" in -*) - COMPREPLY=( $( compgen -W "--help -h --verbose --version -v --file -f --project-name -p" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "--file -f --help -h --project-name -p --verbose --version -v --x-networking --x-network-driver" -- "$cur" ) ) ;; *) COMPREPLY=( $( compgen -W 
"${commands[*]}" -- "$cur" ) ) @@ -113,12 +122,12 @@ _docker-compose_docker-compose() { } -_docker-compose_help() { +_docker_compose_help() { COMPREPLY=( $( compgen -W "${commands[*]}" -- "$cur" ) ) } -_docker-compose_kill() { +_docker_compose_kill() { case "$prev" in -s) COMPREPLY=( $( compgen -W "SIGHUP SIGINT SIGKILL SIGUSR1 SIGUSR2" -- "$(echo $cur | tr '[:lower:]' '[:upper:]')" ) ) @@ -131,25 +140,25 @@ _docker-compose_kill() { COMPREPLY=( $( compgen -W "--help -s" -- "$cur" ) ) ;; *) - __docker-compose_services_running + __docker_compose_services_running ;; esac } -_docker-compose_logs() { +_docker_compose_logs() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help --no-color" -- "$cur" ) ) ;; *) - __docker-compose_services_all + __docker_compose_services_all ;; esac } -_docker-compose_migrate-to-labels() { +_docker_compose_migrate_to_labels() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) @@ -158,7 +167,19 @@ _docker-compose_migrate-to-labels() { } -_docker-compose_port() { +_docker_compose_pause() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_compose_services_running + ;; + esac +} + + +_docker_compose_port() { case "$prev" in --protocol) COMPREPLY=( $( compgen -W "tcp udp" -- "$cur" ) ) @@ -174,39 +195,39 @@ _docker-compose_port() { COMPREPLY=( $( compgen -W "--help --index --protocol" -- "$cur" ) ) ;; *) - __docker-compose_services_all + __docker_compose_services_all ;; esac } -_docker-compose_ps() { +_docker_compose_ps() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help -q" -- "$cur" ) ) ;; *) - __docker-compose_services_all + __docker_compose_services_all ;; esac } -_docker-compose_pull() { +_docker_compose_pull() { case "$cur" in -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "--help --ignore-pull-failures" -- "$cur" ) ) ;; *) - __docker-compose_services_from_image + __docker_compose_services_from_image ;; esac } 
-_docker-compose_restart() { +_docker_compose_restart() { case "$prev" in - -t | --timeout) + --timeout|-t) return ;; esac @@ -216,81 +237,54 @@ _docker-compose_restart() { COMPREPLY=( $( compgen -W "--help --timeout -t" -- "$cur" ) ) ;; *) - __docker-compose_services_running + __docker_compose_services_running ;; esac } -_docker-compose_rm() { +_docker_compose_rm() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--force -f --help -v" -- "$cur" ) ) ;; *) - __docker-compose_services_stopped + __docker_compose_services_stopped ;; esac } -_docker-compose_run() { +_docker_compose_run() { case "$prev" in -e) COMPREPLY=( $( compgen -e -- "$cur" ) ) compopt -o nospace return ;; - --entrypoint|--user|-u) + --entrypoint|--name|--user|-u) return ;; esac case "$cur" in -*) - COMPREPLY=( $( compgen -W "-d --entrypoint -e --help --no-deps --rm --service-ports -T --user -u" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "-d --entrypoint -e --help --name --no-deps --publish -p --rm --service-ports -T --user -u" -- "$cur" ) ) ;; *) - __docker-compose_services_all + __docker_compose_services_all ;; esac } -_docker-compose_scale() { +_docker_compose_scale() { case "$prev" in =) COMPREPLY=("$cur") + return ;; - *) - COMPREPLY=( $(compgen -S "=" -W "$(___docker-compose_all_services_in_compose_file)" -- "$cur") ) - compopt -o nospace - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - esac -} - - -_docker-compose_start() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - __docker-compose_services_stopped - ;; - esac -} - - -_docker-compose_stop() { - case "$prev" in - -t | --timeout) + --timeout|-t) return ;; esac @@ -300,15 +294,58 @@ _docker-compose_stop() { COMPREPLY=( $( compgen -W "--help --timeout -t" -- "$cur" ) ) ;; *) - __docker-compose_services_running + COMPREPLY=( $(compgen -S "=" -W "$(___docker_compose_all_services_in_compose_file)" -- "$cur") ) + compopt -o nospace ;; esac } -_docker-compose_up() 
{ +_docker_compose_start() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_compose_services_stopped + ;; + esac +} + + +_docker_compose_stop() { case "$prev" in - -t | --timeout) + --timeout|-t) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --timeout -t" -- "$cur" ) ) + ;; + *) + __docker_compose_services_running + ;; + esac +} + + +_docker_compose_unpause() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_compose_services_paused + ;; + esac +} + + +_docker_compose_up() { + case "$prev" in + --timeout|-t) return ;; esac @@ -318,13 +355,13 @@ _docker-compose_up() { COMPREPLY=( $( compgen -W "-d --help --no-build --no-color --no-deps --no-recreate --force-recreate --timeout -t" -- "$cur" ) ) ;; *) - __docker-compose_services_all + __docker_compose_services_all ;; esac } -_docker-compose_version() { +_docker_compose_version() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--short" -- "$cur" ) ) @@ -333,7 +370,7 @@ _docker-compose_version() { } -_docker-compose() { +_docker_compose() { local previous_extglob_setting=$(shopt -p extglob) shopt -s extglob @@ -343,6 +380,7 @@ _docker-compose() { kill logs migrate-to-labels + pause port ps pull @@ -352,6 +390,7 @@ _docker-compose() { scale start stop + unpause up version ) @@ -362,19 +401,22 @@ _docker-compose() { # search subcommand and invoke its handler. 
# special treatment of some top-level options - local command='docker-compose' + local command='docker_compose' local counter=1 local compose_file compose_project while [ $counter -lt $cword ]; do case "${words[$counter]}" in - -f|--file) + --file|-f) (( counter++ )) compose_file="${words[$counter]}" ;; - -p|--project-name) + --project-name|-p) (( counter++ )) compose_project="${words[$counter]}" ;; + --x-network-driver) + (( counter++ )) + ;; -*) ;; *) @@ -385,11 +427,11 @@ _docker-compose() { (( counter++ )) done - local completions_func=_docker-compose_${command} + local completions_func=_docker_compose_${command//-/_} declare -F $completions_func >/dev/null && $completions_func eval "$previous_extglob_setting" return 0 } -complete -F _docker-compose docker-compose +complete -F _docker_compose docker-compose diff --git a/contrib/completion/zsh/_docker-compose b/contrib/completion/zsh/_docker-compose index 9af21a98b3..d79b25d165 100644 --- a/contrib/completion/zsh/_docker-compose +++ b/contrib/completion/zsh/_docker-compose @@ -7,7 +7,7 @@ # ------------------------------------------------------------------------- # Version # ------- -# 0.1.0 +# 1.5.0 # ------------------------------------------------------------------------- # Authors # ------- @@ -37,40 +37,54 @@ __docker-compose_compose_file() { ___docker-compose_all_services_in_compose_file() { local already_selected local -a services - already_selected=$(echo ${words[@]} | tr " " "|") + already_selected=$(echo $words | tr " " "|") awk -F: '/^[a-zA-Z0-9]/{print $1}' "${compose_file:-$(__docker-compose_compose_file)}" 2>/dev/null | grep -Ev "$already_selected" } # All services, even those without an existing container __docker-compose_services_all() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 services=$(___docker-compose_all_services_in_compose_file) - _alternative "args:services:($services)" + _alternative "args:services:($services)" && ret=0 + + return ret } # All services that have an entry with the 
given key in their docker-compose.yml section ___docker-compose_services_with_key() { local already_selected local -a buildable - already_selected=$(echo ${words[@]} | tr " " "|") + already_selected=$(echo $words | tr " " "|") # flatten sections to one line, then filter lines containing the key and return section name. awk '/^[a-zA-Z0-9]/{printf "\n"};{printf $0;next;}' "${compose_file:-$(__docker-compose_compose_file)}" 2>/dev/null | awk -F: -v key=": +$1:" '$0 ~ key {print $1}' 2>/dev/null | grep -Ev "$already_selected" } # All services that are defined by a Dockerfile reference __docker-compose_services_from_build() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 buildable=$(___docker-compose_services_with_key build) - _alternative "args:buildable services:($buildable)" + _alternative "args:buildable services:($buildable)" && ret=0 + + return ret } # All services that are defined by an image __docker-compose_services_from_image() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 pullable=$(___docker-compose_services_with_key image) - _alternative "args:pullable services:($pullable)" + _alternative "args:pullable services:($pullable)" && ret=0 + + return ret } __docker-compose_get_services() { - local kind expl - declare -a running stopped lines args services + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local kind + declare -a running paused stopped lines args services docker_status=$(docker ps > /dev/null 2>&1) if [ $? 
-ne 0 ]; then @@ -80,64 +94,78 @@ __docker-compose_get_services() { kind=$1 shift - [[ $kind = (stopped|all) ]] && args=($args -a) + [[ $kind =~ (stopped|all) ]] && args=($args -a) - lines=(${(f)"$(_call_program commands docker ps ${args})"}) - services=(${(f)"$(_call_program commands docker-compose 2>/dev/null ${compose_file:+-f $compose_file} ${compose_project:+-p $compose_project} ps -q)"}) + lines=(${(f)"$(_call_program commands docker ps $args)"}) + services=(${(f)"$(_call_program commands docker-compose 2>/dev/null $compose_options ps -q)"}) # Parse header line to find columns local i=1 j=1 k header=${lines[1]} declare -A begin end - while (( $j < ${#header} - 1 )) { - i=$(( $j + ${${header[$j,-1]}[(i)[^ ]]} - 1)) - j=$(( $i + ${${header[$i,-1]}[(i) ]} - 1)) - k=$(( $j + ${${header[$j,-1]}[(i)[^ ]]} - 2)) - begin[${header[$i,$(($j-1))]}]=$i - end[${header[$i,$(($j-1))]}]=$k - } + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done lines=(${lines[2,-1]}) # Container ID local line s name local -a names for line in $lines; do - if [[ $services == *"${line[${begin[CONTAINER ID]},${end[CONTAINER ID]}]%% ##}"* ]]; then + if [[ ${services[@]} == *"${line[${begin[CONTAINER ID]},${end[CONTAINER ID]}]%% ##}"* ]]; then names=(${(ps:,:)${${line[${begin[NAMES]},-1]}%% *}}) for name in $names; do s="${${name%_*}#*_}:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}" s="$s, ${line[${begin[CONTAINER ID]},${end[CONTAINER ID]}]%% ##}" - s="$s, ${${${line[$begin[IMAGE],$end[IMAGE]]}/:/\\:}%% ##}" + s="$s, ${${${line[${begin[IMAGE]},${end[IMAGE]}]}/:/\\:}%% ##}" if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then stopped=($stopped $s) else + if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = *\(Paused\)* ]]; then + paused=($paused $s) + fi 
running=($running $s) fi done fi done - [[ $kind = (running|all) ]] && _describe -t services-running "running services" running - [[ $kind = (stopped|all) ]] && _describe -t services-stopped "stopped services" stopped + [[ $kind =~ (running|all) ]] && _describe -t services-running "running services" running "$@" && ret=0 + [[ $kind =~ (paused|all) ]] && _describe -t services-paused "paused services" paused "$@" && ret=0 + [[ $kind =~ (stopped|all) ]] && _describe -t services-stopped "stopped services" stopped "$@" && ret=0 + + return ret +} + +__docker-compose_pausedservices() { + [[ $PREFIX = -* ]] && return 1 + __docker-compose_get_services paused "$@" } __docker-compose_stoppedservices() { + [[ $PREFIX = -* ]] && return 1 __docker-compose_get_services stopped "$@" } __docker-compose_runningservices() { + [[ $PREFIX = -* ]] && return 1 __docker-compose_get_services running "$@" } -__docker-compose_services () { +__docker-compose_services() { + [[ $PREFIX = -* ]] && return 1 __docker-compose_get_services all "$@" } __docker-compose_caching_policy() { - oldp=( "$1"(Nmh+1) ) # 1 hour + oldp=( "$1"(Nmh+1) ) # 1 hour (( $#oldp )) } -__docker-compose_commands () { +__docker-compose_commands() { local cache_policy zstyle -s ":completion:${curcontext}:" cache-policy cache_policy @@ -156,14 +184,16 @@ __docker-compose_commands () { _describe -t docker-compose-commands "docker-compose command" _docker_compose_subcommands } -__docker-compose_subcommand () { - local -a _command_args +__docker-compose_subcommand() { + local opts_help='(: -)--help[Print usage]' integer ret=1 + case "$words[1]" in (build) _arguments \ - '--help[Print usage]' \ + $opts_help \ '--no-cache[Do not use cache when building the image]' \ + '--pull[Always attempt to pull a newer version of the image.]' \ '*:services:__docker-compose_services_from_build' && ret=0 ;; (help) @@ -171,24 +201,29 @@ __docker-compose_subcommand () { ;; (kill) _arguments \ - '--help[Print usage]' \ + $opts_help \ '-s[SIGNAL to 
send to the container. Default signal is SIGKILL.]:signal:_signals' \ '*:running services:__docker-compose_runningservices' && ret=0 ;; (logs) _arguments \ - '--help[Print usage]' \ + $opts_help \ '--no-color[Produce monochrome output.]' \ '*:services:__docker-compose_services_all' && ret=0 ;; (migrate-to-labels) _arguments -A '-*' \ - '--help[Print usage]' \ + $opts_help \ '(-):Recreate containers to add labels' && ret=0 ;; + (pause) + _arguments \ + $opts_help \ + '*:running services:__docker-compose_runningservices' && ret=0 + ;; (port) _arguments \ - '--help[Print usage]' \ + $opts_help \ '--protocol=-[tcp or udap (defaults to tcp)]:protocol:(tcp udp)' \ '--index=-[index of the container if there are mutiple instances of a service (defaults to 1)]:index: ' \ '1:running services:__docker-compose_runningservices' \ @@ -196,32 +231,35 @@ __docker-compose_subcommand () { ;; (ps) _arguments \ - '--help[Print usage]' \ + $opts_help \ '-q[Only display IDs]' \ '*:services:__docker-compose_services_all' && ret=0 ;; (pull) _arguments \ - '--help[Print usage]' \ + $opts_help \ + '--ignore-pull-failures[Pull what it can and ignores images with pull failures.]' \ '*:services:__docker-compose_services_from_image' && ret=0 ;; (rm) _arguments \ + $opts_help \ '(-f --force)'{-f,--force}"[Don't ask to confirm removal]" \ - '--help[Print usage]' \ '-v[Remove volumes associated with containers]' \ '*:stopped services:__docker-compose_stoppedservices' && ret=0 ;; (run) _arguments \ + $opts_help \ '-d[Detached mode: Run container in the background, print new container name.]' \ + '--name[Assign a name to the container]:name: ' \ '--entrypoint[Overwrite the entrypoint of the image.]:entry point: ' \ '*-e[KEY=VAL Set an environment variable (can be used multiple times)]:environment variable KEY=VAL: ' \ - '--help[Print usage]' \ '(-u --user)'{-u,--user=-}'[Run as specified username or uid]:username or uid:_users' \ "--no-deps[Don't start linked services.]" \ '--rm[Remove container 
after run. Ignored in detached mode.]' \ "--service-ports[Run command with the service's ports enabled and mapped to the host.]" \ + '(-p --publish)'{-p,--publish=-}"[Run command with manually mapped container's port(s) to the host.]" \ '-T[Disable pseudo-tty allocation. By default `docker-compose run` allocates a TTY.]' \ '(-):services:__docker-compose_services' \ '(-):command: _command_names -e' \ @@ -229,45 +267,52 @@ __docker-compose_subcommand () { ;; (scale) _arguments \ - '--help[Print usage]' \ + $opts_help \ + '(-t --timeout)'{-t,--timeout}"[Specify a shutdown timeout in seconds. (default: 10)]:seconds: " \ '*:running services:__docker-compose_runningservices' && ret=0 ;; (start) _arguments \ - '--help[Print usage]' \ + $opts_help \ '*:stopped services:__docker-compose_stoppedservices' && ret=0 ;; (stop|restart) _arguments \ - '--help[Print usage]' \ + $opts_help \ '(-t --timeout)'{-t,--timeout}"[Specify a shutdown timeout in seconds. (default: 10)]:seconds: " \ '*:running services:__docker-compose_runningservices' && ret=0 ;; + (unpause) + _arguments \ + $opts_help \ + '*:paused services:__docker-compose_pausedservices' && ret=0 + ;; (up) _arguments \ + $opts_help \ '-d[Detached mode: Run containers in the background, print new container names.]' \ - '--help[Print usage]' \ '--no-color[Produce monochrome output.]' \ "--no-deps[Don't start linked services.]" \ + "--force-recreate[Recreate containers even if their configuration and image haven't changed. Incompatible with --no-recreate.]" \ "--no-recreate[If containers already exist, don't recreate them.]" \ - "--force-recreate[Recreate containers even if their configuration and image haven't changed]" \ "--no-build[Don't build an image, even if it's missing]" \ '(-t --timeout)'{-t,--timeout}"[Specify a shutdown timeout in seconds. 
(default: 10)]:seconds: " \ '*:services:__docker-compose_services_all' && ret=0 ;; (version) _arguments \ - '--help[Print usage]' \ + $opts_help \ "--short[Shows only Compose's version number.]" && ret=0 ;; (*) - _message 'Unknown sub command' + _message 'Unknown sub command' && ret=1 + ;; esac return ret } -_docker-compose () { +_docker-compose() { # Support for subservices, which allows for `compdef _docker docker-shell=_docker_containers`. # Based on /usr/share/zsh/functions/Completion/Unix/_git without support for `ret`. if [[ $service != docker-compose ]]; then @@ -275,7 +320,8 @@ _docker-compose () { return fi - local curcontext="$curcontext" state line ret=1 + local curcontext="$curcontext" state line + integer ret=1 typeset -A opt_args _arguments -C \ @@ -284,26 +330,14 @@ _docker-compose () { '(- :)'{-v,--version}'[Print version and exit]' \ '(-f --file)'{-f,--file}'[Specify an alternate docker-compose file (default: docker-compose.yml)]:file:_files -g "*.yml"' \ '(-p --project-name)'{-p,--project-name}'[Specify an alternate project name (default: directory name)]:project name:' \ + '--x-networking[(EXPERIMENTAL) Use new Docker networking functionality. Requires Docker 1.9 or later.]' \ + '--x-network-driver[(EXPERIMENTAL) Specify a network driver (default: "bridge"). 
Requires Docker 1.9 or later.]:Network Driver:(bridge host none overlay)' \ '(-): :->command' \ '(-)*:: :->option-or-argument' && ret=0 - local counter=1 - #local compose_file compose_project - while [ $counter -lt ${#words[@]} ]; do - case "${words[$counter]}" in - -f|--file) - (( counter++ )) - compose_file="${words[$counter]}" - ;; - -p|--project-name) - (( counter++ )) - compose_project="${words[$counter]}" - ;; - *) - ;; - esac - (( counter++ )) - done + local compose_file=${opt_args[-f]}${opt_args[--file]} + local compose_project=${opt_args[-p]}${opt_args[--project-name]} + local compose_options="${compose_file:+--file $compose_file} ${compose_project:+--project-name $compose_project}" case $state in (command) diff --git a/docker b/docker deleted file mode 100755 index f24f3613f9..0000000000 Binary files a/docker and /dev/null differ diff --git a/docker-compose.spec b/docker-compose.spec new file mode 100644 index 0000000000..678fc13238 --- /dev/null +++ b/docker-compose.spec @@ -0,0 +1,26 @@ +# -*- mode: python -*- + +block_cipher = None + +a = Analysis(['bin/docker-compose'], + pathex=['.'], + hiddenimports=[], + hookspath=None, + runtime_hooks=None, + cipher=block_cipher) + +pyz = PYZ(a.pure, + cipher=block_cipher) + +exe = EXE(pyz, + a.scripts, + a.binaries, + a.zipfiles, + a.datas, + [('compose/config/fields_schema.json', 'compose/config/fields_schema.json', 'DATA')], + [('compose/config/service_schema.json', 'compose/config/service_schema.json', 'DATA')], + name='docker-compose', + debug=False, + strip=None, + upx=True, + console=True ) diff --git a/docs/Dockerfile b/docs/Dockerfile index d9add75c15..0114f04e48 100644 --- a/docs/Dockerfile +++ b/docs/Dockerfile @@ -1,26 +1,15 @@ -FROM docs/base:latest +FROM docs/base:hugo-github-linking MAINTAINER Mary Anthony (@moxiegirl) +RUN svn checkout https://github.com/docker/docker/trunk/docs /docs/content/engine +RUN svn checkout https://github.com/docker/swarm/trunk/docs /docs/content/swarm +RUN svn checkout 
https://github.com/docker/machine/trunk/docs /docs/content/machine +RUN svn checkout https://github.com/docker/distribution/trunk/docs /docs/content/registry +RUN svn checkout https://github.com/kitematic/kitematic/trunk/docs /docs/content/kitematic +RUN svn checkout https://github.com/docker/tutorials/trunk/docs /docs/content/tutorials +RUN svn checkout https://github.com/docker/opensource/trunk/docs /docs/content + # To get the git info for this repo COPY . /src COPY . /docs/content/compose/ - -RUN svn checkout https://github.com/docker/docker/trunk/docs /docs/content/docker -RUN svn checkout https://github.com/docker/swarm/trunk/docs /docs/content/swarm -RUN svn checkout https://github.com/docker/machine/trunk/docs /docs/content/machine -RUN svn checkout https://github.com/docker/distribution/trunk/docs /docs/content/registry -RUN svn checkout https://github.com/docker/tutorials/trunk/docs /docs/content/tutorials -RUN svn checkout https://github.com/docker/opensource/trunk/docs /docs/content - - -# Sed to process GitHub Markdown -# 1-2 Remove comment code from metadata block -# 3 Change ](/word to ](/project/ in links -# 4 Change ](word.md) to ](/project/word) -# 5 Remove .md extension from link text -# 6 Change ](../ to ](/project/word) -# 7 Change ](../../ to ](/project/ --> not implemented -# -# -RUN /src/pre-process.sh /docs diff --git a/docs/Makefile b/docs/Makefile index 021e8f6e5e..b9ef054828 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -13,8 +13,8 @@ DOCKER_ENVS := \ -e TIMEOUT # note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds -# to allow `make DOCSDIR=docs docs-shell` (to create a bind mount in docs) -DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) +# to allow `make DOCSDIR=1 docs-shell` (to create a bind mount in docs) +DOCS_MOUNT := $(if $(DOCSDIR),-v 
$(CURDIR):/docs/content/compose) # to allow `make DOCSPORT=9000 docs` DOCSPORT := 8000 @@ -37,7 +37,7 @@ GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null) default: docs docs: docs-build - $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP) + $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP) --watch docs-draft: docs-build $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --buildDrafts="true" --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP) diff --git a/docs/README.md b/docs/README.md index 4d6465637f..8fbad30c58 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,8 +1,8 @@ # Contributing to the Docker Compose documentation -The documentation in this directory is part of the [https://docs.docker.com](https://docs.docker.com) website. Docker uses [the Hugo static generator](http://gohugo.io/overview/introduction/) to convert project Markdown files to a static HTML site. +The documentation in this directory is part of the [https://docs.docker.com](https://docs.docker.com) website. Docker uses [the Hugo static generator](http://gohugo.io/overview/introduction/) to convert project Markdown files to a static HTML site. -You don't need to be a Hugo expert to contribute to the compose documentation. If you are familiar with Markdown, you can modify the content in the `docs` files. +You don't need to be a Hugo expert to contribute to the compose documentation. If you are familiar with Markdown, you can modify the content in the `docs` files. If you want to add a new file or change the location of the document in the menu, you do need to know a little more. 
@@ -23,7 +23,7 @@ If you want to add a new file or change the location of the document in the menu docker run --rm -it -e AWS_S3_BUCKET -e NOCACHE -p 8000:8000 -e DOCKERHOST "docs-base:test-tooling" hugo server --port=8000 --baseUrl=192.168.59.103 --bind=0.0.0.0 ERROR: 2015/06/13 MenuEntry's .Url is deprecated and will be removed in Hugo 0.15. Use .URL instead. 0 of 4 drafts rendered - 0 future content + 0 future content 12 pages created 0 paginator pages created 0 tags created @@ -52,7 +52,7 @@ The top of each Docker Compose documentation file contains TOML metadata. The me parent="smn_workw_compose" weight=2 +++ - + The metadata alone has this structure: @@ -64,7 +64,7 @@ The metadata alone has this structure: parent="smn_workw_compose" weight=2 +++ - + The `[menu.main]` section refers to navigation defined [in the main Docker menu](https://github.com/docker/docs-base/blob/hugo/config.toml). This metadata says *add a menu item called* Extending services in Compose *to the menu with the* `smn_workdw_compose` *identifier*. If you locate the menu in the configuration, you'll find *Create multi-container applications* is the menu title. You can move an article in the tree by specifying a new parent. You can shift the location of the item by changing its weight. Higher numbers are heavier and shift the item to the bottom of menu. Low or no numbers shift it up. @@ -73,5 +73,5 @@ You can move an article in the tree by specifying a new parent. You can shift th ## Other key documentation repositories The `docker/docs-base` repository contains [the Hugo theme and menu configuration](https://github.com/docker/docs-base). If you open the `Dockerfile` you'll see the `make docs` relies on this as a base image for building the Compose documentation. - + The `docker/docs.docker.com` repository contains [build system for building the Docker documentation site](https://github.com/docker/docs.docker.com). Fork this repository to build the entire documentation site. 
diff --git a/docs/completion.md b/docs/completion.md index 41ef88e62d..3c2022d827 100644 --- a/docs/completion.md +++ b/docs/completion.md @@ -1,15 +1,15 @@ -# Command Completion +# Command-line Completion Compose comes with [command completion](http://en.wikipedia.org/wiki/Command-line_completion) for the bash and zsh shell. @@ -59,11 +59,10 @@ Enjoy working with Compose faster and with less typos! ## Compose documentation -- [User guide](/) +- [User guide](index.md) - [Installing Compose](install.md) - [Get started with Django](django.md) - [Get started with Rails](rails.md) -- [Get started with Wordpress](wordpress.md) -- [Command line reference](cli.md) -- [Yaml file reference](yml.md) -- [Compose environment variables](env.md) +- [Get started with WordPress](wordpress.md) +- [Command line reference](./reference/index.md) +- [Compose file reference](compose-file.md) diff --git a/docs/yml.md b/docs/compose-file.md similarity index 63% rename from docs/yml.md rename to docs/compose-file.md index bd339ec1a0..034653efe8 100644 --- a/docs/yml.md +++ b/docs/compose-file.md @@ -1,15 +1,20 @@ -# docker-compose.yml reference +# Compose file reference + +The compose file is a [YAML](http://yaml.org/) file where all the top level +keys are the name of a service, and the values are the service definition. +The default path for a compose file is `./docker-compose.yml`. Each service defined in `docker-compose.yml` must specify exactly one of `image` or `build`. Other keys are optional, and are analogous to their @@ -19,14 +24,10 @@ As with `docker run`, options specified in the Dockerfile (e.g., `CMD`, `EXPOSE`, `VOLUME`, `ENV`) are respected by default - you don't need to specify them again in `docker-compose.yml`. -### image +## Service configuration reference -Tag or partial image ID. Can be local or remote - Compose will attempt to -pull if it doesn't exist locally. 
- - image: ubuntu - image: orchardup/postgresql - image: a4bc65fd +This section contains a list of all configuration options supported by a service +definition. ### build @@ -38,6 +39,68 @@ Compose will build and tag it with a generated name, and use that image thereaft build: /path/to/build/dir +Using `build` together with `image` is not allowed. Attempting to do so results in an error. + +### cap_add, cap_drop + +Add or drop container capabilities. +See `man 7 capabilities` for a full list. + + cap_add: + - ALL + + cap_drop: + - NET_ADMIN + - SYS_ADMIN + +### command + +Override the default command. + + command: bundle exec thin -p 3000 + +### cgroup_parent + +Specify an optional parent cgroup for the container. + + cgroup_parent: m-executor-abcd + +### container_name + +Specify a custom container name, rather than a generated default name. + + container_name: my-web-container + +Because Docker container names must be unique, you cannot scale a service +beyond 1 container if you have specified a custom name. Attempting to do so +results in an error. + +### devices + +List of device mappings. Uses the same format as the `--device` docker +client create option. + + devices: + - "/dev/ttyUSB0:/dev/ttyUSB0" + +### dns + +Custom DNS servers. Can be a single value or a list. + + dns: 8.8.8.8 + dns: + - 8.8.8.8 + - 9.9.9.9 + +### dns_search + +Custom DNS search domains. Can be a single value or a list. + + dns_search: example.com + dns_search: + - dc1.example.com + - dc2.example.com + ### dockerfile Alternate Dockerfile. @@ -46,33 +109,86 @@ Compose will use an alternate file to build with. dockerfile: Dockerfile-alternate -### command +Using `dockerfile` together with `image` is not allowed. Attempting to do so results in an error. -Override the default command. +### env_file - command: bundle exec thin -p 3000 +Add environment variables from a file. Can be a single value or a list. 
- -### links +If you have specified a Compose file with `docker-compose -f FILE`, paths in +`env_file` are relative to the directory that file is in. -Link to containers in another service. Either specify both the service name and -the link alias (`SERVICE:ALIAS`), or just the service name (which will also be -used for the alias). +Environment variables specified in `environment` override these values. - links: - - db - - db:database - - redis + env_file: .env -An entry with the alias' name will be created in `/etc/hosts` inside containers -for this service, e.g: + env_file: + - ./common.env + - ./apps/web.env + - /opt/secrets.env - 172.17.2.186 db - 172.17.2.186 database - 172.17.2.187 redis +Compose expects each line in an env file to be in `VAR=VAL` format. Lines +beginning with `#` (i.e. comments) are ignored, as are blank lines. -Environment variables will also be created - see the [environment variable -reference](env.md) for details. + # Set Rails/Rack environment + RACK_ENV=development + +### environment + +Add environment variables. You can use either an array or a dictionary. Any +boolean values; true, false, yes no, need to be enclosed in quotes to ensure +they are not converted to True or False by the YML parser. + +Environment variables with only a key are resolved to their values on the +machine Compose is running on, which can be helpful for secret or host-specific values. + + environment: + RACK_ENV: development + SHOW: 'true' + SESSION_SECRET: + + environment: + - RACK_ENV=development + - SHOW=true + - SESSION_SECRET + +### expose + +Expose ports without publishing them to the host machine - they'll only be +accessible to linked services. Only the internal port can be specified. + + expose: + - "3000" + - "8000" + +### extends + +Extend another service, in the current file or another, optionally overriding +configuration. + +You can use `extends` on any service together with other configuration keys. 
+The `extends` value must be a dictionary defined with a required `service` +and an optional `file` key. + + extends: + file: common.yml + service: webapp + +The `service` the name of the service being extended, for example +`web` or `database`. The `file` is the location of a Compose configuration +file defining that service. + +If you omit the `file` Compose looks for the service configuration in the +current file. The `file` value can be an absolute or relative path. If you +specify a relative path, Compose treats it as relative to the location of the +current file. + +You can extend a service that itself extends another. You can extend +indefinitely. Compose does not support circular references and `docker-compose` +returns an error if it encounters one. + +For more on `extends`, see the +[the extends documentation](extends.md#extending-services). ### external_links @@ -99,136 +215,14 @@ An entry with the ip address and hostname will be created in `/etc/hosts` inside 162.242.195.82 somehost 50.31.209.229 otherhost -### ports +### image -Expose ports. Either specify both ports (`HOST:CONTAINER`), or just the container -port (a random host port will be chosen). +Tag or partial image ID. Can be local or remote - Compose will attempt to +pull if it doesn't exist locally. -> **Note:** When mapping ports in the `HOST:CONTAINER` format, you may experience -> erroneous results when using a container port lower than 60, because YAML will -> parse numbers in the format `xx:yy` as sexagesimal (base 60). For this reason, -> we recommend always explicitly specifying your port mappings as strings. - - ports: - - "3000" - - "8000:8000" - - "49100:22" - - "127.0.0.1:8001:8001" - -### expose - -Expose ports without publishing them to the host machine - they'll only be -accessible to linked services. Only the internal port can be specified. 
- - expose: - - "3000" - - "8000" - -### volumes - -Mount paths as volumes, optionally specifying a path on the host machine -(`HOST:CONTAINER`), or an access mode (`HOST:CONTAINER:ro`). - - volumes: - - /var/lib/mysql - - ./cache:/tmp/cache - - ~/configs:/etc/configs/:ro - -You can mount a relative path on the host, which will expand relative to -the directory of the Compose configuration file being used. Relative paths -should always begin with `.` or `..`. - -> Note: No path expansion will be done if you have also specified a -> `volume_driver`. - -### volumes_from - -Mount all of the volumes from another service or container. - - volumes_from: - - service_name - - container_name - -### environment - -Add environment variables. You can use either an array or a dictionary. - -Environment variables with only a key are resolved to their values on the -machine Compose is running on, which can be helpful for secret or host-specific values. - - environment: - RACK_ENV: development - SESSION_SECRET: - - environment: - - RACK_ENV=development - - SESSION_SECRET - -### env_file - -Add environment variables from a file. Can be a single value or a list. - -If you have specified a Compose file with `docker-compose -f FILE`, paths in -`env_file` are relative to the directory that file is in. - -Environment variables specified in `environment` override these values. - - env_file: .env - - env_file: - - ./common.env - - ./apps/web.env - - /opt/secrets.env - -Compose expects each line in an env file to be in `VAR=VAL` format. Lines -beginning with `#` (i.e. comments) are ignored, as are blank lines. - - # Set Rails/Rack environment - RACK_ENV=development - -### extends - -Extend another service, in the current file or another, optionally overriding -configuration. - -Here's a simple example. Suppose we have 2 files - **common.yml** and -**development.yml**. 
We can use `extends` to define a service in -**development.yml** which uses configuration defined in **common.yml**: - -**common.yml** - - webapp: - build: ./webapp - environment: - - DEBUG=false - - SEND_EMAILS=false - -**development.yml** - - web: - extends: - file: common.yml - service: webapp - ports: - - "8000:8000" - links: - - db - environment: - - DEBUG=true - db: - image: postgres - -Here, the `web` service in **development.yml** inherits the configuration of -the `webapp` service in **common.yml** - the `build` and `environment` keys - -and adds `ports` and `links` configuration. It overrides one of the defined -environment variables (DEBUG) with a new value, and the other one -(SEND_EMAILS) is left untouched. - -The `file` key is optional, if it is not set then Compose will look for the -service within the current file. - -For more on `extends`, see the [tutorial](extends.md#example) and -[reference](extends.md#reference). + image: ubuntu + image: orchardup/postgresql + image: a4bc65fd ### labels @@ -246,21 +240,31 @@ It's recommended that you use reverse-DNS notation to prevent your labels from c - "com.example.department=Finance" - "com.example.label-with-empty-value" -### container_name +### links -Specify a custom container name, rather than a generated default name. +Link to containers in another service. Either specify both the service name and +the link alias (`SERVICE:ALIAS`), or just the service name (which will also be +used for the alias). - container_name: my-web-container + links: + - db + - db:database + - redis -Because Docker container names must be unique, you cannot scale a service -beyond 1 container if you have specified a custom name. Attempting to do so -results in an error. 
+An entry with the alias' name will be created in `/etc/hosts` inside containers +for this service, e.g: -### log driver + 172.17.2.186 db + 172.17.2.186 database + 172.17.2.187 redis -Specify a logging driver for the service's containers, as with the ``--log-driver`` option for docker run ([documented here](http://docs.docker.com/reference/run/#logging-drivers-log-driver)). +Environment variables will also be created - see the [environment variable +reference](env.md) for details. -Allowed values are currently ``json-file``, ``syslog`` and ``none``. The list will change over time as more drivers are added to the Docker engine. +### log_driver + +Specify a logging driver for the service's containers, as with the ``--log-driver`` +option for docker run ([documented here](https://docs.docker.com/reference/logging/overview/)). The default value is json-file. @@ -268,13 +272,19 @@ The default value is json-file. log_driver: "syslog" log_driver: "none" +> **Note:** Only the `json-file` driver makes the logs available directly from +> `docker-compose up` and `docker-compose logs`. Using any other driver will not +> print any logs. + +### log_opt + Specify logging options with `log_opt` for the logging driver, as with the ``--log-opt`` option for `docker run`. Logging options are key value pairs. An example of `syslog` options: log_driver: "syslog" log_opt: - address: "tcp://192.168.0.42:123" + syslog-address: "tcp://192.168.0.42:123" ### net @@ -294,43 +304,24 @@ container and the host operating system the PID address space. Containers launched with this flag will be able to access and manipulate other containers in the bare-metal machine's namespace and vise-versa. -### dns +### ports -Custom DNS servers. Can be a single value or a list. +Expose ports. Either specify both ports (`HOST:CONTAINER`), or just the container +port (a random host port will be chosen). 
- dns: 8.8.8.8 - dns: - - 8.8.8.8 - - 9.9.9.9 +> **Note:** When mapping ports in the `HOST:CONTAINER` format, you may experience +> erroneous results when using a container port lower than 60, because YAML will +> parse numbers in the format `xx:yy` as sexagesimal (base 60). For this reason, +> we recommend always explicitly specifying your port mappings as strings. -### cap_add, cap_drop - -Add or drop container capabilities. -See `man 7 capabilities` for a full list. - - cap_add: - - ALL - - cap_drop: - - NET_ADMIN - - SYS_ADMIN - -### dns_search - -Custom DNS search domains. Can be a single value or a list. - - dns_search: example.com - dns_search: - - dc1.example.com - - dc2.example.com - -### devices - -List of device mappings. Uses the same format as the `--device` docker -client create option. - - devices: - - "/dev/ttyUSB0:/dev/ttyUSB0" + ports: + - "3000" + - "3000-3005" + - "8000:8000" + - "9090-9091:8080-8081" + - "49100:22" + - "127.0.0.1:8001:8001" + - "127.0.0.1:5000-5010:5000-5010" ### security_opt @@ -340,7 +331,44 @@ Override the default labeling scheme for each container. - label:user:USER - label:role:ROLE -### working\_dir, entrypoint, user, hostname, domainname, mac\_address, mem\_limit, memswap\_limit, privileged, restart, stdin\_open, tty, cpu\_shares, cpuset, read\_only, volume\_driver +### volumes, volume\_driver + +Mount paths as volumes, optionally specifying a path on the host machine +(`HOST:CONTAINER`), or an access mode (`HOST:CONTAINER:ro`). + + volumes: + - /var/lib/mysql + - ./cache:/tmp/cache + - ~/configs:/etc/configs/:ro + +You can mount a relative path on the host, which will expand relative to +the directory of the Compose configuration file being used. Relative paths +should always begin with `.` or `..`. + +If you use a volume name (instead of a volume path), you may also specify +a `volume_driver`. + + volume_driver: mydriver + + +> Note: No path expansion will be done if you have also specified a +> `volume_driver`. 
+ +See [Docker Volumes](https://docs.docker.com/userguide/dockervolumes/) and +[Volume Plugins](https://docs.docker.com/extend/plugins_volume/) for more +information. + +### volumes_from + +Mount all of the volumes from another service or container, optionally +specifying read-only access(``ro``) or read-write(``rw``). + + volumes_from: + - service_name + - container_name + - service_name:rw + +### cpu\_shares, cpuset, domainname, entrypoint, hostname, ipc, mac\_address, mem\_limit, memswap\_limit, privileged, read\_only, restart, stdin\_open, tty, user, working\_dir Each of these is a single value, analogous to its [docker run](https://docs.docker.com/reference/run/) counterpart. @@ -348,13 +376,13 @@ Each of these is a single value, analogous to its cpu_shares: 73 cpuset: 0,1 - working_dir: /code entrypoint: /code/entrypoint.sh user: postgresql + working_dir: /code - hostname: foo domainname: foo.com - + hostname: foo + ipc: host mac_address: 02:42:ac:11:65:43 mem_limit: 1000000000 @@ -363,20 +391,51 @@ Each of these is a single value, analogous to its restart: always + read_only: true stdin_open: true tty: true - read_only: true - volume_driver: mydriver -``` +## Variable substitution + +Your configuration options can contain environment variables. Compose uses the +variable values from the shell environment in which `docker-compose` is run. For +example, suppose the shell contains `POSTGRES_VERSION=9.3` and you supply this +configuration: + + db: + image: "postgres:${POSTGRES_VERSION}" + +When you run `docker-compose up` with this configuration, Compose looks for the +`POSTGRES_VERSION` environment variable in the shell and substitutes its value +in. For this example, Compose resolves the `image` to `postgres:9.3` before +running the configuration. + +If an environment variable is not set, Compose substitutes with an empty +string. In the example above, if `POSTGRES_VERSION` is not set, the value for +the `image` option is `postgres:`. 
+ +Both `$VARIABLE` and `${VARIABLE}` syntax are supported. Extended shell-style +features, such as `${VARIABLE-default}` and `${VARIABLE/foo/bar}`, are not +supported. + +You can use a `$$` (double-dollar sign) when your configuration needs a literal +dollar sign. This also prevents Compose from interpolating a value, so a `$$` +allows you to refer to environment variables that you don't want processed by +Compose. + + web: + build: . + command: "$$VAR_NOT_INTERPOLATED_BY_COMPOSE" + +If you forget and use a single dollar sign (`$`), Compose interprets the value as an environment variable and will warn you: + + The VAR_NOT_INTERPOLATED_BY_COMPOSE is not set. Substituting an empty string. ## Compose documentation -- [User guide](/) +- [User guide](index.md) - [Installing Compose](install.md) - [Get started with Django](django.md) - [Get started with Rails](rails.md) -- [Get started with Wordpress](wordpress.md) -- [Command line reference](cli.md) -- [Compose environment variables](env.md) -- [Compose command line completion](completion.md) +- [Get started with WordPress](wordpress.md) +- [Command line reference](./reference/index.md) diff --git a/docs/django.md b/docs/django.md index 71df4e1168..d4d2bd1ecf 100644 --- a/docs/django.md +++ b/docs/django.md @@ -10,126 +10,171 @@ weight=4 -## Quickstart Guide: Compose and Django +# Quickstart Guide: Compose and Django - -This Quick-start Guide will demonstrate how to use Compose to set up and run a +This quick-start guide demonstrates how to use Compose to set up and run a simple Django/PostgreSQL app. Before starting, you'll need to have [Compose installed](install.md). -### Define the project +## Define the project components -Start by setting up the three files you'll need to build the app. First, since -your app is going to run inside a Docker container containing all of its -dependencies, you'll need to define exactly what needs to be included in the -container. This is done using a file called `Dockerfile`. 
To begin with, the -Dockerfile consists of: +For this project, you need to create a Dockerfile, a Python dependencies file, +and a `docker-compose.yml` file. - FROM python:2.7 - ENV PYTHONUNBUFFERED 1 - RUN mkdir /code - WORKDIR /code - ADD requirements.txt /code/ - RUN pip install -r requirements.txt - ADD . /code/ +1. Create an empty project directory. -This Dockerfile will define an image that is used to build a container that -includes your application and has Python installed alongside all of your Python -dependencies. For more information on how to write Dockerfiles, see the -[Docker user guide](https://docs.docker.com/userguide/dockerimages/#building-an-image-from-a-dockerfile) and the [Dockerfile reference](http://docs.docker.com/reference/builder/). + You can name the directory something easy for you to remember. This directory is the context for your application image. The directory should only contain resources to build that image. -Second, you'll define your Python dependencies in a file called -`requirements.txt`: +2. Create a new file called `Dockerfile` in your project directory. - Django - psycopg2 + The Dockerfile defines an application's image content via one or more build + commands that configure that image. Once built, you can run the image in a + container. For more information on `Dockerfiles`, see the [Docker user + guide](https://docs.docker.com/userguide/dockerimages/#building-an-image-from-a-dockerfile) + and the [Dockerfile reference](http://docs.docker.com/reference/builder/). -Finally, this is all tied together with a file called `docker-compose.yml`. It -describes the services that comprise your app (here, a web server and database), -which Docker images they use, how they link together, what volumes will be -mounted inside the containers, and what ports they expose. +3. Add the following content to the `Dockerfile`. - db: - image: postgres - web: - build: . 
- command: python manage.py runserver 0.0.0.0:8000 - volumes: - - .:/code - ports: - - "8000:8000" - links: - - db + FROM python:2.7 + ENV PYTHONUNBUFFERED 1 + RUN mkdir /code + WORKDIR /code + ADD requirements.txt /code/ + RUN pip install -r requirements.txt + ADD . /code/ -See the [`docker-compose.yml` reference](yml.md) for more information on how -this file works. + This `Dockerfile` starts with a Python 2.7 base image. The base image is + modified by adding a new `code` directory. The base image is further modified + by installing the Python requirements defined in the `requirements.txt` file. -### Build the project +4. Save and close the `Dockerfile`. -You can now start a Django project with `docker-compose run`: +5. Create a `requirements.txt` in your project directory. - $ docker-compose run web django-admin.py startproject composeexample . + This file is used by the `RUN pip install -r requirements.txt` command in your `Dockerfile`. -First, Compose will build an image for the `web` service using the `Dockerfile`. -It will then run `django-admin.py startproject composeexample .` inside a -container built using that image. +6. Add the required software in the file. -This will generate a Django app inside the current directory: + Django + psycopg2 - $ ls - Dockerfile docker-compose.yml composeexample manage.py requirements.txt +7. Save and close the `requirements.txt` file. -### Connect the database +8. Create a file called `docker-compose.yml` in your project directory. -Now you need to set up the database connection. Replace the `DATABASES = ...` -definition in `composeexample/settings.py` to read: + The `docker-compose.yml` file describes the services that make your app. In + this example those services are a web server and database. The compose file + also describes which Docker images these services use, how they link + together, any volumes they might need mounted inside the containers. 
+ Finally, the `docker-compose.yml` file describes which ports these services + expose. See the [`docker-compose.yml` reference](compose-file.md) for more + information on how this file works. - DATABASES = { - 'default': { - 'ENGINE': 'django.db.backends.postgresql_psycopg2', - 'NAME': 'postgres', - 'USER': 'postgres', - 'HOST': 'db', - 'PORT': 5432, +9. Add the following configuration to the file. + + db: + image: postgres + web: + build: . + command: python manage.py runserver 0.0.0.0:8000 + volumes: + - .:/code + ports: + - "8000:8000" + links: + - db + + This file defines two services: The `db` service and the `web` service. + +10. Save and close the `docker-compose.yml` file. + +## Create a Django project + +In this step, you create a Django started project by building the image from the build context defined in the previous procedure. + +1. Change to the root of your project directory. + +2. Create the Django project using the `docker-compose` command. + + $ docker-compose run web django-admin.py startproject composeexample . + + This instructs Compose to run `django-admin.py startproject composeeexample` + in a container, using the `web` service's image and configuration. Because + the `web` image doesn't exist yet, Compose builds it from the current + directory, as specified by the `build: .` line in `docker-compose.yml`. + + Once the `web` service image is built, Compose runs it and executes the + `django-admin.py startproject` command in the container. This command + instructs Django to create a set of files and directories representing a + Django project. + +3. After the `docker-compose` command completes, list the contents of your project. + + $ ls -l + drwxr-xr-x 2 root root composeexample + -rw-rw-r-- 1 user user docker-compose.yml + -rw-rw-r-- 1 user user Dockerfile + -rwxr-xr-x 1 root root manage.py + -rw-rw-r-- 1 user user requirements.txt + + The files `django-admin` created are owned by root. 
This happens because + the container runs as the `root` user. + +4. Change the ownership of the new files. + + sudo chown -R $USER:$USER . + + +## Connect the database + +In this section, you set up the database connection for Django. + +1. In your project directory, edit the `composeexample/settings.py` file. + +2. Replace the `DATABASES = ...` with the following: + + DATABASES = { + 'default': { + 'ENGINE': 'django.db.backends.postgresql_psycopg2', + 'NAME': 'postgres', + 'USER': 'postgres', + 'HOST': 'db', + 'PORT': 5432, + } } - } -These settings are determined by the -[postgres](https://registry.hub.docker.com/_/postgres/) Docker image specified -in the Dockerfile. + These settings are determined by the + [postgres](https://registry.hub.docker.com/_/postgres/) Docker image + specified in `docker-compose.yml`. -Then, run `docker-compose up`: +3. Save and close the file. - Recreating myapp_db_1... - Recreating myapp_web_1... - Attaching to myapp_db_1, myapp_web_1 - myapp_db_1 | - myapp_db_1 | PostgreSQL stand-alone backend 9.1.11 - myapp_db_1 | 2014-01-27 12:17:03 UTC LOG: database system is ready to accept connections - myapp_db_1 | 2014-01-27 12:17:03 UTC LOG: autovacuum launcher started - myapp_web_1 | Validating models... - myapp_web_1 | - myapp_web_1 | 0 errors found - myapp_web_1 | January 27, 2014 - 12:12:40 - myapp_web_1 | Django version 1.6.1, using settings 'composeexample.settings' - myapp_web_1 | Starting development server at http://0.0.0.0:8000/ - myapp_web_1 | Quit the server with CONTROL-C. +4. Run the `docker-compose up` command. -Your Django app should nw be running at port 8000 on your Docker daemon. If you are using a Docker Machine VM, you can use the `docker-machine ip MACHINE_NAME` to get the IP address. + $ docker-compose up + Starting composepractice_db_1... + Starting composepractice_web_1... + Attaching to composepractice_db_1, composepractice_web_1 + ... + db_1 | PostgreSQL init process complete; ready for start up. + ... 
+ db_1 | LOG: database system is ready to accept connections + db_1 | LOG: autovacuum launcher started + .. + web_1 | Django version 1.8.4, using settings 'composeexample.settings' + web_1 | Starting development server at http://0.0.0.0:8000/ + web_1 | Quit the server with CONTROL-C. -You can also run management commands with Docker. To set up your database, for -example, run `docker-compose up` and in another terminal run: - - $ docker-compose run web python manage.py syncdb + At this point, your Django app should be running at port `8000` on your + Docker host. If you are using a Docker Machine VM, you can use the + `docker-machine ip MACHINE_NAME` to get the IP address. ## More Compose documentation -- [User guide](/) +- [User guide](../index.md) - [Installing Compose](install.md) -- [Get started with Django](django.md) +- [Getting Started](gettingstarted.md) - [Get started with Rails](rails.md) -- [Get started with Wordpress](wordpress.md) -- [Command line reference](cli.md) -- [Yaml file reference](yml.md) -- [Compose environment variables](env.md) -- [Compose command line completion](completion.md) +- [Get started with WordPress](wordpress.md) +- [Command line reference](./reference/index.md) +- [Compose file reference](compose-file.md) diff --git a/docs/env.md b/docs/env.md index afeb829e72..d7b51ba2b5 100644 --- a/docs/env.md +++ b/docs/env.md @@ -11,7 +11,7 @@ weight=3 # Compose environment variables reference -**Note:** Environment variables are no longer the recommended method for connecting to linked services. Instead, you should use the link name (by default, the name of the linked service) as the hostname to connect to. See the [docker-compose.yml documentation](yml.md#links) for details. +**Note:** Environment variables are no longer the recommended method for connecting to linked services. Instead, you should use the link name (by default, the name of the linked service) as the hostname to connect to. 
See the [docker-compose.yml documentation](compose-file.md#links) for details. Compose uses [Docker links] to expose services' containers to one another. Each linked container injects a set of environment variables, each of which begins with the uppercase name of the container. @@ -37,13 +37,9 @@ Fully qualified container name, e.g. `DB_1_NAME=/myapp_web_1/myapp_db_1` [Docker links]: http://docs.docker.com/userguide/dockerlinks/ -## Compose documentation +## Related Information -- [User guide](/) +- [User guide](index.md) - [Installing Compose](install.md) -- [Get started with Django](django.md) -- [Get started with Rails](rails.md) -- [Get started with Wordpress](wordpress.md) -- [Command line reference](cli.md) -- [Yaml file reference](yml.md) -- [Compose command line completion](completion.md) +- [Command line reference](./reference/index.md) +- [Compose file reference](compose-file.md) diff --git a/docs/extends.md b/docs/extends.md index 7a92b771a3..b21b6d76db 100644 --- a/docs/extends.md +++ b/docs/extends.md @@ -10,18 +10,176 @@ weight=2 -## Extending services in Compose +# Extending services and Compose files + +Compose supports two methods of sharing common configuration: + +1. Extending an entire Compose file by + [using multiple Compose files](#multiple-compose-files) +2. Extending individual services with [the `extends` field](#extending-services) + + +## Multiple Compose files + +Using multiple Compose files enables you to customize a Compose application +for different environments or different workflows. + +### Understanding multiple Compose files + +By default, Compose reads two files, a `docker-compose.yml` and an optional +`docker-compose.override.yml` file. By convention, the `docker-compose.yml` +contains your base configuration. The override file, as its name implies, can +contain configuration overrides for existing services or entirely new +services. 
+ +If a service is defined in both files, Compose merges the configurations using +the same rules as the `extends` field (see [Adding and overriding +configuration](#adding-and-overriding-configuration)), with one exception. If a +service contains `links` or `volumes_from` those fields are copied over and +replace any values in the original service, in the same way single-valued fields +are copied. + +To use multiple override files, or an override file with a different name, you +can use the `-f` option to specify the list of files. Compose merges files in +the order they're specified on the command line. See the [`docker-compose` +command reference](./reference/docker-compose.md) for more information about +using `-f`. + +When you use multiple configuration files, you must make sure all paths in the +files are relative to the base Compose file (the first Compose file specified +with `-f`). This is required because override files need not be valid +Compose files. Override files can contain small fragments of configuration. +Tracking which fragment of a service is relative to which path is difficult and +confusing, so to keep paths easier to understand, all paths must be defined +relative to the base file. + +### Example use case + +In this section are two common use cases for multiple compose files: changing a +Compose app for different environments, and running administrative tasks +against a Compose app. + +#### Different environments + +A common use case for multiple files is changing a development Compose app +for a production-like environment (which may be production, staging or CI). +To support these differences, you can split your Compose configuration into +a few different files: + +Start with a base file that defines the canonical configuration for the +services. 
+ +**docker-compose.yml** + + web: + image: example/my_web_app:latest + links: + - db + - cache + + db: + image: postgres:latest + + cache: + image: redis:latest + +In this example the development configuration exposes some ports to the +host, mounts our code as a volume, and builds the web image. + +**docker-compose.override.yml** + + + web: + build: . + volumes: + - '.:/code' + ports: + - 8883:80 + environment: + DEBUG: 'true' + + db: + command: '-d' + ports: + - 5432:5432 + + cache: + ports: + - 6379:6379 + +When you run `docker-compose up` it reads the overrides automatically. + +Now, it would be nice to use this Compose app in a production environment. So, +create another override file (which might be stored in a different git +repo or managed by a different team). + +**docker-compose.prod.yml** + + web: + ports: + - 80:80 + environment: + PRODUCTION: 'true' + + cache: + environment: + TTL: '500' + +To deploy with this production Compose file you can run + + docker-compose -f docker-compose.yml -f docker-compose.prod.yml up -d + +This deploys all three services using the configuration in +`docker-compose.yml` and `docker-compose.prod.yml` (but not the +dev configuration in `docker-compose.override.yml`). + + +See [production](production.md) for more information about Compose in +production. + +#### Administrative tasks + +Another common use case is running adhoc or administrative tasks against one +or more services in a Compose app. This example demonstrates running a +database backup. + +Start with a **docker-compose.yml**. + + web: + image: example/my_web_app:latest + links: + - db + + db: + image: postgres:latest + +In a **docker-compose.admin.yml** add a new service to run the database +export or backup. + + dbadmin: + build: database_admin/ + links: + - db + +To start a normal environment run `docker-compose up -d`. To run a database +backup, include the `docker-compose.admin.yml` as well. 
+ + docker-compose -f docker-compose.yml -f docker-compose.admin.yml \ + run dbadmin db-backup + + +## Extending services Docker Compose's `extends` keyword enables sharing of common configurations among different files, or even different projects entirely. Extending services -is useful if you have several applications that reuse commonly-defined services. -Using `extends` you can define a service in one place and refer to it from -anywhere. +is useful if you have several services that reuse a common set of configuration +options. Using `extends` you can define a common set of service options in one +place and refer to it from anywhere. -Alternatively, you can deploy the same application to multiple environments with -a slightly different set of services in each case (or with changes to the -configuration of some services). Moreover, you can do so without copy-pasting -the configuration around. +> **Note:** `links` and `volumes_from` are never shared between services using +> `extends`. See +> [Adding and overriding configuration](#adding-and-overriding-configuration) + > for more information. ### Understand the extends configuration @@ -45,8 +203,8 @@ looks like this: - "/data" In this case, you'll get exactly the same result as if you wrote -`docker-compose.yml` with that `build`, `ports` and `volumes` configuration -defined directly under `web`. +`docker-compose.yml` with the same `build`, `ports` and `volumes` configuration +values defined directly under `web`. 
You can go further and define (or re-define) configuration locally in `docker-compose.yml`: @@ -59,6 +217,10 @@ You can go further and define (or re-define) configuration locally in - DEBUG=1 cpu_shares: 5 + important_web: + extends: web + cpu_shares: 10 + You can also write other services and link your `web` service to them: web: @@ -73,182 +235,45 @@ You can also write other services and link your `web` service to them: db: image: postgres -For full details on how to use `extends`, refer to the [reference](#reference). - ### Example use case -In this example, you’ll repurpose the example app from the [quick start -guide](index.md). (If you're not familiar with Compose, it's recommended that -you go through the quick start first.) This example assumes you want to use -Compose both to develop an application locally and then deploy it to a -production environment. +Extending an individual service is useful when you have multiple services that +have a common configuration. The example below is a Compose app with +two services: a web application and a queue worker. Both services use the same +codebase and share many configuration options. -The local and production environments are similar, but there are some -differences. In development, you mount the application code as a volume so that -it can pick up changes; in production, the code should be immutable from the -outside. This ensures it’s not accidentally changed. The development environment -uses a local Redis container, but in production another team manages the Redis -service, which is listening at `redis-production.example.com`. +In a **common.yml** we define the common configuration: -To configure with `extends` for this sample, you must: + app: + build: . + environment: + CONFIG_FILE_PATH: /code/config + API_KEY: xxxyyy + cpu_shares: 5 -1. Define the web application as a Docker image in `Dockerfile` and a Compose - service in `common.yml`. 
+In a **docker-compose.yml** we define the concrete services which use the +common configuration: -2. Define the development environment in the standard Compose file, - `docker-compose.yml`. + webapp: + extends: + file: common.yml + service: app + command: /code/run_web_app + ports: + - 8080:8080 + links: + - queue + - db - - Use `extends` to pull in the web service. - - Configure a volume to enable code reloading. - - Create an additional Redis service for the application to use locally. + queue_worker: + extends: + file: common.yml + service: app + command: /code/run_worker + links: + - queue -3. Define the production environment in a third Compose file, `production.yml`. - - - Use `extends` to pull in the web service. - - Configure the web service to talk to the external, production Redis service. - -#### Define the web app - -Defining the web application requires the following: - -1. Create an `app.py` file. - - This file contains a simple Python application that uses Flask to serve HTTP - and increments a counter in Redis: - - from flask import Flask - from redis import Redis - import os - - app = Flask(__name__) - redis = Redis(host=os.environ['REDIS_HOST'], port=6379) - - @app.route('/') - def hello(): - redis.incr('hits') - return 'Hello World! I have been seen %s times.\n' % redis.get('hits') - - if __name__ == "__main__": - app.run(host="0.0.0.0", debug=True) - - This code uses a `REDIS_HOST` environment variable to determine where to - find Redis. - -2. Define the Python dependencies in a `requirements.txt` file: - - flask - redis - -3. Create a `Dockerfile` to build an image containing the app: - - FROM python:2.7 - ADD . /code - WORKDIR /code - RUN pip install -r requirements.txt - CMD python app.py - -4. Create a Compose configuration file called `common.yml`: - - This configuration defines how to run the app. - - web: - build: . 
- ports: - - "5000:5000" - - Typically, you would have dropped this configuration into - `docker-compose.yml` file, but in order to pull it into multiple files with - `extends`, it needs to be in a separate file. - -#### Define the development environment - -1. Create a `docker-compose.yml` file. - - The `extends` option pulls in the `web` service from the `common.yml` file - you created in the previous section. - - web: - extends: - file: common.yml - service: web - volumes: - - .:/code - links: - - redis - environment: - - REDIS_HOST=redis - redis: - image: redis - - The new addition defines a `web` service that: - - - Fetches the base configuration for `web` out of `common.yml`. - - Adds `volumes` and `links` configuration to the base (`common.yml`) - configuration. - - Sets the `REDIS_HOST` environment variable to point to the linked redis - container. This environment uses a stock `redis` image from the Docker Hub. - -2. Run `docker-compose up`. - - Compose creates, links, and starts a web and redis container linked together. - It mounts your application code inside the web container. - -3. Verify that the code is mounted by changing the message in - `app.py`—say, from `Hello world!` to `Hello from Compose!`. - - Don't forget to refresh your browser to see the change! - -#### Define the production environment - -You are almost done. Now, define your production environment: - -1. Create a `production.yml` file. - - As with `docker-compose.yml`, the `extends` option pulls in the `web` service - from `common.yml`. - - web: - extends: - file: common.yml - service: web - environment: - - REDIS_HOST=redis-production.example.com - -2. Run `docker-compose -f production.yml up`. - - Compose creates *just* a web container and configures the Redis connection via - the `REDIS_HOST` environment variable. This variable points to the production - Redis instance. 
- - > **Note**: If you try to load up the webapp in your browser you'll get an - > error—`redis-production.example.com` isn't actually a Redis server. - -You've now done a basic `extends` configuration. As your application develops, -you can make any necessary changes to the web service in `common.yml`. Compose -picks up both the development and production environments when you next run -`docker-compose`. You don't have to do any copy-and-paste, and you don't have to -manually keep both environments in sync. - - -### Reference - -You can use `extends` on any service together with other configuration keys. It -always expects a dictionary that should always contain the key: `service` and optionally the `file` key. - -The `file` key specifies the location of a Compose configuration file defining -the extension. The `file` value can be an absolute or relative path. If you -specify a relative path, Docker Compose treats it as relative to the location -of the current file. If you don't specify a `file`, Compose looks in the -current configuration file. - -The `service` key specifies the name of the service to extend, for example `web` -or `database`. - -You can extend a service that itself extends another. You can extend -indefinitely. Compose does not support circular references and `docker-compose` -returns an error if it encounters them. - -#### Adding and overriding configuration +## Adding and overriding configuration Compose copies configurations from the original service over to the local one, **except** for `links` and `volumes_from`. These exceptions exist to avoid @@ -257,13 +282,11 @@ locally. This ensures dependencies between services are clearly visible when reading the current file. Defining these locally also ensures changes to the referenced file don't result in breakage. -If a configuration option is defined in both the original service and the local -service, the local value either *override*s or *extend*s the definition of the -original service. 
This works differently for other configuration options. +If a configuration option is defined in both the original service and the local +service, the local value *replaces* or *extends* the original value. For single-value options like `image`, `command` or `mem_limit`, the new value -replaces the old value. **This is the default behaviour - all exceptions are -listed below.** +replaces the old value. # original service command: python app.py @@ -277,6 +300,8 @@ listed below.** In the case of `build` and `image`, using one in the local service causes Compose to discard the other, if it was defined in the original service. +Example of image replacing build: + # original service build: . @@ -286,6 +311,9 @@ Compose to discard the other, if it was defined in the original service. # result image: redis + +Example of build replacing image: + # original service image: redis @@ -313,8 +341,8 @@ For the **multi-value options** `ports`, `expose`, `external_links`, `dns` and - "4000" - "5000" -In the case of `environment` and `labels`, Compose "merges" entries together -with locally-defined values taking precedence: +In the case of `environment`, `labels`, `volumes` and `devices`, Compose +"merges" entries together with locally-defined values taking precedence: # original service environment: @@ -332,32 +360,16 @@ with locally-defined values taking precedence: - BAR=local - BAZ=local -Finally, for `volumes` and `devices`, Compose "merges" entries together with -locally-defined bindings taking precedence: - # original service - volumes: - - /original-dir/foo:/foo - - /original-dir/bar:/bar - # local service - volumes: - - /local-dir/bar:/bar - - /local-dir/baz/:baz - - # result - volumes: - - /original-dir/foo:/foo - - /local-dir/bar:/bar - - /local-dir/baz/:baz ## Compose documentation - [User guide](/) - [Installing Compose](install.md) +- [Getting Started](gettingstarted.md) - [Get started with Django](django.md) - [Get started with Rails](rails.md) -- [Get started with 
Wordpress](wordpress.md) -- [Command line reference](cli.md) -- [Yaml file reference](yml.md) -- [Compose command line completion](completion.md) +- [Get started with WordPress](wordpress.md) +- [Command line reference](./reference/index.md) +- [Compose file reference](compose-file.md) diff --git a/docs/gettingstarted.md b/docs/gettingstarted.md new file mode 100644 index 0000000000..f685bf3820 --- /dev/null +++ b/docs/gettingstarted.md @@ -0,0 +1,188 @@ + + + +# Getting Started + +On this page you build a simple Python web application running on Compose. The +application uses the Flask framework and increments a value in Redis. While the +sample uses Python, the concepts demonstrated here should be understandable even +if you're not familiar with it. + +## Prerequisites + +Make sure you have already +[installed both Docker Engine and Docker Compose](install.md). You +don't need to install Python, it is provided by a Docker image. + +## Step 1: Setup + +1. Create a directory for the project: + + $ mkdir composetest + $ cd composetest + +2. With your favorite text editor create a file called `app.py` in your project + directory. + + from flask import Flask + from redis import Redis + + app = Flask(__name__) + redis = Redis(host='redis', port=6379) + + @app.route('/') + def hello(): + redis.incr('hits') + return 'Hello World! I have been seen %s times.' % redis.get('hits') + + if __name__ == "__main__": + app.run(host="0.0.0.0", debug=True) + +3. Create another file called `requirements.txt` in your project directory and + add the following: + + flask + redis + + These define the applications dependencies. + +## Step 2: Create a Docker image + +In this step, you build a new Docker image. The image contains all the +dependencies the Python application requires, including Python itself. + +1. In your project directory create a file named `Dockerfile` and add the + following: + + FROM python:2.7 + ADD . 
/code + WORKDIR /code + RUN pip install -r requirements.txt + CMD python app.py + + This tells Docker to: + + * Build an image starting with the Python 2.7 image. + * Add the current directory `.` into the path `/code` in the image. + * Set the working directory to `/code`. + * Install the Python dependencies. + * Set the default command for the container to `python app.py` + + For more information on how to write Dockerfiles, see the [Docker user guide](https://docs.docker.com/userguide/dockerimages/#building-an-image-from-a-dockerfile) and the [Dockerfile reference](http://docs.docker.com/reference/builder/). + +2. Build the image. + + $ docker build -t web . + + This command builds an image named `web` from the contents of the current + directory. The command automatically locates the `Dockerfile`, `app.py`, and + `requirements.txt` files. + + +## Step 3: Define services + +Define a set of services using `docker-compose.yml`: + +1. Create a file called docker-compose.yml in your project directory and add + the following: + + web: + build: . + ports: + - "5000:5000" + volumes: + - .:/code + links: + - redis + redis: + image: redis + +This Compose file defines two services, `web` and `redis`. The web service: + +* Builds from the `Dockerfile` in the current directory. +* Forwards the exposed port 5000 on the container to port 5000 on the host machine. +* Mounts the project directory on the host to `/code` inside the container allowing you to modify the code without having to rebuild the image. +* Links the web service to the Redis service. + +The `redis` service uses the latest public [Redis](https://registry.hub.docker.com/_/redis/) image pulled from the Docker Hub registry. + +## Step 4: Build and run your app with Compose + +1. From your project directory, start up your application. + + $ docker-compose up + Pulling image redis... + Building web... + Starting composetest_redis_1... + Starting composetest_web_1... 
+ redis_1 | [8] 02 Jan 18:43:35.576 # Server started, Redis version 2.8.3 + web_1 | * Running on http://0.0.0.0:5000/ + web_1 | * Restarting with stat + + Compose pulls a Redis image, builds an image for your code, and starts the + services you defined. + +2. Enter `http://0.0.0.0:5000/` in a browser to see the application running. + + If you're using Docker on Linux natively, then the web app should now be + listening on port 5000 on your Docker daemon host. If http://0.0.0.0:5000 + doesn't resolve, you can also try http://localhost:5000. + + If you're using Docker Machine on a Mac, use `docker-machine ip MACHINE_VM` to get + the IP address of your Docker host. Then, `open http://MACHINE_VM_IP:5000` in a + browser. + + You should see a message in your browser saying: + + `Hello World! I have been seen 1 times.` + +3. Refresh the page. + + The number should increment. + +## Step 5: Experiment with some other commands + +If you want to run your services in the background, you can pass the `-d` flag +(for "detached" mode) to `docker-compose up` and use `docker-compose ps` to +see what is currently running: + + $ docker-compose up -d + Starting composetest_redis_1... + Starting composetest_web_1... + $ docker-compose ps + Name Command State Ports + ------------------------------------------------------------------- + composetest_redis_1 /usr/local/bin/run Up + composetest_web_1 /bin/sh -c python app.py Up 5000->5000/tcp + +The `docker-compose run` command allows you to run one-off commands for your +services. For example, to see what environment variables are available to the +`web` service: + + $ docker-compose run web env + +See `docker-compose --help` to see other available commands. You can also install [command completion](completion.md) for the bash and zsh shell, which will also show you available commands. 
+ +If you started Compose with `docker-compose up -d`, you'll probably want to stop +your services once you've finished with them: + + $ docker-compose stop + +At this point, you have seen the basics of how Compose works. + + +## Where to go next + +- Next, try the quick start guide for [Django](django.md), + [Rails](rails.md), or [WordPress](wordpress.md). +- [Explore the full list of Compose commands](./reference/index.md) +- [Compose configuration file reference](compose-file.md) diff --git a/docs/index.md b/docs/index.md index 6d949f88d3..279154eef9 100644 --- a/docs/index.md +++ b/docs/index.md @@ -11,20 +11,22 @@ parent="smn_workw_compose" # Overview of Docker Compose -Compose is a tool for defining and running multi-container applications with -Docker. With Compose, you define a multi-container application in a single -file, then spin your application up in a single command which does everything -that needs to be done to get it running. +Compose is a tool for defining and running multi-container Docker applications. +With Compose, you use a Compose file to configure your application's services. +Then, using a single command, you create and start all the services +from your configuration. To learn more about all the features of Compose +see [the list of features](#features). -Compose is great for development environments, staging servers, and CI. We don't -recommend that you use it in production yet. +Compose is great for development, testing, and staging environments, as well as +CI workflows. You can learn more about each case in +[Common Use Cases](#common-use-cases). Using Compose is basically a three-step process. 1. Define your app's environment with a `Dockerfile` so it can be reproduced anywhere. 2. Define the services that make up your app in `docker-compose.yml` so -they can be run together in an isolated environment: +they can be run together in an isolated environment. 3. 
Lastly, run `docker-compose up` and Compose will start and run your entire app. A `docker-compose.yml` looks like this: @@ -40,6 +42,9 @@ A `docker-compose.yml` looks like this: redis: image: redis +For more information about the Compose file, see the +[Compose file reference](compose-file.md) + Compose has commands for managing the whole lifecycle of your application: * Start, stop and rebuild services @@ -50,183 +55,130 @@ Compose has commands for managing the whole lifecycle of your application: ## Compose documentation - [Installing Compose](install.md) +- [Getting Started](gettingstarted.md) - [Get started with Django](django.md) - [Get started with Rails](rails.md) -- [Get started with Wordpress](wordpress.md) -- [Command line reference](cli.md) -- [Yaml file reference](yml.md) -- [Compose environment variables](env.md) -- [Compose command line completion](completion.md) +- [Get started with WordPress](wordpress.md) +- [Command line reference](./reference/index.md) +- [Compose file reference](compose-file.md) -## Quick start +## Features -Let's get started with a walkthrough of getting a simple Python web app running -on Compose. It assumes a little knowledge of Python, but the concepts -demonstrated here should be understandable even if you're not familiar with -Python. +The features of Compose that make it effective are: -### Installation and set-up +* [Multiple isolated environments on a single host](#Multiple-isolated-environments-on-a-single-host) +* [Preserve volume data when containers are created](#preserve-volume-data-when-containers-are-created) +* [Only recreate containers that have changed](#only-recreate-containers-that-have-changed) +* [Variables and moving a composition between environments](#variables-and-moving-a-composition-between-environments) -First, [install Docker and Compose](install.md). 
+#### Multiple isolated environments on a single host -Next, you'll want to make a directory for the project: +Compose uses a project name to isolate environments from each other. You can use +this project name to: - $ mkdir composetest - $ cd composetest +* on a dev host, to create multiple copies of a single environment (ex: you want + to run a stable copy for each feature branch of a project) +* on a CI server, to keep builds from interfering with each other, you can set + the project name to a unique build number +* on a shared host or dev host, to prevent different projects which may use the + same service names, from interfering with each other -Inside this directory, create `app.py`, a simple web app that uses the Flask -framework and increments a value in Redis. Don't worry if you don't have Redis installed, docker is going to take care of that for you when we [define services](#define-services): +The default project name is the basename of the project directory. You can set +a custom project name by using the +[`-p` command line option](./reference/docker-compose.md) or the +[`COMPOSE_PROJECT_NAME` environment variable](./reference/overview.md#compose-project-name). - from flask import Flask - from redis import Redis +#### Preserve volume data when containers are created - app = Flask(__name__) - redis = Redis(host='redis', port=6379) +Compose preserves all volumes used by your services. When `docker-compose up` +runs, if it finds any containers from previous runs, it copies the volumes from +the old container to the new container. This process ensures that any data +you've created in volumes isn't lost. - @app.route('/') - def hello(): - redis.incr('hits') - return 'Hello World! I have been seen %s times.' 
% redis.get('hits') - if __name__ == "__main__": - app.run(host="0.0.0.0", debug=True) +#### Only recreate containers that have changed -Next, define the Python dependencies in a file called `requirements.txt`: +Compose caches the configuration used to create a container. When you +restart a service that has not changed, Compose re-uses the existing +containers. Re-using containers means that you can make changes to your +environment very quickly. - flask - redis -### Create a Docker image +#### Variables and moving a composition between environments -Now, create a Docker image containing all of your app's dependencies. You -specify how to build the image using a file called -[`Dockerfile`](http://docs.docker.com/reference/builder/): +Compose supports variables in the Compose file. You can use these variables +to customize your composition for different environments, or different users. +See [Variable substitution](compose-file.md#variable-substitution) for more +details. - FROM python:2.7 - ADD . /code - WORKDIR /code - RUN pip install -r requirements.txt - CMD python app.py +You can extend a Compose file using the `extends` field or by creating multiple +Compose files. See [extends](extends.md) for more details. -This tells Docker to: -* Build an image starting with the Python 2.7 image. -* Add the current directory `.` into the path `/code` in the image. -* Set the working directory to `/code`. -* Install your Python dependencies. -* Set the default command for the container to `python app.py` +## Common Use Cases -For more information on how to write Dockerfiles, see the [Docker user guide](https://docs.docker.com/userguide/dockerimages/#building-an-image-from-a-dockerfile) and the [Dockerfile reference](http://docs.docker.com/reference/builder/). +Compose can be used in many different ways. Some common use cases are outlined +below. -You can test that this builds by running `docker build -t web .`. 
+### Development environments -### Define services +When you're developing software, the ability to run an application in an +isolated environment and interact with it is crucial. The Compose command +line tool can be used to create the environment and interact with it. -Next, define a set of services using `docker-compose.yml`: +The [Compose file](compose-file.md) provides a way to document and configure +all of the application's service dependencies (databases, queues, caches, +web service APIs, etc). Using the Compose command line tool you can create +and start one or more containers for each dependency with a single command +(`docker-compose up`). - web: - build: . - ports: - - "5000:5000" - volumes: - - .:/code - links: - - redis - redis: - image: redis +Together, these features provide a convenient way for developers to get +started on a project. Compose can reduce a multi-page "developer getting +started guide" to a single machine readable Compose file and a few commands. -This defines two services: +### Automated testing environments -#### web - -* Builds from the `Dockerfile` in the current directory. -* Forwards the exposed port 5000 on the container to port 5000 on the host machine. -* Connects the web container to the Redis service via a link. -* Mounts the current directory on the host to `/code` inside the container allowing you to modify the code without having to rebuild the image. - -#### redis - -* Uses the public [Redis](https://registry.hub.docker.com/_/redis/) image which gets pulled from the Docker Hub registry. - -### Build and run your app with Compose - -Now, when you run `docker-compose up`, Compose will pull a Redis image, build an image for your code, and start everything up: - - $ docker-compose up - Pulling image redis... - Building web... - Starting composetest_redis_1... - Starting composetest_web_1... 
- redis_1 | [8] 02 Jan 18:43:35.576 # Server started, Redis version 2.8.3 - web_1 | * Running on http://0.0.0.0:5000/ - web_1 | * Restarting with stat - -If you're using [Docker Machine](https://docs.docker.com/machine), then `docker-machine ip MACHINE_VM` will tell you its address and you can open `http://MACHINE_VM_IP:5000` in a browser. - -If you're not using Boot2docker and are on linux, then the web app should now be listening on port 5000 on your Docker daemon host. If http://0.0.0.0:5000 doesn't resolve, you can also try localhost:5000. - -You should get a message in your browser saying: - -`Hello World! I have been seen 1 times.` - -Refreshing the page will increment the number. - -If you want to run your services in the background, you can pass the `-d` flag -(for "detached" mode) to `docker-compose up` and use `docker-compose ps` to -see what is currently running: +An important part of any Continuous Deployment or Continuous Integration process +is the automated test suite. Automated end-to-end testing requires an +environment in which to run tests. Compose provides a convenient way to create +and destroy isolated testing environments for your test suite. By defining the full +environment in a [Compose file](compose-file.md) you can create and destroy these +environments in just a few commands: $ docker-compose up -d - Starting composetest_redis_1... - Starting composetest_web_1... - $ docker-compose ps - Name Command State Ports - ------------------------------------------------------------------- - composetest_redis_1 /usr/local/bin/run Up - composetest_web_1 /bin/sh -c python app.py Up 5000->5000/tcp - -The `docker-compose run` command allows you to run one-off commands for your -services. For example, to see what environment variables are available to the -`web` service: - - $ docker-compose run web env - -See `docker-compose --help` to see other available commands. 
You can also install [command completion](completion.md) for the bash and zsh shell, which will also show you available commands. - -If you started Compose with `docker-compose up -d`, you'll probably want to stop -your services once you've finished with them: - + $ ./run_tests $ docker-compose stop + $ docker-compose rm -f -At this point, you have seen the basics of how Compose works. +### Single host deployments + +Compose has traditionally been focused on development and testing workflows, +but with each release we're making progress on more production-oriented features. +You can use Compose to deploy to a remote Docker Engine. The Docker Engine may +be a single instance provisioned with +[Docker Machine](https://docs.docker.com/machine/) or an entire +[Docker Swarm](https://docs.docker.com/swarm/) cluster. + +For details on using production-oriented features, see +[compose in production](production.md) in this documentation. -- Next, try the quick start guide for [Django](django.md), - [Rails](rails.md), or [Wordpress](wordpress.md). -- See the reference guides for complete details on the [commands](cli.md), the - [configuration file](yml.md) and [environment variables](env.md). ## Release Notes -### Version 1.2.0 (April 7, 2015) - -For complete information on this release, see the [1.2.0 Milestone project page](https://github.com/docker/compose/wiki/1.2.0-Milestone-Project-Page). -In addition to bug fixes and refinements, this release adds the following: - -* The `extends` keyword, which adds the ability to extend services by sharing common configurations. For details, see -[PR #1088](https://github.com/docker/compose/pull/1088). - -* Better integration with Swarm. Swarm will now schedule inter-dependent -containers on the same host. For details, see -[PR #972](https://github.com/docker/compose/pull/972). 
+To see a detailed list of changes for past and current releases of Docker +Compose, please refer to the +[CHANGELOG](https://github.com/docker/compose/blob/master/CHANGELOG.md). ## Getting help -Docker Compose is still in its infancy and under active development. If you need -help, would like to contribute, or simply want to talk about the project with -like-minded individuals, we have a number of open channels for communication. +Docker Compose is under active development. If you need help, would like to +contribute, or simply want to talk about the project with like-minded +individuals, we have a number of open channels for communication. * To report bugs or file feature requests: please use the [issue tracker on Github](https://github.com/docker/compose/issues). -* To talk about the project with people in real time: please join the `#docker-compose` channel on IRC. +* To talk about the project with people in real time: please join the + `#docker-compose` channel on freenode IRC. * To contribute code or documentation changes: please submit a [pull request on Github](https://github.com/docker/compose/pulls). diff --git a/docs/install.md b/docs/install.md index b74f8f620d..c5304409c5 100644 --- a/docs/install.md +++ b/docs/install.md @@ -14,46 +14,78 @@ weight=4 You can run Compose on OS X and 64-bit Linux. It is currently not supported on the Windows operating system. To install Compose, you'll need to install Docker -first. - -Depending on how your system is configured, you may require `sudo` access to -install Compose. If your system requires `sudo`, you will receive "Permission -denied" errors when installing Compose. If this is the case for you, preface the -install commands with `sudo` to install. +first. To install Compose, do the following: 1. 
Install Docker Engine version 1.7.1 or greater: - * Mac OS X installation (installs both Engine and Compose) - + * Mac OS X installation (Toolbox installation includes both Engine and Compose) + * Ubuntu installation - + * other system installations - + 2. Mac OS X users are done installing. Others should continue to the next step. - -3. Go to the repository release page. -4. Enter the `curl` command in your termial. +3. Go to the Compose repository release page on GitHub. - The command has the following format: +4. Follow the instructions from the release page and run the `curl` command, +which the release page specifies, in your terminal. + + > Note: If you get a "Permission denied" error, your `/usr/local/bin` directory + probably isn't writable and you'll need to install Compose as the superuser. Run + `sudo -i`, then the two commands below, then `exit`. + + The following is an example command illustrating the format: curl -L https://github.com/docker/compose/releases/download/VERSION_NUM/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose - - If you have problems installing with `curl`, you can use `pip` instead: `pip install -U docker-compose` - -4. Apply executable permissions to the binary: + + If you have problems installing with `curl`, see + [Alternative Install Options](#alternative-install-options). + +5. Apply executable permissions to the binary: $ chmod +x /usr/local/bin/docker-compose -5. Optionally, install [command completion](completion.md) for the +6. Optionally, install [command completion](completion.md) for the `bash` and `zsh` shell. -6. Test the installation. +7. Test the installation. $ docker-compose --version - docker-compose version: 1.4.2 + docker-compose version: 1.5.0 + + +## Alternative install options + +### Install using pip + +Compose can be installed from [pypi](https://pypi.python.org/pypi/docker-compose) +using `pip`. 
If you install using `pip` it is highly recommended that you use a +[virtualenv](https://virtualenv.pypa.io/en/latest/) because many operating systems +have python system packages that conflict with docker-compose dependencies. See +the [virtualenv tutorial](http://docs.python-guide.org/en/latest/dev/virtualenvs/) +to get started. + + $ pip install docker-compose + + +### Install as a container + +Compose can also be run inside a container, from a small bash script wrapper. +To install compose as a container run: + + $ curl -L https://github.com/docker/compose/releases/download/1.5.0/run.sh > /usr/local/bin/docker-compose + $ chmod +x /usr/local/bin/docker-compose + +## Master builds + +If you're interested in trying out a pre-release build you can download a +binary from https://dl.bintray.com/docker-compose/master/. Pre-release +builds allow you to try out new features before they are released, but may +be less stable. + ## Upgrading @@ -69,7 +101,7 @@ to preserve) you can migrate them with the following command: $ docker-compose migrate-to-labels -Alternatively, if you're not worried about keeping them, you can remove them &endash; +Alternatively, if you're not worried about keeping them, you can remove them. Compose will just create new ones. $ docker rm -f -v myapp_web_1 myapp_db_1 ... @@ -85,7 +117,7 @@ To uninstall Docker Compose if you installed using `curl`: To uninstall Docker Compose if you installed using `pip`: $ pip uninstall docker-compose - + >**Note**: If you get a "Permission denied" error using either of the above >methods, you probably do not have the proper permissions to remove >`docker-compose`. 
To force the removal, prepend `sudo` to either of the above @@ -95,10 +127,9 @@ To uninstall Docker Compose if you installed using `pip`: ## Where to go next - [User guide](/) +- [Getting Started](gettingstarted.md) - [Get started with Django](django.md) - [Get started with Rails](rails.md) -- [Get started with Wordpress](wordpress.md) -- [Command line reference](cli.md) -- [Yaml file reference](yml.md) -- [Compose environment variables](env.md) -- [Compose command line completion](completion.md) +- [Get started with WordPress](wordpress.md) +- [Command line reference](./reference/index.md) +- [Compose file reference](compose-file.md) diff --git a/docs/networking.md b/docs/networking.md new file mode 100644 index 0000000000..718d56c7a2 --- /dev/null +++ b/docs/networking.md @@ -0,0 +1,92 @@ + + + +# Networking in Compose + +> **Note:** Compose's networking support is experimental, and must be explicitly enabled with the `docker-compose --x-networking` flag. + +Compose sets up a single default +[network](/engine/reference/commandline/network_create.md) for your app. Each +container for a service joins the default network and is both *reachable* by +other containers on that network, and *discoverable* by them at a hostname +identical to the container name. + +> **Note:** Your app's network is given the same name as the "project name", which is based on the name of the directory it lives in. See the [Command line overview](reference/docker-compose.md) for how to override it. + +For example, suppose your app is in a directory called `myapp`, and your `docker-compose.yml` looks like this: + + web: + build: . + ports: + - "8000:8000" + db: + image: postgres + +When you run `docker-compose --x-networking up`, the following happens: + +1. A network called `myapp` is created. +2. A container is created using `web`'s configuration. It joins the network +`myapp` under the name `myapp_web_1`. +3. A container is created using `db`'s configuration. 
It joins the network +`myapp` under the name `myapp_db_1`. + +Each container can now look up the hostname `myapp_web_1` or `myapp_db_1` and +get back the appropriate container's IP address. For example, `web`'s +application code could connect to the URL `postgres://myapp_db_1:5432` and start +using the Postgres database. + +Because `web` explicitly maps a port, it's also accessible from the outside world via port 8000 on your Docker host's network interface. + +> **Note:** in the next release there will be additional aliases for the +> container, including a short name without the project name and container +> index. The full container name will remain as one of the aliases for backwards +> compatibility. + +## Updating containers + +If you make a configuration change to a service and run `docker-compose up` to update it, the old container will be removed and the new one will join the network under a different IP address but the same name. Running containers will be able to look up that name and connect to the new address, but the old address will stop working. + +If any containers have connections open to the old container, they will be closed. It is a container's responsibility to detect this condition, look up the name again and reconnect. + +## Configure how services are published + +By default, containers for each service are published on the network with the +container name. If you want to change the name, or stop containers from being +discoverable at all, you can use the `container_name` option: + + web: + build: . + container_name: "my-web-application" + +## Links + +Docker links are a one-way, single-host communication system. They should now be considered deprecated, and you should update your app to use networking instead. In the majority of cases, this will simply involve removing the `links` sections from your `docker-compose.yml`. + +## Specifying the network driver + +By default, Compose uses the `bridge` driver when creating the app’s network. 
The Docker Engine provides one other driver out-of-the-box: `overlay`, which implements secure communication between containers on different hosts (see the next section for how to set up and use the `overlay` driver). Docker also allows you to install [custom network drivers](/engine/extend/plugins_network.md). + +You can specify which one to use with the `--x-network-driver` flag: + + $ docker-compose --x-networking --x-network-driver=overlay up + +## Multi-host networking + +(TODO: talk about Swarm and the overlay driver) + +## Custom container network modes + +Compose allows you to specify a custom network mode for a service with the `net` option - for example, `net: "host"` specifies that its containers should use the same network namespace as the Docker host, and `net: "none"` specifies that they should have no networking capabilities. + +If a service specifies the `net` option, its containers will *not* join the app’s network and will not be able to communicate with other services in the app. + +If *all* services in an app specify the `net` option, a network will not be created at all. 
diff --git a/docs/pre-process.sh b/docs/pre-process.sh deleted file mode 100755 index 75e9611f2f..0000000000 --- a/docs/pre-process.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/bash -e - -# Populate an array with just docker dirs and one with content dirs -docker_dir=(`ls -d /docs/content/docker/*`) -content_dir=(`ls -d /docs/content/*`) - -# Loop content not of docker/ -# -# Sed to process GitHub Markdown -# 1-2 Remove comment code from metadata block -# 3 Remove .md extension from link text -# 4 Change ](/ to ](/project/ in links -# 5 Change ](word) to ](/project/word) -# 6 Change ](../../ to ](/project/ -# 7 Change ](../ to ](/project/word) -# -for i in "${content_dir[@]}" -do - : - case $i in - "/docs/content/windows") - ;; - "/docs/content/mac") - ;; - "/docs/content/linux") - ;; - "/docs/content/docker") - y=${i##*/} - find $i -type f -name "*.md" -exec sed -i.old \ - -e '/^/g' \ - -e '/^/g' {} \; - ;; - *) - y=${i##*/} - find $i -type f -name "*.md" -exec sed -i.old \ - -e '/^/g' \ - -e '/^/g' \ - -e 's/\(\]\)\([(]\)\(\/\)/\1\2\/'$y'\//g' \ - -e 's/\(\][(]\)\([A-z].*\)\(\.md\)/\1\/'$y'\/\2/g' \ - -e 's/\([(]\)\(.*\)\(\.md\)/\1\2/g' \ - -e 's/\(\][(]\)\(\.\/\)/\1\/'$y'\//g' \ - -e 's/\(\][(]\)\(\.\.\/\.\.\/\)/\1\/'$y'\//g' \ - -e 's/\(\][(]\)\(\.\.\/\)/\1\/'$y'\//g' {} \; - ;; - esac -done - -# -# Move docker directories to content -# -for i in "${docker_dir[@]}" -do - : - if [ -d $i ] - then - mv $i /docs/content/ - fi -done - -rm -rf /docs/content/docker - diff --git a/docs/production.md b/docs/production.md index 294f3c4e86..0a5e77b522 100644 --- a/docs/production.md +++ b/docs/production.md @@ -12,12 +12,9 @@ weight=1 ## Using Compose in production -While **Compose is not yet considered production-ready**, if you'd like to experiment and learn more about using it in production deployments, this guide -can help. 
-The project is actively working towards becoming -production-ready; to learn more about the progress being made, check out the -[roadmap](https://github.com/docker/compose/blob/master/ROADMAP.md) for details -on how it's coming along and what still needs to be done. +> Compose is still primarily aimed at development and testing environments. +> Compose may be used for smaller production deployments, but is probably +> not yet suitable for larger deployments. When deploying to production, you'll almost certainly want to make changes to your app configuration that are more appropriate to a live environment. These @@ -31,22 +28,19 @@ changes may include: - Specifying a restart policy (e.g., `restart: always`) to avoid downtime - Adding extra services (e.g., a log aggregator) -For this reason, you'll probably want to define a separate Compose file, say -`production.yml`, which specifies production-appropriate configuration. +For this reason, you'll probably want to define an additional Compose file, say +`production.yml`, which specifies production-appropriate +configuration. This configuration file only needs to include the changes you'd +like to make from the original Compose file. The additional Compose file +can be applied over the original `docker-compose.yml` to create a new configuration. -> **Note:** The [extends](extends.md) keyword is useful for maintaining multiple -> Compose files which re-use common services without having to manually copy and -> paste. +Once you've got a second configuration file, tell Compose to use it with the +`-f` option: -Once you've got an alternate configuration file, make Compose use it -by setting the `COMPOSE_FILE` environment variable: + $ docker-compose -f docker-compose.yml -f production.yml up -d - $ COMPOSE_FILE=production.yml - $ docker-compose up -d - -> **Note:** You can also use the file for a one-off command without setting -> an environment variable. 
You do this by passing the `-f` flag, e.g., -> `docker-compose -f production.yml up -d`. +See [Using multiple compose files](extends.md#different-environments) for a more +complete example. ### Deploying changes @@ -80,17 +74,12 @@ system, exposes the same API as a single Docker host, which means you can use Compose against a Swarm instance and run your apps across multiple hosts. Compose/Swarm integration is still in the experimental stage, and Swarm is still -in beta, but if you'd like to explore and experiment, check out the -[integration guide](https://github.com/docker/compose/blob/master/SWARM.md). +in beta, but if you'd like to explore and experiment, check out the integration +guide. ## Compose documentation - [Installing Compose](install.md) -- [Get started with Django](django.md) -- [Get started with Rails](rails.md) -- [Get started with Wordpress](wordpress.md) -- [Command line reference](cli.md) -- [Yaml file reference](yml.md) -- [Compose environment variables](env.md) -- [Compose command line completion](completion.md) - +- [Command line reference](./reference/index.md) +- [Compose file reference](compose-file.md) diff --git a/docs/rails.md b/docs/rails.md index 9ce6c4a6f8..8e16af6423 100644 --- a/docs/rails.md +++ b/docs/rails.md @@ -26,6 +26,7 @@ Dockerfile consists of: RUN mkdir /myapp WORKDIR /myapp ADD Gemfile /myapp/Gemfile + ADD Gemfile.lock /myapp/Gemfile.lock RUN bundle install ADD . /myapp @@ -36,6 +37,10 @@ Next, create a bootstrap `Gemfile` which just loads Rails. It'll be overwritten source 'https://rubygems.org' gem 'rails', '4.2.0' +You'll need an empty `Gemfile.lock` in order to build our `Dockerfile`. + + $ touch Gemfile.lock + Finally, `docker-compose.yml` is where the magic happens. 
This file describes the services that comprise your app (a database and a web app), how to get each one's Docker image (the database just runs on a pre-made PostgreSQL image, and the web app is built from the current directory), and the configuration needed to link them together and expose the web app's port. db: @@ -68,6 +73,12 @@ image. Once it's done, you should have generated a fresh app: README.rdoc config.ru public Rakefile db test + +The files `rails new` created are owned by root. This happens because the +container runs as the `root` user. Change the ownership of the new files. + + sudo chown -R $USER:$USER . + Uncomment the line in your new `Gemfile` which loads `therubyracer`, so you've got a Javascript runtime: @@ -79,6 +90,7 @@ rebuild.) $ docker-compose build + ### Connect the database The app is now bootable, but you're not quite there yet. By default, Rails @@ -86,8 +98,7 @@ expects a database to be running on `localhost` - so you need to point it at the `db` container instead. You also need to change the database and username to align with the defaults set by the `postgres` image. -Open up your newly-generated `database.yml` file. Replace its contents with the -following: +Replace the contents of `config/database.yml` with the following: development: &default adapter: postgresql @@ -117,17 +128,15 @@ Finally, you need to create the database. In another terminal, run: $ docker-compose run web rake db:create -That's it. Your app should now be running on port 3000 on your Docker daemon. If you're using [Docker Machine](https://docs.docker.com/machine), then `docker-machine ip MACHINE_VM` returns the Docker host IP address. +That's it. Your app should now be running on port 3000 on your Docker daemon. If you're using [Docker Machine](https://docs.docker.com/machine), then `docker-machine ip MACHINE_VM` returns the Docker host IP address. 
## More Compose documentation - [User guide](/) - [Installing Compose](install.md) +- [Getting Started](gettingstarted.md) - [Get started with Django](django.md) -- [Get started with Rails](rails.md) -- [Get started with Wordpress](wordpress.md) -- [Command line reference](cli.md) -- [Yaml file reference](yml.md) -- [Compose environment variables](env.md) -- [Compose command line completion](completion.md) +- [Get started with WordPress](wordpress.md) +- [Command line reference](./reference/index.md) +- [Compose file reference](compose-file.md) diff --git a/docs/reference/build.md b/docs/reference/build.md index b6e27bb264..c427199fec 100644 --- a/docs/reference/build.md +++ b/docs/reference/build.md @@ -16,8 +16,9 @@ Usage: build [options] [SERVICE...] Options: --no-cache Do not use cache when building the image. +--pull Always attempt to pull a newer version of the image. ``` Services are built once and then tagged as `project_service`, e.g., `composetest_db`. If you change a service's Dockerfile or the contents of its -build directory, run `docker-compose build` to rebuild it. \ No newline at end of file +build directory, run `docker-compose build` to rebuild it. diff --git a/docs/reference/docker-compose.md b/docs/reference/docker-compose.md index e252da0a70..32fcbe7064 100644 --- a/docs/reference/docker-compose.md +++ b/docs/reference/docker-compose.md @@ -5,7 +5,7 @@ description = "docker-compose Command Binary" keywords = ["fig, composition, compose, docker, orchestration, cli, docker-compose"] [menu.main] parent = "smn_compose_cli" -weight=-2 +weight=-2 +++ @@ -14,7 +14,7 @@ weight=-2 ``` Usage: - docker-compose [options] [COMMAND] [ARGS...] + docker-compose [-f=...] [options] [COMMAND] [ARGS...] 
docker-compose -h|--help Options: @@ -28,6 +28,7 @@ Commands: help Get help on a command kill Kill containers logs View output from containers + pause Pause services port Print the public port for a port binding ps List containers pull Pulls service images @@ -37,19 +38,67 @@ Commands: scale Set number of containers for a service start Start services stop Stop services + unpause Unpause services up Create and start containers migrate-to-labels Recreate containers to add labels + version Show the Docker-Compose version information ``` -The Docker Compose binary. You use this command to build and manage multiple services in Docker containers. +The Docker Compose binary. You use this command to build and manage multiple +services in Docker containers. -Use the `-f` flag to specify the location of a Compose configuration file. This -flag is optional. If you don't provide this flag. Compose looks for a file named -`docker-compose.yml` in the working directory. If the file is not found, -Compose looks in each parent directory successively, until it finds the file. +Use the `-f` flag to specify the location of a Compose configuration file. You +can supply multiple `-f` configuration files. When you supply multiple files, +Compose combines them into a single configuration. Compose builds the +configuration in the order you supply the files. Subsequent files override and +add to their predecessors. -Use a `-` as the filename to read configuration file from stdin. When stdin is -used all paths in the configuration are relative to the current working -directory. +For example, consider this command line: -Each configuration can has a project name. If you supply a `-p` flag, you can specify a project name. If you don't specify the flag, Compose uses the current directory name. +``` +$ docker-compose -f docker-compose.yml -f docker-compose.admin.yml run backup_db +``` + +The `docker-compose.yml` file might specify a `webapp` service. 
+ +``` +webapp: + image: examples/web + ports: + - "8000:8000" + volumes: + - "/data" +``` + +If the `docker-compose.admin.yml` also specifies this same service, any matching +fields will override the previous file. New values, add to the `webapp` service +configuration. + +``` +webapp: + build: . + environment: + - DEBUG=1 +``` + +Use a `-f` with `-` (dash) as the filename to read the configuration from +stdin. When stdin is used all paths in the configuration are +relative to the current working directory. + +The `-f` flag is optional. If you don't provide this flag on the command line, +Compose traverses the working directory and its subdirectories looking for a +`docker-compose.yml` and a `docker-compose.override.yml` file. You must supply +at least the `docker-compose.yml` file. If both files are present, Compose +combines the two files into a single configuration. The configuration in the +`docker-compose.override.yml` file is applied over and in addition to the values +in the `docker-compose.yml` file. + +Each configuration has a project name. If you supply a `-p` flag, you can +specify a project name. If you don't specify the flag, Compose uses the current +directory name. + + +## Where to go next + +* [CLI environment variables](overview.md) +* [Command line reference](index.md) diff --git a/docs/reference/index.md b/docs/reference/index.md index 3d3d55d82a..b2fb5bcadc 100644 --- a/docs/reference/index.md +++ b/docs/reference/index.md @@ -5,25 +5,30 @@ description = "Compose CLI reference" keywords = ["fig, composition, compose, docker, orchestration, cli, reference"] [menu.main] identifier = "smn_compose_cli" -parent = "smn_compose_ref" +parent = "smn_compose_ref" +++ ## Compose CLI reference -The following pages describe the usage information for the [docker-compose](/reference/docker-compose.md) subcommands. You can also see this information by running `docker-compose [SUBCOMMAND] --help` from the command line. 
+The following pages describe the usage information for the [docker-compose](docker-compose.md) subcommands. You can also see this information by running `docker-compose [SUBCOMMAND] --help` from the command line. -* [build](/reference/reference/build.md) -* [help](/reference/help.md) -* [kill](/reference/kill.md) -* [ps](/reference/ps.md) -* [restart](/reference/restart.md) -* [run](/reference/run.md) -* [start](/reference/start.md) -* [up](/reference/up.md) -* [logs](/reference/logs.md) -* [port](/reference/port.md) -* [pull](/reference/pull.md) -* [rm](/reference/rm.md) -* [scale](/reference/scale.md) -* [stop](/reference/stop.md) +* [build](build.md) +* [help](help.md) +* [kill](kill.md) +* [ps](ps.md) +* [restart](restart.md) +* [run](run.md) +* [start](start.md) +* [up](up.md) +* [logs](logs.md) +* [port](port.md) +* [pull](pull.md) +* [rm](rm.md) +* [scale](scale.md) +* [stop](stop.md) + +## Where to go next + +* [CLI environment variables](overview.md) +* [docker-compose Command](docker-compose.md) diff --git a/docs/reference/kill.md b/docs/reference/kill.md index e5dd057361..dc4bf23a1b 100644 --- a/docs/reference/kill.md +++ b/docs/reference/kill.md @@ -21,4 +21,4 @@ Options: Forces running containers to stop by sending a `SIGKILL` signal. Optionally the signal can be passed, for example: - $ docker-compose kill -s SIGINT \ No newline at end of file + $ docker-compose kill -s SIGINT diff --git a/docs/reference/overview.md b/docs/reference/overview.md index 458dea4046..3f589a9ded 100644 --- a/docs/reference/overview.md +++ b/docs/reference/overview.md @@ -5,7 +5,7 @@ description = "Introduction to the CLI" keywords = ["fig, composition, compose, docker, orchestration, cli, reference"] [menu.main] parent = "smn_compose_cli" -weight=-2 +weight=-2 +++ @@ -14,6 +14,13 @@ weight=-2 This section describes the subcommands you can use with the `docker-compose` command. You can run subcommand against one or more services. 
To run against a specific service, you supply the service name from your compose configuration. If you do not specify the service name, the command runs against all the services in your configuration. + +## Commands + +* [docker-compose Command](docker-compose.md) +* [CLI Reference](index.md) + + ## Environment Variables Several environment variables are available for you to configure the Docker Compose command-line behaviour. @@ -31,6 +38,26 @@ Setting this is optional. If you do not set this, the `COMPOSE_PROJECT_NAME` def Specify the file containing the compose configuration. If not provided, Compose looks for a file named `docker-compose.yml` in the current directory and then each parent directory in succession until a file by that name is found. +### COMPOSE\_API\_VERSION + +The Docker API only supports requests from clients which report a specific +version. If you receive a `client and server don't have same version error` using +`docker-compose`, you can workaround this error by setting this environment +variable. Set the version value to match the server version. + +Setting this variable is intended as a workaround for situations where you need +to run temporarily with a mismatch between the client and server version. For +example, if you can upgrade the client but need to wait to upgrade the server. + +Running with this variable set and a known mismatch does prevent some Docker +features from working properly. The exact features that fail would depend on the +Docker client and server versions. For this reason, running with this variable +set is only intended as a workaround and it is not officially supported. + +If you run into problems running with this set, resolve the mismatch through +upgrade and remove this setting to see if your problems resolve before notifying +support. + ### DOCKER\_HOST Sets the URL of the `docker` daemon. As with the Docker client, defaults to `unix:///var/run/docker.sock`. @@ -44,19 +71,14 @@ the `docker` daemon. 
Configures the path to the `ca.pem`, `cert.pem`, and `key.pem` files used for TLS verification. Defaults to `~/.docker`. +### COMPOSE\_HTTP\_TIMEOUT + +Configures the time (in seconds) a request to the Docker daemon is allowed to hang before Compose considers +it failed. Defaults to 60 seconds. +## Related Information - - - -## Compose documentation - -- [User guide](/) -- [Installing Compose](install.md) -- [Get started with Django](django.md) -- [Get started with Rails](rails.md) -- [Get started with Wordpress](wordpress.md) -- [Yaml file reference](yml.md) -- [Compose environment variables](env.md) -- [Compose command line completion](completion.md) +- [User guide](../index.md) +- [Installing Compose](../install.md) +- [Compose file reference](../compose-file.md) diff --git a/docs/reference/pause.md b/docs/reference/pause.md new file mode 100644 index 0000000000..a0ffab0359 --- /dev/null +++ b/docs/reference/pause.md @@ -0,0 +1,18 @@ + + +# pause + +``` +Usage: pause [SERVICE...] +``` + +Pauses running containers of a service. They can be unpaused with `docker-compose unpause`. diff --git a/docs/reference/port.md b/docs/reference/port.md index 76f93f2393..c946a97d39 100644 --- a/docs/reference/port.md +++ b/docs/reference/port.md @@ -20,4 +20,4 @@ Options: instances of a service [default: 1] ``` -Prints the public port for a port binding. \ No newline at end of file +Prints the public port for a port binding. diff --git a/docs/reference/pull.md b/docs/reference/pull.md index e5b5d166ff..5ec184b72c 100644 --- a/docs/reference/pull.md +++ b/docs/reference/pull.md @@ -13,6 +13,9 @@ parent = "smn_compose_cli" ``` Usage: pull [options] [SERVICE...] + +Options: +--ignore-pull-failures Pull what it can and ignores images with pull failures. ``` -Pulls service images. \ No newline at end of file +Pulls service images. 
diff --git a/docs/reference/run.md b/docs/reference/run.md index 5ea9a61bec..c1efb9a773 100644 --- a/docs/reference/run.md +++ b/docs/reference/run.md @@ -22,11 +22,12 @@ Options: -u, --user="" Run as specified username or uid --no-deps Don't start linked services. --rm Remove container after run. Ignored in detached mode. +-p, --publish=[] Publish a container's port(s) to the host --service-ports Run command with the service's ports enabled and mapped to the host. -T Disable pseudo-tty allocation. By default `docker-compose run` allocates a TTY. ``` -Runs a one-time command against a service. For example, the following command starts the `web` service and runs `bash` as its command. +Runs a one-time command against a service. For example, the following command starts the `web` service and runs `bash` as its command. $ docker-compose run web bash @@ -38,6 +39,10 @@ The second difference is the `docker-compose run` command does not create any of $ docker-compose run --service-ports web python manage.py shell +Alternatively manual port mapping can be specified. Same as when running Docker's `run` command - using `--publish` or `-p` options: + + $ docker-compose run --publish 8080:80 -p 2022:22 -p 127.0.0.1:2021:21 web python manage.py shell + If you start a service configured with links, the `run` command first checks to see if the linked service is running and starts the service if it is stopped. Once all the linked services are running, the `run` executes the command you passed it. 
So, for example, you could run: $ docker-compose run db psql -h db -U docker @@ -47,7 +52,3 @@ This would open up an interactive PostgreSQL shell for the linked `db` container. If you do not want the `run` command to start linked containers, specify the `--no-deps` flag: $ docker-compose run --no-deps web python manage.py shell - - - - diff --git a/docs/reference/scale.md b/docs/reference/scale.md index 9541830097..75140ee9e5 100644 --- a/docs/reference/scale.md +++ b/docs/reference/scale.md @@ -18,4 +18,4 @@ Sets the number of containers to run for a service. Numbers are specified as arguments in the form `service=num`. For example: - $ docker-compose scale web=2 worker=3 \ No newline at end of file + $ docker-compose scale web=2 worker=3 diff --git a/docs/reference/unpause.md b/docs/reference/unpause.md new file mode 100644 index 0000000000..6434b09ccc --- /dev/null +++ b/docs/reference/unpause.md @@ -0,0 +1,18 @@ + + +# unpause + +``` +Usage: unpause [SERVICE...] +``` + +Unpauses paused containers of a service. diff --git a/docs/wordpress.md b/docs/wordpress.md index eda755c178..373ef4d0d5 100644 --- a/docs/wordpress.md +++ b/docs/wordpress.md @@ -1,7 +1,7 @@ -# Quickstart Guide: Compose and Wordpress +# Quickstart Guide: Compose and WordPress -You can use Compose to easily run Wordpress in an isolated environment built -with Docker containers. +You can use Compose to easily run WordPress in an isolated environment built +with Docker containers. ## Define the project -First, [Install Compose](install.md) and then download Wordpress into the +First, [Install Compose](install.md) and then download WordPress into the current directory: $ curl https://wordpress.org/latest.tar.gz | tar -xvzf - @@ -36,7 +36,7 @@ your Dockerfile should be: ADD . /code This tells Docker how to build an image defining a container that contains PHP -and Wordpress. +and WordPress. 
Next you'll create a `docker-compose.yml` file that will start your web service and a separate MySQL instance: @@ -55,8 +55,8 @@ and a separate MySQL instance: environment: MYSQL_DATABASE: wordpress -Two supporting files are needed to get this working - first, `wp-config.php` is -the standard Wordpress config file with a single change to point the database +A supporting file is needed to get this working. `wp-config.php` is +the standard WordPress config file with a single change to point the database configuration at the `db` container: /usr/local/bin/docker-compose + chmod +x /usr/local/bin/docker-compose + + Or install the PyPi package: + + pip install -U docker-compose==1.5.0 + + Here's what's new: + + ...release notes go here... + +5. Attach the binaries and `script/run.sh` + +6. Add "Thanks" with a list of contributors. The contributor list can be generated + by running `./script/release/contributors`. + +7. If everything looks good, it's time to push the release. + + + ./script/release/push-release + + +8. Publish the release on GitHub. + +9. Check that all the binaries download (following the install instructions) and run. + +10. Email maintainers@dockerproject.org and engineering@docker.com about the new release. + +## If it’s a stable release (not an RC) + +1. Merge the bump PR. + +2. Make sure `origin/release` is updated locally: + + git fetch origin + +3. Update the `docs` branch on the upstream repo: + + git push git@github.com:docker/compose.git origin/release:docs + +4. Let the docs team know that it’s been updated so they can publish it. + +5. Close the release’s milestone. + +## If it’s a minor release (1.x.0), rather than a patch release (1.x.y) + +1. Open a PR against `master` to: + + - update `CHANGELOG.md` to bring it in line with `release` + - bump the version in `compose/__init__.py` to the *next* minor version number with `dev` appended. For example, if you just released `1.4.0`, update it to `1.5.0dev`. + +2. Get the PR merged. 
+ +## Finally + +1. Celebrate, however you’d like. diff --git a/requirements-build.txt b/requirements-build.txt new file mode 100644 index 0000000000..20aad4208c --- /dev/null +++ b/requirements-build.txt @@ -0,0 +1 @@ +pyinstaller==3.0 diff --git a/requirements-dev.txt b/requirements-dev.txt index 7b529623fb..73b8078350 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,6 +1,4 @@ -mock >= 1.0.1 -nose==1.3.4 -git+https://github.com/pyinstaller/pyinstaller.git@12e40471c77f588ea5be352f7219c873ddaae056#egg=pyinstaller -unittest2==0.8.0 -flake8==2.3.0 -pep8==1.6.1 +coverage==3.7.1 +mock>=1.0.1 +pytest==2.7.2 +pytest-cov==2.1.0 diff --git a/requirements.txt b/requirements.txt index f9cec8372c..daaaa95026 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,8 +1,10 @@ PyYAML==3.10 -docker-py==1.3.1 +docker-py==1.5.0 dockerpty==0.3.4 docopt==0.6.1 -requests==2.6.1 +enum34==1.0.4 +jsonschema==2.5.1 +requests==2.7.0 six==1.7.3 texttable==0.8.2 websocket-client==0.32.0 diff --git a/script/build-image b/script/build-image new file mode 100755 index 0000000000..3ac9729b47 --- /dev/null +++ b/script/build-image @@ -0,0 +1,15 @@ +#!/bin/bash + +set -e + +if [ -z "$1" ]; then + >&2 echo "First argument must be image tag." + exit 1 +fi + +TAG=$1 +VERSION="$(python setup.py --version)" + +python setup.py sdist +cp dist/docker-compose-$VERSION.tar.gz dist/docker-compose-release.tar.gz +docker build -t docker/compose:$TAG -f Dockerfile.run . diff --git a/script/build-linux b/script/build-linux index 5e4a9470e9..ade18bc535 100755 --- a/script/build-linux +++ b/script/build-linux @@ -2,11 +2,11 @@ set -ex +./script/clean + TAG="docker-compose" -docker build -t "$TAG" . +docker build -t "$TAG" . 
| tail -n 200 docker run \ - --rm \ - --user=user \ - --volume="$(pwd):/code" \ - --entrypoint="script/build-linux-inner" \ - "$TAG" + --rm --entrypoint="script/build-linux-inner" \ + -v $(pwd)/dist:/code/dist \ + "$TAG" diff --git a/script/build-linux-inner b/script/build-linux-inner index adc030eaa8..01137ff240 100755 --- a/script/build-linux-inner +++ b/script/build-linux-inner @@ -2,9 +2,13 @@ set -ex +TARGET=dist/docker-compose-Linux-x86_64 +VENV=/code/.tox/py27 + mkdir -p `pwd`/dist chmod 777 `pwd`/dist -pyinstaller -F bin/docker-compose -mv dist/docker-compose dist/docker-compose-Linux-x86_64 -dist/docker-compose-Linux-x86_64 version +$VENV/bin/pip install -q -r requirements-build.txt +su -c "$VENV/bin/pyinstaller docker-compose.spec" user +mv dist/docker-compose $TARGET +$TARGET version diff --git a/script/build-osx b/script/build-osx index 2a9cf512ef..042964e4be 100755 --- a/script/build-osx +++ b/script/build-osx @@ -4,10 +4,11 @@ set -ex PATH="/usr/local/bin:$PATH" rm -rf venv + virtualenv -p /usr/local/bin/python venv venv/bin/pip install -r requirements.txt -venv/bin/pip install -r requirements-dev.txt -venv/bin/pip install . -venv/bin/pyinstaller -F bin/docker-compose +venv/bin/pip install -r requirements-build.txt +venv/bin/pip install --no-deps . +venv/bin/pyinstaller docker-compose.spec mv dist/docker-compose dist/docker-compose-Darwin-x86_64 dist/docker-compose-Darwin-x86_64 version diff --git a/script/build-windows.ps1 b/script/build-windows.ps1 new file mode 100644 index 0000000000..42a4a501c1 --- /dev/null +++ b/script/build-windows.ps1 @@ -0,0 +1,57 @@ +# Builds the Windows binary. +# +# From a fresh 64-bit Windows 10 install, prepare the system as follows: +# +# 1. Install Git: +# +# http://git-scm.com/download/win +# +# 2. Install Python 2.7.10: +# +# https://www.python.org/downloads/ +# +# 3. 
Append ";C:\Python27;C:\Python27\Scripts" to the "Path" environment variable: +# +# https://www.microsoft.com/resources/documentation/windows/xp/all/proddocs/en-us/sysdm_advancd_environmnt_addchange_variable.mspx?mfr=true +# +# 4. In Powershell, run the following commands: +# +# $ pip install virtualenv +# $ Set-ExecutionPolicy -Scope CurrentUser RemoteSigned +# +# 5. Clone the repository: +# +# $ git clone https://github.com/docker/compose.git +# $ cd compose +# +# 6. Build the binary: +# +# .\script\build-windows.ps1 + +$ErrorActionPreference = "Stop" + +# Remove virtualenv +if (Test-Path venv) { + Remove-Item -Recurse -Force .\venv +} + +# Remove .pyc files +Get-ChildItem -Recurse -Include *.pyc | foreach ($_) { Remove-Item $_.FullName } + +# Create virtualenv +virtualenv .\venv + +# Install dependencies +.\venv\Scripts\pip install pypiwin32==219 +.\venv\Scripts\pip install -r requirements.txt +.\venv\Scripts\pip install --no-deps . +.\venv\Scripts\pip install --allow-external pyinstaller -r requirements-build.txt + +# Build binary +# pyinstaller has lots of warnings, so we need to run with ErrorAction = Continue +$ErrorActionPreference = "Continue" +.\venv\Scripts\pyinstaller .\docker-compose.spec +$ErrorActionPreference = "Stop" + +Move-Item -Force .\dist\docker-compose.exe .\dist\docker-compose-Windows-x86_64.exe +.\dist\docker-compose-Windows-x86_64.exe --version diff --git a/script/ci b/script/ci index 2e4ec9197f..f30265c02a 100755 --- a/script/ci +++ b/script/ci @@ -6,10 +6,16 @@ # $ docker build -t "$TAG" . # $ docker run --rm --volume="/var/run/docker.sock:/var/run/docker.sock" --volume="$(pwd)/.git:/code/.git" -e "TAG=$TAG" --entrypoint="script/ci" "$TAG" -set -e +set -ex + +docker version export DOCKER_VERSIONS=all +STORAGE_DRIVER=${STORAGE_DRIVER:-overlay} +export DOCKER_DAEMON_ARGS="--storage-driver=$STORAGE_DRIVER" + +GIT_VOLUME="--volumes-from=$(hostname)" . 
script/test-versions >&2 echo "Building Linux binary" -su -c script/build-linux-inner user +. script/build-linux-inner diff --git a/script/clean b/script/clean index 07a9cff14d..08ba551ae9 100755 --- a/script/clean +++ b/script/clean @@ -1,3 +1,6 @@ #!/bin/sh +set -e + find . -type f -name '*.pyc' -delete +find -name __pycache__ -delete rm -rf docs/_site build dist docker-compose.egg-info diff --git a/script/dind b/script/dind deleted file mode 100755 index f8fae6379c..0000000000 --- a/script/dind +++ /dev/null @@ -1,88 +0,0 @@ -#!/bin/bash -set -e - -# DinD: a wrapper script which allows docker to be run inside a docker container. -# Original version by Jerome Petazzoni -# See the blog post: http://blog.docker.com/2013/09/docker-can-now-run-within-docker/ -# -# This script should be executed inside a docker container in privilieged mode -# ('docker run --privileged', introduced in docker 0.6). - -# Usage: dind CMD [ARG...] - -# apparmor sucks and Docker needs to know that it's in a container (c) @tianon -export container=docker - -# First, make sure that cgroups are mounted correctly. -CGROUP=/cgroup - -mkdir -p "$CGROUP" - -if ! mountpoint -q "$CGROUP"; then - mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP || { - echo >&2 'Could not make a tmpfs mount. Did you use --privileged?' - exit 1 - } -fi - -if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then - mount -t securityfs none /sys/kernel/security || { - echo >&2 'Could not mount /sys/kernel/security.' - echo >&2 'AppArmor detection and -privileged mode might break.' - } -fi - -# Mount the cgroup hierarchies exactly as they are in the parent system. -for SUBSYS in $(cut -d: -f2 /proc/1/cgroup); do - mkdir -p "$CGROUP/$SUBSYS" - if ! 
mountpoint -q $CGROUP/$SUBSYS; then - mount -n -t cgroup -o "$SUBSYS" cgroup "$CGROUP/$SUBSYS" - fi - - # The two following sections address a bug which manifests itself - # by a cryptic "lxc-start: no ns_cgroup option specified" when - # trying to start containers withina container. - # The bug seems to appear when the cgroup hierarchies are not - # mounted on the exact same directories in the host, and in the - # container. - - # Named, control-less cgroups are mounted with "-o name=foo" - # (and appear as such under /proc//cgroup) but are usually - # mounted on a directory named "foo" (without the "name=" prefix). - # Systemd and OpenRC (and possibly others) both create such a - # cgroup. To avoid the aforementioned bug, we symlink "foo" to - # "name=foo". This shouldn't have any adverse effect. - name="${SUBSYS#name=}" - if [ "$name" != "$SUBSYS" ]; then - ln -s "$SUBSYS" "$CGROUP/$name" - fi - - # Likewise, on at least one system, it has been reported that - # systemd would mount the CPU and CPU accounting controllers - # (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu" - # but on a directory called "cpu,cpuacct" (note the inversion - # in the order of the groups). This tries to work around it. - if [ "$SUBSYS" = 'cpuacct,cpu' ]; then - ln -s "$SUBSYS" "$CGROUP/cpu,cpuacct" - fi -done - -# Note: as I write those lines, the LXC userland tools cannot setup -# a "sub-container" properly if the "devices" cgroup is not in its -# own hierarchy. Let's detect this and issue a warning. -if ! grep -q :devices: /proc/1/cgroup; then - echo >&2 'WARNING: the "devices" cgroup should be in its own hierarchy.' -fi -if ! grep -qw devices /proc/1/cgroup; then - echo >&2 'WARNING: it looks like the "devices" cgroup is not mounted.' -fi - -# Mount /tmp -mount -t tmpfs none /tmp - -if [ $# -gt 0 ]; then - exec "$@" -fi - -echo >&2 'ERROR: No command specified.' -echo >&2 'You probably want to run hack/make.sh, or maybe a shell?' 
diff --git a/script/docs b/script/docs deleted file mode 100755 index 31c58861d0..0000000000 --- a/script/docs +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/sh -set -ex - -# import the existing docs build cmds from docker/docker -DOCSPORT=8000 -GIT_BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null) -DOCKER_DOCS_IMAGE="compose-docs$GIT_BRANCH" -DOCKER_RUN_DOCS="docker run --rm -it -e NOCACHE" - -docker build -t "$DOCKER_DOCS_IMAGE" -f docs/Dockerfile . -$DOCKER_RUN_DOCS -p $DOCSPORT:8000 "$DOCKER_DOCS_IMAGE" mkdocs serve diff --git a/script/prepare-osx b/script/prepare-osx index ca2776b641..10bbbecc3d 100755 --- a/script/prepare-osx +++ b/script/prepare-osx @@ -24,7 +24,7 @@ if !(which brew); then ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" fi -brew update +brew update > /dev/null if !(python_version | grep "$desired_python_version"); then if brew list | grep python; then diff --git a/script/release/build-binaries b/script/release/build-binaries new file mode 100755 index 0000000000..083f8eb589 --- /dev/null +++ b/script/release/build-binaries @@ -0,0 +1,37 @@ +#!/bin/bash +# +# Build the release binaries +# + +. "$(dirname "${BASH_SOURCE[0]}")/utils.sh" + +function usage() { + >&2 cat << EOM +Build binaries for the release. + +This script requires that 'git config branch.${BRANCH}.release' is set to the +release version for the release branch. + +EOM + exit 1 +} + +BRANCH="$(git rev-parse --abbrev-ref HEAD)" +VERSION="$(git config "branch.${BRANCH}.release")" || usage +REPO=docker/compose + +# Build the binaries +script/clean +script/build-linux +# TODO: build osx binary +# script/prepare-osx +# script/build-osx +# TODO: build or fetch the windows binary +echo "You need to build the osx/windows binaries, that step is not automated yet." 
+ +echo "Building the container distribution" +script/build-image $VERSION + +echo "Create a github release" +# TODO: script more of this https://developer.github.com/v3/repos/releases/ +browser https://github.com/$REPO/releases/new diff --git a/script/release/cherry-pick-pr b/script/release/cherry-pick-pr new file mode 100755 index 0000000000..f4a5a7406b --- /dev/null +++ b/script/release/cherry-pick-pr @@ -0,0 +1,34 @@ +#!/bin/bash +# +# Cherry-pick a PR into the release branch +# + +set -e +set -o pipefail + + +function usage() { + >&2 cat << EOM +Cherry-pick commits from a github pull request. + +Usage: + + $0 +EOM + exit 1 +} + +[ -n "$1" ] || usage + +if [ -z "$(command -v hub 2> /dev/null)" ]; then + >&2 echo "$0 requires https://hub.github.com/." + >&2 echo "Please install it and make sure it is available on your \$PATH." + exit 2 +fi + + +REPO=docker/compose +GITHUB=https://github.com/$REPO/pull +PR=$1 +url="$GITHUB/$PR" +hub am -3 $url diff --git a/script/release/contributors b/script/release/contributors new file mode 100755 index 0000000000..bb9fe871ca --- /dev/null +++ b/script/release/contributors @@ -0,0 +1,27 @@ +#!/bin/bash +set -e + + +function usage() { + >&2 cat << EOM +Print the list of github contributors for the release + +Usage: + + $0 +EOM + exit 1 +} + +[[ -n "$1" ]] || usage +PREV_RELEASE=$1 +VERSION=HEAD +URL="https://api.github.com/repos/docker/compose/compare" + +curl -sf "$URL/$PREV_RELEASE...$VERSION" | \ + jq -r '.commits[].author.login' | \ + sort | \ + uniq -c | \ + sort -nr | \ + awk '{print "@"$2","}' | \ + xargs echo diff --git a/script/release/make-branch b/script/release/make-branch new file mode 100755 index 0000000000..e2eae4d5f2 --- /dev/null +++ b/script/release/make-branch @@ -0,0 +1,99 @@ +#!/bin/bash +# +# Prepare a new release branch +# + +. 
"$(dirname "${BASH_SOURCE[0]}")/utils.sh" + +function usage() { + >&2 cat << EOM +Create a new release branch 'release-' + +Usage: + + $0 [] + +Options: + + version version string for the release (ex: 1.6.0) + base_version branch or tag to start from. Defaults to master. For + bug-fix releases use the previous stage release tag. + +EOM + exit 1 +} + + +[ -n "$1" ] || usage +VERSION=$1 +BRANCH=bump-$VERSION +REPO=docker/compose +GITHUB_REPO=git@github.com:$REPO + +if [ -z "$2" ]; then + BASE_VERSION="master" +else + BASE_VERSION=$2 +fi + + +DEFAULT_REMOTE=release +REMOTE="$(find_remote "$GITHUB_REPO")" +# If we don't have a docker remote add one +if [ -z "$REMOTE" ]; then + echo "Creating $DEFAULT_REMOTE remote" + git remote add ${DEFAULT_REMOTE} ${GITHUB_REPO} +fi + +# handle the difference between a branch and a tag +if [ -z "$(git name-rev $BASE_VERSION | grep tags)" ]; then + BASE_VERSION=$REMOTE/$BASE_VERSION +fi + +echo "Creating a release branch $VERSION from $BASE_VERSION" +read -n1 -r -p "Continue? (ctrl+c to cancel)" +git fetch $REMOTE -p +git checkout -b $BRANCH $BASE_VERSION + +echo "Merging remote release branch into new release branch" +git merge --strategy=ours --no-edit $REMOTE/release + +# Store the release version for this branch in git, so that other release +# scripts can use it +git config "branch.${BRANCH}.release" $VERSION + + +echo "Update versions in docs/install.md, compose/__init__.py, script/run.sh" +$EDITOR docs/install.md +$EDITOR compose/__init__.py +$EDITOR script/run.sh + + +echo "Write release notes in CHANGELOG.md" +browser "https://github.com/docker/compose/issues?q=milestone%3A$VERSION+is%3Aclosed" +$EDITOR CHANGELOG.md + + +git diff +echo "Verify changes before commit. 
Exit the shell to commit changes" +$SHELL || true +git commit -a -m "Bump $VERSION" --signoff --no-verify + + +echo "Push branch to user remote" +GITHUB_USER=$USER +USER_REMOTE="$(find_remote $GITHUB_USER/compose)" +if [ -z "$USER_REMOTE" ]; then + echo "No user remote found for $GITHUB_USER" + read -r -p "Enter the name of your github user: " GITHUB_USER + # assumes there is already a user remote somewhere + USER_REMOTE=$(find_remote $GITHUB_USER/compose) +fi +if [ -z "$USER_REMOTE" ]; then + >&2 echo "No user remote found. You need to 'git push' your branch." + exit 2 +fi + + +git push $USER_REMOTE +browser https://github.com/$REPO/compare/docker:release...$GITHUB_USER:$BRANCH?expand=1 diff --git a/script/release/push-release b/script/release/push-release new file mode 100755 index 0000000000..ccdf249607 --- /dev/null +++ b/script/release/push-release @@ -0,0 +1,76 @@ +#!/bin/bash +# +# Create the official release +# + +. "$(dirname "${BASH_SOURCE[0]}")/utils.sh" + +function usage() { + >&2 cat << EOM +Publish a release by building all artifacts and pushing them. + +This script requires that 'git config branch.${BRANCH}.release' is set to the +release version for the release branch. + +EOM + exit 1 +} + +BRANCH="$(git rev-parse --abbrev-ref HEAD)" +VERSION="$(git config "branch.${BRANCH}.release")" || usage + +if [ -z "$(command -v jq 2> /dev/null)" ]; then + >&2 echo "$0 requires https://stedolan.github.io/jq/" + >&2 echo "Please install it and make sure it is available on your \$PATH." + exit 2 +fi + + +if [ -z "$(command -v pandoc 2> /dev/null)" ]; then + >&2 echo "$0 requires http://pandoc.org/" + >&2 echo "Please install it and make sure it is available on your \$PATH." 
+ exit 2 +fi + +API=https://api.github.com/repos +REPO=docker/compose +GITHUB_REPO=git@github.com:$REPO + +# Check the build status is green +sha=$(git rev-parse HEAD) +url=$API/$REPO/statuses/$sha +build_status=$(curl -s $url | jq -r '.[0].state') +if [ -n "$SKIP_BUILD_CHECK" ]; then + echo "Skipping build status check..." +elif [[ "$build_status" != "success" ]]; then + >&2 echo "Build status is $build_status, but it should be success." + exit -1 +fi + +echo "Tagging the release as $VERSION" +git tag $VERSION +git push $GITHUB_REPO $VERSION + +echo "Uploading the docker image" +docker push docker/compose:$VERSION + +echo "Uploading sdist to pypi" +pandoc -f markdown -t rst README.md -o README.rst +sed -i -e 's/logo.png?raw=true/https:\/\/github.com\/docker\/compose\/raw\/master\/logo.png?raw=true/' README.rst +python setup.py sdist +if [ "$(command -v twine 2> /dev/null)" ]; then + twine upload ./dist/docker-compose-${VERSION}.tar.gz +else + python setup.py upload +fi + +echo "Testing pip package" +virtualenv venv-test +source venv-test/bin/activate +pip install docker-compose==$VERSION +docker-compose version +deactivate +rm -rf venv-test + +echo "Now publish the github release, and test the downloads." +echo "Email maintainers@dockerproject.org and engineering@docker.com about the new release." diff --git a/script/release/rebase-bump-commit b/script/release/rebase-bump-commit new file mode 100755 index 0000000000..14ad22a982 --- /dev/null +++ b/script/release/rebase-bump-commit @@ -0,0 +1,38 @@ +#!/bin/bash +# +# Move the "bump to " commit to the HEAD of the branch +# + +. "$(dirname "${BASH_SOURCE[0]}")/utils.sh" + +function usage() { + >&2 cat << EOM +Move the "bump to " commit to the HEAD of the branch + +This script requires that 'git config branch.${BRANCH}.release' is set to the +release version for the release branch. 
+ +EOM + exit 1 +} + + +BRANCH="$(git rev-parse --abbrev-ref HEAD)" +VERSION="$(git config "branch.${BRANCH}.release")" || usage + + +COMMIT_MSG="Bump $VERSION" +sha="$(git log --grep "$COMMIT_MSG" --format="%H")" +if [ -z "$sha" ]; then + >&2 echo "No commit with message \"$COMMIT_MSG\"" + exit 2 +fi +if [[ "$sha" == "$(git rev-parse HEAD)" ]]; then + >&2 echo "Bump commit already at HEAD" + exit 0 +fi + +commits=$(git log --format="%H" "$sha..HEAD" | wc -l) + +git rebase --onto $sha~1 HEAD~$commits $BRANCH +git cherry-pick $sha diff --git a/script/release/utils.sh b/script/release/utils.sh new file mode 100644 index 0000000000..b4e5a2e6a0 --- /dev/null +++ b/script/release/utils.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# +# Util functions for release scripts +# + +set -e +set -o pipefail + + +function browser() { + local url=$1 + xdg-open $url || open $url +} + + +function find_remote() { + local url=$1 + for remote in $(git remote); do + git config --get remote.${remote}.url | grep $url > /dev/null && echo -n $remote + done + # Always return true, extra remotes cause it to return false + true +} diff --git a/script/run.sh b/script/run.sh new file mode 100755 index 0000000000..cf46c143c3 --- /dev/null +++ b/script/run.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# +# Run docker-compose in a container +# +# This script will attempt to mirror the host paths by using volumes for the +# following paths: +# * $(pwd) +# * $(dirname $COMPOSE_FILE) if it's set +# * $HOME if it's set +# +# You can add additional volumes (or any docker run options) using +# the $COMPOSE_OPTIONS environment variable. 
+# + + +set -e + +VERSION="1.5.0" +IMAGE="docker/compose:$VERSION" + + +# Setup options for connecting to docker host +if [ -z "$DOCKER_HOST" ]; then + DOCKER_HOST="/var/run/docker.sock" +fi +if [ -S "$DOCKER_HOST" ]; then + DOCKER_ADDR="-v $DOCKER_HOST:$DOCKER_HOST -e DOCKER_HOST" +else + DOCKER_ADDR="-e DOCKER_HOST" +fi + + +# Setup volume mounts for compose config and context +VOLUMES="-v $(pwd):$(pwd)" +if [ -n "$COMPOSE_FILE" ]; then + compose_dir=$(dirname $COMPOSE_FILE) +fi +# TODO: also check --file argument +if [ -n "$compose_dir" ]; then + VOLUMES="$VOLUMES -v $compose_dir:$compose_dir" +fi +if [ -n "$HOME" ]; then + VOLUMES="$VOLUMES -v $HOME:$HOME" +fi + + +exec docker run --rm -ti $DOCKER_ADDR $COMPOSE_OPTIONS $VOLUMES -w $(pwd) $IMAGE $@ diff --git a/script/test b/script/test index 625af09b35..bdb3579b01 100755 --- a/script/test +++ b/script/test @@ -5,13 +5,11 @@ set -ex TAG="docker-compose:$(git rev-parse --short HEAD)" +rm -rf coverage-html +# Create the host directory so it's owned by $USER +mkdir -p coverage-html + docker build -t "$TAG" . -docker run \ - --rm \ - --volume="/var/run/docker.sock:/var/run/docker.sock" \ - -e DOCKER_VERSIONS \ - -e "TAG=$TAG" \ - -e "affinity:image==$TAG" \ - --entrypoint="script/test-versions" \ - "$TAG" \ - "$@" + +GIT_VOLUME="--volume=$(pwd)/.git:/code/.git" +. 
{
  "package": {
    "name": "${TRAVIS_OS_NAME}",
    "repo": "master",
    "subject": "docker-compose",
    "desc": "Automated build of master branch from travis ci.",
    "website_url": "https://github.com/docker/compose",
    "issue_tracker_url": "https://github.com/docker/compose/issues",
    "vcs_url": "https://github.com/docker/compose.git",
    "licenses": ["Apache-2.0"]
  },

  "version": {
    "name": "master",
    "desc": "Automated build of the master branch.",
    "released": "${DATE}",
    "vcs_tag": "master"
  },

  "files": [
    {
      "includePattern": "dist/(.*)",
      "excludePattern": ".*\\.tar.gz",
      "uploadPattern": "$1",
      "matrixParams": { "override": 1 }
    }
  ],
  "publish": true
}
#!/usr/bin/env python
"""
Query the github API for the git tags of a project, and return a list of
version tags for recent releases, or the default release.

The default release is the most recent non-RC version.

Recent is a list of unique major.minor versions, where each is the most
recent version in the series.

For example, if the list of versions is:

    1.8.0-rc2
    1.8.0-rc1
    1.7.1
    1.7.0
    1.7.0-rc1
    1.6.2
    1.6.1

`default` would return `1.7.1` and
`recent -n 3` would return `1.8.0-rc2 1.7.1 1.6.2`
"""
from __future__ import print_function

import argparse
import itertools
import operator
from collections import namedtuple


GITHUB_API = 'https://api.github.com/repos'


class Version(namedtuple('_Version', 'major minor patch rc')):
    """A git version tag such as ``1.8.0-rc2``, split into its components.

    ``rc`` is the empty string for final releases.
    """

    @classmethod
    def parse(cls, version):
        """Parse a tag name (optionally prefixed with ``v``) into a Version.

        Raises ValueError for tags that are not ``major.minor.patch[-rc]``.
        """
        version = version.lstrip('v')
        version, _, rc = version.partition('-')
        # maxsplit=2: a well-formed tag has exactly three numeric components
        # (the original maxsplit=3 allowed a fourth split that could never
        # unpack successfully anyway).
        major, minor, patch = version.split('.', 2)
        return cls(int(major), int(minor), int(patch), rc)

    @property
    def major_minor(self):
        return self.major, self.minor

    @property
    def order(self):
        """Return a representation that allows this object to be sorted
        correctly with the default comparator.
        """
        # rc releases should appear before official releases
        rc = (0, self.rc) if self.rc else (1, )
        return (self.major, self.minor, self.patch) + rc

    def __str__(self):
        rc = '-{}'.format(self.rc) if self.rc else ''
        return '.'.join(map(str, self[:3])) + rc


def group_versions(versions):
    """Group versions by `major.minor` releases.

    Input must already be sorted so that equal major.minor versions are
    adjacent (itertools.groupby only groups consecutive items).

    Example:

        >>> group_versions([
            Version(1, 0, 0),
            Version(2, 0, 0, 'rc1'),
            Version(2, 0, 0),
            Version(2, 1, 0),
        ])

        [
            [Version(1, 0, 0)],
            [Version(2, 0, 0), Version(2, 0, 0, 'rc1')],
            [Version(2, 1, 0)],
        ]
    """
    return [
        list(releases)
        for _, releases
        in itertools.groupby(versions, operator.attrgetter('major_minor'))
    ]


def get_latest_versions(versions, num=1):
    """Return a list of the most recent versions for each major.minor version
    group.

    If there are fewer than `num` groups, return one version per existing
    group instead of raising IndexError (the original indexed `range(num)`
    directly and crashed when a project had few release series).
    """
    return [group[0] for group in group_versions(versions)[:num]]


def get_default(versions):
    """Return a :class:`Version` for the latest non-rc version.

    Returns None if every version in `versions` is an RC.
    """
    for version in versions:
        if not version.rc:
            return version


def get_github_releases(project):
    """Query the Github API for a list of version tags and return them in
    sorted order.

    See https://developer.github.com/v3/repos/#list-tags
    """
    # Imported here so the pure version-parsing helpers above can be used
    # without the third-party `requests` dependency installed.
    import requests

    url = '{}/{}/tags'.format(GITHUB_API, project)
    response = requests.get(url)
    response.raise_for_status()
    versions = [Version.parse(tag['name']) for tag in response.json()]
    return sorted(versions, reverse=True, key=operator.attrgetter('order'))


def parse_args(argv):
    """Parse CLI arguments; `argv=None` falls back to sys.argv[1:]."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('project', help="Github project name (ex: docker/docker)")
    parser.add_argument('command', choices=['recent', 'default'])
    parser.add_argument('-n', '--num', type=int, default=2,
                        help="Number of versions to return from `recent`")
    return parser.parse_args(argv)


def main(argv=None):
    args = parse_args(argv)
    versions = get_github_releases(args.project)

    if args.command == 'recent':
        print(' '.join(map(str, get_latest_versions(versions, args.num))))
    elif args.command == 'default':
        print(get_default(versions))
    else:
        # Unreachable: argparse `choices` rejects anything else.
        raise ValueError("Unknown command {}".format(args.command))


if __name__ == "__main__":
    main()
deleted file mode 100755 index 3e669b5d7a..0000000000 --- a/script/wrapdocker +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -if [ "$DOCKER_VERSION" != "" ] && [ "$DOCKER_VERSION" != "default" ]; then - ln -fs "/usr/local/bin/docker-$DOCKER_VERSION" "/usr/local/bin/docker" -fi - -# If a pidfile is still around (for example after a container restart), -# delete it so that docker can start. -rm -rf /var/run/docker.pid -docker -d --storage-driver="overlay" &>/var/log/docker.log & -docker_pid=$! - ->&2 echo "Waiting for Docker to start..." -while ! docker ps &>/dev/null; do - if ! kill -0 "$docker_pid" &>/dev/null; then - >&2 echo "Docker failed to start" - cat /var/log/docker.log - exit 1 - fi - - sleep 1 -done - ->&2 echo ">" "$@" -exec "$@" diff --git a/setup.py b/setup.py index 9bca4752de..4020122b15 100644 --- a/setup.py +++ b/setup.py @@ -1,13 +1,16 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -from __future__ import unicode_literals from __future__ import absolute_import -from setuptools import setup, find_packages +from __future__ import unicode_literals + import codecs import os import re import sys +from setuptools import find_packages +from setuptools import setup + def read(*parts): path = os.path.join(os.path.dirname(__file__), *parts) @@ -27,25 +30,24 @@ def find_version(*file_paths): install_requires = [ 'docopt >= 0.6.1, < 0.7', 'PyYAML >= 3.10, < 4', - 'requests >= 2.6.1, < 2.7', + 'requests >= 2.6.1, < 2.8', 'texttable >= 0.8.1, < 0.9', 'websocket-client >= 0.32.0, < 1.0', - 'docker-py >= 1.3.1, < 1.4', + 'docker-py >= 1.5.0, < 2', 'dockerpty >= 0.3.4, < 0.4', 'six >= 1.3.0, < 2', + 'jsonschema >= 2.5.1, < 3', ] tests_require = [ - 'mock >= 1.0.1', - 'nose', - 'pyinstaller', - 'flake8', + 'pytest', ] -if sys.version_info < (2, 7): - tests_require.append('unittest2') +if sys.version_info[:2] < (3, 4): + tests_require.append('mock >= 1.0.1') + install_requires.append('enum34 >= 1.0.4, < 2') setup( diff --git a/tests/__init__.py b/tests/__init__.py 
# Test-suite compatibility shims: expose `unittest` and `mock` under the same
# names regardless of interpreter version, so test modules can simply do
# `from tests import mock` / use `unittest` directly.
import sys

if sys.version_info >= (2, 7):
    import unittest  # NOQA
else:
    # Pre-2.7 interpreters get the modern TestCase API from the
    # unittest2 backport, re-bound under the stdlib name.
    import unittest2 as unittest  # NOQA

try:
    # Newer interpreters ship mock inside the unittest package.
    from unittest import mock
except ImportError:
    # Fall back to the standalone `mock` distribution (installed for
    # older Pythons by setup.py's conditional tests_require).
    import mock  # NOQA
+ net: "container:db" diff --git a/tests/fixtures/extends/invalid-volumes.yml b/tests/fixtures/extends/invalid-volumes.yml new file mode 100644 index 0000000000..3db0118e0e --- /dev/null +++ b/tests/fixtures/extends/invalid-volumes.yml @@ -0,0 +1,9 @@ +myweb: + build: '.' + extends: + service: web + command: top +web: + build: '.' + volumes_from: + - "db" diff --git a/tests/fixtures/extends/nonexistent-path-base.yml b/tests/fixtures/extends/nonexistent-path-base.yml index 1cf9a304ae..4e6c82b0d7 100644 --- a/tests/fixtures/extends/nonexistent-path-base.yml +++ b/tests/fixtures/extends/nonexistent-path-base.yml @@ -3,4 +3,4 @@ dnebase: command: /bin/true environment: - FOO=1 - - BAR=1 \ No newline at end of file + - BAR=1 diff --git a/tests/fixtures/extends/nonexistent-path-child.yml b/tests/fixtures/extends/nonexistent-path-child.yml index aab11459b1..d3b732f2a3 100644 --- a/tests/fixtures/extends/nonexistent-path-child.yml +++ b/tests/fixtures/extends/nonexistent-path-child.yml @@ -5,4 +5,4 @@ dnechild: image: busybox command: /bin/true environment: - - BAR=2 \ No newline at end of file + - BAR=2 diff --git a/tests/fixtures/extends/nonexistent-service.yml b/tests/fixtures/extends/nonexistent-service.yml new file mode 100644 index 0000000000..e9e17f1bdc --- /dev/null +++ b/tests/fixtures/extends/nonexistent-service.yml @@ -0,0 +1,4 @@ +web: + image: busybox + extends: + service: foo diff --git a/tests/fixtures/extends/service-with-invalid-schema.yml b/tests/fixtures/extends/service-with-invalid-schema.yml new file mode 100644 index 0000000000..00c36647ef --- /dev/null +++ b/tests/fixtures/extends/service-with-invalid-schema.yml @@ -0,0 +1,4 @@ +myweb: + extends: + file: valid-composite-extends.yml + service: web diff --git a/tests/fixtures/extends/service-with-valid-composite-extends.yml b/tests/fixtures/extends/service-with-valid-composite-extends.yml new file mode 100644 index 0000000000..6c419ed070 --- /dev/null +++ 
b/tests/fixtures/extends/service-with-valid-composite-extends.yml @@ -0,0 +1,5 @@ +myweb: + build: '.' + extends: + file: 'valid-composite-extends.yml' + service: web diff --git a/tests/fixtures/extends/specify-file-as-self.yml b/tests/fixtures/extends/specify-file-as-self.yml index 7e24997623..c24f10bc92 100644 --- a/tests/fixtures/extends/specify-file-as-self.yml +++ b/tests/fixtures/extends/specify-file-as-self.yml @@ -12,5 +12,6 @@ web: environment: - "BAZ=3" otherweb: + image: busybox environment: - "YEP=1" diff --git a/tests/fixtures/extends/valid-common-config.yml b/tests/fixtures/extends/valid-common-config.yml new file mode 100644 index 0000000000..d8f13e7a86 --- /dev/null +++ b/tests/fixtures/extends/valid-common-config.yml @@ -0,0 +1,6 @@ +myweb: + build: '.' + extends: + file: valid-common.yml + service: common-config + command: top diff --git a/tests/fixtures/extends/valid-common.yml b/tests/fixtures/extends/valid-common.yml new file mode 100644 index 0000000000..07ad68e3e7 --- /dev/null +++ b/tests/fixtures/extends/valid-common.yml @@ -0,0 +1,3 @@ +common-config: + environment: + - FOO=1 diff --git a/tests/fixtures/extends/valid-composite-extends.yml b/tests/fixtures/extends/valid-composite-extends.yml new file mode 100644 index 0000000000..8816c3f3b2 --- /dev/null +++ b/tests/fixtures/extends/valid-composite-extends.yml @@ -0,0 +1,2 @@ +web: + command: top diff --git a/tests/fixtures/extends/valid-interpolation-2.yml b/tests/fixtures/extends/valid-interpolation-2.yml new file mode 100644 index 0000000000..cb7bd93fc2 --- /dev/null +++ b/tests/fixtures/extends/valid-interpolation-2.yml @@ -0,0 +1,3 @@ +web: + build: '.' 
+ hostname: "host-${HOSTNAME_VALUE}" diff --git a/tests/fixtures/extends/valid-interpolation.yml b/tests/fixtures/extends/valid-interpolation.yml new file mode 100644 index 0000000000..68e8740fb4 --- /dev/null +++ b/tests/fixtures/extends/valid-interpolation.yml @@ -0,0 +1,5 @@ +myweb: + extends: + service: web + file: valid-interpolation-2.yml + command: top diff --git a/tests/fixtures/extends/verbose-and-shorthand.yml b/tests/fixtures/extends/verbose-and-shorthand.yml new file mode 100644 index 0000000000..d381630275 --- /dev/null +++ b/tests/fixtures/extends/verbose-and-shorthand.yml @@ -0,0 +1,15 @@ +base: + image: busybox + environment: + - "BAR=1" + +verbose: + extends: + service: base + environment: + - "FOO=1" + +shorthand: + extends: base + environment: + - "FOO=2" diff --git a/tests/fixtures/longer-filename-composefile/docker-compose.yaml b/tests/fixtures/longer-filename-composefile/docker-compose.yaml index b55a9e1245..a4eba2d05d 100644 --- a/tests/fixtures/longer-filename-composefile/docker-compose.yaml +++ b/tests/fixtures/longer-filename-composefile/docker-compose.yaml @@ -1,3 +1,3 @@ definedinyamlnotyml: image: busybox:latest - command: top \ No newline at end of file + command: top diff --git a/tests/fixtures/override-files/docker-compose.override.yml b/tests/fixtures/override-files/docker-compose.override.yml new file mode 100644 index 0000000000..a03d3d6f5f --- /dev/null +++ b/tests/fixtures/override-files/docker-compose.override.yml @@ -0,0 +1,6 @@ + +web: + command: "top" + +db: + command: "top" diff --git a/tests/fixtures/override-files/docker-compose.yml b/tests/fixtures/override-files/docker-compose.yml new file mode 100644 index 0000000000..8eb43ddb06 --- /dev/null +++ b/tests/fixtures/override-files/docker-compose.yml @@ -0,0 +1,10 @@ + +web: + image: busybox:latest + command: "sleep 200" + links: + - db + +db: + image: busybox:latest + command: "sleep 200" diff --git a/tests/fixtures/override-files/extra.yml 
b/tests/fixtures/override-files/extra.yml new file mode 100644 index 0000000000..7b3ade9c2d --- /dev/null +++ b/tests/fixtures/override-files/extra.yml @@ -0,0 +1,9 @@ + +web: + links: + - db + - other + +other: + image: busybox:latest + command: "top" diff --git a/tests/fixtures/ports-composefile/docker-compose.yml b/tests/fixtures/ports-composefile/docker-compose.yml index 9496ee0826..c213068def 100644 --- a/tests/fixtures/ports-composefile/docker-compose.yml +++ b/tests/fixtures/ports-composefile/docker-compose.yml @@ -5,3 +5,4 @@ simple: ports: - '3000' - '49152:3001' + - '49153-49154:3002-3003' diff --git a/tests/fixtures/simple-composefile/digest.yml b/tests/fixtures/simple-composefile/digest.yml new file mode 100644 index 0000000000..08f1d993e9 --- /dev/null +++ b/tests/fixtures/simple-composefile/digest.yml @@ -0,0 +1,6 @@ +simple: + image: busybox:latest + command: top +digest: + image: busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d + command: top diff --git a/tests/fixtures/simple-composefile/ignore-pull-failures.yml b/tests/fixtures/simple-composefile/ignore-pull-failures.yml new file mode 100644 index 0000000000..a28f792233 --- /dev/null +++ b/tests/fixtures/simple-composefile/ignore-pull-failures.yml @@ -0,0 +1,6 @@ +simple: + image: busybox:latest + command: top +another: + image: nonexisting-image:latest + command: top diff --git a/tests/fixtures/volume-path-interpolation/docker-compose.yml b/tests/fixtures/volume-path-interpolation/docker-compose.yml new file mode 100644 index 0000000000..6d4e236af9 --- /dev/null +++ b/tests/fixtures/volume-path-interpolation/docker-compose.yml @@ -0,0 +1,5 @@ +test: + image: busybox + command: top + volumes: + - "~/${VOLUME_NAME}:/container-path" diff --git a/tests/integration/cli_test.py b/tests/integration/cli_test.py index c54a85bb2d..d621f2d132 100644 --- a/tests/integration/cli_test.py +++ b/tests/integration/cli_test.py @@ -1,16 +1,18 @@ from __future__ import absolute_import 
-from operator import attrgetter -import sys + import os import shlex +import sys +from operator import attrgetter -import mock from six import StringIO -from mock import patch +from .. import mock from .testcases import DockerClientTestCase -from compose.cli.main import TopLevelCommand +from compose.cli.command import get_project +from compose.cli.docker_client import docker_client from compose.cli.errors import UserError +from compose.cli.main import TopLevelCommand from compose.project import NoSuchService @@ -38,7 +40,7 @@ class CLITestCase(DockerClientTestCase): if hasattr(self, '_project'): return self._project - return self.command.get_project() + return get_project(self.command.base_dir) def test_help(self): old_base_dir = self.command.base_dir @@ -51,59 +53,107 @@ class CLITestCase(DockerClientTestCase): self.command.base_dir = old_base_dir # TODO: address the "Inappropriate ioctl for device" warnings in test output - @patch('sys.stdout', new_callable=StringIO) - def test_ps(self, mock_stdout): + def test_ps(self): self.project.get_service('simple').create_container() - self.command.dispatch(['ps'], None) + with mock.patch('sys.stdout', new_callable=StringIO) as mock_stdout: + self.command.dispatch(['ps'], None) self.assertIn('simplecomposefile_simple_1', mock_stdout.getvalue()) - @patch('sys.stdout', new_callable=StringIO) - def test_ps_default_composefile(self, mock_stdout): + def test_ps_default_composefile(self): self.command.base_dir = 'tests/fixtures/multiple-composefiles' - self.command.dispatch(['up', '-d'], None) - self.command.dispatch(['ps'], None) + with mock.patch('sys.stdout', new_callable=StringIO) as mock_stdout: + self.command.dispatch(['up', '-d'], None) + self.command.dispatch(['ps'], None) output = mock_stdout.getvalue() self.assertIn('multiplecomposefiles_simple_1', output) self.assertIn('multiplecomposefiles_another_1', output) self.assertNotIn('multiplecomposefiles_yetanother_1', output) - @patch('sys.stdout', new_callable=StringIO) 
- def test_ps_alternate_composefile(self, mock_stdout): + def test_ps_alternate_composefile(self): config_path = os.path.abspath( 'tests/fixtures/multiple-composefiles/compose2.yml') - self._project = self.command.get_project(config_path) + self._project = get_project(self.command.base_dir, [config_path]) self.command.base_dir = 'tests/fixtures/multiple-composefiles' - self.command.dispatch(['-f', 'compose2.yml', 'up', '-d'], None) - self.command.dispatch(['-f', 'compose2.yml', 'ps'], None) + with mock.patch('sys.stdout', new_callable=StringIO) as mock_stdout: + self.command.dispatch(['-f', 'compose2.yml', 'up', '-d'], None) + self.command.dispatch(['-f', 'compose2.yml', 'ps'], None) output = mock_stdout.getvalue() self.assertNotIn('multiplecomposefiles_simple_1', output) self.assertNotIn('multiplecomposefiles_another_1', output) self.assertIn('multiplecomposefiles_yetanother_1', output) - @patch('compose.service.log') + @mock.patch('compose.service.log') def test_pull(self, mock_logging): self.command.dispatch(['pull'], None) mock_logging.info.assert_any_call('Pulling simple (busybox:latest)...') mock_logging.info.assert_any_call('Pulling another (busybox:latest)...') - @patch('sys.stdout', new_callable=StringIO) - def test_build_no_cache(self, mock_stdout): + @mock.patch('compose.service.log') + def test_pull_with_digest(self, mock_logging): + self.command.dispatch(['-f', 'digest.yml', 'pull'], None) + mock_logging.info.assert_any_call('Pulling simple (busybox:latest)...') + mock_logging.info.assert_any_call( + 'Pulling digest (busybox@' + 'sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d)...') + + @mock.patch('compose.service.log') + def test_pull_with_ignore_pull_failures(self, mock_logging): + self.command.dispatch(['-f', 'ignore-pull-failures.yml', 'pull', '--ignore-pull-failures'], None) + mock_logging.info.assert_any_call('Pulling simple (busybox:latest)...') + mock_logging.info.assert_any_call('Pulling another 
(nonexisting-image:latest)...') + mock_logging.error.assert_any_call('Error: image library/nonexisting-image:latest not found') + + def test_build_plain(self): self.command.base_dir = 'tests/fixtures/simple-dockerfile' self.command.dispatch(['build', 'simple'], None) - mock_stdout.truncate(0) cache_indicator = 'Using cache' - self.command.dispatch(['build', 'simple'], None) + pull_indicator = 'Status: Image is up to date for busybox:latest' + + with mock.patch('sys.stdout', new_callable=StringIO) as mock_stdout: + self.command.dispatch(['build', 'simple'], None) output = mock_stdout.getvalue() self.assertIn(cache_indicator, output) + self.assertNotIn(pull_indicator, output) - mock_stdout.truncate(0) - self.command.dispatch(['build', '--no-cache', 'simple'], None) + def test_build_no_cache(self): + self.command.base_dir = 'tests/fixtures/simple-dockerfile' + self.command.dispatch(['build', 'simple'], None) + + cache_indicator = 'Using cache' + pull_indicator = 'Status: Image is up to date for busybox:latest' + with mock.patch('sys.stdout', new_callable=StringIO) as mock_stdout: + self.command.dispatch(['build', '--no-cache', 'simple'], None) output = mock_stdout.getvalue() self.assertNotIn(cache_indicator, output) + self.assertNotIn(pull_indicator, output) + + def test_build_pull(self): + self.command.base_dir = 'tests/fixtures/simple-dockerfile' + self.command.dispatch(['build', 'simple'], None) + + cache_indicator = 'Using cache' + pull_indicator = 'Status: Image is up to date for busybox:latest' + with mock.patch('sys.stdout', new_callable=StringIO) as mock_stdout: + self.command.dispatch(['build', '--pull', 'simple'], None) + output = mock_stdout.getvalue() + self.assertIn(cache_indicator, output) + self.assertIn(pull_indicator, output) + + def test_build_no_cache_pull(self): + self.command.base_dir = 'tests/fixtures/simple-dockerfile' + self.command.dispatch(['build', 'simple'], None) + + cache_indicator = 'Using cache' + pull_indicator = 'Status: Image is up 
to date for busybox:latest' + with mock.patch('sys.stdout', new_callable=StringIO) as mock_stdout: + self.command.dispatch(['build', '--no-cache', '--pull', 'simple'], None) + output = mock_stdout.getvalue() + self.assertNotIn(cache_indicator, output) + self.assertIn(pull_indicator, output) def test_up_detached(self): self.command.dispatch(['up', '-d'], None) @@ -136,6 +186,51 @@ class CLITestCase(DockerClientTestCase): set(self.project.containers()) ) + def test_up_without_networking(self): + self.require_api_version('1.21') + + self.command.base_dir = 'tests/fixtures/links-composefile' + self.command.dispatch(['up', '-d'], None) + client = docker_client(version='1.21') + + networks = client.networks(names=[self.project.name]) + self.assertEqual(len(networks), 0) + + for service in self.project.get_services(): + containers = service.containers() + self.assertEqual(len(containers), 1) + self.assertNotEqual(containers[0].get('Config.Hostname'), service.name) + + web_container = self.project.get_service('web').containers()[0] + self.assertTrue(web_container.get('HostConfig.Links')) + + def test_up_with_networking(self): + self.require_api_version('1.21') + + self.command.base_dir = 'tests/fixtures/links-composefile' + self.command.dispatch(['--x-networking', 'up', '-d'], None) + client = docker_client(version='1.21') + + services = self.project.get_services() + + networks = client.networks(names=[self.project.name]) + for n in networks: + self.addCleanup(client.remove_network, n['Id']) + self.assertEqual(len(networks), 1) + self.assertEqual(networks[0]['Driver'], 'bridge') + + network = client.inspect_network(networks[0]['Id']) + self.assertEqual(len(network['Containers']), len(services)) + + for service in services: + containers = service.containers() + self.assertEqual(len(containers), 1) + self.assertIn(containers[0].id, network['Containers']) + self.assertEqual(containers[0].get('Config.Hostname'), service.name) + + web_container = 
self.project.get_service('web').containers()[0] + self.assertFalse(web_container.get('HostConfig.Links')) + def test_up_with_links(self): self.command.base_dir = 'tests/fixtures/links-composefile' self.command.dispatch(['up', '-d', 'web'], None) @@ -201,7 +296,7 @@ class CLITestCase(DockerClientTestCase): self.assertFalse(config['AttachStdout']) self.assertFalse(config['AttachStdin']) - @patch('dockerpty.start') + @mock.patch('dockerpty.start') def test_run_service_without_links(self, mock_stdout): self.command.base_dir = 'tests/fixtures/links-composefile' self.command.dispatch(['run', 'console', '/bin/true'], None) @@ -214,8 +309,8 @@ class CLITestCase(DockerClientTestCase): self.assertTrue(config['AttachStdout']) self.assertTrue(config['AttachStdin']) - @patch('dockerpty.start') - def test_run_service_with_links(self, __): + @mock.patch('dockerpty.start') + def test_run_service_with_links(self, _): self.command.base_dir = 'tests/fixtures/links-composefile' self.command.dispatch(['run', 'web', '/bin/true'], None) db = self.project.get_service('db') @@ -223,15 +318,15 @@ class CLITestCase(DockerClientTestCase): self.assertEqual(len(db.containers()), 1) self.assertEqual(len(console.containers()), 0) - @patch('dockerpty.start') - def test_run_with_no_deps(self, __): + @mock.patch('dockerpty.start') + def test_run_with_no_deps(self, _): self.command.base_dir = 'tests/fixtures/links-composefile' self.command.dispatch(['run', '--no-deps', 'web', '/bin/true'], None) db = self.project.get_service('db') self.assertEqual(len(db.containers()), 0) - @patch('dockerpty.start') - def test_run_does_not_recreate_linked_containers(self, __): + @mock.patch('dockerpty.start') + def test_run_does_not_recreate_linked_containers(self, _): self.command.base_dir = 'tests/fixtures/links-composefile' self.command.dispatch(['up', '-d', 'db'], None) db = self.project.get_service('db') @@ -246,7 +341,7 @@ class CLITestCase(DockerClientTestCase): self.assertEqual(old_ids, new_ids) - 
@patch('dockerpty.start') + @mock.patch('dockerpty.start') def test_run_without_command(self, _): self.command.base_dir = 'tests/fixtures/commands-composefile' self.check_build('tests/fixtures/simple-dockerfile', tag='composetest_test') @@ -267,7 +362,7 @@ class CLITestCase(DockerClientTestCase): [u'/bin/true'], ) - @patch('dockerpty.start') + @mock.patch('dockerpty.start') def test_run_service_with_entrypoint_overridden(self, _): self.command.base_dir = 'tests/fixtures/dockerfile_with_entrypoint' name = 'service' @@ -282,18 +377,18 @@ class CLITestCase(DockerClientTestCase): [u'/bin/echo', u'helloworld'], ) - @patch('dockerpty.start') + @mock.patch('dockerpty.start') def test_run_service_with_user_overridden(self, _): self.command.base_dir = 'tests/fixtures/user-composefile' name = 'service' user = 'sshd' - args = ['run', '--user={}'.format(user), name] + args = ['run', '--user={user}'.format(user=user), name] self.command.dispatch(args, None) service = self.project.get_service(name) container = service.containers(stopped=True, one_off=True)[0] self.assertEqual(user, container.get('Config.User')) - @patch('dockerpty.start') + @mock.patch('dockerpty.start') def test_run_service_with_user_overridden_short_form(self, _): self.command.base_dir = 'tests/fixtures/user-composefile' name = 'service' @@ -304,7 +399,7 @@ class CLITestCase(DockerClientTestCase): container = service.containers(stopped=True, one_off=True)[0] self.assertEqual(user, container.get('Config.User')) - @patch('dockerpty.start') + @mock.patch('dockerpty.start') def test_run_service_with_environement_overridden(self, _): name = 'service' self.command.base_dir = 'tests/fixtures/environment-composefile' @@ -324,8 +419,8 @@ class CLITestCase(DockerClientTestCase): # make sure a value with a = don't crash out self.assertEqual('moto=bobo', container.environment['allo']) - @patch('dockerpty.start') - def test_run_service_without_map_ports(self, __): + @mock.patch('dockerpty.start') + def 
test_run_service_without_map_ports(self, _): # create one off container self.command.base_dir = 'tests/fixtures/ports-composefile' self.command.dispatch(['run', '-d', 'simple'], None) @@ -342,8 +437,8 @@ class CLITestCase(DockerClientTestCase): self.assertEqual(port_random, None) self.assertEqual(port_assigned, None) - @patch('dockerpty.start') - def test_run_service_with_map_ports(self, __): + @mock.patch('dockerpty.start') + def test_run_service_with_map_ports(self, _): # create one off container self.command.base_dir = 'tests/fixtures/ports-composefile' @@ -353,6 +448,7 @@ class CLITestCase(DockerClientTestCase): # get port information port_random = container.get_local_port(3000) port_assigned = container.get_local_port(3001) + port_range = container.get_local_port(3002), container.get_local_port(3003) # close all one off containers we just created container.stop() @@ -361,6 +457,70 @@ class CLITestCase(DockerClientTestCase): self.assertNotEqual(port_random, None) self.assertIn("0.0.0.0", port_random) self.assertEqual(port_assigned, "0.0.0.0:49152") + self.assertEqual(port_range[0], "0.0.0.0:49153") + self.assertEqual(port_range[1], "0.0.0.0:49154") + + @mock.patch('dockerpty.start') + def test_run_service_with_explicitly_maped_ports(self, _): + + # create one off container + self.command.base_dir = 'tests/fixtures/ports-composefile' + self.command.dispatch(['run', '-d', '-p', '30000:3000', '--publish', '30001:3001', 'simple'], None) + container = self.project.get_service('simple').containers(one_off=True)[0] + + # get port information + port_short = container.get_local_port(3000) + port_full = container.get_local_port(3001) + + # close all one off containers we just created + container.stop() + + # check the ports + self.assertEqual(port_short, "0.0.0.0:30000") + self.assertEqual(port_full, "0.0.0.0:30001") + + @mock.patch('dockerpty.start') + def test_run_service_with_explicitly_maped_ip_ports(self, _): + + # create one off container + self.command.base_dir = 
'tests/fixtures/ports-composefile' + self.command.dispatch(['run', '-d', '-p', '127.0.0.1:30000:3000', '--publish', '127.0.0.1:30001:3001', 'simple'], None) + container = self.project.get_service('simple').containers(one_off=True)[0] + + # get port information + port_short = container.get_local_port(3000) + port_full = container.get_local_port(3001) + + # close all one off containers we just created + container.stop() + + # check the ports + self.assertEqual(port_short, "127.0.0.1:30000") + self.assertEqual(port_full, "127.0.0.1:30001") + + @mock.patch('dockerpty.start') + def test_run_with_custom_name(self, _): + self.command.base_dir = 'tests/fixtures/environment-composefile' + name = 'the-container-name' + self.command.dispatch(['run', '--name', name, 'service'], None) + + service = self.project.get_service('service') + container, = service.containers(stopped=True, one_off=True) + self.assertEqual(container.name, name) + + @mock.patch('dockerpty.start') + def test_run_with_networking(self, _): + self.require_api_version('1.21') + client = docker_client(version='1.21') + self.command.base_dir = 'tests/fixtures/simple-dockerfile' + self.command.dispatch(['--x-networking', 'run', 'simple', 'true'], None) + service = self.project.get_service('simple') + container, = service.containers(stopped=True, one_off=True) + networks = client.networks(names=[self.project.name]) + for n in networks: + self.addCleanup(client.remove_network, n['Id']) + self.assertEqual(len(networks), 1) + self.assertEqual(container.human_readable_command, u'true') def test_rm(self): service = self.project.get_service('simple') @@ -387,6 +547,17 @@ class CLITestCase(DockerClientTestCase): self.assertEqual(len(service.containers(stopped=True)), 1) self.assertFalse(service.containers(stopped=True)[0].is_running) + def test_pause_unpause(self): + self.command.dispatch(['up', '-d'], None) + service = self.project.get_service('simple') + self.assertFalse(service.containers()[0].is_paused) + + 
self.command.dispatch(['pause'], None) + self.assertTrue(service.containers()[0].is_paused) + + self.command.dispatch(['unpause'], None) + self.assertFalse(service.containers()[0].is_paused) + def test_logs_invalid_service_name(self): with self.assertRaises(NoSuchService): self.command.dispatch(['logs', 'madeupname'], None) @@ -468,24 +639,23 @@ class CLITestCase(DockerClientTestCase): self.command.dispatch(['up', '-d'], None) container = self.project.get_service('simple').get_container() - @patch('sys.stdout', new_callable=StringIO) + @mock.patch('sys.stdout', new_callable=StringIO) def get_port(number, mock_stdout): self.command.dispatch(['port', 'simple', str(number)], None) return mock_stdout.getvalue().rstrip() self.assertEqual(get_port(3000), container.get_local_port(3000)) self.assertEqual(get_port(3001), "0.0.0.0:49152") - self.assertEqual(get_port(3002), "") + self.assertEqual(get_port(3002), "0.0.0.0:49153") def test_port_with_scale(self): - self.command.base_dir = 'tests/fixtures/ports-composefile-scale' self.command.dispatch(['scale', 'simple=2'], None) containers = sorted( self.project.containers(service_names=['simple']), key=attrgetter('name')) - @patch('sys.stdout', new_callable=StringIO) + @mock.patch('sys.stdout', new_callable=StringIO) def get_port(number, mock_stdout, index=None): if index is None: self.command.dispatch(['port', 'simple', str(number)], None) @@ -501,12 +671,65 @@ class CLITestCase(DockerClientTestCase): def test_env_file_relative_to_compose_file(self): config_path = os.path.abspath('tests/fixtures/env-file/docker-compose.yml') self.command.dispatch(['-f', config_path, 'up', '-d'], None) - self._project = self.command.get_project(config_path) + self._project = get_project(self.command.base_dir, [config_path]) containers = self.project.containers(stopped=True) self.assertEqual(len(containers), 1) self.assertIn("FOO=1", containers[0].get('Config.Env')) + @mock.patch.dict(os.environ) + def test_home_and_env_var_in_volume_path(self): 
+ os.environ['VOLUME_NAME'] = 'my-volume' + os.environ['HOME'] = '/tmp/home-dir' + expected_host_path = os.path.join(os.environ['HOME'], os.environ['VOLUME_NAME']) + + self.command.base_dir = 'tests/fixtures/volume-path-interpolation' + self.command.dispatch(['up', '-d'], None) + + container = self.project.containers(stopped=True)[0] + actual_host_path = container.get('Volumes')['/container-path'] + components = actual_host_path.split('/') + self.assertTrue(components[-2:] == ['home-dir', 'my-volume'], + msg="Last two components differ: %s, %s" % (actual_host_path, expected_host_path)) + + def test_up_with_default_override_file(self): + self.command.base_dir = 'tests/fixtures/override-files' + self.command.dispatch(['up', '-d'], None) + + containers = self.project.containers() + self.assertEqual(len(containers), 2) + + web, db = containers + self.assertEqual(web.human_readable_command, 'top') + self.assertEqual(db.human_readable_command, 'top') + + def test_up_with_multiple_files(self): + self.command.base_dir = 'tests/fixtures/override-files' + config_paths = [ + 'docker-compose.yml', + 'docker-compose.override.yml', + 'extra.yml', + + ] + self._project = get_project(self.command.base_dir, config_paths) + self.command.dispatch( + [ + '-f', config_paths[0], + '-f', config_paths[1], + '-f', config_paths[2], + 'up', '-d', + ], + None) + + containers = self.project.containers() + self.assertEqual(len(containers), 3) + + web, other, db = containers + self.assertEqual(web.human_readable_command, 'top') + self.assertTrue({'db', 'other'} <= set(web.links())) + self.assertEqual(db.human_readable_command, 'top') + self.assertEqual(other.human_readable_command, 'top') + def test_up_with_extends(self): self.command.base_dir = 'tests/fixtures/extends' self.command.dispatch(['up', '-d'], None) diff --git a/tests/integration/legacy_test.py b/tests/integration/legacy_test.py index 9913bbb0fe..3465d57f49 100644 --- a/tests/integration/legacy_test.py +++ 
b/tests/integration/legacy_test.py @@ -1,11 +1,11 @@ import unittest -from mock import Mock from docker.errors import APIError +from .. import mock +from .testcases import DockerClientTestCase from compose import legacy from compose.project import Project -from .testcases import DockerClientTestCase class UtilitiesTestCase(unittest.TestCase): @@ -66,7 +66,7 @@ class UtilitiesTestCase(unittest.TestCase): ) def test_get_legacy_containers(self): - client = Mock() + client = mock.Mock() client.containers.return_value = [ { "Id": "abc123", diff --git a/tests/integration/project_test.py b/tests/integration/project_test.py index a0fbe3e1f7..950523878e 100644 --- a/tests/integration/project_test.py +++ b/tests/integration/project_test.py @@ -1,14 +1,20 @@ from __future__ import unicode_literals -from compose import config -from compose.const import LABEL_PROJECT -from compose.project import Project -from compose.container import Container from .testcases import DockerClientTestCase +from compose.cli.docker_client import docker_client +from compose.config import config +from compose.const import LABEL_PROJECT +from compose.container import Container +from compose.project import Project +from compose.service import ConvergenceStrategy +from compose.service import VolumeFromSpec def build_service_dicts(service_config): - return config.load(config.ConfigDetails(service_config, 'working_dir', None)) + return config.load( + config.ConfigDetails( + 'working_dir', + [config.ConfigFile(None, service_config)])) class ProjectTest(DockerClientTestCase): @@ -68,7 +74,7 @@ class ProjectTest(DockerClientTestCase): ) db = project.get_service('db') data = project.get_service('data') - self.assertEqual(db.volumes_from, [data]) + self.assertEqual(db.volumes_from, [VolumeFromSpec(data, 'rw')]) def test_volumes_from_container(self): data_container = Container.create( @@ -89,7 +95,23 @@ class ProjectTest(DockerClientTestCase): client=self.client, ) db = project.get_service('db') - 
self.assertEqual(db.volumes_from, [data_container]) + self.assertEqual(db._get_volumes_from(), [data_container.id + ':rw']) + + def test_get_network_does_not_exist(self): + self.require_api_version('1.21') + client = docker_client(version='1.21') + + project = Project('composetest', [], client) + assert project.get_network() is None + + def test_get_network(self): + self.require_api_version('1.21') + client = docker_client(version='1.21') + + network_name = 'network_does_exist' + project = Project(network_name, [], client) + client.create_network(network_name) + assert project.get_network()['Name'] == network_name def test_net_from_service(self): project = Project.from_dicts( @@ -140,7 +162,7 @@ class ProjectTest(DockerClientTestCase): web = project.get_service('web') self.assertEqual(web.net.mode, 'container:' + net_container.id) - def test_start_stop_kill_remove(self): + def test_start_pause_unpause_stop_kill_remove(self): web = self.create_service('web') db = self.create_service('db') project = Project('composetest', [web, db], self.client) @@ -158,7 +180,22 @@ class ProjectTest(DockerClientTestCase): self.assertEqual(set(c.name for c in project.containers()), set([web_container_1.name, web_container_2.name])) project.start() - self.assertEqual(set(c.name for c in project.containers()), set([web_container_1.name, web_container_2.name, db_container.name])) + self.assertEqual(set(c.name for c in project.containers()), + set([web_container_1.name, web_container_2.name, db_container.name])) + + project.pause(service_names=['web']) + self.assertEqual(set([c.name for c in project.containers() if c.is_paused]), + set([web_container_1.name, web_container_2.name])) + + project.pause() + self.assertEqual(set([c.name for c in project.containers() if c.is_paused]), + set([web_container_1.name, web_container_2.name, db_container.name])) + + project.unpause(service_names=['db']) + self.assertEqual(len([c.name for c in project.containers() if c.is_paused]), 2) + + 
project.unpause() + self.assertEqual(len([c.name for c in project.containers() if c.is_paused]), 0) project.stop(service_names=['web'], timeout=1) self.assertEqual(set(c.name for c in project.containers()), set([db_container.name])) @@ -209,7 +246,7 @@ class ProjectTest(DockerClientTestCase): old_db_id = project.containers()[0].id db_volume_path = project.containers()[0].get('Volumes./etc') - project.up(force_recreate=True) + project.up(strategy=ConvergenceStrategy.always) self.assertEqual(len(project.containers()), 2) db_container = [c for c in project.containers() if 'db' in c.name][0] @@ -228,7 +265,7 @@ class ProjectTest(DockerClientTestCase): old_db_id = project.containers()[0].id db_volume_path = project.containers()[0].inspect()['Volumes']['/var/db'] - project.up(allow_recreate=False) + project.up(strategy=ConvergenceStrategy.never) self.assertEqual(len(project.containers()), 2) db_container = [c for c in project.containers() if 'db' in c.name][0] @@ -252,7 +289,7 @@ class ProjectTest(DockerClientTestCase): old_db_id = old_containers[0].id db_volume_path = old_containers[0].inspect()['Volumes']['/var/db'] - project.up(allow_recreate=False) + project.up(strategy=ConvergenceStrategy.never) new_containers = project.containers(stopped=True) self.assertEqual(len(new_containers), 2) diff --git a/tests/integration/resilience_test.py b/tests/integration/resilience_test.py index e0c76f299d..befd72c7f8 100644 --- a/tests/integration/resilience_test.py +++ b/tests/integration/resilience_test.py @@ -1,10 +1,10 @@ -from __future__ import unicode_literals from __future__ import absolute_import +from __future__ import unicode_literals -import mock - -from compose.project import Project +from .. 
import mock from .testcases import DockerClientTestCase +from compose.project import Project +from compose.service import ConvergenceStrategy class ResilienceTest(DockerClientTestCase): @@ -17,14 +17,14 @@ class ResilienceTest(DockerClientTestCase): self.host_path = container.get('Volumes')['/var/db'] def test_successful_recreate(self): - self.project.up(force_recreate=True) + self.project.up(strategy=ConvergenceStrategy.always) container = self.db.containers()[0] self.assertEqual(container.get('Volumes')['/var/db'], self.host_path) def test_create_failure(self): with mock.patch('compose.service.Service.create_container', crash): with self.assertRaises(Crash): - self.project.up(force_recreate=True) + self.project.up(strategy=ConvergenceStrategy.always) self.project.up() container = self.db.containers()[0] @@ -33,7 +33,7 @@ class ResilienceTest(DockerClientTestCase): def test_start_failure(self): with mock.patch('compose.service.Service.start_container', crash): with self.assertRaises(Crash): - self.project.up(force_recreate=True) + self.project.up(strategy=ConvergenceStrategy.always) self.project.up() container = self.db.containers()[0] diff --git a/tests/integration/service_test.py b/tests/integration/service_test.py index aec2caf1d2..4ac04545e1 100644 --- a/tests/integration/service_test.py +++ b/tests/integration/service_test.py @@ -1,30 +1,31 @@ -from __future__ import unicode_literals from __future__ import absolute_import +from __future__ import unicode_literals + import os +import shutil +import tempfile from os import path from docker.errors import APIError -from mock import patch -import tempfile -import shutil -from six import StringIO, text_type +from six import StringIO +from six import text_type +from .. 
import mock from .testcases import DockerClientTestCase from .testcases import pull_busybox from compose import __version__ -from compose.const import ( - LABEL_CONTAINER_NUMBER, - LABEL_ONE_OFF, - LABEL_PROJECT, - LABEL_SERVICE, - LABEL_VERSION, -) +from compose.const import LABEL_CONTAINER_NUMBER +from compose.const import LABEL_ONE_OFF +from compose.const import LABEL_PROJECT +from compose.const import LABEL_SERVICE +from compose.const import LABEL_VERSION from compose.container import Container from compose.service import build_extra_hosts from compose.service import ConfigError from compose.service import ConvergencePlan from compose.service import Net from compose.service import Service +from compose.service import VolumeFromSpec def create_and_start_container(service, **override_options): @@ -167,16 +168,6 @@ class ServiceTest(DockerClientTestCase): service.start_container(container) self.assertEqual(set(container.get('HostConfig.ExtraHosts')), set(extra_hosts)) - def test_create_container_with_extra_hosts_string(self): - extra_hosts = 'somehost:162.242.195.82' - service = self.create_service('db', extra_hosts=extra_hosts) - self.assertRaises(ConfigError, lambda: service.create_container()) - - def test_create_container_with_extra_hosts_list_of_dicts(self): - extra_hosts = [{'somehost': '162.242.195.82'}, {'otherhost': '50.31.209.229'}] - service = self.create_service('db', extra_hosts=extra_hosts) - self.assertRaises(ConfigError, lambda: service.create_container()) - def test_create_container_with_extra_hosts_dicts(self): extra_hosts = {'somehost': '162.242.195.82', 'otherhost': '50.31.209.229'} extra_hosts_list = ['somehost:162.242.195.82', 'otherhost:50.31.209.229'] @@ -273,24 +264,6 @@ class ServiceTest(DockerClientTestCase): self.assertEqual(service.containers(stopped=False), [new_container]) - @patch.dict(os.environ) - def test_create_container_with_home_and_env_var_in_volume_path(self): - os.environ['VOLUME_NAME'] = 'my-volume' - os.environ['HOME'] = 
'/tmp/home-dir' - expected_host_path = os.path.join(os.environ['HOME'], os.environ['VOLUME_NAME']) - - host_path = '~/${VOLUME_NAME}' - container_path = '/container-path' - - service = self.create_service('db', volumes=['%s:%s' % (host_path, container_path)]) - container = service.create_container() - service.start_container(container) - - actual_host_path = container.get('Volumes')[container_path] - components = actual_host_path.split('/') - self.assertTrue(components[-2:] == ['home-dir', 'my-volume'], - msg="Last two components differ: %s, %s" % (actual_host_path, expected_host_path)) - def test_create_container_with_volumes_from(self): volume_service = self.create_service('data') volume_container_1 = volume_service.create_container() @@ -300,12 +273,18 @@ class ServiceTest(DockerClientTestCase): command=["top"], labels={LABEL_PROJECT: 'composetest'}, ) - host_service = self.create_service('host', volumes_from=[volume_service, volume_container_2]) + host_service = self.create_service( + 'host', + volumes_from=[ + VolumeFromSpec(volume_service, 'rw'), + VolumeFromSpec(volume_container_2, 'rw') + ] + ) host_container = host_service.create_container() host_service.start_container(host_container) - self.assertIn(volume_container_1.id, + self.assertIn(volume_container_1.id + ':rw', host_container.get('HostConfig.VolumesFrom')) - self.assertIn(volume_container_2.id, + self.assertIn(volume_container_2.id + ':rw', host_container.get('HostConfig.VolumesFrom')) def test_execute_convergence_plan_recreate(self): @@ -378,12 +357,13 @@ class ServiceTest(DockerClientTestCase): ) old_container = create_and_start_container(service) - self.assertEqual(old_container.get('Volumes').keys(), ['/data']) + self.assertEqual(list(old_container.get('Volumes').keys()), ['/data']) volume_path = old_container.get('Volumes')['/data'] new_container, = service.execute_convergence_plan( ConvergencePlan('recreate', [old_container])) - self.assertEqual(new_container.get('Volumes').keys(), 
['/data']) + + self.assertEqual(list(new_container.get('Volumes')), ['/data']) self.assertEqual(new_container.get('Volumes')['/data'], volume_path) def test_start_container_passes_through_options(self): @@ -480,7 +460,7 @@ class ServiceTest(DockerClientTestCase): ) container = create_and_start_container(service) container.wait() - self.assertIn('success', container.logs()) + self.assertIn(b'success', container.logs()) self.assertEqual(len(self.client.images(name='composetest_test')), 1) def test_start_container_uses_tagged_image_if_it_exists(self): @@ -493,7 +473,7 @@ class ServiceTest(DockerClientTestCase): ) container = create_and_start_container(service) container.wait() - self.assertIn('success', container.logs()) + self.assertIn(b'success', container.logs()) def test_start_container_creates_ports(self): service = self.create_service('web', ports=[8000]) @@ -518,7 +498,7 @@ class ServiceTest(DockerClientTestCase): with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f: f.write("FROM busybox\n") - with open(os.path.join(base_dir, b'foo\xE2bar'), 'w') as f: + with open(os.path.join(base_dir.encode('utf8'), b'foo\xE2bar'), 'w') as f: f.write("hello world\n") self.create_service('web', build=text_type(base_dir)).build() @@ -535,7 +515,7 @@ class ServiceTest(DockerClientTestCase): self.assertEqual(container['HostConfig']['Privileged'], True) def test_expose_does_not_publish_ports(self): - service = self.create_service('web', expose=[8000]) + service = self.create_service('web', expose=["8000"]) container = create_and_start_container(service).inspect() self.assertEqual(container['NetworkSettings']['Ports'], {'8000/tcp': None}) @@ -603,8 +583,7 @@ class ServiceTest(DockerClientTestCase): service.scale(0) self.assertEqual(len(service.containers()), 0) - @patch('sys.stdout', new_callable=StringIO) - def test_scale_with_stopped_containers(self, mock_stdout): + def test_scale_with_stopped_containers(self): """ Given there are some stopped containers and scale is called 
with a desired number that is the same as the number of stopped containers, @@ -613,15 +592,11 @@ class ServiceTest(DockerClientTestCase): service = self.create_service('web') next_number = service._next_container_number() valid_numbers = [next_number, next_number + 1] - service.create_container(number=next_number, quiet=True) - service.create_container(number=next_number + 1, quiet=True) + service.create_container(number=next_number) + service.create_container(number=next_number + 1) - for container in service.containers(): - self.assertFalse(container.is_running) - - service.scale(2) - - self.assertEqual(len(service.containers()), 2) + with mock.patch('sys.stdout', new_callable=StringIO) as mock_stdout: + service.scale(2) for container in service.containers(): self.assertTrue(container.is_running) self.assertTrue(container.number in valid_numbers) @@ -630,8 +605,7 @@ class ServiceTest(DockerClientTestCase): self.assertNotIn('Creating', captured_output) self.assertIn('Starting', captured_output) - @patch('sys.stdout', new_callable=StringIO) - def test_scale_with_stopped_containers_and_needing_creation(self, mock_stdout): + def test_scale_with_stopped_containers_and_needing_creation(self): """ Given there are some stopped containers and scale is called with a desired number that is greater than the number of stopped containers, @@ -644,7 +618,8 @@ class ServiceTest(DockerClientTestCase): for container in service.containers(): self.assertFalse(container.is_running) - service.scale(2) + with mock.patch('sys.stdout', new_callable=StringIO) as mock_stdout: + service.scale(2) self.assertEqual(len(service.containers()), 2) for container in service.containers(): @@ -654,8 +629,7 @@ class ServiceTest(DockerClientTestCase): self.assertIn('Creating', captured_output) self.assertIn('Starting', captured_output) - @patch('sys.stdout', new_callable=StringIO) - def test_scale_with_api_returns_errors(self, mock_stdout): + def test_scale_with_api_returns_errors(self): """ Test that 
when scaling if the API returns an error, that error is handled and the remaining threads continue. @@ -664,18 +638,18 @@ class ServiceTest(DockerClientTestCase): next_number = service._next_container_number() service.create_container(number=next_number, quiet=True) - with patch( + with mock.patch( 'compose.container.Container.create', side_effect=APIError(message="testing", response={}, explanation="Boom")): - service.scale(3) + with mock.patch('sys.stdout', new_callable=StringIO) as mock_stdout: + service.scale(3) self.assertEqual(len(service.containers()), 1) self.assertTrue(service.containers()[0].is_running) self.assertIn("ERROR: for 2 Boom", mock_stdout.getvalue()) - @patch('sys.stdout', new_callable=StringIO) - def test_scale_with_api_returns_unexpected_exception(self, mock_stdout): + def test_scale_with_api_returns_unexpected_exception(self): """ Test that when scaling if the API returns an error, that is not of type APIError, that error is re-raised. @@ -684,16 +658,17 @@ class ServiceTest(DockerClientTestCase): next_number = service._next_container_number() service.create_container(number=next_number, quiet=True) - with patch( + with mock.patch( 'compose.container.Container.create', - side_effect=ValueError("BOOM")): + side_effect=ValueError("BOOM") + ): with self.assertRaises(ValueError): service.scale(3) self.assertEqual(len(service.containers()), 1) self.assertTrue(service.containers()[0].is_running) - @patch('compose.service.log') + @mock.patch('compose.service.log') def test_scale_with_desired_number_already_achieved(self, mock_log): """ Test that calling scale with a desired number that is equal to the @@ -716,14 +691,14 @@ class ServiceTest(DockerClientTestCase): captured_output = mock_log.info.call_args[0] self.assertIn('Desired container number already achieved', captured_output) - @patch('compose.service.log') + @mock.patch('compose.service.log') def test_scale_with_custom_container_name_outputs_warning(self, mock_log): - """ - Test that calling 
scale on a service that has a custom container name + """Test that calling scale on a service that has a custom container name results in warning output. """ - service = self.create_service('web', container_name='custom-container') - + # Disable this test against earlier versions because it is flaky + self.require_api_version('1.21') + service = self.create_service('app', container_name='custom-container') self.assertEqual(service.custom_container_name(), 'custom-container') service.scale(3) @@ -832,12 +807,15 @@ class ServiceTest(DockerClientTestCase): self.assertEqual(env[k], v) def test_env_from_file_combined_with_env(self): - service = self.create_service('web', environment=['ONE=1', 'TWO=2', 'THREE=3'], env_file=['tests/fixtures/env/one.env', 'tests/fixtures/env/two.env']) + service = self.create_service( + 'web', + environment=['ONE=1', 'TWO=2', 'THREE=3'], + env_file=['tests/fixtures/env/one.env', 'tests/fixtures/env/two.env']) env = create_and_start_container(service).environment for k, v in {'ONE': '1', 'TWO': '2', 'THREE': '3', 'FOO': 'baz', 'DOO': 'dah'}.items(): self.assertEqual(env[k], v) - @patch.dict(os.environ) + @mock.patch.dict(os.environ) def test_resolve_env(self): os.environ['FILE_DEF'] = 'E1' os.environ['FILE_DEF_EMPTY'] = 'E2' @@ -847,6 +825,13 @@ class ServiceTest(DockerClientTestCase): for k, v in {'FILE_DEF': 'F1', 'FILE_DEF_EMPTY': '', 'ENV_DEF': 'E3', 'NO_DEF': ''}.items(): self.assertEqual(env[k], v) + def test_with_high_enough_api_version_we_get_default_network_mode(self): + # TODO: remove this test once minimum docker version is 1.8.x + with mock.patch.object(self.client, '_version', '1.20'): + service = self.create_service('web') + service_config = service._get_container_host_config({}) + self.assertEquals(service_config['NetworkMode'], 'default') + def test_labels(self): labels_dict = { 'com.example.description': "Accounting webapp", @@ -898,7 +883,10 @@ class ServiceTest(DockerClientTestCase): def test_log_drive_invalid(self): 
service = self.create_service('web', log_driver='xxx') - self.assertRaises(ValueError, lambda: create_and_start_container(service)) + expected_error_msg = "logger: no log driver named 'xxx' is registered" + + with self.assertRaisesRegexp(APIError, expected_error_msg): + create_and_start_container(service) def test_log_drive_empty_default_jsonfile(self): service = self.create_service('web') diff --git a/tests/integration/state_test.py b/tests/integration/state_test.py index b254376171..3230aefc61 100644 --- a/tests/integration/state_test.py +++ b/tests/integration/state_test.py @@ -3,31 +3,35 @@ Integration tests which cover state convergence (aka smart recreate) performed by `docker-compose up`. """ from __future__ import unicode_literals -import tempfile -import shutil -import os -from compose import config -from compose.project import Project -from compose.const import LABEL_CONFIG_HASH +import os +import shutil +import tempfile from .testcases import DockerClientTestCase +from compose.config import config +from compose.const import LABEL_CONFIG_HASH +from compose.project import Project +from compose.service import ConvergenceStrategy class ProjectTestCase(DockerClientTestCase): def run_up(self, cfg, **kwargs): kwargs.setdefault('timeout', 1) + kwargs.setdefault('detached', True) project = self.make_project(cfg) project.up(**kwargs) return set(project.containers(stopped=True)) def make_project(self, cfg): + details = config.ConfigDetails( + 'working_dir', + [config.ConfigFile(None, cfg)]) return Project.from_dicts( name='composetest', client=self.client, - service_dicts=config.load(config.ConfigDetails(cfg, 'working_dir', None)) - ) + service_dicts=config.load(details)) class BasicProjectTest(ProjectTestCase): @@ -151,7 +155,9 @@ class ProjectWithDependenciesTest(ProjectTestCase): old_containers = self.run_up(self.cfg) self.cfg['db']['environment'] = {'NEW_VAR': '1'} - new_containers = self.run_up(self.cfg, allow_recreate=False) + new_containers = self.run_up( + 
self.cfg, + strategy=ConvergenceStrategy.never) self.assertEqual(new_containers - old_containers, set()) @@ -175,23 +181,11 @@ class ProjectWithDependenciesTest(ProjectTestCase): def converge(service, - allow_recreate=True, - force_recreate=False, + strategy=ConvergenceStrategy.changed, do_build=True): - """ - If a container for this service doesn't exist, create and start one. If there are - any, stop them, create+start new ones, and remove the old containers. - """ - plan = service.convergence_plan( - allow_recreate=allow_recreate, - force_recreate=force_recreate, - ) - - return service.execute_convergence_plan( - plan, - do_build=do_build, - timeout=1, - ) + """Create a converge plan from a strategy and execute the plan.""" + plan = service.convergence_plan(strategy) + return service.execute_convergence_plan(plan, do_build=do_build, timeout=1) class ServiceStateTest(DockerClientTestCase): @@ -221,7 +215,6 @@ class ServiceStateTest(DockerClientTestCase): self.assertEqual([c.is_running for c in containers], [False, True]) - web = self.create_service('web', **options) self.assertEqual( ('start', containers[0:1]), web.convergence_plan(), diff --git a/tests/integration/testcases.py b/tests/integration/testcases.py index 41b50a815e..686a2b69a4 100644 --- a/tests/integration/testcases.py +++ b/tests/integration/testcases.py @@ -1,14 +1,16 @@ -from __future__ import unicode_literals from __future__ import absolute_import +from __future__ import unicode_literals from docker import errors +from docker.utils import version_lt +from pytest import skip -from compose.service import Service -from compose.config import ServiceLoader -from compose.const import LABEL_PROJECT -from compose.cli.docker_client import docker_client -from compose.progress_stream import stream_output from .. 
import unittest +from compose.cli.docker_client import docker_client +from compose.config.config import ServiceLoader +from compose.const import LABEL_PROJECT +from compose.progress_stream import stream_output +from compose.service import Service def pull_busybox(client): @@ -40,7 +42,28 @@ class DockerClientTestCase(unittest.TestCase): if 'command' not in kwargs: kwargs['command'] = ["top"] - options = ServiceLoader(working_dir='.').make_service_dict(name, kwargs) + links = kwargs.get('links', None) + volumes_from = kwargs.get('volumes_from', None) + net = kwargs.get('net', None) + + workaround_options = ['links', 'volumes_from', 'net'] + for key in workaround_options: + try: + del kwargs[key] + except KeyError: + pass + + options = ServiceLoader(working_dir='.', filename=None, service_name=name, service_dict=kwargs).make_service_dict() + + labels = options.setdefault('labels', {}) + labels['com.docker.compose.test-name'] = self.id() + + if links: + options['links'] = links + if volumes_from: + options['volumes_from'] = volumes_from + if net: + options['net'] = net return Service( project='composetest', @@ -52,3 +75,8 @@ class DockerClientTestCase(unittest.TestCase): kwargs.setdefault('rm', True) build_output = self.client.build(*args, **kwargs) stream_output(build_output, open('/dev/null', 'w')) + + def require_api_version(self, minimum): + api_version = self.client.version()['ApiVersion'] + if version_lt(api_version, minimum): + skip("API version is too low ({} < {})".format(api_version, minimum)) diff --git a/tests/unit/cli/command_test.py b/tests/unit/cli/command_test.py new file mode 100644 index 0000000000..0d4324e355 --- /dev/null +++ b/tests/unit/cli/command_test.py @@ -0,0 +1,22 @@ +from __future__ import absolute_import + +import pytest +from requests.exceptions import ConnectionError + +from compose.cli import errors +from compose.cli.command import friendly_error_message +from tests import mock +from tests import unittest + + +class 
FriendlyErrorMessageTestCase(unittest.TestCase): + + def test_dispatch_generic_connection_error(self): + with pytest.raises(errors.ConnectionErrorGeneric): + with mock.patch( + 'compose.cli.command.call_silently', + autospec=True, + side_effect=[0, 1] + ): + with friendly_error_message(): + raise ConnectionError() diff --git a/tests/unit/cli/docker_client_test.py b/tests/unit/cli/docker_client_test.py index 44bdbb291e..d497495b40 100644 --- a/tests/unit/cli/docker_client_test.py +++ b/tests/unit/cli/docker_client_test.py @@ -1,11 +1,11 @@ -from __future__ import unicode_literals from __future__ import absolute_import +from __future__ import unicode_literals + import os -import mock -from tests import unittest - from compose.cli import docker_client +from tests import mock +from tests import unittest class DockerClientTestCase(unittest.TestCase): @@ -16,7 +16,7 @@ class DockerClientTestCase(unittest.TestCase): docker_client.docker_client() def test_docker_client_with_custom_timeout(self): - with mock.patch.dict(os.environ): - os.environ['DOCKER_CLIENT_TIMEOUT'] = timeout = "300" + timeout = 300 + with mock.patch('compose.cli.docker_client.HTTP_TIMEOUT', 300): client = docker_client.docker_client() - self.assertEqual(client.timeout, int(timeout)) + self.assertEqual(client.timeout, int(timeout)) diff --git a/tests/unit/cli/formatter_test.py b/tests/unit/cli/formatter_test.py new file mode 100644 index 0000000000..1c3b6a68ef --- /dev/null +++ b/tests/unit/cli/formatter_test.py @@ -0,0 +1,35 @@ +from __future__ import absolute_import +from __future__ import unicode_literals + +import logging + +from compose.cli import colors +from compose.cli.formatter import ConsoleWarningFormatter +from tests import unittest + + +MESSAGE = 'this is the message' + + +def makeLogRecord(level): + return logging.LogRecord('name', level, 'pathame', 0, MESSAGE, (), None) + + +class ConsoleWarningFormatterTestCase(unittest.TestCase): + + def setUp(self): + self.formatter = 
ConsoleWarningFormatter() + + def test_format_warn(self): + output = self.formatter.format(makeLogRecord(logging.WARN)) + expected = colors.yellow('WARNING') + ': ' + assert output == expected + MESSAGE + + def test_format_error(self): + output = self.formatter.format(makeLogRecord(logging.ERROR)) + expected = colors.red('ERROR') + ': ' + assert output == expected + MESSAGE + + def test_format_info(self): + output = self.formatter.format(makeLogRecord(logging.INFO)) + assert output == MESSAGE diff --git a/tests/unit/cli/log_printer_test.py b/tests/unit/cli/log_printer_test.py new file mode 100644 index 0000000000..575fcaf7b5 --- /dev/null +++ b/tests/unit/cli/log_printer_test.py @@ -0,0 +1,88 @@ +from __future__ import absolute_import +from __future__ import unicode_literals + +import mock +import six + +from compose.cli.log_printer import LogPrinter +from compose.cli.log_printer import wait_on_exit +from compose.container import Container +from tests import unittest + + +def build_mock_container(reader): + return mock.Mock( + spec=Container, + name='myapp_web_1', + name_without_project='web_1', + has_api_logs=True, + log_stream=None, + attach=reader, + wait=mock.Mock(return_value=0), + ) + + +class LogPrinterTest(unittest.TestCase): + def get_default_output(self, monochrome=False): + def reader(*args, **kwargs): + yield b"hello\nworld" + container = build_mock_container(reader) + output = run_log_printer([container], monochrome=monochrome) + return output + + def test_single_container(self): + output = self.get_default_output() + + self.assertIn('hello', output) + self.assertIn('world', output) + + def test_monochrome(self): + output = self.get_default_output(monochrome=True) + self.assertNotIn('\033[', output) + + def test_polychrome(self): + output = self.get_default_output() + self.assertIn('\033[', output) + + def test_unicode(self): + glyph = u'\u2022' + + def reader(*args, **kwargs): + yield glyph.encode('utf-8') + b'\n' + + container = 
build_mock_container(reader) + output = run_log_printer([container]) + if six.PY2: + output = output.decode('utf-8') + + self.assertIn(glyph, output) + + def test_wait_on_exit(self): + exit_status = 3 + mock_container = mock.Mock( + spec=Container, + name='cname', + wait=mock.Mock(return_value=exit_status)) + + expected = '{} exited with code {}\n'.format(mock_container.name, exit_status) + self.assertEqual(expected, wait_on_exit(mock_container)) + + def test_generator_with_no_logs(self): + mock_container = mock.Mock( + spec=Container, + has_api_logs=False, + log_driver='none', + name_without_project='web_1', + wait=mock.Mock(return_value=0)) + + output = run_log_printer([mock_container]) + self.assertIn( + "WARNING: no logs are available with the 'none' log driver\n", + output + ) + + +def run_log_printer(containers, monochrome=False): + output = six.StringIO() + LogPrinter(containers, output=output, monochrome=monochrome).run() + return output.getvalue() diff --git a/tests/unit/cli/main_test.py b/tests/unit/cli/main_test.py index e3a4629e53..ee837fcd45 100644 --- a/tests/unit/cli/main_test.py +++ b/tests/unit/cli/main_test.py @@ -1,10 +1,17 @@ from __future__ import absolute_import +import logging + from compose import container +from compose.cli.errors import UserError +from compose.cli.formatter import ConsoleWarningFormatter from compose.cli.log_printer import LogPrinter from compose.cli.main import attach_to_logs from compose.cli.main import build_log_printer +from compose.cli.main import convergence_strategy_from_opts +from compose.cli.main import setup_console_handler from compose.project import Project +from compose.service import ConvergenceStrategy from tests import mock from tests import unittest @@ -55,3 +62,57 @@ class CLIMainTestCase(unittest.TestCase): project.stop.assert_called_once_with( service_names=service_names, timeout=timeout) + + +class SetupConsoleHandlerTestCase(unittest.TestCase): + + def setUp(self): + self.stream = mock.Mock() + 
self.stream.isatty.return_value = True + self.handler = logging.StreamHandler(stream=self.stream) + + def test_with_tty_verbose(self): + setup_console_handler(self.handler, True) + assert type(self.handler.formatter) == ConsoleWarningFormatter + assert '%(name)s' in self.handler.formatter._fmt + assert '%(funcName)s' in self.handler.formatter._fmt + + def test_with_tty_not_verbose(self): + setup_console_handler(self.handler, False) + assert type(self.handler.formatter) == ConsoleWarningFormatter + assert '%(name)s' not in self.handler.formatter._fmt + assert '%(funcName)s' not in self.handler.formatter._fmt + + def test_with_not_a_tty(self): + self.stream.isatty.return_value = False + setup_console_handler(self.handler, False) + assert type(self.handler.formatter) == logging.Formatter + + +class ConvergeStrategyFromOptsTestCase(unittest.TestCase): + + def test_invalid_opts(self): + options = {'--force-recreate': True, '--no-recreate': True} + with self.assertRaises(UserError): + convergence_strategy_from_opts(options) + + def test_always(self): + options = {'--force-recreate': True, '--no-recreate': False} + self.assertEqual( + convergence_strategy_from_opts(options), + ConvergenceStrategy.always + ) + + def test_never(self): + options = {'--force-recreate': False, '--no-recreate': True} + self.assertEqual( + convergence_strategy_from_opts(options), + ConvergenceStrategy.never + ) + + def test_changed(self): + options = {'--force-recreate': False, '--no-recreate': False} + self.assertEqual( + convergence_strategy_from_opts(options), + ConvergenceStrategy.changed + ) diff --git a/tests/unit/cli/verbose_proxy_test.py b/tests/unit/cli/verbose_proxy_test.py index 59417bb3ef..f77568dc08 100644 --- a/tests/unit/cli/verbose_proxy_test.py +++ b/tests/unit/cli/verbose_proxy_test.py @@ -1,14 +1,17 @@ -from __future__ import unicode_literals from __future__ import absolute_import -from tests import unittest +from __future__ import unicode_literals + +import six from 
compose.cli import verbose_proxy +from tests import unittest class VerboseProxyTestCase(unittest.TestCase): def test_format_call(self): - expected = "(u'arg1', True, key=u'value')" + prefix = '' if six.PY3 else 'u' + expected = "(%(p)s'arg1', True, key=%(p)s'value')" % dict(p=prefix) actual = verbose_proxy.format_call( ("arg1", True), {'key': 'value'}) @@ -21,7 +24,7 @@ class VerboseProxyTestCase(unittest.TestCase): self.assertEqual(expected, actual) def test_format_return(self): - expected = "{u'Id': u'ok'}" + expected = repr({'Id': 'ok'}) actual = verbose_proxy.format_return({'Id': 'ok'}, 2) self.assertEqual(expected, actual) diff --git a/tests/unit/cli_test.py b/tests/unit/cli_test.py index 3f50003292..5b63d2e84a 100644 --- a/tests/unit/cli_test.py +++ b/tests/unit/cli_test.py @@ -1,66 +1,64 @@ -from __future__ import unicode_literals +# encoding: utf-8 from __future__ import absolute_import +from __future__ import unicode_literals + import os -from .. import unittest import docker -import mock +import py +import pytest +from .. import mock +from .. 
import unittest +from compose.cli.command import get_project +from compose.cli.command import get_project_name from compose.cli.docopt_command import NoSuchCommand +from compose.cli.errors import UserError from compose.cli.main import TopLevelCommand +from compose.const import IS_WINDOWS_PLATFORM from compose.service import Service class CLITestCase(unittest.TestCase): - def test_default_project_name(self): - cwd = os.getcwd() - try: - os.chdir('tests/fixtures/simple-composefile') - command = TopLevelCommand() - project_name = command.get_project_name('.') + def test_default_project_name(self): + test_dir = py._path.local.LocalPath('tests/fixtures/simple-composefile') + with test_dir.as_cwd(): + project_name = get_project_name('.') self.assertEquals('simplecomposefile', project_name) - finally: - os.chdir(cwd) def test_project_name_with_explicit_base_dir(self): - command = TopLevelCommand() - command.base_dir = 'tests/fixtures/simple-composefile' - project_name = command.get_project_name(command.base_dir) + base_dir = 'tests/fixtures/simple-composefile' + project_name = get_project_name(base_dir) self.assertEquals('simplecomposefile', project_name) def test_project_name_with_explicit_uppercase_base_dir(self): - command = TopLevelCommand() - command.base_dir = 'tests/fixtures/UpperCaseDir' - project_name = command.get_project_name(command.base_dir) + base_dir = 'tests/fixtures/UpperCaseDir' + project_name = get_project_name(base_dir) self.assertEquals('uppercasedir', project_name) def test_project_name_with_explicit_project_name(self): - command = TopLevelCommand() name = 'explicit-project-name' - project_name = command.get_project_name(None, project_name=name) + project_name = get_project_name(None, project_name=name) self.assertEquals('explicitprojectname', project_name) def test_project_name_from_environment_old_var(self): - command = TopLevelCommand() name = 'namefromenv' with mock.patch.dict(os.environ): os.environ['FIG_PROJECT_NAME'] = name - project_name = 
command.get_project_name(None) + project_name = get_project_name(None) self.assertEquals(project_name, name) def test_project_name_from_environment_new_var(self): - command = TopLevelCommand() name = 'namefromenv' with mock.patch.dict(os.environ): os.environ['COMPOSE_PROJECT_NAME'] = name - project_name = command.get_project_name(None) + project_name = get_project_name(None) self.assertEquals(project_name, name) def test_get_project(self): - command = TopLevelCommand() - command.base_dir = 'tests/fixtures/longer-filename-composefile' - project = command.get_project() + base_dir = 'tests/fixtures/longer-filename-composefile' + project = get_project(base_dir) self.assertEqual(project.name, 'longerfilenamecomposefile') self.assertTrue(project.client) self.assertTrue(project.services) @@ -86,6 +84,7 @@ class CLITestCase(unittest.TestCase): with self.assertRaises(NoSuchCommand): TopLevelCommand().dispatch(['help', 'nonexistent'], None) + @pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason="requires dockerpty") @mock.patch('compose.cli.main.dockerpty', autospec=True) def test_run_with_environment_merged_with_options_list(self, mock_dockerpty): command = TopLevelCommand() @@ -100,7 +99,7 @@ class CLITestCase(unittest.TestCase): command.run(mock_project, { 'SERVICE': 'service', 'COMMAND': None, - '-e': ['BAR=NEW', 'OTHER=THREE'], + '-e': ['BAR=NEW', 'OTHER=bär'.encode('utf-8')], '--user': None, '--no-deps': None, '--allow-insecure-ssl': None, @@ -108,13 +107,15 @@ class CLITestCase(unittest.TestCase): '-T': None, '--entrypoint': None, '--service-ports': None, + '--publish': [], '--rm': None, + '--name': None, }) _, _, call_kwargs = mock_client.create_container.mock_calls[0] self.assertEqual( call_kwargs['environment'], - {'FOO': 'ONE', 'BAR': 'NEW', 'OTHER': 'THREE'}) + {'FOO': 'ONE', 'BAR': 'NEW', 'OTHER': u'bär'}) def test_run_service_with_restart_always(self): command = TopLevelCommand() @@ -136,10 +137,15 @@ class CLITestCase(unittest.TestCase): '-T': None, '--entrypoint': 
None, '--service-ports': None, + '--publish': [], '--rm': None, + '--name': None, }) - _, _, call_kwargs = mock_client.create_container.mock_calls[0] - self.assertEquals(call_kwargs['host_config']['RestartPolicy']['Name'], 'always') + + self.assertEquals( + mock_client.create_host_config.call_args[1]['restart_policy']['Name'], + 'always' + ) command = TopLevelCommand() mock_client = mock.create_autospec(docker.Client) @@ -160,7 +166,39 @@ class CLITestCase(unittest.TestCase): '-T': None, '--entrypoint': None, '--service-ports': None, + '--publish': [], '--rm': True, + '--name': None, }) - _, _, call_kwargs = mock_client.create_container.mock_calls[0] - self.assertFalse('RestartPolicy' in call_kwargs['host_config']) + + self.assertFalse( + mock_client.create_host_config.call_args[1].get('restart_policy') + ) + + def test_command_manula_and_service_ports_together(self): + command = TopLevelCommand() + mock_client = mock.create_autospec(docker.Client) + mock_project = mock.Mock(client=mock_client) + mock_project.get_service.return_value = Service( + 'service', + client=mock_client, + restart='always', + image='someimage', + ) + + with self.assertRaises(UserError): + command.run(mock_project, { + 'SERVICE': 'service', + 'COMMAND': None, + '-e': [], + '--user': None, + '--no-deps': None, + '--allow-insecure-ssl': None, + '-d': True, + '-T': None, + '--entrypoint': None, + '--service-ports': True, + '--publish': ['80:80'], + '--rm': None, + '--name': None, + }) diff --git a/tests/unit/config/__init__.py b/tests/unit/config/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit/config/config_test.py b/tests/unit/config/config_test.py new file mode 100644 index 0000000000..2835e9c805 --- /dev/null +++ b/tests/unit/config/config_test.py @@ -0,0 +1,1346 @@ +# encoding: utf-8 +from __future__ import print_function + +import os +import shutil +import tempfile +from operator import itemgetter + +import py +import pytest + +from compose.config 
import config +from compose.config.errors import ConfigurationError +from compose.const import IS_WINDOWS_PLATFORM +from tests import mock +from tests import unittest + + +def make_service_dict(name, service_dict, working_dir, filename=None): + """ + Test helper function to construct a ServiceLoader + """ + return config.ServiceLoader( + working_dir=working_dir, + filename=filename, + service_name=name, + service_dict=service_dict).make_service_dict() + + +def service_sort(services): + return sorted(services, key=itemgetter('name')) + + +def build_config_details(contents, working_dir, filename): + return config.ConfigDetails( + working_dir, + [config.ConfigFile(filename, contents)]) + + +class ConfigTest(unittest.TestCase): + def test_load(self): + service_dicts = config.load( + build_config_details( + { + 'foo': {'image': 'busybox'}, + 'bar': {'image': 'busybox', 'environment': ['FOO=1']}, + }, + 'tests/fixtures/extends', + 'common.yml' + ) + ) + + self.assertEqual( + service_sort(service_dicts), + service_sort([ + { + 'name': 'bar', + 'image': 'busybox', + 'environment': {'FOO': '1'}, + }, + { + 'name': 'foo', + 'image': 'busybox', + } + ]) + ) + + def test_load_throws_error_when_not_dict(self): + with self.assertRaises(ConfigurationError): + config.load( + build_config_details( + {'web': 'busybox:latest'}, + 'working_dir', + 'filename.yml' + ) + ) + + def test_config_invalid_service_names(self): + with self.assertRaises(ConfigurationError): + for invalid_name in ['?not?allowed', ' ', '', '!', '/', '\xe2']: + config.load( + build_config_details( + {invalid_name: {'image': 'busybox'}}, + 'working_dir', + 'filename.yml' + ) + ) + + def test_config_integer_service_name_raise_validation_error(self): + expected_error_msg = "Service name: 1 needs to be a string, eg '1'" + with self.assertRaisesRegexp(ConfigurationError, expected_error_msg): + config.load( + build_config_details( + {1: {'image': 'busybox'}}, + 'working_dir', + 'filename.yml' + ) + ) + + 
@pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash') + def test_load_with_multiple_files(self): + base_file = config.ConfigFile( + 'base.yaml', + { + 'web': { + 'image': 'example/web', + 'links': ['db'], + }, + 'db': { + 'image': 'example/db', + }, + }) + override_file = config.ConfigFile( + 'override.yaml', + { + 'web': { + 'build': '/', + 'volumes': ['/home/user/project:/code'], + }, + }) + details = config.ConfigDetails('.', [base_file, override_file]) + + service_dicts = config.load(details) + expected = [ + { + 'name': 'web', + 'build': '/', + 'links': ['db'], + 'volumes': ['/home/user/project:/code'], + }, + { + 'name': 'db', + 'image': 'example/db', + }, + ] + self.assertEqual(service_sort(service_dicts), service_sort(expected)) + + def test_load_with_multiple_files_and_empty_override(self): + base_file = config.ConfigFile( + 'base.yaml', + {'web': {'image': 'example/web'}}) + override_file = config.ConfigFile('override.yaml', None) + details = config.ConfigDetails('.', [base_file, override_file]) + + with pytest.raises(ConfigurationError) as exc: + config.load(details) + assert 'Top level object needs to be a dictionary' in exc.exconly() + + def test_load_with_multiple_files_and_empty_base(self): + base_file = config.ConfigFile('base.yaml', None) + override_file = config.ConfigFile( + 'override.yaml', + {'web': {'image': 'example/web'}}) + details = config.ConfigDetails('.', [base_file, override_file]) + + with pytest.raises(ConfigurationError) as exc: + config.load(details) + assert 'Top level object needs to be a dictionary' in exc.exconly() + + def test_load_with_multiple_files_and_extends_in_override_file(self): + base_file = config.ConfigFile( + 'base.yaml', + { + 'web': {'image': 'example/web'}, + }) + override_file = config.ConfigFile( + 'override.yaml', + { + 'web': { + 'extends': { + 'file': 'common.yml', + 'service': 'base', + }, + 'volumes': ['/home/user/project:/code'], + }, + }) + details = config.ConfigDetails('.', [base_file, 
override_file]) + + tmpdir = py.test.ensuretemp('config_test') + tmpdir.join('common.yml').write(""" + base: + labels: ['label=one'] + """) + with tmpdir.as_cwd(): + service_dicts = config.load(details) + + expected = [ + { + 'name': 'web', + 'image': 'example/web', + 'volumes': ['/home/user/project:/code'], + 'labels': {'label': 'one'}, + }, + ] + self.assertEqual(service_sort(service_dicts), service_sort(expected)) + + def test_config_valid_service_names(self): + for valid_name in ['_', '-', '.__.', '_what-up.', 'what_.up----', 'whatup']: + config.load( + build_config_details( + {valid_name: {'image': 'busybox'}}, + 'tests/fixtures/extends', + 'common.yml' + ) + ) + + def test_config_invalid_ports_format_validation(self): + expected_error_msg = "Service 'web' configuration key 'ports' contains an invalid type" + with self.assertRaisesRegexp(ConfigurationError, expected_error_msg): + for invalid_ports in [{"1": "8000"}, False, 0, "8000", 8000, ["8000", "8000"]]: + config.load( + build_config_details( + {'web': {'image': 'busybox', 'ports': invalid_ports}}, + 'working_dir', + 'filename.yml' + ) + ) + + def test_config_valid_ports_format_validation(self): + valid_ports = [["8000", "9000"], ["8000/8050"], ["8000"], [8000], ["49153-49154:3002-3003"]] + for ports in valid_ports: + config.load( + build_config_details( + {'web': {'image': 'busybox', 'ports': ports}}, + 'working_dir', + 'filename.yml' + ) + ) + + def test_config_hint(self): + expected_error_msg = "(did you mean 'privileged'?)" + with self.assertRaisesRegexp(ConfigurationError, expected_error_msg): + config.load( + build_config_details( + { + 'foo': {'image': 'busybox', 'privilige': 'something'}, + }, + 'tests/fixtures/extends', + 'filename.yml' + ) + ) + + def test_invalid_config_build_and_image_specified(self): + expected_error_msg = "Service 'foo' has both an image and build path specified." 
+ with self.assertRaisesRegexp(ConfigurationError, expected_error_msg): + config.load( + build_config_details( + { + 'foo': {'image': 'busybox', 'build': '.'}, + }, + 'tests/fixtures/extends', + 'filename.yml' + ) + ) + + def test_invalid_config_type_should_be_an_array(self): + expected_error_msg = "Service 'foo' configuration key 'links' contains an invalid type, it should be an array" + with self.assertRaisesRegexp(ConfigurationError, expected_error_msg): + config.load( + build_config_details( + { + 'foo': {'image': 'busybox', 'links': 'an_link'}, + }, + 'tests/fixtures/extends', + 'filename.yml' + ) + ) + + def test_invalid_config_not_a_dictionary(self): + expected_error_msg = "Top level object needs to be a dictionary." + with self.assertRaisesRegexp(ConfigurationError, expected_error_msg): + config.load( + build_config_details( + ['foo', 'lol'], + 'tests/fixtures/extends', + 'filename.yml' + ) + ) + + def test_invalid_config_not_unique_items(self): + expected_error_msg = "has non-unique elements" + with self.assertRaisesRegexp(ConfigurationError, expected_error_msg): + config.load( + build_config_details( + { + 'web': {'build': '.', 'devices': ['/dev/foo:/dev/foo', '/dev/foo:/dev/foo']} + }, + 'tests/fixtures/extends', + 'filename.yml' + ) + ) + + def test_invalid_list_of_strings_format(self): + expected_error_msg = "Service 'web' configuration key 'command' contains 1" + expected_error_msg += ", which is an invalid type, it should be a string" + with self.assertRaisesRegexp(ConfigurationError, expected_error_msg): + config.load( + build_config_details( + { + 'web': {'build': '.', 'command': [1]} + }, + 'tests/fixtures/extends', + 'filename.yml' + ) + ) + + def test_config_image_and_dockerfile_raise_validation_error(self): + expected_error_msg = "Service 'web' has both an image and alternate Dockerfile." 
+ with self.assertRaisesRegexp(ConfigurationError, expected_error_msg): + config.load( + build_config_details( + {'web': {'image': 'busybox', 'dockerfile': 'Dockerfile.alt'}}, + 'working_dir', + 'filename.yml' + ) + ) + + def test_config_extra_hosts_string_raises_validation_error(self): + expected_error_msg = "Service 'web' configuration key 'extra_hosts' contains an invalid type" + + with self.assertRaisesRegexp(ConfigurationError, expected_error_msg): + config.load( + build_config_details( + {'web': { + 'image': 'busybox', + 'extra_hosts': 'somehost:162.242.195.82' + }}, + 'working_dir', + 'filename.yml' + ) + ) + + def test_config_extra_hosts_list_of_dicts_validation_error(self): + expected_error_msg = "key 'extra_hosts' contains {'somehost': '162.242.195.82'}, which is an invalid type, it should be a string" + + with self.assertRaisesRegexp(ConfigurationError, expected_error_msg): + config.load( + build_config_details( + {'web': { + 'image': 'busybox', + 'extra_hosts': [ + {'somehost': '162.242.195.82'}, + {'otherhost': '50.31.209.229'} + ] + }}, + 'working_dir', + 'filename.yml' + ) + ) + + def test_valid_config_which_allows_two_type_definitions(self): + expose_values = [["8000"], [8000]] + for expose in expose_values: + service = config.load( + build_config_details( + {'web': { + 'image': 'busybox', + 'expose': expose + }}, + 'working_dir', + 'filename.yml' + ) + ) + self.assertEqual(service[0]['expose'], expose) + + def test_valid_config_oneof_string_or_list(self): + entrypoint_values = [["sh"], "sh"] + for entrypoint in entrypoint_values: + service = config.load( + build_config_details( + {'web': { + 'image': 'busybox', + 'entrypoint': entrypoint + }}, + 'working_dir', + 'filename.yml' + ) + ) + self.assertEqual(service[0]['entrypoint'], entrypoint) + + @mock.patch('compose.config.validation.log') + def test_logs_warning_for_boolean_in_environment(self, mock_logging): + expected_warning_msg = "There is a boolean value in the 'environment' key." 
+ config.load( + build_config_details( + {'web': { + 'image': 'busybox', + 'environment': {'SHOW_STUFF': True} + }}, + 'working_dir', + 'filename.yml' + ) + ) + + self.assertTrue(mock_logging.warn.called) + self.assertTrue(expected_warning_msg in mock_logging.warn.call_args[0][0]) + + def test_config_invalid_environment_dict_key_raises_validation_error(self): + expected_error_msg = "Service 'web' configuration key 'environment' contains unsupported option: '---'" + + with self.assertRaisesRegexp(ConfigurationError, expected_error_msg): + config.load( + build_config_details( + {'web': { + 'image': 'busybox', + 'environment': {'---': 'nope'} + }}, + 'working_dir', + 'filename.yml' + ) + ) + + def test_load_yaml_with_yaml_error(self): + tmpdir = py.test.ensuretemp('invalid_yaml_test') + invalid_yaml_file = tmpdir.join('docker-compose.yml') + invalid_yaml_file.write(""" + web: + this is bogus: ok: what + """) + with pytest.raises(ConfigurationError) as exc: + config.load_yaml(str(invalid_yaml_file)) + + assert 'line 3, column 32' in exc.exconly() + + +class InterpolationTest(unittest.TestCase): + @mock.patch.dict(os.environ) + def test_config_file_with_environment_variable(self): + os.environ.update( + IMAGE="busybox", + HOST_PORT="80", + LABEL_VALUE="myvalue", + ) + + service_dicts = config.load( + config.find('tests/fixtures/environment-interpolation', None), + ) + + self.assertEqual(service_dicts, [ + { + 'name': 'web', + 'image': 'busybox', + 'ports': ['80:8000'], + 'labels': {'mylabel': 'myvalue'}, + 'hostname': 'host-', + 'command': '${ESCAPED}', + } + ]) + + @mock.patch.dict(os.environ) + def test_unset_variable_produces_warning(self): + os.environ.pop('FOO', None) + os.environ.pop('BAR', None) + config_details = build_config_details( + { + 'web': { + 'image': '${FOO}', + 'command': '${BAR}', + 'container_name': '${BAR}', + }, + }, + '.', + None, + ) + + with mock.patch('compose.config.interpolation.log') as log: + config.load(config_details) + + 
self.assertEqual(2, log.warn.call_count) + warnings = sorted(args[0][0] for args in log.warn.call_args_list) + self.assertIn('BAR', warnings[0]) + self.assertIn('FOO', warnings[1]) + + @mock.patch.dict(os.environ) + def test_invalid_interpolation(self): + with self.assertRaises(config.ConfigurationError) as cm: + config.load( + build_config_details( + {'web': {'image': '${'}}, + 'working_dir', + 'filename.yml' + ) + ) + + self.assertIn('Invalid', cm.exception.msg) + self.assertIn('for "image" option', cm.exception.msg) + self.assertIn('in service "web"', cm.exception.msg) + self.assertIn('"${"', cm.exception.msg) + + def test_empty_environment_key_allowed(self): + service_dict = config.load( + build_config_details( + { + 'web': { + 'build': '.', + 'environment': { + 'POSTGRES_PASSWORD': '' + }, + }, + }, + '.', + None, + ) + )[0] + self.assertEquals(service_dict['environment']['POSTGRES_PASSWORD'], '') + + +class VolumeConfigTest(unittest.TestCase): + def test_no_binding(self): + d = make_service_dict('foo', {'build': '.', 'volumes': ['/data']}, working_dir='.') + self.assertEqual(d['volumes'], ['/data']) + + @mock.patch.dict(os.environ) + def test_volume_binding_with_environment_variable(self): + os.environ['VOLUME_PATH'] = '/host/path' + d = config.load( + build_config_details( + {'foo': {'build': '.', 'volumes': ['${VOLUME_PATH}:/container/path']}}, + '.', + None, + ) + )[0] + self.assertEqual(d['volumes'], ['/host/path:/container/path']) + + @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='posix paths') + @mock.patch.dict(os.environ) + def test_volume_binding_with_home(self): + os.environ['HOME'] = '/home/user' + d = make_service_dict('foo', {'build': '.', 'volumes': ['~:/container/path']}, working_dir='.') + self.assertEqual(d['volumes'], ['/home/user:/container/path']) + + def test_name_does_not_expand(self): + d = make_service_dict('foo', {'build': '.', 'volumes': ['mydatavolume:/data']}, working_dir='.') + self.assertEqual(d['volumes'], 
['mydatavolume:/data']) + + def test_absolute_posix_path_does_not_expand(self): + d = make_service_dict('foo', {'build': '.', 'volumes': ['/var/lib/data:/data']}, working_dir='.') + self.assertEqual(d['volumes'], ['/var/lib/data:/data']) + + def test_absolute_windows_path_does_not_expand(self): + d = make_service_dict('foo', {'build': '.', 'volumes': ['c:\\data:/data']}, working_dir='.') + self.assertEqual(d['volumes'], ['c:\\data:/data']) + + @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='posix paths') + def test_relative_path_does_expand_posix(self): + d = make_service_dict('foo', {'build': '.', 'volumes': ['./data:/data']}, working_dir='/home/me/myproject') + self.assertEqual(d['volumes'], ['/home/me/myproject/data:/data']) + + d = make_service_dict('foo', {'build': '.', 'volumes': ['.:/data']}, working_dir='/home/me/myproject') + self.assertEqual(d['volumes'], ['/home/me/myproject:/data']) + + d = make_service_dict('foo', {'build': '.', 'volumes': ['../otherproject:/data']}, working_dir='/home/me/myproject') + self.assertEqual(d['volumes'], ['/home/me/otherproject:/data']) + + @pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='windows paths') + def test_relative_path_does_expand_windows(self): + d = make_service_dict('foo', {'build': '.', 'volumes': ['./data:/data']}, working_dir='c:\\Users\\me\\myproject') + self.assertEqual(d['volumes'], ['c:\\Users\\me\\myproject\\data:/data']) + + d = make_service_dict('foo', {'build': '.', 'volumes': ['.:/data']}, working_dir='c:\\Users\\me\\myproject') + self.assertEqual(d['volumes'], ['c:\\Users\\me\\myproject:/data']) + + d = make_service_dict('foo', {'build': '.', 'volumes': ['../otherproject:/data']}, working_dir='c:\\Users\\me\\myproject') + self.assertEqual(d['volumes'], ['c:\\Users\\me\\otherproject:/data']) + + @mock.patch.dict(os.environ) + def test_home_directory_with_driver_does_not_expand(self): + os.environ['NAME'] = 'surprise!' 
+ d = make_service_dict('foo', { + 'build': '.', + 'volumes': ['~:/data'], + 'volume_driver': 'foodriver', + }, working_dir='.') + self.assertEqual(d['volumes'], ['~:/data']) + + +class MergePathMappingTest(object): + def config_name(self): + return "" + + def test_empty(self): + service_dict = config.merge_service_dicts({}, {}) + self.assertNotIn(self.config_name(), service_dict) + + def test_no_override(self): + service_dict = config.merge_service_dicts( + {self.config_name(): ['/foo:/code', '/data']}, + {}, + ) + self.assertEqual(set(service_dict[self.config_name()]), set(['/foo:/code', '/data'])) + + def test_no_base(self): + service_dict = config.merge_service_dicts( + {}, + {self.config_name(): ['/bar:/code']}, + ) + self.assertEqual(set(service_dict[self.config_name()]), set(['/bar:/code'])) + + def test_override_explicit_path(self): + service_dict = config.merge_service_dicts( + {self.config_name(): ['/foo:/code', '/data']}, + {self.config_name(): ['/bar:/code']}, + ) + self.assertEqual(set(service_dict[self.config_name()]), set(['/bar:/code', '/data'])) + + def test_add_explicit_path(self): + service_dict = config.merge_service_dicts( + {self.config_name(): ['/foo:/code', '/data']}, + {self.config_name(): ['/bar:/code', '/quux:/data']}, + ) + self.assertEqual(set(service_dict[self.config_name()]), set(['/bar:/code', '/quux:/data'])) + + def test_remove_explicit_path(self): + service_dict = config.merge_service_dicts( + {self.config_name(): ['/foo:/code', '/quux:/data']}, + {self.config_name(): ['/bar:/code', '/data']}, + ) + self.assertEqual(set(service_dict[self.config_name()]), set(['/bar:/code', '/data'])) + + +class MergeVolumesTest(unittest.TestCase, MergePathMappingTest): + def config_name(self): + return 'volumes' + + +class MergeDevicesTest(unittest.TestCase, MergePathMappingTest): + def config_name(self): + return 'devices' + + +class BuildOrImageMergeTest(unittest.TestCase): + def test_merge_build_or_image_no_override(self): + self.assertEqual( + 
config.merge_service_dicts({'build': '.'}, {}), + {'build': '.'}, + ) + + self.assertEqual( + config.merge_service_dicts({'image': 'redis'}, {}), + {'image': 'redis'}, + ) + + def test_merge_build_or_image_override_with_same(self): + self.assertEqual( + config.merge_service_dicts({'build': '.'}, {'build': './web'}), + {'build': './web'}, + ) + + self.assertEqual( + config.merge_service_dicts({'image': 'redis'}, {'image': 'postgres'}), + {'image': 'postgres'}, + ) + + def test_merge_build_or_image_override_with_other(self): + self.assertEqual( + config.merge_service_dicts({'build': '.'}, {'image': 'redis'}), + {'image': 'redis'} + ) + + self.assertEqual( + config.merge_service_dicts({'image': 'redis'}, {'build': '.'}), + {'build': '.'} + ) + + +class MergeListsTest(unittest.TestCase): + def test_empty(self): + service_dict = config.merge_service_dicts({}, {}) + self.assertNotIn('ports', service_dict) + + def test_no_override(self): + service_dict = config.merge_service_dicts( + {'ports': ['10:8000', '9000']}, + {}, + ) + self.assertEqual(set(service_dict['ports']), set(['10:8000', '9000'])) + + def test_no_base(self): + service_dict = config.merge_service_dicts( + {}, + {'ports': ['10:8000', '9000']}, + ) + self.assertEqual(set(service_dict['ports']), set(['10:8000', '9000'])) + + def test_add_item(self): + service_dict = config.merge_service_dicts( + {'ports': ['10:8000', '9000']}, + {'ports': ['20:8000']}, + ) + self.assertEqual(set(service_dict['ports']), set(['10:8000', '9000', '20:8000'])) + + +class MergeStringsOrListsTest(unittest.TestCase): + def test_no_override(self): + service_dict = config.merge_service_dicts( + {'dns': '8.8.8.8'}, + {}, + ) + self.assertEqual(set(service_dict['dns']), set(['8.8.8.8'])) + + def test_no_base(self): + service_dict = config.merge_service_dicts( + {}, + {'dns': '8.8.8.8'}, + ) + self.assertEqual(set(service_dict['dns']), set(['8.8.8.8'])) + + def test_add_string(self): + service_dict = config.merge_service_dicts( + {'dns': 
['8.8.8.8']}, + {'dns': '9.9.9.9'}, + ) + self.assertEqual(set(service_dict['dns']), set(['8.8.8.8', '9.9.9.9'])) + + def test_add_list(self): + service_dict = config.merge_service_dicts( + {'dns': '8.8.8.8'}, + {'dns': ['9.9.9.9']}, + ) + self.assertEqual(set(service_dict['dns']), set(['8.8.8.8', '9.9.9.9'])) + + +class MergeLabelsTest(unittest.TestCase): + def test_empty(self): + service_dict = config.merge_service_dicts({}, {}) + self.assertNotIn('labels', service_dict) + + def test_no_override(self): + service_dict = config.merge_service_dicts( + make_service_dict('foo', {'build': '.', 'labels': ['foo=1', 'bar']}, 'tests/'), + make_service_dict('foo', {'build': '.'}, 'tests/'), + ) + self.assertEqual(service_dict['labels'], {'foo': '1', 'bar': ''}) + + def test_no_base(self): + service_dict = config.merge_service_dicts( + make_service_dict('foo', {'build': '.'}, 'tests/'), + make_service_dict('foo', {'build': '.', 'labels': ['foo=2']}, 'tests/'), + ) + self.assertEqual(service_dict['labels'], {'foo': '2'}) + + def test_override_explicit_value(self): + service_dict = config.merge_service_dicts( + make_service_dict('foo', {'build': '.', 'labels': ['foo=1', 'bar']}, 'tests/'), + make_service_dict('foo', {'build': '.', 'labels': ['foo=2']}, 'tests/'), + ) + self.assertEqual(service_dict['labels'], {'foo': '2', 'bar': ''}) + + def test_add_explicit_value(self): + service_dict = config.merge_service_dicts( + make_service_dict('foo', {'build': '.', 'labels': ['foo=1', 'bar']}, 'tests/'), + make_service_dict('foo', {'build': '.', 'labels': ['bar=2']}, 'tests/'), + ) + self.assertEqual(service_dict['labels'], {'foo': '1', 'bar': '2'}) + + def test_remove_explicit_value(self): + service_dict = config.merge_service_dicts( + make_service_dict('foo', {'build': '.', 'labels': ['foo=1', 'bar=2']}, 'tests/'), + make_service_dict('foo', {'build': '.', 'labels': ['bar']}, 'tests/'), + ) + self.assertEqual(service_dict['labels'], {'foo': '1', 'bar': ''}) + + +class 
MemoryOptionsTest(unittest.TestCase): + def test_validation_fails_with_just_memswap_limit(self): + """ + When you set a 'memswap_limit' it is invalid config unless you also set + a mem_limit + """ + expected_error_msg = ( + "Invalid 'memswap_limit' configuration for 'foo' service: when " + "defining 'memswap_limit' you must set 'mem_limit' as well" + ) + with self.assertRaisesRegexp(ConfigurationError, expected_error_msg): + config.load( + build_config_details( + { + 'foo': {'image': 'busybox', 'memswap_limit': 2000000}, + }, + 'tests/fixtures/extends', + 'filename.yml' + ) + ) + + def test_validation_with_correct_memswap_values(self): + service_dict = config.load( + build_config_details( + {'foo': {'image': 'busybox', 'mem_limit': 1000000, 'memswap_limit': 2000000}}, + 'tests/fixtures/extends', + 'common.yml' + ) + ) + self.assertEqual(service_dict[0]['memswap_limit'], 2000000) + + def test_memswap_can_be_a_string(self): + service_dict = config.load( + build_config_details( + {'foo': {'image': 'busybox', 'mem_limit': "1G", 'memswap_limit': "512M"}}, + 'tests/fixtures/extends', + 'common.yml' + ) + ) + self.assertEqual(service_dict[0]['memswap_limit'], "512M") + + +class EnvTest(unittest.TestCase): + def test_parse_environment_as_list(self): + environment = [ + 'NORMAL=F1', + 'CONTAINS_EQUALS=F=2', + 'TRAILING_EQUALS=', + ] + self.assertEqual( + config.parse_environment(environment), + {'NORMAL': 'F1', 'CONTAINS_EQUALS': 'F=2', 'TRAILING_EQUALS': ''}, + ) + + def test_parse_environment_as_dict(self): + environment = { + 'NORMAL': 'F1', + 'CONTAINS_EQUALS': 'F=2', + 'TRAILING_EQUALS': None, + } + self.assertEqual(config.parse_environment(environment), environment) + + def test_parse_environment_invalid(self): + with self.assertRaises(ConfigurationError): + config.parse_environment('a=b') + + def test_parse_environment_empty(self): + self.assertEqual(config.parse_environment(None), {}) + + @mock.patch.dict(os.environ) + def test_resolve_environment(self): + 
os.environ['FILE_DEF'] = 'E1' + os.environ['FILE_DEF_EMPTY'] = 'E2' + os.environ['ENV_DEF'] = 'E3' + + service_dict = make_service_dict( + 'foo', { + 'build': '.', + 'environment': { + 'FILE_DEF': 'F1', + 'FILE_DEF_EMPTY': '', + 'ENV_DEF': None, + 'NO_DEF': None + }, + }, + 'tests/' + ) + + self.assertEqual( + service_dict['environment'], + {'FILE_DEF': 'F1', 'FILE_DEF_EMPTY': '', 'ENV_DEF': 'E3', 'NO_DEF': ''}, + ) + + def test_env_from_file(self): + service_dict = make_service_dict( + 'foo', + {'build': '.', 'env_file': 'one.env'}, + 'tests/fixtures/env', + ) + self.assertEqual( + service_dict['environment'], + {'ONE': '2', 'TWO': '1', 'THREE': '3', 'FOO': 'bar'}, + ) + + def test_env_from_multiple_files(self): + service_dict = make_service_dict( + 'foo', + {'build': '.', 'env_file': ['one.env', 'two.env']}, + 'tests/fixtures/env', + ) + self.assertEqual( + service_dict['environment'], + {'ONE': '2', 'TWO': '1', 'THREE': '3', 'FOO': 'baz', 'DOO': 'dah'}, + ) + + def test_env_nonexistent_file(self): + options = {'env_file': 'nonexistent.env'} + self.assertRaises( + ConfigurationError, + lambda: make_service_dict('foo', options, 'tests/fixtures/env'), + ) + + @mock.patch.dict(os.environ) + def test_resolve_environment_from_file(self): + os.environ['FILE_DEF'] = 'E1' + os.environ['FILE_DEF_EMPTY'] = 'E2' + os.environ['ENV_DEF'] = 'E3' + service_dict = make_service_dict( + 'foo', + {'build': '.', 'env_file': 'resolve.env'}, + 'tests/fixtures/env', + ) + self.assertEqual( + service_dict['environment'], + { + 'FILE_DEF': u'bär', + 'FILE_DEF_EMPTY': '', + 'ENV_DEF': 'E3', + 'NO_DEF': '' + }, + ) + + @pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash') + @mock.patch.dict(os.environ) + def test_resolve_path(self): + os.environ['HOSTENV'] = '/tmp' + os.environ['CONTAINERENV'] = '/host/tmp' + + service_dict = config.load( + build_config_details( + {'foo': {'build': '.', 'volumes': ['$HOSTENV:$CONTAINERENV']}}, + "tests/fixtures/env", + None, + ) + )[0] + 
self.assertEqual(set(service_dict['volumes']), set(['/tmp:/host/tmp'])) + + service_dict = config.load( + build_config_details( + {'foo': {'build': '.', 'volumes': ['/opt${HOSTENV}:/opt${CONTAINERENV}']}}, + "tests/fixtures/env", + None, + ) + )[0] + self.assertEqual(set(service_dict['volumes']), set(['/opt/tmp:/opt/host/tmp'])) + + +def load_from_filename(filename): + return config.load(config.find('.', [filename])) + + +class ExtendsTest(unittest.TestCase): + def test_extends(self): + service_dicts = load_from_filename('tests/fixtures/extends/docker-compose.yml') + + self.assertEqual(service_sort(service_dicts), service_sort([ + { + 'name': 'mydb', + 'image': 'busybox', + 'command': 'top', + }, + { + 'name': 'myweb', + 'image': 'busybox', + 'command': 'top', + 'links': ['mydb:db'], + 'environment': { + "FOO": "1", + "BAR": "2", + "BAZ": "2", + }, + } + ])) + + def test_nested(self): + service_dicts = load_from_filename('tests/fixtures/extends/nested.yml') + + self.assertEqual(service_dicts, [ + { + 'name': 'myweb', + 'image': 'busybox', + 'command': '/bin/true', + 'environment': { + "FOO": "2", + "BAR": "2", + }, + }, + ]) + + def test_self_referencing_file(self): + """ + We specify a 'file' key that is the filename we're already in. 
+ """ + service_dicts = load_from_filename('tests/fixtures/extends/specify-file-as-self.yml') + self.assertEqual(service_sort(service_dicts), service_sort([ + { + 'environment': + { + 'YEP': '1', 'BAR': '1', 'BAZ': '3' + }, + 'image': 'busybox', + 'name': 'myweb' + }, + { + 'environment': + {'YEP': '1'}, + 'image': 'busybox', + 'name': 'otherweb' + }, + { + 'environment': + {'YEP': '1', 'BAZ': '3'}, + 'image': 'busybox', + 'name': 'web' + } + ])) + + def test_circular(self): + try: + load_from_filename('tests/fixtures/extends/circle-1.yml') + raise Exception("Expected config.CircularReference to be raised") + except config.CircularReference as e: + self.assertEqual( + [(os.path.basename(filename), service_name) for (filename, service_name) in e.trail], + [ + ('circle-1.yml', 'web'), + ('circle-2.yml', 'web'), + ('circle-1.yml', 'web'), + ], + ) + + def test_extends_validation_empty_dictionary(self): + with self.assertRaisesRegexp(ConfigurationError, 'service'): + config.load( + build_config_details( + { + 'web': {'image': 'busybox', 'extends': {}}, + }, + 'tests/fixtures/extends', + 'filename.yml' + ) + ) + + def test_extends_validation_missing_service_key(self): + with self.assertRaisesRegexp(ConfigurationError, "'service' is a required property"): + config.load( + build_config_details( + { + 'web': {'image': 'busybox', 'extends': {'file': 'common.yml'}}, + }, + 'tests/fixtures/extends', + 'filename.yml' + ) + ) + + def test_extends_validation_invalid_key(self): + expected_error_msg = ( + "Service 'web' configuration key 'extends' " + "contains unsupported option: 'rogue_key'" + ) + with self.assertRaisesRegexp(ConfigurationError, expected_error_msg): + config.load( + build_config_details( + { + 'web': { + 'image': 'busybox', + 'extends': { + 'file': 'common.yml', + 'service': 'web', + 'rogue_key': 'is not allowed' + } + }, + }, + 'tests/fixtures/extends', + 'filename.yml' + ) + ) + + def test_extends_validation_sub_property_key(self): + expected_error_msg = ( + 
"Service 'web' configuration key 'extends' 'file' contains 1, " + "which is an invalid type, it should be a string" + ) + with self.assertRaisesRegexp(ConfigurationError, expected_error_msg): + config.load( + build_config_details( + { + 'web': { + 'image': 'busybox', + 'extends': { + 'file': 1, + 'service': 'web', + } + }, + }, + 'tests/fixtures/extends', + 'filename.yml' + ) + ) + + def test_extends_validation_no_file_key_no_filename_set(self): + dictionary = {'extends': {'service': 'web'}} + + def load_config(): + return make_service_dict('myweb', dictionary, working_dir='tests/fixtures/extends') + + self.assertRaisesRegexp(ConfigurationError, 'file', load_config) + + def test_extends_validation_valid_config(self): + service = config.load( + build_config_details( + { + 'web': {'image': 'busybox', 'extends': {'service': 'web', 'file': 'common.yml'}}, + }, + 'tests/fixtures/extends', + 'common.yml' + ) + ) + + self.assertEquals(len(service), 1) + self.assertIsInstance(service[0], dict) + self.assertEquals(service[0]['command'], "/bin/true") + + def test_extended_service_with_invalid_config(self): + expected_error_msg = "Service 'myweb' has neither an image nor a build path specified" + + with self.assertRaisesRegexp(ConfigurationError, expected_error_msg): + load_from_filename('tests/fixtures/extends/service-with-invalid-schema.yml') + + def test_extended_service_with_valid_config(self): + service = load_from_filename('tests/fixtures/extends/service-with-valid-composite-extends.yml') + self.assertEquals(service[0]['command'], "top") + + def test_extends_file_defaults_to_self(self): + """ + Test not specifying a file in our extends options that the + config is valid and correctly extends from itself. 
+ """ + service_dicts = load_from_filename('tests/fixtures/extends/no-file-specified.yml') + self.assertEqual(service_sort(service_dicts), service_sort([ + { + 'name': 'myweb', + 'image': 'busybox', + 'environment': { + "BAR": "1", + "BAZ": "3", + } + }, + { + 'name': 'web', + 'image': 'busybox', + 'environment': { + "BAZ": "3", + } + } + ])) + + def test_invalid_links_in_extended_service(self): + expected_error_msg = "services with 'links' cannot be extended" + with self.assertRaisesRegexp(ConfigurationError, expected_error_msg): + load_from_filename('tests/fixtures/extends/invalid-links.yml') + + def test_invalid_volumes_from_in_extended_service(self): + expected_error_msg = "services with 'volumes_from' cannot be extended" + + with self.assertRaisesRegexp(ConfigurationError, expected_error_msg): + load_from_filename('tests/fixtures/extends/invalid-volumes.yml') + + def test_invalid_net_in_extended_service(self): + expected_error_msg = "services with 'net: container' cannot be extended" + + with self.assertRaisesRegexp(ConfigurationError, expected_error_msg): + load_from_filename('tests/fixtures/extends/invalid-net.yml') + + @mock.patch.dict(os.environ) + def test_valid_interpolation_in_extended_service(self): + os.environ.update( + HOSTNAME_VALUE="penguin", + ) + expected_interpolated_value = "host-penguin" + + service_dicts = load_from_filename('tests/fixtures/extends/valid-interpolation.yml') + for service in service_dicts: + self.assertTrue(service['hostname'], expected_interpolated_value) + + @pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash') + def test_volume_path(self): + dicts = load_from_filename('tests/fixtures/volume-path/docker-compose.yml') + + paths = [ + '%s:/foo' % os.path.abspath('tests/fixtures/volume-path/common/foo'), + '%s:/bar' % os.path.abspath('tests/fixtures/volume-path/bar'), + ] + + self.assertEqual(set(dicts[0]['volumes']), set(paths)) + + def test_parent_build_path_dne(self): + child = 
load_from_filename('tests/fixtures/extends/nonexistent-path-child.yml') + + self.assertEqual(child, [ + { + 'name': 'dnechild', + 'image': 'busybox', + 'command': '/bin/true', + 'environment': { + "FOO": "1", + "BAR": "2", + }, + }, + ]) + + def test_load_throws_error_when_base_service_does_not_exist(self): + err_msg = r'''Cannot extend service 'foo' in .*: Service not found''' + with self.assertRaisesRegexp(ConfigurationError, err_msg): + load_from_filename('tests/fixtures/extends/nonexistent-service.yml') + + def test_partial_service_config_in_extends_is_still_valid(self): + dicts = load_from_filename('tests/fixtures/extends/valid-common-config.yml') + self.assertEqual(dicts[0]['environment'], {'FOO': '1'}) + + def test_extended_service_with_verbose_and_shorthand_way(self): + services = load_from_filename('tests/fixtures/extends/verbose-and-shorthand.yml') + self.assertEqual(service_sort(services), service_sort([ + { + 'name': 'base', + 'image': 'busybox', + 'environment': {'BAR': '1'}, + }, + { + 'name': 'verbose', + 'image': 'busybox', + 'environment': {'BAR': '1', 'FOO': '1'}, + }, + { + 'name': 'shorthand', + 'image': 'busybox', + 'environment': {'BAR': '1', 'FOO': '2'}, + }, + ])) + + +@pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash') +class ExpandPathTest(unittest.TestCase): + working_dir = '/home/user/somedir' + + def test_expand_path_normal(self): + result = config.expand_path(self.working_dir, 'myfile') + self.assertEqual(result, self.working_dir + '/' + 'myfile') + + def test_expand_path_absolute(self): + abs_path = '/home/user/otherdir/somefile' + result = config.expand_path(self.working_dir, abs_path) + self.assertEqual(result, abs_path) + + def test_expand_path_with_tilde(self): + test_path = '~/otherdir/somefile' + with mock.patch.dict(os.environ): + os.environ['HOME'] = user_path = '/home/user/' + result = config.expand_path(self.working_dir, test_path) + + self.assertEqual(result, user_path + 'otherdir/somefile') + + +class 
VolumePathTest(unittest.TestCase): + + @pytest.mark.xfail((not IS_WINDOWS_PLATFORM), reason='does not have a drive') + def test_split_path_mapping_with_windows_path(self): + windows_volume_path = "c:\\Users\\msamblanet\\Documents\\anvil\\connect\\config:/opt/connect/config:ro" + expected_mapping = ( + "/opt/connect/config:ro", + "c:\\Users\\msamblanet\\Documents\\anvil\\connect\\config" + ) + + mapping = config.split_path_mapping(windows_volume_path) + + self.assertEqual(mapping, expected_mapping) + + +@pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash') +class BuildPathTest(unittest.TestCase): + def setUp(self): + self.abs_context_path = os.path.join(os.getcwd(), 'tests/fixtures/build-ctx') + + def test_nonexistent_path(self): + with self.assertRaises(ConfigurationError): + config.load( + build_config_details( + { + 'foo': {'build': 'nonexistent.path'}, + }, + 'working_dir', + 'filename.yml' + ) + ) + + def test_relative_path(self): + relative_build_path = '../build-ctx/' + service_dict = make_service_dict( + 'relpath', + {'build': relative_build_path}, + working_dir='tests/fixtures/build-path' + ) + self.assertEquals(service_dict['build'], self.abs_context_path) + + def test_absolute_path(self): + service_dict = make_service_dict( + 'abspath', + {'build': self.abs_context_path}, + working_dir='tests/fixtures/build-path' + ) + self.assertEquals(service_dict['build'], self.abs_context_path) + + def test_from_file(self): + service_dict = load_from_filename('tests/fixtures/build-path/docker-compose.yml') + self.assertEquals(service_dict, [{'name': 'foo', 'build': self.abs_context_path}]) + + +class GetDefaultConfigFilesTestCase(unittest.TestCase): + + files = [ + 'docker-compose.yml', + 'docker-compose.yaml', + 'fig.yml', + 'fig.yaml', + ] + + def test_get_config_path_default_file_in_basedir(self): + for index, filename in enumerate(self.files): + self.assertEqual( + filename, + get_config_filename_for_files(self.files[index:])) + with 
self.assertRaises(config.ComposeFileNotFound): + get_config_filename_for_files([]) + + def test_get_config_path_default_file_in_parent_dir(self): + """Test with files placed in the subdir""" + + def get_config_in_subdir(files): + return get_config_filename_for_files(files, subdir=True) + + for index, filename in enumerate(self.files): + self.assertEqual(filename, get_config_in_subdir(self.files[index:])) + with self.assertRaises(config.ComposeFileNotFound): + get_config_in_subdir([]) + + +def get_config_filename_for_files(filenames, subdir=None): + def make_files(dirname, filenames): + for fname in filenames: + with open(os.path.join(dirname, fname), 'w') as f: + f.write('') + + project_dir = tempfile.mkdtemp() + try: + make_files(project_dir, filenames) + if subdir: + base_dir = tempfile.mkdtemp(dir=project_dir) + else: + base_dir = project_dir + filename, = config.get_default_config_files(base_dir) + return os.path.basename(filename) + finally: + shutil.rmtree(project_dir) diff --git a/tests/unit/config_test.py b/tests/unit/config_test.py deleted file mode 100644 index cefb1a20ff..0000000000 --- a/tests/unit/config_test.py +++ /dev/null @@ -1,767 +0,0 @@ -import mock -import os -import shutil -import tempfile -from .. 
import unittest - -from compose import config - - -def make_service_dict(name, service_dict, working_dir): - """ - Test helper function to construct a ServiceLoader - """ - return config.ServiceLoader(working_dir=working_dir).make_service_dict(name, service_dict) - - -class ConfigTest(unittest.TestCase): - def test_load(self): - service_dicts = config.load( - config.ConfigDetails( - { - 'foo': {'image': 'busybox'}, - 'bar': {'environment': ['FOO=1']}, - }, - 'working_dir', - 'filename.yml' - ) - ) - - self.assertEqual( - sorted(service_dicts, key=lambda d: d['name']), - sorted([ - { - 'name': 'bar', - 'environment': {'FOO': '1'}, - }, - { - 'name': 'foo', - 'image': 'busybox', - } - ]) - ) - - def test_load_throws_error_when_not_dict(self): - with self.assertRaises(config.ConfigurationError): - config.load( - config.ConfigDetails( - {'web': 'busybox:latest'}, - 'working_dir', - 'filename.yml' - ) - ) - - def test_config_validation(self): - self.assertRaises( - config.ConfigurationError, - lambda: make_service_dict('foo', {'port': ['8000']}, 'tests/') - ) - make_service_dict('foo', {'ports': ['8000']}, 'tests/') - - -class VolumePathTest(unittest.TestCase): - @mock.patch.dict(os.environ) - def test_volume_binding_with_environ(self): - os.environ['VOLUME_PATH'] = '/host/path' - d = make_service_dict('foo', {'volumes': ['${VOLUME_PATH}:/container/path']}, working_dir='.') - self.assertEqual(d['volumes'], ['/host/path:/container/path']) - - @mock.patch.dict(os.environ) - def test_volume_binding_with_home(self): - os.environ['HOME'] = '/home/user' - d = make_service_dict('foo', {'volumes': ['~:/container/path']}, working_dir='.') - self.assertEqual(d['volumes'], ['/home/user:/container/path']) - - @mock.patch.dict(os.environ) - def test_volume_binding_with_local_dir_name_raises_warning(self): - def make_dict(**config): - make_service_dict('foo', config, working_dir='.') - - with mock.patch('compose.config.log.warn') as warn: - make_dict(volumes=['/container/path']) - 
self.assertEqual(0, warn.call_count) - - make_dict(volumes=['/data:/container/path']) - self.assertEqual(0, warn.call_count) - - make_dict(volumes=['.:/container/path']) - self.assertEqual(0, warn.call_count) - - make_dict(volumes=['..:/container/path']) - self.assertEqual(0, warn.call_count) - - make_dict(volumes=['./data:/container/path']) - self.assertEqual(0, warn.call_count) - - make_dict(volumes=['../data:/container/path']) - self.assertEqual(0, warn.call_count) - - make_dict(volumes=['.profile:/container/path']) - self.assertEqual(0, warn.call_count) - - make_dict(volumes=['~:/container/path']) - self.assertEqual(0, warn.call_count) - - make_dict(volumes=['~/data:/container/path']) - self.assertEqual(0, warn.call_count) - - make_dict(volumes=['~tmp:/container/path']) - self.assertEqual(0, warn.call_count) - - make_dict(volumes=['data:/container/path'], volume_driver='mydriver') - self.assertEqual(0, warn.call_count) - - make_dict(volumes=['data:/container/path']) - self.assertEqual(1, warn.call_count) - warning = warn.call_args[0][0] - self.assertIn('"data:/container/path"', warning) - self.assertIn('"./data:/container/path"', warning) - - def test_named_volume_with_driver_does_not_expand(self): - d = make_service_dict('foo', { - 'volumes': ['namedvolume:/data'], - 'volume_driver': 'foodriver', - }, working_dir='.') - self.assertEqual(d['volumes'], ['namedvolume:/data']) - - @mock.patch.dict(os.environ) - def test_named_volume_with_special_chars(self): - os.environ['NAME'] = 'surprise!' 
- d = make_service_dict('foo', { - 'volumes': ['~/${NAME}:/data'], - 'volume_driver': 'foodriver', - }, working_dir='.') - self.assertEqual(d['volumes'], ['~/${NAME}:/data']) - - -class MergePathMappingTest(object): - def config_name(self): - return "" - - def test_empty(self): - service_dict = config.merge_service_dicts({}, {}) - self.assertNotIn(self.config_name(), service_dict) - - def test_no_override(self): - service_dict = config.merge_service_dicts( - {self.config_name(): ['/foo:/code', '/data']}, - {}, - ) - self.assertEqual(set(service_dict[self.config_name()]), set(['/foo:/code', '/data'])) - - def test_no_base(self): - service_dict = config.merge_service_dicts( - {}, - {self.config_name(): ['/bar:/code']}, - ) - self.assertEqual(set(service_dict[self.config_name()]), set(['/bar:/code'])) - - def test_override_explicit_path(self): - service_dict = config.merge_service_dicts( - {self.config_name(): ['/foo:/code', '/data']}, - {self.config_name(): ['/bar:/code']}, - ) - self.assertEqual(set(service_dict[self.config_name()]), set(['/bar:/code', '/data'])) - - def test_add_explicit_path(self): - service_dict = config.merge_service_dicts( - {self.config_name(): ['/foo:/code', '/data']}, - {self.config_name(): ['/bar:/code', '/quux:/data']}, - ) - self.assertEqual(set(service_dict[self.config_name()]), set(['/bar:/code', '/quux:/data'])) - - def test_remove_explicit_path(self): - service_dict = config.merge_service_dicts( - {self.config_name(): ['/foo:/code', '/quux:/data']}, - {self.config_name(): ['/bar:/code', '/data']}, - ) - self.assertEqual(set(service_dict[self.config_name()]), set(['/bar:/code', '/data'])) - - -class MergeVolumesTest(unittest.TestCase, MergePathMappingTest): - def config_name(self): - return 'volumes' - - -class MergeDevicesTest(unittest.TestCase, MergePathMappingTest): - def config_name(self): - return 'devices' - - -class BuildOrImageMergeTest(unittest.TestCase): - def test_merge_build_or_image_no_override(self): - self.assertEqual( - 
config.merge_service_dicts({'build': '.'}, {}), - {'build': '.'}, - ) - - self.assertEqual( - config.merge_service_dicts({'image': 'redis'}, {}), - {'image': 'redis'}, - ) - - def test_merge_build_or_image_override_with_same(self): - self.assertEqual( - config.merge_service_dicts({'build': '.'}, {'build': './web'}), - {'build': './web'}, - ) - - self.assertEqual( - config.merge_service_dicts({'image': 'redis'}, {'image': 'postgres'}), - {'image': 'postgres'}, - ) - - def test_merge_build_or_image_override_with_other(self): - self.assertEqual( - config.merge_service_dicts({'build': '.'}, {'image': 'redis'}), - {'image': 'redis'} - ) - - self.assertEqual( - config.merge_service_dicts({'image': 'redis'}, {'build': '.'}), - {'build': '.'} - ) - - -class MergeListsTest(unittest.TestCase): - def test_empty(self): - service_dict = config.merge_service_dicts({}, {}) - self.assertNotIn('ports', service_dict) - - def test_no_override(self): - service_dict = config.merge_service_dicts( - {'ports': ['10:8000', '9000']}, - {}, - ) - self.assertEqual(set(service_dict['ports']), set(['10:8000', '9000'])) - - def test_no_base(self): - service_dict = config.merge_service_dicts( - {}, - {'ports': ['10:8000', '9000']}, - ) - self.assertEqual(set(service_dict['ports']), set(['10:8000', '9000'])) - - def test_add_item(self): - service_dict = config.merge_service_dicts( - {'ports': ['10:8000', '9000']}, - {'ports': ['20:8000']}, - ) - self.assertEqual(set(service_dict['ports']), set(['10:8000', '9000', '20:8000'])) - - -class MergeStringsOrListsTest(unittest.TestCase): - def test_no_override(self): - service_dict = config.merge_service_dicts( - {'dns': '8.8.8.8'}, - {}, - ) - self.assertEqual(set(service_dict['dns']), set(['8.8.8.8'])) - - def test_no_base(self): - service_dict = config.merge_service_dicts( - {}, - {'dns': '8.8.8.8'}, - ) - self.assertEqual(set(service_dict['dns']), set(['8.8.8.8'])) - - def test_add_string(self): - service_dict = config.merge_service_dicts( - {'dns': 
['8.8.8.8']}, - {'dns': '9.9.9.9'}, - ) - self.assertEqual(set(service_dict['dns']), set(['8.8.8.8', '9.9.9.9'])) - - def test_add_list(self): - service_dict = config.merge_service_dicts( - {'dns': '8.8.8.8'}, - {'dns': ['9.9.9.9']}, - ) - self.assertEqual(set(service_dict['dns']), set(['8.8.8.8', '9.9.9.9'])) - - -class MergeLabelsTest(unittest.TestCase): - def test_empty(self): - service_dict = config.merge_service_dicts({}, {}) - self.assertNotIn('labels', service_dict) - - def test_no_override(self): - service_dict = config.merge_service_dicts( - make_service_dict('foo', {'labels': ['foo=1', 'bar']}, 'tests/'), - make_service_dict('foo', {}, 'tests/'), - ) - self.assertEqual(service_dict['labels'], {'foo': '1', 'bar': ''}) - - def test_no_base(self): - service_dict = config.merge_service_dicts( - make_service_dict('foo', {}, 'tests/'), - make_service_dict('foo', {'labels': ['foo=2']}, 'tests/'), - ) - self.assertEqual(service_dict['labels'], {'foo': '2'}) - - def test_override_explicit_value(self): - service_dict = config.merge_service_dicts( - make_service_dict('foo', {'labels': ['foo=1', 'bar']}, 'tests/'), - make_service_dict('foo', {'labels': ['foo=2']}, 'tests/'), - ) - self.assertEqual(service_dict['labels'], {'foo': '2', 'bar': ''}) - - def test_add_explicit_value(self): - service_dict = config.merge_service_dicts( - make_service_dict('foo', {'labels': ['foo=1', 'bar']}, 'tests/'), - make_service_dict('foo', {'labels': ['bar=2']}, 'tests/'), - ) - self.assertEqual(service_dict['labels'], {'foo': '1', 'bar': '2'}) - - def test_remove_explicit_value(self): - service_dict = config.merge_service_dicts( - make_service_dict('foo', {'labels': ['foo=1', 'bar=2']}, 'tests/'), - make_service_dict('foo', {'labels': ['bar']}, 'tests/'), - ) - self.assertEqual(service_dict['labels'], {'foo': '1', 'bar': ''}) - - -class MemoryOptionsTest(unittest.TestCase): - def test_validation_fails_with_just_memswap_limit(self): - """ - When you set a 'memswap_limit' it is invalid 
config unless you also set - a mem_limit - """ - with self.assertRaises(config.ConfigurationError): - make_service_dict( - 'foo', { - 'memswap_limit': 2000000, - }, - 'tests/' - ) - - def test_validation_with_correct_memswap_values(self): - service_dict = make_service_dict( - 'foo', { - 'mem_limit': 1000000, - 'memswap_limit': 2000000, - }, - 'tests/' - ) - self.assertEqual(service_dict['memswap_limit'], 2000000) - - -class EnvTest(unittest.TestCase): - def test_parse_environment_as_list(self): - environment = [ - 'NORMAL=F1', - 'CONTAINS_EQUALS=F=2', - 'TRAILING_EQUALS=', - ] - self.assertEqual( - config.parse_environment(environment), - {'NORMAL': 'F1', 'CONTAINS_EQUALS': 'F=2', 'TRAILING_EQUALS': ''}, - ) - - def test_parse_environment_as_dict(self): - environment = { - 'NORMAL': 'F1', - 'CONTAINS_EQUALS': 'F=2', - 'TRAILING_EQUALS': None, - } - self.assertEqual(config.parse_environment(environment), environment) - - def test_parse_environment_invalid(self): - with self.assertRaises(config.ConfigurationError): - config.parse_environment('a=b') - - def test_parse_environment_empty(self): - self.assertEqual(config.parse_environment(None), {}) - - @mock.patch.dict(os.environ) - def test_resolve_environment(self): - os.environ['FILE_DEF'] = 'E1' - os.environ['FILE_DEF_EMPTY'] = 'E2' - os.environ['ENV_DEF'] = 'E3' - - service_dict = make_service_dict( - 'foo', { - 'environment': { - 'FILE_DEF': 'F1', - 'FILE_DEF_EMPTY': '', - 'ENV_DEF': None, - 'NO_DEF': None - }, - }, - 'tests/' - ) - - self.assertEqual( - service_dict['environment'], - {'FILE_DEF': 'F1', 'FILE_DEF_EMPTY': '', 'ENV_DEF': 'E3', 'NO_DEF': ''}, - ) - - def test_env_from_file(self): - service_dict = make_service_dict( - 'foo', - {'env_file': 'one.env'}, - 'tests/fixtures/env', - ) - self.assertEqual( - service_dict['environment'], - {'ONE': '2', 'TWO': '1', 'THREE': '3', 'FOO': 'bar'}, - ) - - def test_env_from_multiple_files(self): - service_dict = make_service_dict( - 'foo', - {'env_file': ['one.env', 
'two.env']}, - 'tests/fixtures/env', - ) - self.assertEqual( - service_dict['environment'], - {'ONE': '2', 'TWO': '1', 'THREE': '3', 'FOO': 'baz', 'DOO': 'dah'}, - ) - - def test_env_nonexistent_file(self): - options = {'env_file': 'nonexistent.env'} - self.assertRaises( - config.ConfigurationError, - lambda: make_service_dict('foo', options, 'tests/fixtures/env'), - ) - - @mock.patch.dict(os.environ) - def test_resolve_environment_from_file(self): - os.environ['FILE_DEF'] = 'E1' - os.environ['FILE_DEF_EMPTY'] = 'E2' - os.environ['ENV_DEF'] = 'E3' - service_dict = make_service_dict( - 'foo', - {'env_file': 'resolve.env'}, - 'tests/fixtures/env', - ) - self.assertEqual( - service_dict['environment'], - {'FILE_DEF': 'F1', 'FILE_DEF_EMPTY': '', 'ENV_DEF': 'E3', 'NO_DEF': ''}, - ) - - @mock.patch.dict(os.environ) - def test_resolve_path(self): - os.environ['HOSTENV'] = '/tmp' - os.environ['CONTAINERENV'] = '/host/tmp' - - service_dict = make_service_dict( - 'foo', - {'volumes': ['$HOSTENV:$CONTAINERENV']}, - working_dir="tests/fixtures/env" - ) - self.assertEqual(set(service_dict['volumes']), set(['/tmp:/host/tmp'])) - - service_dict = make_service_dict( - 'foo', - {'volumes': ['/opt${HOSTENV}:/opt${CONTAINERENV}']}, - working_dir="tests/fixtures/env" - ) - self.assertEqual(set(service_dict['volumes']), set(['/opt/tmp:/opt/host/tmp'])) - - -def load_from_filename(filename): - return config.load(config.find('.', filename)) - - -class ExtendsTest(unittest.TestCase): - def test_extends(self): - service_dicts = load_from_filename('tests/fixtures/extends/docker-compose.yml') - - service_dicts = sorted( - service_dicts, - key=lambda sd: sd['name'], - ) - - self.assertEqual(service_dicts, [ - { - 'name': 'mydb', - 'image': 'busybox', - 'command': 'top', - }, - { - 'name': 'myweb', - 'image': 'busybox', - 'command': 'top', - 'links': ['mydb:db'], - 'environment': { - "FOO": "1", - "BAR": "2", - "BAZ": "2", - }, - } - ]) - - def test_nested(self): - service_dicts = 
load_from_filename('tests/fixtures/extends/nested.yml') - - self.assertEqual(service_dicts, [ - { - 'name': 'myweb', - 'image': 'busybox', - 'command': '/bin/true', - 'environment': { - "FOO": "2", - "BAR": "2", - }, - }, - ]) - - def test_self_referencing_file(self): - """ - We specify a 'file' key that is the filename we're already in. - """ - service_dicts = load_from_filename('tests/fixtures/extends/specify-file-as-self.yml') - self.assertEqual(sorted(service_dicts), sorted([ - { - 'environment': - { - 'YEP': '1', 'BAR': '1', 'BAZ': '3' - }, - 'image': 'busybox', - 'name': 'myweb' - }, - { - 'environment': - {'YEP': '1'}, - 'name': 'otherweb' - }, - { - 'environment': - {'YEP': '1', 'BAZ': '3'}, - 'image': 'busybox', - 'name': 'web' - } - ])) - - def test_circular(self): - try: - load_from_filename('tests/fixtures/extends/circle-1.yml') - raise Exception("Expected config.CircularReference to be raised") - except config.CircularReference as e: - self.assertEqual( - [(os.path.basename(filename), service_name) for (filename, service_name) in e.trail], - [ - ('circle-1.yml', 'web'), - ('circle-2.yml', 'web'), - ('circle-1.yml', 'web'), - ], - ) - - def test_extends_validation_empty_dictionary(self): - dictionary = {'extends': None} - - def load_config(): - return make_service_dict('myweb', dictionary, working_dir='tests/fixtures/extends') - - self.assertRaisesRegexp(config.ConfigurationError, 'dictionary', load_config) - - dictionary['extends'] = {} - self.assertRaises(config.ConfigurationError, load_config) - - def test_extends_validation_missing_service_key(self): - dictionary = {'extends': {'file': 'common.yml'}} - - def load_config(): - return make_service_dict('myweb', dictionary, working_dir='tests/fixtures/extends') - - self.assertRaisesRegexp(config.ConfigurationError, 'service', load_config) - - def test_extends_validation_invalid_key(self): - dictionary = { - 'extends': - { - 'service': 'web', 'file': 'common.yml', 'what': 'is this' - } - } - - def 
load_config(): - return make_service_dict('myweb', dictionary, working_dir='tests/fixtures/extends') - - self.assertRaisesRegexp(config.ConfigurationError, 'what', load_config) - - def test_extends_validation_no_file_key_no_filename_set(self): - dictionary = {'extends': {'service': 'web'}} - - def load_config(): - return make_service_dict('myweb', dictionary, working_dir='tests/fixtures/extends') - - self.assertRaisesRegexp(config.ConfigurationError, 'file', load_config) - - def test_extends_validation_valid_config(self): - dictionary = {'extends': {'service': 'web', 'file': 'common.yml'}} - - def load_config(): - return make_service_dict('myweb', dictionary, working_dir='tests/fixtures/extends') - - self.assertIsInstance(load_config(), dict) - - def test_extends_file_defaults_to_self(self): - """ - Test not specifying a file in our extends options that the - config is valid and correctly extends from itself. - """ - service_dicts = load_from_filename('tests/fixtures/extends/no-file-specified.yml') - self.assertEqual(service_dicts, [ - { - 'name': 'myweb', - 'image': 'busybox', - 'environment': { - "BAR": "1", - "BAZ": "3", - } - }, - { - 'name': 'web', - 'image': 'busybox', - 'environment': { - "BAZ": "3", - } - } - ]) - - def test_blacklisted_options(self): - def load_config(): - return make_service_dict('myweb', { - 'extends': { - 'file': 'whatever', - 'service': 'web', - } - }, '.') - - with self.assertRaisesRegexp(config.ConfigurationError, 'links'): - other_config = {'web': {'links': ['db']}} - - with mock.patch.object(config, 'load_yaml', return_value=other_config): - print load_config() - - with self.assertRaisesRegexp(config.ConfigurationError, 'volumes_from'): - other_config = {'web': {'volumes_from': ['db']}} - - with mock.patch.object(config, 'load_yaml', return_value=other_config): - print load_config() - - with self.assertRaisesRegexp(config.ConfigurationError, 'net'): - other_config = {'web': {'net': 'container:db'}} - - with 
mock.patch.object(config, 'load_yaml', return_value=other_config): - print load_config() - - other_config = {'web': {'net': 'host'}} - - with mock.patch.object(config, 'load_yaml', return_value=other_config): - print load_config() - - def test_volume_path(self): - dicts = load_from_filename('tests/fixtures/volume-path/docker-compose.yml') - - paths = [ - '%s:/foo' % os.path.abspath('tests/fixtures/volume-path/common/foo'), - '%s:/bar' % os.path.abspath('tests/fixtures/volume-path/bar'), - ] - - self.assertEqual(set(dicts[0]['volumes']), set(paths)) - - def test_parent_build_path_dne(self): - child = load_from_filename('tests/fixtures/extends/nonexistent-path-child.yml') - - self.assertEqual(child, [ - { - 'name': 'dnechild', - 'image': 'busybox', - 'command': '/bin/true', - 'environment': { - "FOO": "1", - "BAR": "2", - }, - }, - ]) - - -class BuildPathTest(unittest.TestCase): - def setUp(self): - self.abs_context_path = os.path.join(os.getcwd(), 'tests/fixtures/build-ctx') - - def test_nonexistent_path(self): - with self.assertRaises(config.ConfigurationError): - config.load( - config.ConfigDetails( - { - 'foo': {'build': 'nonexistent.path'}, - }, - 'working_dir', - 'filename.yml' - ) - ) - - def test_relative_path(self): - relative_build_path = '../build-ctx/' - service_dict = make_service_dict( - 'relpath', - {'build': relative_build_path}, - working_dir='tests/fixtures/build-path' - ) - self.assertEquals(service_dict['build'], self.abs_context_path) - - def test_absolute_path(self): - service_dict = make_service_dict( - 'abspath', - {'build': self.abs_context_path}, - working_dir='tests/fixtures/build-path' - ) - self.assertEquals(service_dict['build'], self.abs_context_path) - - def test_from_file(self): - service_dict = load_from_filename('tests/fixtures/build-path/docker-compose.yml') - self.assertEquals(service_dict, [{'name': 'foo', 'build': self.abs_context_path}]) - - -class GetConfigPathTestCase(unittest.TestCase): - - files = [ - 'docker-compose.yml', 
- 'docker-compose.yaml', - 'fig.yml', - 'fig.yaml', - ] - - def test_get_config_path_default_file_in_basedir(self): - files = self.files - self.assertEqual('docker-compose.yml', get_config_filename_for_files(files[0:])) - self.assertEqual('docker-compose.yaml', get_config_filename_for_files(files[1:])) - self.assertEqual('fig.yml', get_config_filename_for_files(files[2:])) - self.assertEqual('fig.yaml', get_config_filename_for_files(files[3:])) - with self.assertRaises(config.ComposeFileNotFound): - get_config_filename_for_files([]) - - def test_get_config_path_default_file_in_parent_dir(self): - """Test with files placed in the subdir""" - files = self.files - - def get_config_in_subdir(files): - return get_config_filename_for_files(files, subdir=True) - - self.assertEqual('docker-compose.yml', get_config_in_subdir(files[0:])) - self.assertEqual('docker-compose.yaml', get_config_in_subdir(files[1:])) - self.assertEqual('fig.yml', get_config_in_subdir(files[2:])) - self.assertEqual('fig.yaml', get_config_in_subdir(files[3:])) - with self.assertRaises(config.ComposeFileNotFound): - get_config_in_subdir([]) - - -def get_config_filename_for_files(filenames, subdir=None): - def make_files(dirname, filenames): - for fname in filenames: - with open(os.path.join(dirname, fname), 'w') as f: - f.write('') - - project_dir = tempfile.mkdtemp() - try: - make_files(project_dir, filenames) - if subdir: - base_dir = tempfile.mkdtemp(dir=project_dir) - else: - base_dir = project_dir - return os.path.basename(config.get_config_path(base_dir)) - finally: - shutil.rmtree(project_dir) diff --git a/tests/unit/container_test.py b/tests/unit/container_test.py index c537a8cf55..5f7bf1ea7e 100644 --- a/tests/unit/container_test.py +++ b/tests/unit/container_test.py @@ -1,9 +1,9 @@ from __future__ import unicode_literals -from .. import unittest -import mock import docker +from .. import mock +from .. 
import unittest from compose.container import Container from compose.container import get_container_name @@ -83,9 +83,15 @@ class ContainerTest(unittest.TestCase): self.assertEqual(container.name, "composetest_db_1") def test_name_without_project(self): + self.container_dict['Name'] = "/composetest_web_7" container = Container(None, self.container_dict, has_been_inspected=True) self.assertEqual(container.name_without_project, "web_7") + def test_name_without_project_custom_container_name(self): + self.container_dict['Name'] = "/custom_name_of_container" + container = Container(None, self.container_dict, has_been_inspected=True) + self.assertEqual(container.name_without_project, "custom_name_of_container") + def test_inspect_if_not_inspected(self): mock_client = mock.create_autospec(docker.Client) container = Container(mock_client, dict(Id="the_id")) @@ -142,4 +148,12 @@ class GetContainerNameTestCase(unittest.TestCase): self.assertIsNone(get_container_name({})) self.assertEqual(get_container_name({'Name': 'myproject_db_1'}), 'myproject_db_1') self.assertEqual(get_container_name({'Names': ['/myproject_db_1', '/myproject_web_1/db']}), 'myproject_db_1') - self.assertEqual(get_container_name({'Names': ['/swarm-host-1/myproject_db_1', '/swarm-host-1/myproject_web_1/db']}), 'myproject_db_1') + self.assertEqual( + get_container_name({ + 'Names': [ + '/swarm-host-1/myproject_db_1', + '/swarm-host-1/myproject_web_1/db' + ] + }), + 'myproject_db_1' + ) diff --git a/tests/unit/interpolation_test.py b/tests/unit/interpolation_test.py new file mode 100644 index 0000000000..7444884cb8 --- /dev/null +++ b/tests/unit/interpolation_test.py @@ -0,0 +1,33 @@ +import unittest + +from compose.config.interpolation import BlankDefaultDict as bddict +from compose.config.interpolation import interpolate +from compose.config.interpolation import InvalidInterpolation + + +class InterpolationTest(unittest.TestCase): + def test_valid_interpolations(self): + self.assertEqual(interpolate('$foo', 
bddict(foo='hi')), 'hi') + self.assertEqual(interpolate('${foo}', bddict(foo='hi')), 'hi') + + self.assertEqual(interpolate('${subject} love you', bddict(subject='i')), 'i love you') + self.assertEqual(interpolate('i ${verb} you', bddict(verb='love')), 'i love you') + self.assertEqual(interpolate('i love ${object}', bddict(object='you')), 'i love you') + + def test_empty_value(self): + self.assertEqual(interpolate('${foo}', bddict(foo='')), '') + + def test_unset_value(self): + self.assertEqual(interpolate('${foo}', bddict()), '') + + def test_escaped_interpolation(self): + self.assertEqual(interpolate('$${foo}', bddict(foo='hi')), '${foo}') + + def test_invalid_strings(self): + self.assertRaises(InvalidInterpolation, lambda: interpolate('${', bddict())) + self.assertRaises(InvalidInterpolation, lambda: interpolate('$}', bddict())) + self.assertRaises(InvalidInterpolation, lambda: interpolate('${}', bddict())) + self.assertRaises(InvalidInterpolation, lambda: interpolate('${ }', bddict())) + self.assertRaises(InvalidInterpolation, lambda: interpolate('${ foo}', bddict())) + self.assertRaises(InvalidInterpolation, lambda: interpolate('${foo }', bddict())) + self.assertRaises(InvalidInterpolation, lambda: interpolate('${foo!}', bddict())) diff --git a/tests/unit/log_printer_test.py b/tests/unit/log_printer_test.py deleted file mode 100644 index e40a1f75da..0000000000 --- a/tests/unit/log_printer_test.py +++ /dev/null @@ -1,69 +0,0 @@ -from __future__ import unicode_literals -from __future__ import absolute_import -import os - -from compose.cli.log_printer import LogPrinter -from .. 
import unittest - - -class LogPrinterTest(unittest.TestCase): - def get_default_output(self, monochrome=False): - def reader(*args, **kwargs): - yield "hello\nworld" - - container = MockContainer(reader) - output = run_log_printer([container], monochrome=monochrome) - return output - - def test_single_container(self): - output = self.get_default_output() - - self.assertIn('hello', output) - self.assertIn('world', output) - - def test_monochrome(self): - output = self.get_default_output(monochrome=True) - self.assertNotIn('\033[', output) - - def test_polychrome(self): - output = self.get_default_output() - self.assertIn('\033[', output) - - def test_unicode(self): - glyph = u'\u2022'.encode('utf-8') - - def reader(*args, **kwargs): - yield glyph + b'\n' - - container = MockContainer(reader) - output = run_log_printer([container]) - - self.assertIn(glyph, output) - - -def run_log_printer(containers, monochrome=False): - r, w = os.pipe() - reader, writer = os.fdopen(r, 'r'), os.fdopen(w, 'w') - printer = LogPrinter(containers, output=writer, monochrome=monochrome) - printer.run() - writer.close() - return reader.read() - - -class MockContainer(object): - def __init__(self, reader): - self._reader = reader - - @property - def name(self): - return 'myapp_web_1' - - @property - def name_without_project(self): - return 'web_1' - - def attach(self, *args, **kwargs): - return self._reader() - - def wait(self, *args, **kwargs): - return 0 diff --git a/tests/unit/multiplexer_test.py b/tests/unit/multiplexer_test.py new file mode 100644 index 0000000000..d565d39d1b --- /dev/null +++ b/tests/unit/multiplexer_test.py @@ -0,0 +1,45 @@ +import unittest + +from compose.cli.multiplexer import Multiplexer + + +class MultiplexerTest(unittest.TestCase): + def test_no_iterators(self): + mux = Multiplexer([]) + self.assertEqual([], list(mux.loop())) + + def test_empty_iterators(self): + mux = Multiplexer([ + (x for x in []), + (x for x in []), + ]) + + self.assertEqual([], 
list(mux.loop())) + + def test_aggregates_output(self): + mux = Multiplexer([ + (x for x in [0, 2, 4]), + (x for x in [1, 3, 5]), + ]) + + self.assertEqual( + [0, 1, 2, 3, 4, 5], + sorted(list(mux.loop())), + ) + + def test_exception(self): + class Problem(Exception): + pass + + def problematic_iterator(): + yield 0 + yield 2 + raise Problem(":(") + + mux = Multiplexer([ + problematic_iterator(), + (x for x in [1, 3, 5]), + ]) + + with self.assertRaises(Problem): + list(mux.loop()) diff --git a/tests/unit/progress_stream_test.py b/tests/unit/progress_stream_test.py index 317b77e9f2..d8f7ec8363 100644 --- a/tests/unit/progress_stream_test.py +++ b/tests/unit/progress_stream_test.py @@ -1,37 +1,36 @@ -from __future__ import unicode_literals from __future__ import absolute_import -from tests import unittest +from __future__ import unicode_literals from six import StringIO from compose import progress_stream +from tests import unittest class ProgressStreamTestCase(unittest.TestCase): - def test_stream_output(self): output = [ - '{"status": "Downloading", "progressDetail": {"current": ' - '31019763, "start": 1413653874, "total": 62763875}, ' - '"progress": "..."}', + b'{"status": "Downloading", "progressDetail": {"current": ' + b'31019763, "start": 1413653874, "total": 62763875}, ' + b'"progress": "..."}', ] events = progress_stream.stream_output(output, StringIO()) self.assertEqual(len(events), 1) def test_stream_output_div_zero(self): output = [ - '{"status": "Downloading", "progressDetail": {"current": ' - '0, "start": 1413653874, "total": 0}, ' - '"progress": "..."}', + b'{"status": "Downloading", "progressDetail": {"current": ' + b'0, "start": 1413653874, "total": 0}, ' + b'"progress": "..."}', ] events = progress_stream.stream_output(output, StringIO()) self.assertEqual(len(events), 1) def test_stream_output_null_total(self): output = [ - '{"status": "Downloading", "progressDetail": {"current": ' - '0, "start": 1413653874, "total": null}, ' - '"progress": "..."}', 
+ b'{"status": "Downloading", "progressDetail": {"current": ' + b'0, "start": 1413653874, "total": null}, ' + b'"progress": "..."}', ] events = progress_stream.stream_output(output, StringIO()) self.assertEqual(len(events), 1) diff --git a/tests/unit/project_test.py b/tests/unit/project_test.py index a66aaf5d27..fc189fbb15 100644 --- a/tests/unit/project_test.py +++ b/tests/unit/project_test.py @@ -1,13 +1,14 @@ from __future__ import unicode_literals -from .. import unittest -from compose.service import Service -from compose.project import Project -from compose.container import Container -from compose.const import LABEL_SERVICE -import mock import docker +from .. import mock +from .. import unittest +from compose.const import LABEL_SERVICE +from compose.container import Container +from compose.project import Project +from compose.service import Service + class ProjectTest(unittest.TestCase): def setUp(self): @@ -167,7 +168,7 @@ class ProjectTest(unittest.TestCase): 'volumes_from': ['aaa'] } ], self.mock_client) - self.assertEqual(project.get_service('test')._get_volumes_from(), [container_id]) + self.assertEqual(project.get_service('test')._get_volumes_from(), [container_id + ":rw"]) def test_use_volumes_from_service_no_container(self): container_name = 'test_vol_1' @@ -190,7 +191,7 @@ class ProjectTest(unittest.TestCase): 'volumes_from': ['vol'] } ], self.mock_client) - self.assertEqual(project.get_service('test')._get_volumes_from(), [container_name]) + self.assertEqual(project.get_service('test')._get_volumes_from(), [container_name + ":rw"]) @mock.patch.object(Service, 'containers') def test_use_volumes_from_service_container(self, mock_return): @@ -210,7 +211,7 @@ class ProjectTest(unittest.TestCase): 'volumes_from': ['vol'] } ], None) - self.assertEqual(project.get_service('test')._get_volumes_from(), container_ids) + self.assertEqual(project.get_service('test')._get_volumes_from(), [container_ids[0] + ':rw']) def test_net_unset(self): project = 
Project.from_dicts('test', [ diff --git a/tests/unit/service_test.py b/tests/unit/service_test.py index 263c9b329d..d86f80f730 100644 --- a/tests/unit/service_test.py +++ b/tests/unit/service_test.py @@ -1,31 +1,30 @@ -from __future__ import unicode_literals from __future__ import absolute_import - -from .. import unittest -import mock +from __future__ import unicode_literals import docker -from docker.utils import LogConfig +import pytest +from .. import mock +from .. import unittest +from compose.const import IS_WINDOWS_PLATFORM from compose.const import LABEL_CONFIG_HASH from compose.const import LABEL_ONE_OFF from compose.const import LABEL_PROJECT from compose.const import LABEL_SERVICE from compose.container import Container +from compose.service import build_volume_binding from compose.service import ConfigError from compose.service import ContainerNet +from compose.service import get_container_data_volumes +from compose.service import merge_volume_bindings from compose.service import NeedsBuildError from compose.service import Net from compose.service import NoSuchImageError -from compose.service import Service -from compose.service import ServiceNet -from compose.service import build_port_bindings -from compose.service import build_volume_binding -from compose.service import get_container_data_volumes -from compose.service import merge_volume_bindings from compose.service import parse_repository_tag from compose.service import parse_volume_spec -from compose.service import split_port +from compose.service import Service +from compose.service import ServiceNet +from compose.service import VolumeFromSpec class ServiceTest(unittest.TestCase): @@ -33,27 +32,7 @@ class ServiceTest(unittest.TestCase): def setUp(self): self.mock_client = mock.create_autospec(docker.Client) - def test_name_validations(self): - self.assertRaises(ConfigError, lambda: Service(name='', image='foo')) - - self.assertRaises(ConfigError, lambda: Service(name=' ', image='foo')) - 
self.assertRaises(ConfigError, lambda: Service(name='/', image='foo')) - self.assertRaises(ConfigError, lambda: Service(name='!', image='foo')) - self.assertRaises(ConfigError, lambda: Service(name='\xe2', image='foo')) - - Service('a', image='foo') - Service('foo', image='foo') - Service('foo-bar', image='foo') - Service('foo.bar', image='foo') - Service('foo_bar', image='foo') - Service('_', image='foo') - Service('___', image='foo') - Service('-', image='foo') - Service('--', image='foo') - Service('.__.', image='foo') - def test_project_validation(self): - self.assertRaises(ConfigError, lambda: Service('bar')) self.assertRaises(ConfigError, lambda: Service(name='foo', project='>', image='foo')) Service(name='foo', project='bar.bar__', image='foo') @@ -61,14 +40,14 @@ class ServiceTest(unittest.TestCase): def test_containers(self): service = Service('db', self.mock_client, 'myproject', image='foo') self.mock_client.containers.return_value = [] - self.assertEqual(service.containers(), []) + self.assertEqual(list(service.containers()), []) def test_containers_with_containers(self): self.mock_client.containers.return_value = [ dict(Name=str(i), Image='foo', Id=i) for i in range(3) ] service = Service('db', self.mock_client, 'myproject', image='foo') - self.assertEqual([c.id for c in service.containers()], range(3)) + self.assertEqual([c.id for c in service.containers()], list(range(3))) expected_labels = [ '{0}=myproject'.format(LABEL_PROJECT), @@ -97,9 +76,18 @@ class ServiceTest(unittest.TestCase): service = Service( 'test', image='foo', - volumes_from=[mock.Mock(id=container_id, spec=Container)]) + volumes_from=[VolumeFromSpec(mock.Mock(id=container_id, spec=Container), 'rw')]) - self.assertEqual(service._get_volumes_from(), [container_id]) + self.assertEqual(service._get_volumes_from(), [container_id + ':rw']) + + def test_get_volumes_from_container_read_only(self): + container_id = 'aabbccddee' + service = Service( + 'test', + image='foo', + 
volumes_from=[VolumeFromSpec(mock.Mock(id=container_id, spec=Container), 'ro')]) + + self.assertEqual(service._get_volumes_from(), [container_id + ':ro']) def test_get_volumes_from_service_container_exists(self): container_ids = ['aabbccddee', '12345'] @@ -108,9 +96,21 @@ class ServiceTest(unittest.TestCase): mock.Mock(id=container_id, spec=Container) for container_id in container_ids ] - service = Service('test', volumes_from=[from_service], image='foo') + service = Service('test', volumes_from=[VolumeFromSpec(from_service, 'rw')], image='foo') - self.assertEqual(service._get_volumes_from(), container_ids) + self.assertEqual(service._get_volumes_from(), [container_ids[0] + ":rw"]) + + def test_get_volumes_from_service_container_exists_with_flags(self): + for mode in ['ro', 'rw', 'z', 'rw,z', 'z,rw']: + container_ids = ['aabbccddee:' + mode, '12345:' + mode] + from_service = mock.create_autospec(Service) + from_service.containers.return_value = [ + mock.Mock(id=container_id.split(':')[0], spec=Container) + for container_id in container_ids + ] + service = Service('test', volumes_from=[VolumeFromSpec(from_service, mode)], image='foo') + + self.assertEqual(service._get_volumes_from(), [container_ids[0]]) def test_get_volumes_from_service_no_container(self): container_id = 'abababab' @@ -119,76 +119,57 @@ class ServiceTest(unittest.TestCase): from_service.create_container.return_value = mock.Mock( id=container_id, spec=Container) - service = Service('test', image='foo', volumes_from=[from_service]) + service = Service('test', image='foo', volumes_from=[VolumeFromSpec(from_service, 'rw')]) - self.assertEqual(service._get_volumes_from(), [container_id]) + self.assertEqual(service._get_volumes_from(), [container_id + ':rw']) from_service.create_container.assert_called_once_with() - def test_split_port_with_host_ip(self): - internal_port, external_port = split_port("127.0.0.1:1000:2000") - self.assertEqual(internal_port, "2000") - self.assertEqual(external_port, 
("127.0.0.1", "1000")) - - def test_split_port_with_protocol(self): - internal_port, external_port = split_port("127.0.0.1:1000:2000/udp") - self.assertEqual(internal_port, "2000/udp") - self.assertEqual(external_port, ("127.0.0.1", "1000")) - - def test_split_port_with_host_ip_no_port(self): - internal_port, external_port = split_port("127.0.0.1::2000") - self.assertEqual(internal_port, "2000") - self.assertEqual(external_port, ("127.0.0.1", None)) - - def test_split_port_with_host_port(self): - internal_port, external_port = split_port("1000:2000") - self.assertEqual(internal_port, "2000") - self.assertEqual(external_port, "1000") - - def test_split_port_no_host_port(self): - internal_port, external_port = split_port("2000") - self.assertEqual(internal_port, "2000") - self.assertEqual(external_port, None) - - def test_split_port_invalid(self): - with self.assertRaises(ConfigError): - split_port("0.0.0.0:1000:2000:tcp") - - def test_build_port_bindings_with_one_port(self): - port_bindings = build_port_bindings(["127.0.0.1:1000:1000"]) - self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000")]) - - def test_build_port_bindings_with_matching_internal_ports(self): - port_bindings = build_port_bindings(["127.0.0.1:1000:1000", "127.0.0.1:2000:1000"]) - self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000"), ("127.0.0.1", "2000")]) - - def test_build_port_bindings_with_nonmatching_internal_ports(self): - port_bindings = build_port_bindings(["127.0.0.1:1000:1000", "127.0.0.1:2000:2000"]) - self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000")]) - self.assertEqual(port_bindings["2000"], [("127.0.0.1", "2000")]) - def test_split_domainname_none(self): service = Service('foo', image='foo', hostname='name', client=self.mock_client) - self.mock_client.containers.return_value = [] opts = service._get_container_create_options({'image': 'foo'}, 1) self.assertEqual(opts['hostname'], 'name', 'hostname') self.assertFalse('domainname' in opts, 'domainname') 
def test_memory_swap_limit(self): + self.mock_client.create_host_config.return_value = {} + service = Service(name='foo', image='foo', hostname='name', client=self.mock_client, mem_limit=1000000000, memswap_limit=2000000000) - self.mock_client.containers.return_value = [] - opts = service._get_container_create_options({'some': 'overrides'}, 1) - self.assertEqual(opts['host_config']['MemorySwap'], 2000000000) - self.assertEqual(opts['host_config']['Memory'], 1000000000) + service._get_container_create_options({'some': 'overrides'}, 1) + + self.assertTrue(self.mock_client.create_host_config.called) + self.assertEqual( + self.mock_client.create_host_config.call_args[1]['mem_limit'], + 1000000000 + ) + self.assertEqual( + self.mock_client.create_host_config.call_args[1]['memswap_limit'], + 2000000000 + ) + + def test_cgroup_parent(self): + self.mock_client.create_host_config.return_value = {} + + service = Service(name='foo', image='foo', hostname='name', client=self.mock_client, cgroup_parent='test') + service._get_container_create_options({'some': 'overrides'}, 1) + + self.assertTrue(self.mock_client.create_host_config.called) + self.assertEqual( + self.mock_client.create_host_config.call_args[1]['cgroup_parent'], + 'test' + ) def test_log_opt(self): - log_opt = {'address': 'tcp://192.168.0.42:123'} - service = Service(name='foo', image='foo', hostname='name', client=self.mock_client, log_driver='syslog', log_opt=log_opt) - self.mock_client.containers.return_value = [] - opts = service._get_container_create_options({'some': 'overrides'}, 1) + self.mock_client.create_host_config.return_value = {} - self.assertIsInstance(opts['host_config']['LogConfig'], LogConfig) - self.assertEqual(opts['host_config']['LogConfig'].type, 'syslog') - self.assertEqual(opts['host_config']['LogConfig'].config, log_opt) + log_opt = {'syslog-address': 'tcp://192.168.0.42:123'} + service = Service(name='foo', image='foo', hostname='name', client=self.mock_client, log_driver='syslog', 
log_opt=log_opt) + service._get_container_create_options({'some': 'overrides'}, 1) + + self.assertTrue(self.mock_client.create_host_config.called) + self.assertEqual( + self.mock_client.create_host_config.call_args[1]['log_config'], + {'Type': 'syslog', 'Config': {'syslog-address': 'tcp://192.168.0.42:123'}} + ) def test_split_domainname_fqdn(self): service = Service( @@ -196,7 +177,6 @@ class ServiceTest(unittest.TestCase): hostname='name.domain.tld', image='foo', client=self.mock_client) - self.mock_client.containers.return_value = [] opts = service._get_container_create_options({'image': 'foo'}, 1) self.assertEqual(opts['hostname'], 'name', 'hostname') self.assertEqual(opts['domainname'], 'domain.tld', 'domainname') @@ -208,7 +188,6 @@ class ServiceTest(unittest.TestCase): image='foo', domainname='domain.tld', client=self.mock_client) - self.mock_client.containers.return_value = [] opts = service._get_container_create_options({'image': 'foo'}, 1) self.assertEqual(opts['hostname'], 'name', 'hostname') self.assertEqual(opts['domainname'], 'domain.tld', 'domainname') @@ -220,11 +199,43 @@ class ServiceTest(unittest.TestCase): domainname='domain.tld', image='foo', client=self.mock_client) - self.mock_client.containers.return_value = [] opts = service._get_container_create_options({'image': 'foo'}, 1) self.assertEqual(opts['hostname'], 'name.sub', 'hostname') self.assertEqual(opts['domainname'], 'domain.tld', 'domainname') + def test_no_default_hostname_when_not_using_networking(self): + service = Service( + 'foo', + image='foo', + use_networking=False, + client=self.mock_client, + ) + opts = service._get_container_create_options({'image': 'foo'}, 1) + self.assertIsNone(opts.get('hostname')) + + def test_hostname_defaults_to_service_name_when_using_networking(self): + service = Service( + 'foo', + image='foo', + use_networking=True, + client=self.mock_client, + ) + opts = service._get_container_create_options({'image': 'foo'}, 1) + self.assertEqual(opts['hostname'], 
'foo') + + def test_get_container_create_options_with_name_option(self): + service = Service( + 'foo', + image='foo', + client=self.mock_client, + container_name='foo1') + name = 'the_new_name' + opts = service._get_container_create_options( + {'name': name}, + 1, + one_off=True) + self.assertEqual(opts['name'], name) + def test_get_container_create_options_does_not_mutate_options(self): labels = {'thing': 'real'} environment = {'also': 'real'} @@ -294,6 +305,16 @@ class ServiceTest(unittest.TestCase): tag='latest', stream=True) + @mock.patch('compose.service.log', autospec=True) + def test_pull_image_digest(self, mock_log): + service = Service('foo', client=self.mock_client, image='someimage@sha256:1234') + service.pull() + self.mock_client.pull.assert_called_once_with( + 'someimage', + tag='sha256:1234', + stream=True) + mock_log.info.assert_called_once_with('Pulling foo (someimage@sha256:1234)...') + @mock.patch('compose.service.Container', autospec=True) def test_recreate_container(self, _): mock_container = mock.create_autospec(Container) @@ -302,9 +323,7 @@ class ServiceTest(unittest.TestCase): new_container = service.recreate_container(mock_container) mock_container.stop.assert_called_once_with(timeout=10) - self.mock_client.rename.assert_called_once_with( - mock_container.id, - '%s_%s' % (mock_container.short_id, mock_container.name)) + mock_container.rename_to_tmp_name.assert_called_once_with() new_container.start.assert_called_once_with() mock_container.remove.assert_called_once_with() @@ -319,12 +338,16 @@ class ServiceTest(unittest.TestCase): mock_container.stop.assert_called_once_with(timeout=1) def test_parse_repository_tag(self): - self.assertEqual(parse_repository_tag("root"), ("root", "")) - self.assertEqual(parse_repository_tag("root:tag"), ("root", "tag")) - self.assertEqual(parse_repository_tag("user/repo"), ("user/repo", "")) - self.assertEqual(parse_repository_tag("user/repo:tag"), ("user/repo", "tag")) - 
self.assertEqual(parse_repository_tag("url:5000/repo"), ("url:5000/repo", "")) - self.assertEqual(parse_repository_tag("url:5000/repo:tag"), ("url:5000/repo", "tag")) + self.assertEqual(parse_repository_tag("root"), ("root", "", ":")) + self.assertEqual(parse_repository_tag("root:tag"), ("root", "tag", ":")) + self.assertEqual(parse_repository_tag("user/repo"), ("user/repo", "", ":")) + self.assertEqual(parse_repository_tag("user/repo:tag"), ("user/repo", "tag", ":")) + self.assertEqual(parse_repository_tag("url:5000/repo"), ("url:5000/repo", "", ":")) + self.assertEqual(parse_repository_tag("url:5000/repo:tag"), ("url:5000/repo", "tag", ":")) + + self.assertEqual(parse_repository_tag("root@sha256:digest"), ("root", "sha256:digest", "@")) + self.assertEqual(parse_repository_tag("user/repo@sha256:digest"), ("user/repo", "sha256:digest", "@")) + self.assertEqual(parse_repository_tag("url:5000/repo@sha256:digest"), ("url:5000/repo", "sha256:digest", "@")) @mock.patch('compose.service.Container', autospec=True) def test_create_container_latest_is_used_when_no_tag_specified(self, mock_container): @@ -369,7 +392,7 @@ class ServiceTest(unittest.TestCase): def test_build_does_not_pull(self): self.mock_client.build.return_value = [ - '{"stream": "Successfully built 12345"}', + b'{"stream": "Successfully built 12345"}', ] service = Service('foo', client=self.mock_client, build='.') @@ -384,9 +407,9 @@ class ServiceTest(unittest.TestCase): 'foo', image='example.com/foo', client=self.mock_client, - net=ServiceNet(Service('other', image='foo')), - links=[(Service('one', image='foo'), 'one')], - volumes_from=[Service('two', image='foo')]) + net=ServiceNet(Service('other')), + links=[(Service('one'), 'one')], + volumes_from=[VolumeFromSpec(Service('two'), 'rw')]) config_dict = service.config_dict() expected = { @@ -419,6 +442,14 @@ class ServiceTest(unittest.TestCase): } self.assertEqual(config_dict, expected) + def test_get_links_with_networking(self): + service = Service( + 
'foo', + image='foo', + links=[(Service('one'), 'one')], + use_networking=True) + self.assertEqual(service._get_links(link_to_self=True), []) + class NetTestCase(unittest.TestCase): @@ -443,7 +474,7 @@ class NetTestCase(unittest.TestCase): {'Id': container_id, 'Name': container_id, 'Image': 'abcd'}, ] - service = Service(name=service_name, client=mock_client, image='foo') + service = Service(name=service_name, client=mock_client) net = ServiceNet(service) self.assertEqual(net.id, service_name) @@ -455,7 +486,7 @@ class NetTestCase(unittest.TestCase): mock_client = mock.create_autospec(docker.Client) mock_client.containers.return_value = [] - service = Service(name=service_name, client=mock_client, image='foo') + service = Service(name=service_name, client=mock_client) net = ServiceNet(service) self.assertEqual(net.id, service_name) @@ -494,6 +525,21 @@ class ServiceVolumesTest(unittest.TestCase): with self.assertRaises(ConfigError): parse_volume_spec('one:two:three:four') + @pytest.mark.xfail((not IS_WINDOWS_PLATFORM), reason='does not have a drive') + def test_parse_volume_windows_absolute_path(self): + windows_absolute_path = "c:\\Users\\me\\Documents\\shiny\\config:\\opt\\shiny\\config:ro" + + spec = parse_volume_spec(windows_absolute_path) + + self.assertEqual( + spec, + ( + "/c/Users/me/Documents/shiny/config", + "/opt/shiny/config", + "ro" + ) + ) + def test_build_volume_binding(self): binding = build_volume_binding(parse_volume_spec('/outside:/inside')) self.assertEqual(binding, ('/inside', '/outside:/inside:rw')) @@ -574,13 +620,13 @@ class ServiceVolumesTest(unittest.TestCase): } } - create_options = service._get_container_create_options( + service._get_container_create_options( override_options={}, number=1, ) self.assertEqual( - set(create_options['host_config']['Binds']), + set(self.mock_client.create_host_config.call_args[1]['binds']), set([ '/host/path:/data1:rw', '/host/path:/data2:rw', @@ -612,14 +658,14 @@ class 
ServiceVolumesTest(unittest.TestCase): }, } - create_options = service._get_container_create_options( + service._get_container_create_options( override_options={}, number=1, previous_container=Container(self.mock_client, {'Id': '123123123'}), ) self.assertEqual( - create_options['host_config']['Binds'], + self.mock_client.create_host_config.call_args[1]['binds'], ['/mnt/sda1/host/path:/data:rw'], ) @@ -644,4 +690,4 @@ class ServiceVolumesTest(unittest.TestCase): ).create_container() self.assertEqual(len(create_calls), 1) - self.assertEqual(create_calls[0][1]['host_config']['Binds'], volumes) + self.assertEqual(self.mock_client.create_host_config.call_args[1]['binds'], volumes) diff --git a/tests/unit/sort_service_test.py b/tests/unit/sort_service_test.py index f42a947484..a7e522a1dd 100644 --- a/tests/unit/sort_service_test.py +++ b/tests/unit/sort_service_test.py @@ -1,5 +1,6 @@ -from compose.project import sort_service_dicts, DependencyError from .. import unittest +from compose.project import DependencyError +from compose.project import sort_service_dicts class SortServiceTest(unittest.TestCase): diff --git a/tests/unit/split_buffer_test.py b/tests/unit/split_buffer_test.py index 8eb54177aa..c41ea27d40 100644 --- a/tests/unit/split_buffer_test.py +++ b/tests/unit/split_buffer_test.py @@ -1,7 +1,8 @@ -from __future__ import unicode_literals from __future__ import absolute_import -from compose.cli.utils import split_buffer +from __future__ import unicode_literals + from .. 
import unittest +from compose.utils import split_buffer class SplitBufferTest(unittest.TestCase): @@ -11,7 +12,7 @@ class SplitBufferTest(unittest.TestCase): yield b'def\n' yield b'ghi\n' - self.assert_produces(reader, [b'abc\n', b'def\n', b'ghi\n']) + self.assert_produces(reader, ['abc\n', 'def\n', 'ghi\n']) def test_no_end_separator(self): def reader(): @@ -19,13 +20,13 @@ class SplitBufferTest(unittest.TestCase): yield b'def\n' yield b'ghi' - self.assert_produces(reader, [b'abc\n', b'def\n', b'ghi']) + self.assert_produces(reader, ['abc\n', 'def\n', 'ghi']) def test_multiple_line_chunk(self): def reader(): yield b'abc\ndef\nghi' - self.assert_produces(reader, [b'abc\n', b'def\n', b'ghi']) + self.assert_produces(reader, ['abc\n', 'def\n', 'ghi']) def test_chunked_line(self): def reader(): @@ -35,18 +36,18 @@ class SplitBufferTest(unittest.TestCase): yield b'\n' yield b'd' - self.assert_produces(reader, [b'abc\n', b'd']) + self.assert_produces(reader, ['abc\n', 'd']) def test_preserves_unicode_sequences_within_lines(self): - string = u"a\u2022c\n".encode('utf-8') + string = u"a\u2022c\n" def reader(): - yield string + yield string.encode('utf-8') self.assert_produces(reader, [string]) def assert_produces(self, reader, expectations): - split = split_buffer(reader(), b'\n') + split = split_buffer(reader()) for (actual, expected) in zip(split, expectations): self.assertEqual(type(actual), type(expected)) diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py new file mode 100644 index 0000000000..b272c7349a --- /dev/null +++ b/tests/unit/utils_test.py @@ -0,0 +1,16 @@ +from .. 
import unittest +from compose import utils + + +class JsonSplitterTestCase(unittest.TestCase): + + def test_json_splitter_no_object(self): + data = '{"foo": "bar' + self.assertEqual(utils.json_splitter(data), (None, None)) + + def test_json_splitter_with_object(self): + data = '{"foo": "bar"}\n \n{"next": "obj"}' + self.assertEqual( + utils.json_splitter(data), + ({'foo': 'bar'}, '{"next": "obj"}') + ) diff --git a/tox.ini b/tox.ini index 33cdee167f..f05c5ed260 100644 --- a/tox.ini +++ b/tox.ini @@ -1,16 +1,46 @@ [tox] -envlist = py26,py27 +envlist = py27,py34,pre-commit [testenv] usedevelop=True +passenv = + LD_LIBRARY_PATH + DOCKER_HOST + DOCKER_CERT_PATH + DOCKER_TLS_VERIFY +setenv = + HOME=/tmp deps = -rrequirements.txt -rrequirements-dev.txt commands = - nosetests -v {posargs} - flake8 compose tests setup.py + py.test -v -rxs \ + --cov=compose \ + --cov-report html \ + --cov-report term \ + --cov-config=tox.ini \ + {posargs:tests} + +[testenv:pre-commit] +skip_install = True +deps = + pre-commit +commands = + pre-commit install + pre-commit run --all-files + +# Coverage configuration +[run] +branch = True + +[report] +show_missing = true + +[html] +directory = coverage-html +# end coverage configuration [flake8] -# ignore line-length for now -ignore = E501,E203 +# Allow really long lines for now +max-line-length = 140 exclude = compose/packages