Merge remote-tracking branch 'upstream/master' into distribution_docs_from_upstream

This commit is contained in:
Misty Stanley-Jones 2016-11-30 10:22:57 -08:00
commit f2857417d1
46 changed files with 1249 additions and 419 deletions

5
.dockerignore Normal file
View File

@@ -0,0 +1,5 @@
.dockerignore
.git
.gitignore
Dockerfile
docker-compose.yml

View File

@@ -1,103 +1,35 @@
FROM starefossen/github-pages
# Basic Git set-up for throwaway commits
RUN git config --global user.email "gordon@docker.com"
RUN git config --global user.name "Gordon"
ENV VERSIONS="v1.4 v1.5 v1.6 v1.7 v1.8 v1.9 v1.10 v1.11"
# Clone the docs repo
RUN git clone https://www.github.com/docker/docker.github.io allv
# Create archive; check out each version, create HTML, tweak links
RUN git clone https://www.github.com/docker/docker.github.io temp; \
for VER in $VERSIONS; do \
git --git-dir=./temp/.git --work-tree=./temp checkout ${VER} \
&& mkdir -p allvbuild/${VER} \
&& jekyll build -s temp -d allvbuild/${VER} \
&& find allvbuild/${VER} -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="/#href="/'"$VER"'/#g' \
&& find allvbuild/${VER} -type f -name '*.html' -print0 | xargs -0 sed -i 's#src="/#src="/'"$VER"'/#g' \
&& find allvbuild/${VER} -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="https://docs.docker.com/#href="/'"$VER"'/#g'; \
done; \
rm -rf temp
COPY . allv
## Branch to pull from, per ref doc
ENV ENGINE_BRANCH="1.12.x"
ENV DISTRIBUTION_BRANCH="release/2.5"
# Engine
# Get docker/docker ref docs from $ENGINE_BRANCH branch to be used in master builds
# Uses Github Subversion gateway to limit the checkout
RUN svn co https://github.com/docker/docker/branches/$ENGINE_BRANCH/docs/reference allv/engine/reference
RUN svn co https://github.com/docker/docker/branches/$ENGINE_BRANCH/docs/extend allv/engine/extend
# Can't use the svn trick to get a single file, use wget instead
RUN wget -O allv/engine/deprecated.md https://raw.githubusercontent.com/docker/docker/$ENGINE_BRANCH/docs/deprecated.md
# Make a temporary commit for the files we added so we can check out other branches later
RUN git --git-dir=./allv/.git --work-tree=./allv add engine
RUN git --git-dir=./allv/.git --work-tree=./allv commit -m "Temporary commit"
# The statements below pull reference docs from upstream locations,
# then build the whole site to static HTML using Jekyll
# Distribution
# Get docker/distribution ref docs from $DISTRIBUTION_BRANCH tag to be used in master builds
# Uses Github Subversion gateway to limit the checkout
RUN svn co https://github.com/docker/distribution/branches/$DISTRIBUTION_BRANCH/docs/spec allv/registry/spec
# Can't use the svn trick to get a single file, use wget instead
RUN wget -O allv/registry/configuration.md https://raw.githubusercontent.com/docker/distribution/$DISTRIBUTION_BRANCH/docs/configuration.md
# Make a temporary commit for the files we added so we can check out other branches later
RUN git --git-dir=./allv/.git --work-tree=./allv add registry
RUN git --git-dir=./allv/.git --work-tree=./allv commit -m "Temporary commit"
# Create HTML for master
RUN jekyll build -s allv -d allvbuild
# Check out 1.4 branch, create HTML, tweak links
RUN git --git-dir=./allv/.git --work-tree=./allv checkout v1.4
RUN mkdir allvbuild/v1.4
RUN jekyll build -s allv -d allvbuild/v1.4
RUN find allvbuild/v1.4 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="/#href="/v1.4/#g'
RUN find allvbuild/v1.4 -type f -name '*.html' -print0 | xargs -0 sed -i 's#src="/#src="/v1.4/#g'
RUN find allvbuild/v1.4 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="https://docs.docker.com/#href="/v1.4/#g'
# Check out 1.5 branch, create HTML, tweak links
RUN git --git-dir=./allv/.git --work-tree=./allv checkout v1.5
RUN mkdir allvbuild/v1.5
RUN jekyll build -s allv -d allvbuild/v1.5
RUN find allvbuild/v1.5 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="/#href="/v1.5/#g'
RUN find allvbuild/v1.5 -type f -name '*.html' -print0 | xargs -0 sed -i 's#src="/#src="/v1.5/#g'
RUN find allvbuild/v1.5 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="https://docs.docker.com/#href="/v1.5/#g'
# Check out 1.6, create HTML, tweak links
RUN git --git-dir=./allv/.git --work-tree=./allv checkout v1.6
RUN mkdir allvbuild/v1.6
RUN jekyll build -s allv -d allvbuild/v1.6
RUN find allvbuild/v1.6 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="/#href="/v1.6/#g'
RUN find allvbuild/v1.6 -type f -name '*.html' -print0 | xargs -0 sed -i 's#src="/#src="/v1.6/#g'
RUN find allvbuild/v1.6 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="https://docs.docker.com/#href="/v1.6/#g'
# Check out 1.7, create HTML, tweak links
RUN git --git-dir=./allv/.git --work-tree=./allv checkout v1.7
RUN mkdir allvbuild/v1.7
RUN jekyll build -s allv -d allvbuild/v1.7
RUN find allvbuild/v1.7 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="/#href="/v1.7/#g'
RUN find allvbuild/v1.7 -type f -name '*.html' -print0 | xargs -0 sed -i 's#src="/#src="/v1.7/#g'
RUN find allvbuild/v1.7 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="https://docs.docker.com/#href="/v1.7/#g'
# Check out 1.8, create HTML, tweak links
RUN git --git-dir=./allv/.git --work-tree=./allv checkout v1.8
RUN mkdir allvbuild/v1.8
RUN jekyll build -s allv -d allvbuild/v1.8
RUN find allvbuild/v1.8 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="/#href="/v1.8/#g'
RUN find allvbuild/v1.8 -type f -name '*.html' -print0 | xargs -0 sed -i 's#src="/#src="/v1.8/#g'
RUN find allvbuild/v1.8 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="https://docs.docker.com/#href="/v1.8/#g'
# Check out 1.9, create HTML, tweak links
RUN git --git-dir=./allv/.git --work-tree=./allv checkout v1.9
RUN mkdir allvbuild/v1.9
RUN jekyll build -s allv -d allvbuild/v1.9
RUN find allvbuild/v1.9 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="/#href="/v1.9/#g'
RUN find allvbuild/v1.9 -type f -name '*.html' -print0 | xargs -0 sed -i 's#src="/#src="/v1.9/#g'
RUN find allvbuild/v1.9 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="https://docs.docker.com/#href="/v1.9/#g'
# Check out 1.10, create HTML, tweak links
RUN git --git-dir=./allv/.git --work-tree=./allv checkout v1.10
RUN mkdir allvbuild/v1.10
RUN jekyll build -s allv -d allvbuild/v1.10
RUN find allvbuild/v1.10 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="/#href="/v1.10/#g'
RUN find allvbuild/v1.10 -type f -name '*.html' -print0 | xargs -0 sed -i 's#src="/#src="/v1.10/#g'
RUN find allvbuild/v1.10 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="https://docs.docker.com/#href="/v1.10/#g'
# Check out 1.11, create HTML, tweak links
RUN git --git-dir=./allv/.git --work-tree=./allv checkout v1.11
RUN mkdir allvbuild/v1.11
RUN jekyll build -s allv -d allvbuild/v1.11
RUN find allvbuild/v1.11 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="/#href="/v1.11/#g'
RUN find allvbuild/v1.11 -type f -name '*.html' -print0 | xargs -0 sed -i 's#src="/#src="/v1.11/#g'
RUN find allvbuild/v1.11 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="https://docs.docker.com/#href="/v1.11/#g'
RUN svn co https://github.com/docker/docker/branches/$ENGINE_BRANCH/docs/reference allv/engine/reference \
&& svn co https://github.com/docker/docker/branches/$ENGINE_BRANCH/docs/extend allv/engine/extend \
&& wget -O allv/engine/deprecated.md https://raw.githubusercontent.com/docker/docker/$ENGINE_BRANCH/docs/deprecated.md \
&& svn co https://github.com/docker/distribution/branches/$DISTRIBUTION_BRANCH/docs/spec allv/registry/spec \
&& wget -O allv/registry/configuration.md https://raw.githubusercontent.com/docker/distribution/$DISTRIBUTION_BRANCH/docs/configuration.md \
&& jekyll build -s allv -d allvbuild \
&& rm -rf allv
# Serve the site, which is now all static HTML
CMD jekyll serve -s /usr/src/app/allvbuild -d /_site --no-watch -H 0.0.0.0 -P 4000

View File

@@ -1,3 +1,3 @@
source "https://rubygems.org"
gem "github-pages", "104" #Update me once in a while: https://github.com/github/pages-gem/releases
gem "github-pages", "105" #Update me once in a while: https://github.com/github/pages-gem/releases

View File

@@ -10,6 +10,7 @@ permalink: pretty
safe: false
lsi: false
url: https://docs.docker.com
keep_files: ["v1.4", "v1.5", "v1.6", "v1.7", "v1.8", "v1.9", "v1.10", "v1.11"]
gems:
- jekyll-redirect-from

View File

@@ -143,6 +143,8 @@ toc:
title: Configuring and running Docker
- path: /engine/admin/host_integration/
title: Automatically start containers
- path: /engine/admin/resource_constraints/
title: Limit a container's resources
- path: /engine/admin/live-restore/
title: Keep containers alive during daemon downtime
- path: /engine/admin/systemd/
@@ -183,6 +185,10 @@ toc:
title: Runtime metrics
- path: /engine/admin/ambassador_pattern_linking/
title: Link via an ambassador container
- sectiontitle: Troubleshoot Docker Engine
section:
- path: /engine/admin/troubleshooting_volume_errors/
title: Troubleshoot volume problems
- sectiontitle: Manage a swarm
section:
- path: /engine/swarm/
@@ -637,8 +643,10 @@ toc:
title: Prior CS Engine release notes
- sectiontitle: Docker Datacenter
section:
- path: /datacenter/try/
title: Try Docker Datacenter
- path: /datacenter/install/aws/
title: Deploy Datacenter on AWS
- path: /datacenter/install/linux/
title: Deploy Datacenter on Linux
- sectiontitle: Universal Control Plane 2.0
section:
- path: /datacenter/ucp/2.0/guides/
@@ -765,6 +773,8 @@ toc:
title: Install offline
- path: /datacenter/dtr/2.1/guides/install/license/
title: License your deployment
- path: /datacenter/dtr/2.1/guides/install/scale-your-deployment/
title: Scale your deployment
- path: /datacenter/dtr/2.1/guides/install/upgrade/
title: Upgrade
- path: /datacenter/dtr/2.1/guides/install/uninstall/

View File

@@ -16,7 +16,7 @@ On a Mac, install with `brew install bash-completion`
Place the completion script in `/etc/bash_completion.d/` (`/usr/local/etc/bash_completion.d/` on a Mac), using, for example:
curl -L https://raw.githubusercontent.com/docker/compose/$(docker-compose version --short)/contrib/completion/bash/docker-compose > /etc/bash_completion.d/docker-compose
curl -L https://raw.githubusercontent.com/docker/compose/$(docker-compose version --short)/contrib/completion/bash/docker-compose -o /etc/bash_completion.d/docker-compose
Completion will be available upon next login.
@@ -58,4 +58,4 @@ Enjoy working with Compose faster and with fewer typos!
- [Get started with Rails](rails.md)
- [Get started with WordPress](wordpress.md)
- [Command line reference](./reference/index.md)
- [Compose file reference](compose-file.md)
- [Compose file reference](compose-file.md)

View File

@@ -1167,7 +1167,7 @@ available with Docker Engine version **1.12.0+**
Introduces the following additional parameters:
- [`link_local_ips`](compose-file.md#link_local_ips)
- [`link_local_ips`](compose-file.md#linklocalips)
- [`isolation`](compose-file.md#isolation)
- `labels` for [volumes](compose-file.md#volume-configuration-reference) and
[networks](compose-file.md#network-configuration-reference)

Binary file not shown.

Before

Width:  |  Height:  |  Size: 69 KiB

After

Width:  |  Height:  |  Size: 242 KiB

View File

@@ -31,7 +31,7 @@ which the release page specifies, in your terminal.
The following is an example command illustrating the format:
$ curl -L "https://github.com/docker/compose/releases/download/1.8.1/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
$ curl -L "https://github.com/docker/compose/releases/download/1.9.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
If you have problems installing with `curl`, see
[Alternative Install Options](install.md#alternative-install-options).
@@ -46,7 +46,7 @@ which the release page specifies, in your terminal.
7. Test the installation.
$ docker-compose --version
docker-compose version: 1.8.1
docker-compose version: 1.9.0
## Alternative install options
@@ -69,7 +69,7 @@ to get started.
Compose can also be run inside a container, from a small bash script wrapper.
To install Compose as a container, run:
$ curl -L https://github.com/docker/compose/releases/download/1.8.1/run.sh > /usr/local/bin/docker-compose
$ curl -L https://github.com/docker/compose/releases/download/1.9.0/run.sh > /usr/local/bin/docker-compose
$ chmod +x /usr/local/bin/docker-compose
## Master builds

View File

@@ -14,7 +14,7 @@ dependencies, you'll need to define exactly what needs to be included in the
container. This is done using a file called `Dockerfile`. To begin with, the
Dockerfile consists of:
FROM ruby:2.2.0
FROM ruby:2.3.3
RUN apt-get update -qq && apt-get install -y build-essential libpq-dev nodejs
RUN mkdir /myapp
WORKDIR /myapp
@@ -30,7 +30,7 @@ how to write Dockerfiles, see the [Docker user guide](/engine/tutorials/dockerim
Next, create a bootstrap `Gemfile` which just loads Rails. It'll be overwritten in a moment by `rails new`.
source 'https://rubygems.org'
gem 'rails', '4.2.0'
gem 'rails', '5.0.0.1'
You'll need an empty `Gemfile.lock` in order to build our `Dockerfile`.
@@ -99,12 +99,7 @@ If you are running Docker on Mac or Windows, you should already have ownership
of all files, including those generated by `rails new`. List the files just to
verify this.
Uncomment the line in your new `Gemfile` which loads `therubyracer`, so you've
got a Javascript runtime:
gem 'therubyracer', platforms: :ruby
Now that you've got a new `Gemfile`, you need to build the image again. (This,
If you edit `Gemfile` at this point or later, you will need to build the image again. (This,
and changes to the Dockerfile itself, should be the only times you'll need to
rebuild.)
@@ -122,7 +117,7 @@ Replace the contents of `config/database.yml` with the following:
development: &default
adapter: postgresql
encoding: unicode
database: postgres
database: myapp_development
pool: 5
username: postgres
password:

View File

@@ -97,33 +97,18 @@ to the `docker` group.
$ sudo apt-get update && sudo apt-get install apt-transport-https
```
4. Install additional virtual drivers not in the base image.
4. Install additional kernel modules to add AUFS support.
```bash
$ sudo apt-get install -y linux-image-extra-virtual
$ sudo apt-get install -y linux-image-extra-$(uname -r) linux-image-extra-virtual
```
You may need to reboot your server after updating the LTS kernel.
5. Add the repository for the new version:
```bash
$ echo "deb https://packages.docker.com/1.12/apt/repo ubuntu-trusty main" | sudo tee /etc/apt/sources.list.d/docker.list
```
This adds the repository of the latest version of CS Docker Engine for the
Ubuntu Trusty distribution. Change the "ubuntu-trusty" string to the
distribution you're using:
* debian-jessie (Debian 8)
* debian-stretch (future release)
* debian-wheezy (Debian 7)
* ubuntu-precise (Ubuntu 12.04)
* ubuntu-trusty (Ubuntu 14.04)
* ubuntu-utopic (Ubuntu 14.10)
* ubuntu-vivid (Ubuntu 15.04)
* ubuntu-wily (Ubuntu 15.10)
6. Run the following to install commercially supported Docker Engine and its
dependencies:

View File

@@ -35,7 +35,7 @@ install, configure, and backup DTR.
Run the following command to install DTR:
```bash
```none
# Pull the latest version of DTR
$ docker pull docker/dtr
@@ -98,14 +98,11 @@ replica fails.
For high availability, you should use 3, 5, or 7 DTR replicas. The nodes where
you're going to install these replicas also need to be managed by UCP.
To add replicas to a DTR cluster, use the `docker/dtr join` command. To add
replicas:
To add replicas to a DTR cluster, use the `docker/dtr join` command:
1. Make sure the DTR images are loaded into the node.
1. Load your UCP user bundle.
2. Load you UCP user bundle.
3. Run the join command.
2. Run the join command.
When you join a replica to a DTR cluster, you need to specify the
ID of a replica that is already part of the cluster. You can find an
@@ -113,14 +110,14 @@ replicas:
Then run:
```bash
$ docker run -it --rm \
```none
docker run -it --rm \
docker/dtr join \
--ucp-node <ucp-node-name> \
--ucp-insecure-tls
```
4. Check that all replicas are running.
3. Check that all replicas are running.
In your browser, navigate to the Docker **Universal Control Plane**
web UI, and navigate to the **Applications** screen. All replicas should

View File

@@ -0,0 +1,63 @@
---
title: Scale your deployment
description: Learn how to scale Docker Trusted Registry by adding and removing replicas.
keywords: docker, dtr, install, deploy
---
Docker Trusted Registry is designed to scale horizontally as your usage
increases. You can add or remove replicas to make DTR scale to your needs
or for high availability.
To set up DTR for [high availability](../high-availability/index.md),
you can add more replicas to your DTR cluster. Adding more replicas allows you
to load-balance requests across all replicas, and keep DTR working if a
replica fails.
For high availability, you should use 3, 5, or 7 DTR replicas. The nodes where
you're going to install these replicas also need to be managed by UCP.
## Join more DTR replicas
To add replicas to an existing DTR deployment:
1. Load your UCP user bundle.
2. Run the join command.
```none
docker run -it --rm \
docker/dtr join \
--ucp-node <ucp-node-name> \
--ucp-insecure-tls
```
Where `--ucp-node` is the hostname of the UCP node where you want to
deploy the DTR replica. `--ucp-insecure-tls` tells the command to trust the
certificates used by UCP.
3. If you have a load balancer, add this DTR replica to the load balancing pool.
## Remove existing replicas
To remove a DTR replica from a deployment, run:
```none
docker run -it --rm \
docker/dtr remove \
--ucp-insecure-tls
```
You will be prompted for:
* Existing replica id: the id of any healthy DTR replica of that cluster
* Replica id: the id of the DTR replica you want to remove. It can be the id of an
unhealthy replica
* UCP username and password: the administrator credentials for UCP
If you're load-balancing user requests across multiple DTR replicas, don't
forget to remove this replica from the load balancing pool.
## Where to go next
* [Install DTR](index.md)
* [Uninstall DTR](uninstall.md)

View File

@@ -5,56 +5,36 @@ keywords:
- docker, dtr, install, uninstall
---
Use the `remove` command, to remove a DTR replica from a cluster.
Use the `remove` command to remove a DTR replica from an existing deployment.
To uninstall a DTR cluster, remove all DTR replicas one at a time.
The remove command:
* Removes the replica from the cluster,
* Stops and removes all DTR containers,
* Deletes all DTR volumes.
The remove command informs the DTR cluster that the node is about to be removed,
then it removes the replica, stops and removes all DTR containers from that node,
and deletes all DTR volumes.
To see what options are available in the uninstall command, check the
[uninstall command reference](../../reference/cli/remove.md), or run:
To uninstall a DTR replica, run:
```bash
$ docker run -it --rm docker/dtr remove --help
```
To remove a replica safely, you must tell the bootstrapper about one healthy replica
using the `--existing-replica-id` flag and the replica to remove with the
`--replica-id` flag. It uses the healthy replica to safely inform your DTR cluster
that the replica is about to be removed before it performs the actual removal.
## Example
The following example illustrates how use the remove command interactively to
remove a DTR replica from a cluster with multiple replicas:
```bash
$ docker run -it --rm \
```none
docker run -it --rm \
docker/dtr remove \
--ucp-insecure-tls
existing-replica-id (ID of an existing replica in a cluster): 7ae3cb044b70
replica-id (Specify the replica Id. Must be unique per replica, leave blank for random): a701a510126c
ucp-username (Specify the UCP admin username): $UCP_ADMIN
ucp-password: $UCP_PASSWORD
ucp-url (Specify the UCP host using the host[:port] format): $UCP_HOST
```
Where:
You will be prompted for:
* existing-replica-id: is the id of any healthy DTR replica of that cluster,
* replica-id: is the id of the DTR replica you want to remove,
* ucp-username and ucp-password: are the username and password of a UCP administrator.
* Existing replica id: the id of any healthy DTR replica of that cluster
* Replica id: the id of the DTR replica you want to remove. It can be the id of an
unhealthy replica that you want to remove from your deployment
* UCP username and password: the administrator credentials for UCP
To ensure you don't lose data, DTR will not remove the last replica from your
deployment. To confirm you really want to remove that replica, use the
`--force-remove` flag.
Now you can confirm on Docker Universal Control Plane that the DTR replica
`a701a510126c` no longer exists.
To see what options are available in the uninstall command, check the
[uninstall command reference documentation](../../reference/cli/remove.md).
## Where to go next
* [Scale your deployment](scale-your-deployment.md)
* [Install DTR](index.md)
* [Install DTR offline](install-offline.md)

View File

@@ -19,13 +19,17 @@ to upgrade your installation to the latest release.
(28 Nov 2016)
**Features**
* Added Support for Scality as S3 compatible object storage by updating to use AWS v4 headers
* Updated backend storage configuration to use AWS v4 headers
* Added support for Scality, an Amazon S3 compatible object storage
**Other Improvements**
* Increase required consecutive failures for healthcheck to report error to 3
* Fix bootstrapper restore command to work with notary
* Health check now reports failures after 3 consecutive failures
* Restore command now restores Notary server data
* Fix subsequent joins after a failed join
## DTR 2.1.0
(10 Nov 2016)

BIN
datacenter/images/app.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 204 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 76 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 184 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 125 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 17 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 16 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 41 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 274 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 151 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 52 KiB

BIN
datacenter/images/dtr.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 172 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 58 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 43 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 150 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 171 KiB

View File

Before

Width:  |  Height:  |  Size: 64 KiB

After

Width:  |  Height:  |  Size: 64 KiB

View File

Before

Width:  |  Height:  |  Size: 90 KiB

After

Width:  |  Height:  |  Size: 90 KiB

View File

Before

Width:  |  Height:  |  Size: 114 KiB

After

Width:  |  Height:  |  Size: 114 KiB

BIN
datacenter/images/ucp.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 323 KiB

387
datacenter/install/aws.md Normal file
View File

@@ -0,0 +1,387 @@
---
title: Deploy DDC on Amazon AWS
description: Learn how to deploy Docker Datacenter with one click, using an Amazon AWS CloudFormation template
keywords: docker, datacenter, install, orchestration, management
---
Docker Datacenter on Docker for AWS is a one-click deploy of highly scalable
Docker Datacenter (Universal Control Plane and Docker Trusted Registry) based
on Docker and AWS best practices. It is based on
[Docker for AWS](https://beta.docker.com/docs/) and currently should be used
for evaluation purposes only.
![ucp.png](../images/d4a_ddc_arch.png)
## How it Works
The solution uses an Amazon AWS CloudFormation template to create everything
that you need from scratch. The template starts by creating a new VPC
along with its subnets and security groups. Once the networking is set up, it
creates two Auto Scaling groups, one for the managers and one for the
workers, and sets the desired capacity that was selected in the CloudFormation
setup form. The managers start up first and create a swarm manager quorum
using Raft. The workers then start up and join the swarm one by one, until
all of the workers are up and running. At this point you have a number of
managers and workers in your swarm that are ready to handle your application
deployments. The template then bootstraps UCP controllers on manager nodes and UCP agents
on worker nodes. Next, it installs DTR on the manager nodes and configures it
to use an S3 bucket as an image storage backend. Three ELBs, one for UCP, one
for DTR, and a third for your applications, are launched and automatically
configured to provide resilient load balancing across multiple AZs.
The application ELB is updated automatically when services are launched or
removed, while the UCP and DTR ELBs are configured for HTTPS only.
Manager and worker nodes are part of separate Auto Scaling groups to allow you to
scale your cluster when needed. If you increase the number of instances running
in your worker Auto Scaling group (via the AWS console, or by updating the
CloudFormation configuration), the new nodes that start up
automatically join the swarm. This architecture ensures that both manager
and worker nodes are spread across multiple AZs for resiliency and
high availability. The template is adjustable and upgradeable, meaning you can
adjust your configuration (e.g., instance types or Docker Engine version).
## Prerequisites
- Access to an AWS account with permissions to use CloudFormation and to create the following objects:
- EC2 instances + Auto Scaling groups
- IAM profiles
- DynamoDB Tables
- SQS Queue
- VPC + subnets
- ELB
- CloudWatch Log Group
- S3 Bucket
- An SSH key in AWS, in the region where you want to deploy (required to access the completed Docker install)
- An AWS account that supports EC2-VPC
For more information about adding an SSH key pair to your account, please refer to the [Amazon EC2 Key Pairs docs](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html)
## CloudFormation Parameters
Here are the required configuration parameters for the CloudFormation template:
**KeyName**
The SSH key that is used when you SSH into the manager nodes. The key needs to
be configured in the same region where you launch the CloudFormation template.
**InstanceType**
The EC2 instance type for your worker nodes.
**ManagerInstanceType**
The EC2 instance type for your manager nodes. The larger your swarm, the larger
the instance size you should use.
**ClusterSize**
The number of workers you want in your swarm (1-1000).
**ManagerSize**
The number of managers in your swarm. You can pick either 3 or 5 managers.
**DDCUsernameSet**
The Docker Datacenter username.
**DDCPasswordSet**
The Docker Datacenter password.
**License**
Your Docker Datacenter license, in JSON format or as an S3 URL to download it. You can
get a trial license [here](https://store.docker.com/bundles/docker-datacenter).
## Installation
There are two ways you can deploy Docker Datacenter on Docker for AWS. You can
use the AWS Management Console (browser based), or the AWS CLI. Both have the
above configuration options.
**1) AWS Management Console**
- Click **Launch Stack** below. This link takes you to the AWS CloudFormation portal.
[![Docker Datacenter on Docker for AWS](https://s3.amazonaws.com/cloudformation-examples/cloudformation-launch-stack.png)](https://console.aws.amazon.com/cloudformation/home?#/stacks/new?stackName=DockerDatacenter&templateURL=https://s3-us-west-2.amazonaws.com/ddc-on-aws-public/aws/aws-v1.12.3-cs4-beta12-ddc.json)
- Confirm the AWS region that you'd like to launch this stack in (top right corner).
- Provide the required parameters and click **Next** (see below).
![console_installation.png](../images/console_installation.png)
- **Confirm** and **Launch**.
- Once the stack is successfully created (this takes 10-15 minutes), click the **Output** tab to see the URLs of UCP and DTR.
**2) AWS CLI**
- Upload your Docker Datacenter license to an S3 bucket.
- Run the following Docker container that uses `aws-cli` to launch the
CloudFormation stack. Alternatively, if you have `aws-cli` installed, you can
run the command directly.
```
docker run --env AWS_ACCESS_KEY_ID=<AWS_ACCESS_KEY_ID> \
--env AWS_SECRET_ACCESS_KEY=<AWS_SECRET_ACCESS_KEY> \
--env AWS_DEFAULT_REGION=<AWS_REGION> \
garland/aws-cli-docker aws cloudformation create-stack \
--stack-name <STACK_NAME> \
--capabilities CAPABILITY_IAM \
--parameters \
ParameterKey=KeyName,ParameterValue=<SSH_KEY_NAME> \
ParameterKey=InstanceType,ParameterValue=<INSTANCE_TYPE> \
ParameterKey=ManagerInstanceType,ParameterValue=<INSTANCE_TYPE> \
ParameterKey=ClusterSize,ParameterValue=<CLUSTER_SIZE> \
ParameterKey=ManagerSize,ParameterValue=<MANAGER_SIZE> \
ParameterKey=DDCUsernameSet,ParameterValue=<DDC_USERNAME> \
ParameterKey=DDCPasswordSet,ParameterValue=<DDC_PASSWORD> \
ParameterKey=License,ParameterValue=<YOUR_DDC_LICENSE_S3_URL> \
--template-url https://s3-us-west-2.amazonaws.com/ddc-on-aws-public/aws/aws-v1.12.3-cs4-beta12-ddc.json
```
- Once the stack is successfully created (this takes 10-15 minutes), you can get
stack outputs such as the UCP and DTR URLs directly from the CLI as follows:
```
docker run --env AWS_ACCESS_KEY_ID=<AWS_ACCESS_KEY_ID> \
--env AWS_SECRET_ACCESS_KEY=<AWS_SECRET_ACCESS_KEY> \
--env AWS_DEFAULT_REGION=<AWS_REGION> \
garland/aws-cli-docker aws cloudformation describe-stacks --stack-name <STACK_NAME>
```
- To fully automate installs, you can use the [AWS CloudFormation API](http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/Welcome.html)
## Software Versions
- Docker Commercially Supported Engine: `1.12.3-cs4`
- UCP: `2.0.1`
- DTR: `2.1.0`
## System containers
Each node has a few system containers running on it to help run your swarm cluster. For everything to run smoothly, keep those containers running and don't make any changes to them. If you make any changes, we can't guarantee that Docker for AWS will work correctly.
## Supported Regions
- ap-northeast-1
- ap-northeast-2
- ap-south-1
- ap-southeast-1
- ap-southeast-2
- eu-central-1
- eu-west-1
- sa-east-1
- us-east-1
- us-east-2
- us-west-1
- us-west-2
## AMIs
Docker Datacenter on Docker for AWS currently only supports our custom AMI,
which is a highly optimized AMI built specifically for running Docker on AWS.
## Accessing Docker Datacenter
Once the stack is successfully created, you can access the UCP and DTR URLs in the
**Output** tab, as follows:
![insecure.png](../images/output.png)
When accessing UCP and DTR, log in using the username and password that you
provided when you launched the CloudFormation stack. You should see the
landing pages below:
![ucp.png](../images/ucp.png)
![dtr.png](../images/dtr.png)
> Note: During the installation process, a self-signed certificate is generated
for both UCP and DTR. You can replace these certificates with your own
CA-signed certificate after the installation is complete. When you access UCP
and DTR URLs for the first time, you need to proceed insecurely (multiple times)
by accepting the provided certificate in the browser.
## Configuring Docker Datacenter DNS and Certificates
Most users want to register their own DNS records with CA-signed
certificates for both UCP and DTR, instead of the randomly generated ELB DNS
names. To do that, follow the instructions below:
1. Create an A or CNAME DNS record for UCP and DTR pointing to the UCP and DTR
ELB DNS/IP. You can find the ELB names in the **Output** tab.
2. Log in to DTR using the DTR ELB URL and go to the **Settings** page.
3. Update the **Domain** section with your DNS names and their respective
certificates. Make sure you click **Save** at the end.
4. Log in to UCP using the UCP ELB URL and go to the **Admin Settings** tab.
5. Under **Cluster Configuration**, update **EXTERNAL SERVICE LOAD BALANCER**
with your custom UCP DNS name. Then click **Update Settings**.
6. Under the **Certificates** section, upload or paste your own certificates for
UCP. Then click **Update**.
7. The final step is to reconfigure DTR to use the new UCP DNS and certificates.
You need to run a `reconfigure` operation from the CLI. This step can be done
from any Docker Engine (local or in the cloud), as long as you have the
UCP [client bundle](../ucp/2.0/guides/access-ucp/cli-based-access.md).
Once you download the bundle and load it, run the following command:
```
UCP_URL=<YOUR_NEW_UCP_DNS>
DTR_URL=<YOUR_NEW_DTR_DNS>
USERNAME=<YOUR_DDC_USERNAME>
PASSWORD=<YOUR_DDC_PASSWORD>
curl -k https://$UCP_URL/ca > ucp-ca.pem
docker run -it --rm docker/dtr:2.1.0 \
reconfigure \
--ucp-url $UCP_URL \
--ucp-username $USERNAME \
--ucp-password $PASSWORD \
--debug \
--ucp-ca "$(cat ucp-ca.pem)" \
--dtr-external-url https://$DTR_URL:443
```
Once you run this Docker container, you'll be prompted to choose a replica
to reconfigure. Press **Enter** to proceed with the chosen one.
8. Now you can access UCP and DTR with your own custom DNS names.
## Deploy and Access Your Applications on Docker Datacenter
Now that you have configured your custom DNS for both UCP and DTR, you can
start deploying your applications via CLI (with the client bundle) or via the
UCP web console.
#### Swarm Mode/Services Based Applications (Docker Engine 1.12+)
If you are deploying swarm-mode services, Docker Datacenter on Docker for AWS
provides multiple advantages to easily deploy and access your application.
1. **ELB Integration**
When you create swarm-mode services and publish a TCP/UDP port, Docker for
AWS automatically reconfigures the application ELB's (**DefaultExternalTarget**)
listeners to allow traffic on these ports to pass to cluster nodes.
For example, if you launch a service from the CLI using the client bundle and
publish a TCP port for it, the ELB's **Listeners** configuration is updated
automatically. See the example below:
a. Creating a service with a published port `8080` using CLI:
```
$ docker service create --name demo -p 8080:8080 ehazlett/docker-demo:dcus
6s09w6gxxfz7mkce9ybl6x3cr
```
b. Notice the updated ELB configuration:
![elb_listeners_update.png](../images/elb_listeners_update.png)
c. Access your application using **DefaultExternalTarget** DNS and published port:
![app.png](../images/app.png)
2. **Swarm Mode Routing Mesh**
Routing mesh is a new feature in Docker 1.12 that combines ipvs and
iptables to create a powerful cluster-wide transport-layer (L4)
load balancer. It allows all the swarm nodes to accept connections on the
services' published ports. When any swarm node receives traffic destined to
the published TCP/UDP port of a running service, it forwards it to the
service's VIP using a pre-defined overlay network called `ingress`.
3. **HTTP Routing Mesh**
UCP now supports the HTTP Routing Mesh (HRM), a new experimental
feature that enables service discovery at the application layer (L7). HRM
works with the Swarm Mode Routing Mesh to link HTTP vhosts to L4 published
services. See the
[following documentation](https://success.docker.com/Datacenter/Apply/Docker_Reference_Architecture%3A_Universal_Control_Plane_2.0_Service_Discovery_and_Load_Balancing)
for more details.
Once you enable HRM, the ELB will be reconfigured automatically to forward
application traffic to the worker nodes. You may use your custom
application DNS names and map them to the **DefaultExternalTarget** DNS.
For example, if you want to access the previous service using a proper DNS
name like `foo.example.com`, all you need to do is enable HRM, create a
CNAME record using the **DefaultExternalTarget**'s DNS, and launch your
service as follows:
```
docker service create -p 8080 \
--network ucp-hrm \
--name demo-hrm-app \
--label com.docker.ucp.mesh.http=8080=http://foo.example.com \
ehazlett/docker-demo:dcus
```
> Note: There is currently a caveat with HRM and Docker Datacenter running on
AWS. Before enabling the feature, you need to create a new network named
`ucp-hrm`. Please go to **Resources** > **Networks** > **Create Network** and
use the information below before clicking **Create**. Once you create the network,
you can enable HRM.
![create_ucp_hrm_network.png](../images/create_ucp_hrm.png)
#### Non-Swarm Mode Container Based Applications
If you are deploying non-swarm mode container-based applications, you can
follow [UCP docs](../datacenter/ucp/2.0/guides/applications/index.md) to
deploy your applications. Once you deploy your application on UCP, you can
access them by using the specific worker node's public IP address and exposed
port.
Additionally, you can deploy Interlock on any or all of the worker nodes to
dynamically register your applications and load-balance traffic to them using an
L7 reverse proxy (e.g., NGINX). Once you successfully deploy Interlock and NGINX,
you can create DNS records for your applications using the
**DefaultExternalTarget** IP. Full documentation is available
[here](https://success.docker.com/Datacenter/Apply/Docker_Reference_Architecture%3A_Universal_Control_Plane_2.0_Service_Discovery_and_Load_Balancing#Non_Swarm_Mode_Containers).
## Scaling Workers
You can scale the worker count using the AWS Auto Scaling group. Docker
automatically joins new instances to the swarm or removes them from it.
There are currently two ways to scale your worker group. You can "update" your
stack, and change the number of workers in the CloudFormation template
parameters, or you can manually update the Auto Scaling group in the AWS console
for EC2 auto scaling groups.
Changing manager count live is **_not_** currently supported.
### AWS Console
Log in to the AWS console, and go to the EC2 dashboard. On the lower left-hand
side, select the "Auto Scaling Groups" link.
Look for the Auto Scaling group with a name like
`$STACK_NAME-NodeASG-*`, where `$STACK_NAME` is the name of the stack you
created when filling out the CloudFormation template for Docker for AWS.
Once you find it, click the checkbox next to the name, then click the
"Edit" button on the lower detail pane.
![console_installation.png](../images/autoscale_update.png)
Change the "Desired" field to the size of the worker pool that you would like,
and hit "Save".
![console_installation.png](../images/autoscale_save.png)
This takes a few minutes and adds the new workers to your swarm
automatically. To lower the number of workers back down, update
"Desired" again with the lower number, and the worker pool shrinks
until it reaches the new size.
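If you prefer to script this step, the Auto Scaling group can also be resized from the AWS CLI. This is a minimal sketch; the group name placeholder is an assumption, and you should look up the real name as described above:
```
# Set the desired worker count on the Auto Scaling group
# (group name below is hypothetical; find yours in the EC2 console)
aws autoscaling set-desired-capacity \
    --auto-scaling-group-name <STACK_NAME>-NodeASG-XXXXXXXX \
    --desired-capacity <NEW_WORKER_COUNT>
```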
### CloudFormation Update
Go to the CloudFormation management page, and click the checkbox next to the
stack you want to update. Then click the action button at the top, and
select "Update Stack".
![console_installation.png](../images/cloudformation_update.png)
Pick "Use current template", and then click "Next". Fill out the same parameters
you have specified before, but this time, change your worker count to the new
count, click "Next". Answer the rest of the form questions. CloudFormation will
show you a preview of the changes it will make. Review the changes and if they
look good, click "Update". CloudFormation will change the worker pool size to
the new value you specified. It will take a few minutes (longer for a larger
increase / decrease of nodes), but when complete you will have your swarm
with the new worker pool size.

View File

@@ -1,7 +1,9 @@
---
title: Try Docker Datacenter
title: Deploy DDC on Linux servers
description: Learn how to get a trial license and install Docker Datacenter
keywords: docker, datacenter, install, orchestration
redirect_from:
- /datacenter/try/
---
The best way to try Docker Datacenter for yourself is to get the [30-day
@@ -23,7 +25,7 @@ Also make sure the hosts are running one of these operating systems:
* Ubuntu 14.04 LTS
* SUSE Linux Enterprise 12
[Learn more about the Docker Datacenter system requirements](ucp/2.0/guides/installation/system-requirements.md)
[Learn more about the Docker Datacenter system requirements](../ucp/2.0/guides/installation/system-requirements.md)
### Step 2: Install CS Docker Engine
@@ -37,7 +39,7 @@ Log in to each node using ssh, and install CS Docker Engine:
curl -SLf https://packages.docker.com/1.12/install.sh | sh
```
[You can also install CS Docker Engine using a package manager](../cs-engine/install.md)
[You can also install CS Docker Engine using a package manager](../../cs-engine/install.md)
### Step 3: Install Universal Control Plane
@@ -62,7 +64,7 @@ for any necessary configuration values.
Now that UCP is installed, you need to license it. In your browser, navigate
to the UCP web UI and upload your license.
![](ucp/2.0/guides/images/try-ddc-1.png)
![](../images/try-ddc-1.png)
[Get a free trial license if you don't have one](https://store.docker.com/bundles/docker-datacenter).
@@ -72,11 +74,11 @@ Join more nodes so that you can manage them from UCP.
Go to the **UCP web UI**, navigate to the **Resources** page, and go to
the **Nodes** section.
![](ucp/2.0/guides/images/try-ddc-2.png)
![](../images/try-ddc-2.png)
Click the **Add Node button** to add a new node.
![](ucp/2.0/guides/images/try-ddc-3.png)
![](../images/try-ddc-3.png)
Check the 'Add node as a manager' option to join the node as a manager
@@ -108,7 +110,7 @@ by UCP.
## Where to go next
* [Create and manage users](ucp/2.0/guides/user-management/create-and-manage-users.md)
* [Deploy an application](ucp/2.0/guides/applications/index.md)
* [Push an image to DTR](dtr/2.1/guides/repos-and-images/push-an-image.md)
* [Considerations for a High Availability Deployment](ucp/2.0/guides/high-availability/index.md)
* [Create and manage users](../ucp/2.0/guides/user-management/create-and-manage-users.md)
* [Deploy an application](../ucp/2.0/guides/applications/index.md)
* [Push an image to DTR](../dtr/2.1/guides/repos-and-images/push-an-image.md)
* [Considerations for a High Availability Deployment](../ucp/2.0/guides/high-availability/index.md)

View File

@@ -0,0 +1,172 @@
---
redirect_from:
- "/engine/articles/systemd/"
title: "Limit a container's resources"
description: "Limiting the system resources a container can use"
keywords: "docker, daemon, configuration"
---
By default, a container has no resource constraints and can use as much of a
given resource as the host's kernel scheduler will allow. Docker provides ways
to control how much memory, CPU, or block IO a container can use, by setting runtime
configuration flags on the `docker run` command. This section provides details
on when you should set such limits and the possible implications of setting them.
## Memory
Docker can enforce hard memory limits, which allow the container to use no more
than a given amount of user or system memory, or soft limits, which allow the
container to use as much memory as it needs unless certain conditions are met,
such as when the kernel detects low memory or contention on the host machine.
Some of these options have different effects when used alone or when more than
one option is set.
Most of these options take a positive integer, followed by a suffix of `b`, `k`,
`m`, `g`, to indicate bytes, kilobytes, megabytes, or gigabytes.
| Option | Description |
|-----------------------|-----------------------------|
| `-m` or `--memory=`   | The maximum amount of memory the container can use. If you set this option, the minimum allowed value is `4m` (4 megabytes). |
| `--memory-swap`* | The amount of memory this container is allowed to swap to disk. See [`--memory-swap` details](resource_constraints.md#memory-swap-details). |
| `--memory-swappiness` | By default, the host kernel can swap out a percentage of anonymous pages used by a container. You can set `--memory-swappiness` to a value between 0 and 100, to tune this percentage. See [`--memory-swappiness` details](resource_constraints.md#memory-swappiness-details). |
| `--memory-reservation` | Allows you to specify a soft limit smaller than `--memory` which is activated when Docker detects contention or low memory on the host machine. If you use `--memory-reservation`, it must be set lower than `--memory` in order for it to take precedence. Because it is a soft limit, it does not guarantee that the container will not exceed the limit. |
| `--kernel-memory` | The maximum amount of kernel memory the container can use. The minimum allowed value is `4m`. Because kernel memory cannot be swapped out, a container which is starved of kernel memory may block host machine resources, which can have side effects on the host machine and on other containers. See [`--kernel-memory` details](resource_constraints.md#kernel-memory-details). |
| `--oom-kill-disable` | By default, if an out-of-memory (OOM) error occurs, the kernel kills processes in a container. To change this behavior, use the `--oom-kill-disable` option. Only disable the OOM killer on containers where you have also set the `-m/--memory` option. If the `-m` flag is not set, the host can run out of memory and the kernel may need to kill the host system's processes to free memory. |
For more information about cgroups and memory in general, see the documentation
for [Memory Resource Controller](https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt).
### `--memory-swap` details
- If unset, and `--memory` is set, the container can use twice as much swap
as the `--memory` setting, if the host machine has swap memory configured.
For instance, if `--memory="300m"` and `--memory-swap` is not set, the
container can use 300m of memory and 600m of swap.
- If set to a positive integer, and if both `--memory` and `--memory-swap`
are set, `--memory-swap` represents the total amount of memory and swap
that can be used, and `--memory` controls the amount used by non-swap
memory. So if `--memory="300m"` and `--memory-swap="1g"`, the container
can use 300m of memory and 700m (1g - 300m) swap.
- If set to `-1` (the default), the container is allowed to use unlimited swap memory.
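For instance, a minimal sketch of the first two cases (the image name is just an example):
```bash
# Up to 300m of memory and 600m of swap, since --memory-swap is unset
$ docker run -it -m 300m ubuntu /bin/bash
# Up to 300m of memory and 700m of swap (1g total minus 300m)
$ docker run -it -m 300m --memory-swap 1g ubuntu /bin/bash
```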
### `--memory-swappiness` details
- A value of 0 turns off anonymous page swapping.
- A value of 100 sets all anonymous pages as swappable.
- By default, if you do not set `--memory-swappiness`, the value is
inherited from the host machine.
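As a minimal sketch, this turns off anonymous page swapping for a single container (image name is an example):
```bash
$ docker run -it --memory-swappiness=0 ubuntu /bin/bash
```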
### `--kernel-memory` details
Kernel memory limits are expressed in terms of the overall memory allocated to
a container. Consider the following scenarios:
- **Unlimited memory, unlimited kernel memory**: This is the default
behavior.
- **Unlimited memory, limited kernel memory**: This is appropriate when the
amount of memory needed by all cgroups is greater than the amount of
memory that actually exists on the host machine. You can configure the
kernel memory to never go over what is available on the host machine,
and containers which need more memory need to wait for it.
- **Limited memory, unlimited kernel memory**: The overall memory is
limited, but the kernel memory is not.
- **Limited memory, limited kernel memory**: Limiting both user and kernel
memory can be useful for debugging memory-related problems. If a container
is using an unexpected amount of either type of memory, it will run out
of memory without affecting other containers or the host machine. Within
this setting, if the kernel memory limit is lower than the user memory
limit, running out of kernel memory will cause the container to experience
an OOM error. If the kernel memory limit is higher than the user memory
limit, the kernel limit will not cause the container to experience an OOM.
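As an illustrative sketch of the last scenario, limiting both user and kernel memory (the values and image are examples, not recommendations):
```bash
# User memory capped at 500m, of which at most 50m may be kernel memory
$ docker run -it -m 500m --kernel-memory 50m ubuntu /bin/bash
```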
When you turn on any kernel memory limits, the host machine tracks "high water
mark" statistics on a per-process basis, so you can track which processes (in
this case, containers) are using excess memory. This can be seen per process
by viewing `/proc/<PID>/status` on the host machine.
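For example, assuming a container named `web` (a hypothetical name), a quick sketch for checking the high water mark of its main process:
```bash
# Find the container's main process ID, then read its peak resident set size
$ PID=$(docker inspect --format '{{.State.Pid}}' web)
$ grep VmHWM /proc/$PID/status
```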
## CPU
By default, each container's access to the host machine's CPU cycles is unlimited.
You can set various constraints to limit a given container's access to the host
machine's CPU cycles.
| Option | Description |
|-----------------------|-----------------------------|
| `--cpu-shares` | Set this flag to a value greater or less than the default of 1024 to increase or reduce the container's weight, and give it access to a greater or lesser proportion of the host machine's CPU cycles. This is only enforced when CPU cycles are constrained. When plenty of CPU cycles are available, all containers use as much CPU as they need. In that way, this is a soft limit. `--cpu-shares` does not prevent containers from being scheduled in swarm mode. It prioritizes container CPU resources for the available CPU cycles. It does not guarantee or reserve any specific CPU access. |
| `--cpu-period` | The scheduling period of one logical CPU on a container. `--cpu-period` defaults to a time value of 100000 (100 ms). |
| `--cpu-quota`         | The maximum amount of time that a container can be scheduled during the period set by `--cpu-period`. |
| `--cpuset-cpus` | Use this option to pin your container to one or more CPU cores, separated by commas. |
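For instance, a minimal sketch that gives a container half the default CPU weight (the image is an example):
```bash
# 512 shares vs the default 1024: half the weight under CPU contention
$ docker run -it --cpu-shares=512 busybox sh
```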
### Example with `--cpu-period` and `--cpu-quota`
If you have a 1 vCPU system and your container runs with `--cpu-period=100000` and
`--cpu-quota=50000`, the container can consume up to 50% of 1 CPU.
```bash
$ docker run -ti --cpu-period=100000 --cpu-quota=50000 busybox
```
If you have a 4 vCPU system and your container runs with `--cpu-period=100000` and
`--cpu-quota=200000`, your container can consume up to 2 logical CPUs (200% of
`--cpu-period`).
```bash
$ docker run -ti --cpu-period=100000 --cpu-quota=200000 busybox
```
### Example with `--cpuset-cpus`
To give a container access to exactly 4 CPUs (CPUs 0 through 3), issue a
command like the following:
```bash
$ docker run -ti --cpuset-cpus=0-3 busybox
```
## Block IO (blkio)
Two options are available for tuning a given container's access to direct block IO
devices. You can also specify bandwidth limits in terms of bytes per second or
IO operations per second.
| Option | Description |
|-----------------------|-----------------------------|
| `blkio-weight` | By default, each container can use the same proportion of block IO bandwidth (blkio). The default weight is 500. To raise or lower the proportion of blkio used by a given container, set the `--blkio-weight` flag to a value between 10 and 1000. This setting affects all block IO devices equally. |
| `blkio-weight-device` | The same as `--blkio-weight`, but you can set a weight per device, using the syntax `--blkio-weight-device="DEVICE_NAME:WEIGHT"`. The `DEVICE_NAME:WEIGHT` is a string containing a colon-separated device name and weight. |
| `--device-read-bps` and `--device-write-bps` | Limits the read or write rate to or from a device by size, using a suffix of `kb`, `mb`, or `gb`. |
| `--device-read-iops` or `--device-write-iops` | Limits the read or write rate to or from a device by IO operations per second. |
### Block IO weight examples
>**Note**: The `--blkio-weight` flag only affects direct IO and has no effect on
buffered IO.
If you specify both the `--blkio-weight` and `--blkio-weight-device`, Docker
uses `--blkio-weight` as the default weight and uses `--blkio-weight-device` to
override the default on the named device.
To set a container's device weight for `/dev/sda` to 200 and not specify a
default `blkio-weight`:
```bash
$ docker run -it \
--blkio-weight-device "/dev/sda:200" \
ubuntu
```
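As a sketch of combining the two flags (device path and values are examples), this sets a default weight and overrides it for one device:
```bash
# 300 is the default weight; /dev/sda is overridden to 200
$ docker run -it \
    --blkio-weight 300 \
    --blkio-weight-device "/dev/sda:200" \
    ubuntu
```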
### Block bandwidth limit examples
This example limits the `ubuntu` container to a maximum write rate of 1MB per
second to `/dev/sda`:
```bash
$ docker run -it --device-write-bps /dev/sda:1mb ubuntu
```
This example limits the `ubuntu` container to a maximum read rate of 1000 IO
operations per second from `/dev/sda`:
```bash
$ docker run -ti --device-read-iops /dev/sda:1000 ubuntu
```

View File

@@ -0,0 +1,62 @@
---
description: Troubleshooting volume errors
keywords:
- cadvisor, troubleshooting, volumes, bind-mounts
title: Troubleshoot volume errors
---
# Troubleshoot volume errors
This topic discusses errors which may occur when you use Docker volumes or bind
mounts.
## `Error: Unable to remove filesystem`
Some container-based utilities, such
as [Google cAdvisor](https://github.com/google/cadvisor), mount Docker system
directories, such as `/var/lib/docker/`, into a container. For instance, the
documentation for `cadvisor` instructs you to run the `cadvisor` container as
follows:
```bash
$ sudo docker run \
--volume=/:/rootfs:ro \
--volume=/var/run:/var/run:rw \
--volume=/sys:/sys:ro \
--volume=/var/lib/docker/:/var/lib/docker:ro \
--publish=8080:8080 \
--detach=true \
--name=cadvisor \
google/cadvisor:latest
```
When you bind-mount `/var/lib/docker/`, this effectively mounts all resources of
all other running containers as filesystems within the container which mounts
`/var/lib/docker/`. When you attempt to remove any of these containers, the
removal attempt may fail with an error like the following:
```none
Error: Unable to remove filesystem for
74bef250361c7817bee19349c93139621b272bc8f654ae112dd4eb9652af9515:
remove /var/lib/docker/containers/74bef250361c7817bee19349c93139621b272bc8f654ae112dd4eb9652af9515/shm:
Device or resource busy
```
The problem occurs if the container which bind-mounts `/var/lib/docker/`
uses `statfs` or `fstatfs` on filesystem handles within `/var/lib/docker/`
and does not close them.
Typically, we would advise against bind-mounting `/var/lib/docker` in this way.
However, `cAdvisor` requires this bind-mount for core functionality.
If you are unsure which process is causing the path mentioned in the error to
be busy and preventing it from being removed, you can use the `lsof` command
to find its process. For instance, for the error above:
```bash
$ sudo lsof /var/lib/docker/containers/74bef250361c7817bee19349c93139621b272bc8f654ae112dd4eb9652af9515/shm
```
To work around this problem, stop the container which bind-mounts
`/var/lib/docker` and try again to remove the other container.
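As a concrete sketch, assuming the `cadvisor` container from the earlier example is the one holding the filesystem handles:
```bash
$ docker stop cadvisor         # stop the container that bind-mounts /var/lib/docker
$ docker rm <other-container>  # the removal should now succeed
$ docker start cadvisor
```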

View File

@@ -1,5 +1,5 @@
---
description: 'Instructions for installing Docker on Ubuntu. '
description: Instructions for installing Docker on Ubuntu
keywords: Docker, Docker documentation, requirements, apt, installation, ubuntu
redirect_from:
- /engine/installation/ubuntulinux/
@@ -618,4 +618,4 @@ following command:
$ rm -rf /var/lib/docker
```
You must delete any customized configuration files manually.
You must delete any edited configuration files manually.

View File

@@ -72,15 +72,46 @@ $ docker service update \
You can use `docker service inspect` to view the service's published port. For
instance:
```bash{% raw %}
```bash
{% raw %}
$ docker service inspect --format="{{json .Endpoint.Spec.Ports}}" my-web
[{"Protocol":"tcp","TargetPort":80,"PublishedPort":8080}]
{% endraw %}```
{% endraw %}
```
The output shows the `<TARGET-PORT>` from the containers and the
`<PUBLISHED-PORT>` where nodes listen for requests for the service.
### Publish a port for TCP only or UDP only
By default, when you publish a port, it is a TCP port. You can
specifically publish a UDP port instead of or in addition to a TCP port. When
you publish both TCP and UDP ports, Docker 1.12.2 and earlier require you to
add the suffix `/tcp` for TCP ports. Otherwise it is optional.
#### TCP only
The following two commands are equivalent.
```bash
$ docker service create --name dns-cache -p 53:53 dns-cache
$ docker service create --name dns-cache -p 53:53/tcp dns-cache
```
#### TCP and UDP
```bash
$ docker service create --name dns-cache -p 53:53/tcp -p 53:53/udp dns-cache
```
#### UDP only
```bash
$ docker service create --name dns-cache -p 53:53/udp dns-cache
```
## Configure an external load balancer
You can configure an external load balancer to route requests to a swarm

View File

@@ -559,10 +559,39 @@ Delete the `training/sinatra` image as you don't need it anymore.
> **Note:** To remove an image from the host, please make sure
> that there are no containers actively based on it.
## Check size of images and containers
An image is
[stored in layers](../userguide/storagedriver/imagesandcontainers.md)
which are shared with other images on the host, so the real disk usage depends on
how much layer overlap exists between images on a host.
A container runs on
[a writable layer](../userguide/storagedriver/imagesandcontainers.md#/container-and-layers)
on top of a read-only rootfs.
Use `docker history` to see the size of image layers on your host:
$ docker history centos:centos7
IMAGE CREATED CREATED BY SIZE
970633036444 6 weeks ago /bin/sh -c #(nop) CMD ["/bin/bash"] 0 B
<missing> 6 weeks ago /bin/sh -c #(nop) LABEL name=CentOS Base Imag 0 B
<missing> 6 weeks ago /bin/sh -c #(nop) ADD file:44ef4e10b27d8c464a 196.7 MB
<missing> 10 weeks ago /bin/sh -c #(nop) MAINTAINER https://github.c 0 B
Check the size of containers with `docker ps -s`:
$ docker ps -s
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES SIZE
cb7827c19ef7 docker-docs:is-11160-explain-image-container-size-prediction "hugo server --port=8" About a minute ago Up About a minute 0.0.0.0:8000->8000/tcp evil_hodgkin 0 B (virtual 949.2 MB)
# Next steps
Until now you've seen how to build individual applications inside Docker
containers. Now learn how to build whole application stacks with Docker
by networking together multiple Docker containers.
Go to [Network containers](networkingcontainers.md).

View File

@ -190,10 +190,11 @@ You may also use the `docker volume create` command, to create a volume before
using it in a container.
The following example also creates the `my-named-volume` volume, this time
using the `docker volume create` command.
using the `docker volume create` command. Options are specified as key-value
pairs in the format `o=<key>=<value>`.
```bash
$ docker volume create -d flocker -o size=20GB my-named-volume
$ docker volume create -d flocker --opt o=size=20GB my-named-volume
$ docker run -d -P \
-v my-named-volume:/webapp \

View File

@ -37,6 +37,7 @@ The size of the VM's disk can be configured this way:
- `--virtualbox-no-share`: Disable the mount of your home directory
- `--virtualbox-no-dns-proxy`: Disable proxying all DNS requests to the host (Boolean value, defaults to false)
- `--virtualbox-no-vtx-check`: Disable checking for the availability of hardware virtualization before the VM is started
- `--virtualbox-share-folder`: Mount the specified directory instead of the default home location. Format: `dir:name`
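For example, a minimal sketch of the new flag (the directory and share name are
hypothetical):

```bash
# Share /Users/me/src into the VM under the name "src" (hypothetical values)
$ docker-machine create -d virtualbox \
    --virtualbox-share-folder "/Users/me/src:src" \
    dev
```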
The `--virtualbox-boot2docker-url` flag takes a few different forms. By
default, if no value is specified for this flag, Machine will check locally for
@ -80,6 +81,7 @@ upper bound of `192.168.24.254`.
| `--virtualbox-no-share` | `VIRTUALBOX_NO_SHARE` | `false` |
| `--virtualbox-no-dns-proxy` | `VIRTUALBOX_NO_DNS_PROXY` | `false` |
| `--virtualbox-no-vtx-check` | `VIRTUALBOX_NO_VTX_CHECK` | `false` |
| `--virtualbox-share-folder` | `VIRTUALBOX_SHARE_FOLDER` | `~:users` |
## Known Issues
@ -89,4 +91,4 @@ contents.
This will often cause problems when using a web server such as nginx to serve
static files from a shared volume. For development environments, a good
workaround is to disable sendfile in your server configuration.
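For nginx, that workaround is a one-line change in the server configuration (a
minimal sketch):

```nginx
# Read files from the shared volume directly instead of using sendfile
sendfile off;
```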

View File

@ -1,254 +1,69 @@
---
description: How to set up a server to test Docker Windows client
keywords: development, inception, container, image Dockerfile, dependencies, Go, artifacts, windows
title: Get the required software for Windows
description: How to set up a server to test Docker Engine on Windows
keywords: development, inception, container, image Dockerfile, dependencies, Go, artifacts, windows
title: Build and test Docker on Windows
---
This page explains how to get the software you need to use a Windows Server
2012 or Windows 8 machine for Docker development. Before you begin contributing
you must have:
This page explains how to get the software you need to build, test, and run the Docker source code on Windows, and how to set up the required software and services:
- a GitHub account
- Git for Windows (msysGit)
- TDM-GCC, a compiler suite for Windows
- MinGW (tar and xz)
- Go language
- Windows containers
- GitHub account
- Git
> **Note**: This installation procedure refers to the `C:\` drive. If your
> system's main drive is `D:\`, substitute that drive letter where appropriate
> in these instructions.
## 1. Docker Windows containers
## Task 1. Get a GitHub account
To test and run the Windows Docker daemon, you need a system that supports Windows Containers:
To contribute to the Docker project, you will need a <a
href="https://github.com" target="_blank">GitHub account</a>. A free account is
fine. All the Docker project repositories are public and visible to everyone.
* Windows 10 Anniversary Edition
* Windows Server 2016 running in a VM, on bare metal or in the cloud
You should also have some experience using both the GitHub application and `git`
on the command line.
Check out the [getting started documentation](https://github.com/docker/labs/blob/master/windows/windows-containers/Setup.md) for details.
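On Windows Server 2016, for instance, the feature can be enabled from an
elevated PowerShell prompt; a minimal sketch (the linked guide covers Windows 10
and the remaining setup):

```powershell
# Enable the Containers feature, then reboot to finish the installation
Install-WindowsFeature Containers
Restart-Computer -Force
```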
## Task 2. Install Git for Windows
## 2. GitHub account
Git for Windows includes several tools including msysGit, which is a build
environment. The environment contains the tools you need for development such as
Git and a Git Bash shell.
To contribute to the Docker project, you need a <a href="https://github.com" target="_blank">GitHub account</a>. A free account is fine. All the Docker project repositories are public and visible to everyone.
1. Browse to the [Git for Windows](https://msysgit.github.io/) download page.
This guide assumes that you have basic familiarity with Git and GitHub terminology and usage. Refer to [GitHub For Beginners: Don't Get Scared, Get Started](http://readwrite.com/2013/09/30/understanding-github-a-journey-for-beginners-part-1/) to get up to speed on GitHub.
2. Click **Download**.
## 3. Git
Windows prompts you to save the file to your machine.
In PowerShell, run:
3. Run the saved file.
    Invoke-Webrequest "https://github.com/git-for-windows/git/releases/download/v2.7.2.windows.1/Git-2.7.2-64-bit.exe" -OutFile git.exe -UseBasicParsing
    Start-Process git.exe -ArgumentList '/VERYSILENT /SUPPRESSMSGBOXES /CLOSEAPPLICATIONS /DIR=c:\git\' -Wait
    setx /M PATH "$env:Path;c:\git\cmd"
The system displays the **Git Setup** wizard.
You are now ready to clone and build the Docker source code.
4. Click the **Next** button to move through the wizard and accept all the defaults.
## 4. Clone Docker
5. Click **Finish** when you are done.
In a new (to pick up the path change) PowerShell prompt, run:
## Task 3. Install TDM-GCC
    git clone https://github.com/docker/docker
    cd docker
TDM-GCC is a compiler suite for Windows. You'll use this suite to compile the
Docker Go code as you develop.
This clones the main Docker repository. Check out [Docker on GitHub](https://github.com/docker) to learn about the other software that powers the Docker platform.
1. Browse to
[tdm-gcc download page](http://tdm-gcc.tdragon.net/download).
## 5. Build and run
2. Click on the latest 64-bit version of the package.
Create a builder-container with the Docker source code. You can change the source code on your system and rebuild any time:
Windows prompts you to save the file to your machine
    docker build -t nativebuildimage -f .\Dockerfile.windows .
3. Set up the suite by running the downloaded file.
To build Docker, run:
The system opens the **TDM-GCC Setup** wizard.
    docker run --name out nativebuildimage sh -c 'cd /c/go/src/github.com/docker/docker; hack/make.sh binary'
4. Click **Create**.
Copy out the resulting Windows Docker daemon binary to dockerd.exe in the current directory:
5. Click the **Next** button to move through the wizard and accept all the defaults.
    docker cp out:C:\go\src\github.com\docker\docker\bundles\$(cat VERSION)\binary-daemon\dockerd-$(cat VERSION).exe dockerd.exe
6. Click **Finish** when you are done.
To test it, stop the system Docker daemon and start the one you just built:
    Stop-Service Docker
    .\dockerd.exe -D
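Assuming the dev daemon listens on the default named pipe, you can check it
from a second PowerShell window; a minimal sketch:

```powershell
# The Server section of the output should report the dev version you just built
docker version
```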
## Task 4. Install MinGW (tar and xz)
MinGW is a minimalist port of the GNU Compiler Collection (GCC). In this
procedure, you first download and install the MinGW installation manager. Then,
you use the manager to install the `tar` and `xz` tools from the collection.
1. Browse to MinGW
[SourceForge](http://sourceforge.net/projects/mingw/).
2. Click **Download**.
Windows prompts you to save the file to your machine
3. Run the downloaded file.
The system opens the **MinGW Installation Manager Setup Tool**
4. Choose **Install** to install the MinGW Installation Manager.
5. Press **Continue**.
The system installs and then opens the MinGW Installation Manager.
6. Press **Continue** after the install completes to open the manager.
7. Select **All Packages > MSYS Base System** from the left hand menu.
The system displays the available packages.
8. Click on the **msys-tar bin** package and choose **Mark for Installation**.
9. Click on the **msys-xz bin** package and choose **Mark for Installation**.
10. Select **Installation > Apply Changes**, to install the selected packages.
The system displays the **Schedule of Pending Actions Dialog**.
![windows-mingw](images/windows-mingw.png)
11. Press **Apply**
MinGW installs the packages for you.
12. Close the dialog and the MinGW Installation Manager.
## Task 5. Set up your environment variables
You'll need to add the compiler to your `Path` environment variable.
1. Open the **Control Panel**.
2. Choose **System and Security > System**.
3. Click the **Advanced system settings** link in the sidebar.
The system opens the **System Properties** dialog.
3. Select the **Advanced** tab.
4. Click **Environment Variables**.
The system opens the **Environment Variables** dialog.
5. Locate the **System variables** area and scroll to the **Path**
variable.
![windows-mingw](images/path_variable.png)
6. Click **Edit** to edit the variable (you can also double-click it).
The system opens the **Edit System Variable** dialog.
7. Make sure the `Path` includes `C:\TDM-GCC64\bin`
![include gcc](images/include_gcc.png)
If you don't see `C:\TDM-GCC64\bin`, add it.
8. Press **OK** to close this dialog.
9. Press **OK** twice to close out of the remaining dialogs.
## Install Go and cross-compile it
In this section, you install the Go language. Then, you build the source so that it can cross-compile for the `linux/amd64` architecture.
1. Open [Go Language download](http://golang.org/dl/) page in your browser.
2. Locate and click the latest `.msi` installer.
The system prompts you to save the file.
3. Run the installer.
The system opens the **Go Programming Language Setup** dialog.
4. Select all the defaults to install.
5. Press **Finish** to close the installation dialog.
6. Start a command prompt.
7. Change to the Go `src` directory.
cd c:\Go\src
8. Set the following Go variables
c:\Go\src> set GOOS=linux
c:\Go\src> set GOARCH=amd64
9. Compile the source.
c:\Go\src> make.bat
Compiling the source also adds a number of variables to your Windows environment.
## Task 6. Get the Docker repository
In this step, you start a Git `bash` terminal and get the Docker source code
from GitHub.
1. Locate the **Git Bash** program and start it.
Recall that **Git Bash** came with the Git for Windows installation. **Git
Bash**, just as it sounds, allows you to run a Bash terminal on Windows.
![Git Bash](images/git_bash.png)
2. Change to the root directory.
$ cd /c/
3. Make a `gopath` directory.
$ mkdir gopath
4. Go get the `docker/docker` repository.
    $ go.exe get github.com/docker/docker
    package github.com/docker/docker
        imports github.com/docker/docker
        imports github.com/docker/docker: no buildable Go source files in C:\gopath\src\github.com\docker\docker
In the next steps, you create environment variables for your Go paths.
5. Open the **Control Panel** on your system.
6. Choose **System and Security > System**.
7. Click the **Advanced system settings** link in the sidebar.
The system opens the **System Properties** dialog.
8. Select the **Advanced** tab.
9. Click **Environment Variables**.
The system opens the **Environment Variables** dialog.
10. Locate the **System variables** area and scroll to the **Path**
variable.
11. Click **New**.
Now you are going to create some new variables. You'll create the paths themselves in the next procedure, but you can set the variables now.
12. Enter `GOPATH` for the **Variable Name**.
13. For the **Variable Value** enter the following:
C:\gopath;C:\gopath\src\github.com\docker\docker\vendor
14. Press **OK** to close this dialog.
The system adds `GOPATH` to the list of **System Variables**.
15. Press **OK** twice to close out of the remaining dialogs.
The other make targets work too. To run the unit tests, try: `docker run --rm nativebuildimage sh -c 'cd /c/go/src/github.com/docker/docker; hack/make.sh test-unit'`.
## Where to go next

View File

@ -174,9 +174,9 @@ host as one of the Swarm managers.
2. From the output, copy the `eth0` IP address from `inet addr`.
3. Paste the launch command into the command line:
3. To set up a discovery backend, use the following command, replacing `<consul0_ip>` with the IP address from the previous command:
$ docker run -d -p 8500:8500 --name=consul progrium/consul -server -bootstrap
$ docker run -d -p 8500:8500 --name=consul progrium/consul -server -bootstrap -advertise=<consul0_ip>
4. Enter `docker ps`.
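    Illustrative output, trimmed for width (your container ID will differ),
    showing the consul container with its published port:

        $ docker ps
        CONTAINER ID        IMAGE               COMMAND                  ...   PORTS                    NAMES
        a1b2c3d4e5f6        progrium/consul     "/bin/start -server"     ...   0.0.0.0:8500->8500/tcp   consul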

357
test.md Normal file
View File

@ -0,0 +1,357 @@
---
description: Smoketest page
title: Testing page
hide_from_sitemap: true
---
# Heading 1
Plain block of text.
Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor
incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis
nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu
fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in
culpa qui officia deserunt mollit anim id est laborum.
## Heading 2
Text with various styles, basic markdown formatting. You should **not** see a single line comment below this line.
<!-- This is a comment. You should not see it rendered in the page. -->
Once you create or link to a repository in Docker Cloud, you can set up [automated testing](/docker-cloud/builds/automated-testing.md) and [automated builds](/docker-cloud/builds/automated-build.md).
![a small cute image](/images/footer_moby_icon.png)
> **Note**: This is a note. Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam.
![a pretty wide image](/images/banner_image_24512.png)
<center>
This line is centered with HTML.
</center>
This line is centered with curly-brace injection.
{: style="text-align:center" }
Some Lorem ipsum text with formatting and styling.
**Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt** ut labore `et dolore magna aliqua`. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo _consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore_ eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt _**in culpa qui officia deserunt mollit anim id est laborum.**_
### Heading 3
A selection of lists, ordered and unordered, with indented sub elements.
> **Note**: This is some note text.
1.  Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod
    tempor incididunt ut labore et dolore magna aliqua.

    > **Note**: This is indented note text with a follow-on image

    ![a small cute image](/images/footer_moby_icon.png)

2.  Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod
    tempor incididunt ut labore et dolore magna aliqua.

    1.  A second level ordered list
        1.  A third level ordered list
        2.  A second third level ordered list
        3.  A second third level ordered list
    2.  A second second level ordered list.

        > **Tip**: this is doubly indented note text

    3.  A third second level ordered list.

        ```
        with code block
        ```

3.  Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod
    tempor incididunt ut labore et dolore magna aliqua.

    ![a small cute image](/images/footer_moby_icon.png)

4.  Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod
    tempor incididunt ut labore et dolore magna aliqua.

5.  Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod
    tempor incididunt ut labore et dolore magna aliqua.

    *   A second level unordered list.
        *   A third level unordered list.
            *   A fourth level unordered list.
            *   A second line of fourth level unordered list.
            *   A third line of fourth level unordered list.
        *   A second line of third level unordered list.
        *   A third line of third level unordered list.
    *   A second second level unordered list.

        > **Note**: this is indented note text

    *   A third second level unordered list.

        ```
        with codeblock
        ```

6.  Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod
    tempor incididunt ut labore et dolore magna aliqua.

7.  Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod
    tempor incididunt ut labore et dolore magna aliqua. eu fugiat nulla pariatur.

8.  Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod
    tempor incididunt ut labore et dolore magna aliqua.

    ![a pretty wide image](/images/banner_image_24512.png)

9.  proident, sunt in culpa qui

10. officia deserunt mollit anim id

    ```none
    This is unstyled (none) text. This tells us if the Kramdown gotcha about indenting the exact number of spaces works or not.
    ```

11. est laborum.

12. And another line, because reasons.
> **Well**: This is a Note block with a nested code block in it.
>
> ```json
> "server": {
> "http_addr": ":4443",
> "tls_key_file": "./fixtures/notary-server.key",
> "tls_cert_file": "./fixtures/notary-server.crt"
> }
> ```
#### Heading 4
> **Note**: This is the lowest heading included in the right-nav.
Some tables in markdown and html.
| Permission level | Access |
| ------------- | ------------- |
| **Subheading (boring old bold styling)** | |
| Read | Pull |
| Read/Write | Pull, push |
| Admin | All of the above, plus update description, create and delete |
<table style="width:100%">
<tr>
<th style="font-size: x-large; font-family: arial">Left channel</th>
<th style="font-size: x-large; font-family: arial">Right channel</th>
</tr>
<tr valign="top">
<td width="50%">This is some test text. <br><br>This is more text on a new line. <br><br>Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
</td>
<td width="50%">This is some more text about the right hand side. There is a <a href="https://github.com/docker/docker/tree/master/experimental" target="_blank">link here to the Docker Experimental Features README</a> on GitHub.<br><br>Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.</td>
</tr>
<tr valign="top">
<td width="50%">
<a class="button darkblue-btn" href="https://docs.docker.com/">Go to the docs!</a><br><br>
<a href="https://docs.docker.com/"><font color="#BDBDBD" size="-1">It is dark here. You are likely to be eaten by a grue.</font></a>
</td>
<td width="50%">
<a class="button darkblue-btn" href="https://docs.docker.com/">Go to the docs!</a><br><br>
<a href="https://docs.docker.com/"><font color="#BDBDBD" size="-1">It is dark here. You are likely to be eaten by a grue.</font></a>
</td>
</tr>
</table>
##### Heading 5
This heading is not included in the right-nav.
Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
###### Heading 6?!
This heading is not included in the right-nav.
Probably not the most useful thing, but nice to know it exists.
## Some code block samples
#### Rawstyle
```none
none with raw
{% raw %}
$ some command with {{double braces}}
$ some other command
{% endraw %}
```
```bash
bash with raw
{% raw %}
$ some command with {{double braces}}
$ some other command
{% endraw %}
```
#### Bash
```bash
$ echo "deb https://packages.docker.com/1.12/apt/repo ubuntu-trusty main" | sudo tee /etc/apt/sources.list.d/docker.list
```
#### GO
```go
incoming := map[string]interface{}{
	"asdf": 1,
	"qwer": []interface{}{},
	"zxcv": []interface{}{
		map[string]interface{}{},
		true,
		int(1e9),
		"tyui",
	},
}

canonical, err := json.Marshal(incoming)
if err != nil {
	// ... handle error
}
```
#### Python
```python
return html.format(name=os.getenv('NAME', "world"), hostname=socket.gethostname(), visits=visits)
```
#### Ruby
```ruby
docker_service 'default' do
  action [:create, :start]
end

docker_image 'busybox' do
  action :pull
end

docker_container 'an echo server' do
  repo 'busybox'
  port '1234:1234'
  command "nc -ll -p 1234 -e /bin/cat"
end
```
#### JSON
```json
"server": {
"http_addr": ":4443",
"tls_key_file": "./fixtures/notary-server.key",
"tls_cert_file": "./fixtures/notary-server.crt"
}
```
#### HTML
```html
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
</head>
</html>
```
#### Markdown
```md
[![Deploy to Docker Cloud](https://files.cloud.docker.com/images/deploy-to-dockercloud.svg)](https://cloud.docker.com/stack/deploy/?repo=<repo_url>)
```
#### ini
```ini
[supervisord]
nodaemon=true
[program:sshd]
command=/usr/sbin/sshd -D
[program:apache2]
command=/bin/bash -c "source /etc/apache2/envvars && exec /usr/sbin/apache2 -DFOREGROUND"
```
#### Dockerfile
```dockerfile
#
# example Dockerfile for https://docs.docker.com/examples/postgresql_service/
#
FROM ubuntu
MAINTAINER SvenDowideit@docker.com
# Add the PostgreSQL PGP key to verify their Debian packages.
# It should be the same key as https://www.postgresql.org/media/keys/ACCC4CF8.asc
RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8
# Add PostgreSQL's repository. It contains the most recent stable release
# of PostgreSQL, ``9.3``.
RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main" > /etc/apt/sources.list.d/pgdg.list
# Install ``python-software-properties``, ``software-properties-common`` and PostgreSQL 9.3
# There are some warnings (in red) that show up during the build. You can hide
# them by prefixing each apt-get statement with DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y python-software-properties software-properties-common postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3
# Note: The official Debian and Ubuntu images automatically ``apt-get clean``
# after each ``apt-get``
# Run the rest of the commands as the ``postgres`` user created by the ``postgres-9.3`` package when it was ``apt-get installed``
USER postgres
# Create a PostgreSQL role named ``docker`` with ``docker`` as the password and
# then create a database `docker` owned by the ``docker`` role.
# Note: here we use ``&&\`` to run commands one after the other - the ``\``
# allows the RUN command to span multiple lines.
RUN /etc/init.d/postgresql start &&\
    psql --command "CREATE USER docker WITH SUPERUSER PASSWORD 'docker';" &&\
    createdb -O docker docker
# Adjust PostgreSQL configuration so that remote connections to the
# database are possible.
RUN echo "host all all 0.0.0.0/0 md5" >> /etc/postgresql/9.3/main/pg_hba.conf
# And add ``listen_addresses`` to ``/etc/postgresql/9.3/main/postgresql.conf``
RUN echo "listen_addresses='*'" >> /etc/postgresql/9.3/main/postgresql.conf
# Expose the PostgreSQL port
EXPOSE 5432
# Add VOLUMEs to allow backup of config, logs and databases
VOLUME ["/etc/postgresql", "/var/log/postgresql", "/var/lib/postgresql"]
# Set the default command to run when starting the container
CMD ["/usr/lib/postgresql/9.3/bin/postgres", "-D", "/var/lib/postgresql/9.3/main", "-c", "config_file=/etc/postgresql/9.3/main/postgresql.conf"]
```
#### YAML
```yaml
authorizedkeys:
  image: dockercloud/authorizedkeys
  deployment_strategy: every_node
  autodestroy: always
  environment:
    - AUTHORIZED_KEYS=ssh-rsa AAAAB3Nsomelongsshkeystringhereu9UzQbVKy9o00NqXa5jkmZ9Yd0BJBjFmb3WwUR8sJWZVTPFL
  volumes:
    - /root:/user:rw
```