Merge branch 'amberjack' into engine-release-notes

commit ac0590f035
Adrian Plata, 2019-07-01 16:31:25 -07:00, committed by GitHub
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
300 changed files with 6530 additions and 2981 deletions

@@ -13,7 +13,7 @@
 # Engine
-ARG ENGINE_BRANCH="18.09.x"
+ARG ENGINE_BRANCH="19.03.x"
 # Distribution
 ARG DISTRIBUTION_BRANCH="release/2.6"

Jenkinsfile (vendored, 101 lines changed)

@@ -5,7 +5,6 @@ pipeline {
     label 'ubuntu-1604-aufs-stable'
   }
   environment {
-    DTR_VPN_ADDRESS = credentials('dtr-vpn-address')
     DTR_URL = credentials('dtr-url')
     DOCKER_HOST_STRING = credentials('docker-host')
     UCP_BUNDLE = credentials('ucp-bundle')
@@ -26,19 +25,17 @@ pipeline {
         branch 'master'
       }
       steps {
-        withVpn("$DTR_VPN_ADDRESS") {
-          sh """
-            cat $SUCCESS_BOT_TOKEN | docker login $DTR_URL --username 'success_bot' --password-stdin
-            docker build -t $DTR_URL/docker/docker.github.io:stage-${env.BUILD_NUMBER} .
-            docker push $DTR_URL/docker/docker.github.io:stage-${env.BUILD_NUMBER}
-            unzip -o $UCP_BUNDLE
-            export DOCKER_TLS_VERIFY=1
-            export COMPOSE_TLS_VERSION=TLSv1_2
-            export DOCKER_CERT_PATH=${WORKSPACE}/ucp-bundle-success_bot
-            export DOCKER_HOST=$DOCKER_HOST_STRING
-            docker service update --detach=false --force --image $DTR_URL/docker/docker.github.io:stage-${env.BUILD_NUMBER} docs-stage-docker-com_docs --with-registry-auth
-          """
-        }
+        sh """
+          cat $SUCCESS_BOT_TOKEN | docker login $DTR_URL --username 'success_bot' --password-stdin
+          docker build -t $DTR_URL/docker/docker.github.io:stage-${env.BUILD_NUMBER} .
+          docker push $DTR_URL/docker/docker.github.io:stage-${env.BUILD_NUMBER}
+          unzip -o $UCP_BUNDLE
+          export DOCKER_TLS_VERIFY=1
+          export COMPOSE_TLS_VERSION=TLSv1_2
+          export DOCKER_CERT_PATH=${WORKSPACE}/ucp-bundle-success_bot
+          export DOCKER_HOST=$DOCKER_HOST_STRING
+          docker service update --detach=false --force --image $DTR_URL/docker/docker.github.io:stage-${env.BUILD_NUMBER} docs-stage-docker-com_docs --with-registry-auth
+        """
       }
     }
     stage( 'build + push prod image, update prod swarm' ) {
@@ -46,21 +43,19 @@ pipeline {
         branch 'published'
       }
      steps {
-        withVpn("$DTR_VPN_ADDRESS") {
-          withDockerRegistry(reg) {
-            sh """
-              docker build -t docs/docker.github.io:prod-${env.BUILD_NUMBER} .
-              docker push docs/docker.github.io:prod-${env.BUILD_NUMBER}
-              unzip -o $UCP_BUNDLE
-              cd ucp-bundle-success_bot
-              export DOCKER_TLS_VERIFY=1
-              export COMPOSE_TLS_VERSION=TLSv1_2
-              export DOCKER_CERT_PATH=${WORKSPACE}/ucp-bundle-success_bot
-              export DOCKER_HOST=$DOCKER_HOST_STRING
-              docker service update --detach=false --force --image docs/docker.github.io:prod-${env.BUILD_NUMBER} docs-docker-com_docs --with-registry-auth
-              curl -X POST -H 'Content-type: application/json' --data '{"text":"Successfully published docs. https://docs.docker.com/"}' $SLACK
-            """
-          }
-        }
+        withDockerRegistry(reg) {
+          sh """
+            docker build -t docs/docker.github.io:prod-${env.BUILD_NUMBER} .
+            docker push docs/docker.github.io:prod-${env.BUILD_NUMBER}
+            unzip -o $UCP_BUNDLE
+            cd ucp-bundle-success_bot
+            export DOCKER_TLS_VERIFY=1
+            export COMPOSE_TLS_VERSION=TLSv1_2
+            export DOCKER_CERT_PATH=${WORKSPACE}/ucp-bundle-success_bot
+            export DOCKER_HOST=$DOCKER_HOST_STRING
+            docker service update --detach=false --force --image docs/docker.github.io:prod-${env.BUILD_NUMBER} docs-docker-com_docs --with-registry-auth
+            curl -X POST -H 'Content-type: application/json' --data '{"text":"Successfully published docs. https://docs.docker.com/"}' $SLACK
+          """
+        }
       }
     }
   }
@@ -76,19 +71,17 @@ pipeline {
         branch 'amberjack'
       }
       steps {
-        withVpn("$DTR_VPN_ADDRESS") {
-          sh """
-            cat $SUCCESS_BOT_TOKEN | docker login $DTR_URL --username 'success_bot' --password-stdin
-            docker build -t $DTR_URL/docker/docs-private:beta-stage-${env.BUILD_NUMBER} .
-            docker push $DTR_URL/docker/docs-private:beta-stage-${env.BUILD_NUMBER}
-            unzip -o $UCP_BUNDLE
-            export DOCKER_TLS_VERIFY=1
-            export COMPOSE_TLS_VERSION=TLSv1_2
-            export DOCKER_CERT_PATH=${WORKSPACE}/ucp-bundle-success_bot
-            export DOCKER_HOST=$DOCKER_HOST_STRING
-            docker service update --detach=false --force --image $DTR_URL/docker/docs-private:beta-stage-${env.BUILD_NUMBER} docs-beta-stage-docker-com_docs --with-registry-auth
-          """
-        }
+        sh """
+          cat $SUCCESS_BOT_TOKEN | docker login $DTR_URL --username 'success_bot' --password-stdin
+          docker build -t $DTR_URL/docker/docs-private:beta-stage-${env.BUILD_NUMBER} .
+          docker push $DTR_URL/docker/docs-private:beta-stage-${env.BUILD_NUMBER}
+          unzip -o $UCP_BUNDLE
+          export DOCKER_TLS_VERIFY=1
+          export COMPOSE_TLS_VERSION=TLSv1_2
+          export DOCKER_CERT_PATH=${WORKSPACE}/ucp-bundle-success_bot
+          export DOCKER_HOST=$DOCKER_HOST_STRING
+          docker service update --detach=false --force --image $DTR_URL/docker/docs-private:beta-stage-${env.BUILD_NUMBER} docs-beta-stage-docker-com_docs --with-registry-auth
+        """
      }
    }
    stage( 'build + push beta image, update beta swarm' ) {
@@ -96,19 +89,17 @@ pipeline {
         branch 'published'
       }
       steps {
-        withVpn("$DTR_VPN_ADDRESS") {
-          sh """
-            cat $SUCCESS_BOT_TOKEN | docker login $DTR_URL --username 'success_bot' --password-stdin
-            docker build -t $DTR_URL/docker/docs-private:beta-${env.BUILD_NUMBER} .
-            docker push $DTR_URL/docker/docs-private:beta-${env.BUILD_NUMBER}
-            unzip -o $UCP_BUNDLE
-            export DOCKER_TLS_VERIFY=1
-            export COMPOSE_TLS_VERSION=TLSv1_2
-            export DOCKER_CERT_PATH=${WORKSPACE}/ucp-bundle-success_bot
-            export DOCKER_HOST=$DOCKER_HOST_STRING
-            docker service update --detach=false --force --image $DTR_URL/docker/docs-private:beta-${env.BUILD_NUMBER} docs-beta-docker-com_docs --with-registry-auth
-          """
-        }
+        sh """
+          cat $SUCCESS_BOT_TOKEN | docker login $DTR_URL --username 'success_bot' --password-stdin
+          docker build -t $DTR_URL/docker/docs-private:beta-${env.BUILD_NUMBER} .
+          docker push $DTR_URL/docker/docs-private:beta-${env.BUILD_NUMBER}
+          unzip -o $UCP_BUNDLE
+          export DOCKER_TLS_VERIFY=1
+          export COMPOSE_TLS_VERSION=TLSv1_2
+          export DOCKER_CERT_PATH=${WORKSPACE}/ucp-bundle-success_bot
+          export DOCKER_HOST=$DOCKER_HOST_STRING
+          docker service update --detach=false --force --image $DTR_URL/docker/docs-private:beta-${env.BUILD_NUMBER} docs-beta-docker-com_docs --with-registry-auth
+        """
       }
     }
   }

@@ -363,7 +363,7 @@ branch](https://github.com/docker/docker.github.io/blob/publish-tools/README.md)
 ## Creating a new archive
-When a new Docker CE Stable version is released, the previous state of `master`
+When a new Docker Engine - Community Stable version is released, the previous state of `master`
 is archived into a version-specific branch like `v17.09`, by doing the following:
 1. Create branch based off the commit hash before the new version was released.
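For orientation, the archiving step described above usually boils down to a couple of git commands (a minimal sketch; the version number, commit hash, and remote name are placeholders):

```bash
# Create the archive branch from the last commit before the new release landed
git checkout -b v18.09 <commit-hash-before-release>   # placeholder hash

# Publish the archive branch (assumes the remote is named "upstream")
git push upstream v18.09
```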

@@ -13,27 +13,29 @@ safe: false
 lsi: false
 url: https://docs.docker.com
 # This needs to have all the directories you expect to be in the archives (delivered by docs-base in the Dockerfile)
-keep_files: ["v17.03", "v17.06", "v17.09", "v17.12", "v18.03"]
+keep_files: ["v17.03", "v17.06", "v17.09", "v17.12", "v18.03", "v18.09"]
 exclude: ["_scripts", "apidocs/layouts", "Gemfile", "hooks", "index.html", "404.html"]
 # Component versions -- address like site.docker_ce_version
 # You can't have - characters in these for non-YAML reasons
-latest_engine_api_version: "1.39"
-docker_ce_version: "18.09"
-docker_ee_version: "18.09"
-compose_version: "1.24.0"
+latest_engine_api_version: "1.40"
+docker_ce_version: "19.03"
+docker_ee_version: "19.03"
+compose_version: "1.24.1"
 compose_file_v3: "3.7"
 compose_file_v2: "2.4"
-machine_version: "0.16.0"
-distribution_version: "2.6"
-dtr_version: "2.6"
-ucp_version: "3.1"
+machine_version: "0.16.1"
+distribution_version: "2.7"
+dtr_version: "2.7"
+ucp_version: "3.2"
 ucp_versions:
-- version: "3.1"
+- version: "3.2"
   path: /ee/ucp/
   latest: true
+- version: "3.1"
+  path: /datacenter/ucp/3.1/guides/
 - version: "3.0"
   path: /datacenter/ucp/3.0/guides/
 - version: "2.2"
@@ -46,9 +48,11 @@ ucp_versions:
   path: /datacenter/ucp/1.1/overview/
 dtr_versions:
-- version: "2.6"
+- version: "2.7"
   path: /ee/dtr/
   latest: true
+- version: "2.6"
+  path: /datacenter/dtr/2.6/guides/
 - version: "2.5"
   path: /datacenter/dtr/2.5/guides/
 - version: "2.4"
@@ -63,11 +67,22 @@ dtr_versions:
   path: /datacenter/dtr/2.0/
 tablabels:
+  dee-3.0: Docker Enterprise Edition 3.0
+  dee-2.1: Docker Enterprise Edition 2.1
   dee-2.0: Docker Enterprise Edition 2.0
+  ucp-3.2: Universal Control Plane 3.2
+  ucp-3.1: Universal Control Plane 3.1
   ucp-3.0: Universal Control Plane 3.0
   ucp-2.2: Universal Control Plane 2.2
+  dtr-2.7: Docker Trusted Registry 2.7
+  dtr-2.6: Docker Trusted Registry 2.6
   dtr-2.5: Docker Trusted Registry 2.5
   dtr-2.4: Docker Trusted Registry 2.4
+  engine-19.03: Docker EE Engine 19.03
+  engine-18.09: Docker EE Engine 18.09
+  engine-18.03: Docker EE Engine 18.03
+  engine-17.12: Docker EE Engine 17.12
+  engine-17.09: Docker EE Engine 17.09
   engine-17.06: Docker EE Engine 17.06
   engine-17.03: Docker EE Engine 17.03
   docker-cli-linux: Docker CLI on Mac/Linux
@@ -96,7 +111,7 @@ defaults:
 - scope:
     path: "install"
   values:
-    win_latest_build: "docker-18.09.6"
+    win_latest_build: "docker-19.03.0"
 - scope:
     path: "datacenter"
   values:
@@ -106,7 +121,14 @@ defaults:
   values:
     dtr_org: "docker"
     dtr_repo: "dtr"
-    dtr_version: "2.7.0-beta4"
+    dtr_version: "2.7.0"
+- scope:
+    path: "datacenter/dtr/2.6"
+  values:
+    hide_from_sitemap: true
+    dtr_org: "docker"
+    dtr_repo: "dtr"
+    dtr_version: "2.6.6"
 - scope:
     path: "datacenter/dtr/2.5"
   values:
@@ -149,15 +171,22 @@ defaults:
   values:
     ucp_org: "docker"
     ucp_repo: "ucp"
-    ucp_version: "3.2.0-beta4"
+    ucp_version: "3.2.0"
 - scope: # This is a bit of a hack for the get-support.md topic.
     path: "ee"
   values:
     ucp_org: "docker"
     ucp_repo: "ucp"
     dtr_repo: "dtr"
-    ucp_version: "3.2.0-beta4"
-    dtr_version: "2.7.0-beta4"
+    ucp_version: "3.2.0"
+    dtr_version: "2.7.0"
+- scope:
+    path: "datacenter/ucp/3.1"
+  values:
+    hide_from_sitemap: true
+    ucp_org: "docker"
+    ucp_repo: "ucp"
+    ucp_version: "3.1.7"
 - scope:
     path: "datacenter/ucp/3.0"
   values:

@@ -19,10 +19,10 @@ url: https://docs.docker.com
 # TO USE ME:
 # jekyll serve --incremental --config _config_authoring.yml
-latest_engine_api_version: "1.39"
-docker_ce_version: "18.09"
-docker_ee_version: "18.09"
-compose_version: "1.24.0"
+latest_engine_api_version: "1.40"
+docker_ce_version: "19.03"
+docker_ee_version: "19.03"
+compose_version: "1.24.1"
 compose_file_v3: "3.7"
 compose_file_v2: "2.4"
 machine_version: "0.16.0"
@@ -96,7 +96,7 @@ defaults:
 - scope:
     path: "install"
   values:
-    win_latest_build: "docker-18.09.1"
+    win_latest_build: "docker-19.03.0"
 - scope:
     path: "datacenter"
   values:

@@ -17,7 +17,7 @@ clink:
 - docker_template_version.yaml
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -11,7 +11,7 @@ clink:
 - docker_template_config_view.yaml
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -42,7 +42,7 @@ options:
 swarm: false
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -16,7 +16,7 @@ options:
 swarm: false
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -16,7 +16,7 @@ options:
 swarm: false
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -26,7 +26,7 @@ options:
 swarm: false
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -64,7 +64,7 @@ examples: "docker template scaffold react-java-mysql -s back.java=10 -s front.ex
 template scaffold react-java-mysql --server=myregistry:5000 --org=myorg"
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -6,7 +6,7 @@ pname: docker template
 plink: docker_template.yaml
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -58,7 +58,7 @@ options:
 swarm: false
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -29,7 +29,7 @@ inherited_options:
 swarm: false
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -21,7 +21,7 @@ inherited_options:
 swarm: false
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -16,7 +16,7 @@ inherited_options:
 swarm: false
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -16,7 +16,7 @@ inherited_options:
 swarm: false
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -25,7 +25,7 @@ inherited_options:
 swarm: false
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -16,7 +16,7 @@ inherited_options:
 swarm: false
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -26,7 +26,7 @@ inherited_options:
 swarm: false
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -54,7 +54,7 @@ inherited_options:
 swarm: false
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -26,7 +26,7 @@ inherited_options:
 swarm: false
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -144,7 +144,7 @@ inherited_options:
 swarm: false
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -16,7 +16,7 @@ inherited_options:
 swarm: false
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -0,0 +1,33 @@
command: docker buildx
short: Build with BuildKit
long: Build with BuildKit
pname: docker
plink: docker.yaml
cname:
- docker buildx bake
- docker buildx build
- docker buildx create
- docker buildx imagetools
- docker buildx inspect
- docker buildx ls
- docker buildx rm
- docker buildx stop
- docker buildx use
- docker buildx version
clink:
- docker_buildx_bake.yaml
- docker_buildx_build.yaml
- docker_buildx_create.yaml
- docker_buildx_imagetools.yaml
- docker_buildx_inspect.yaml
- docker_buildx_ls.yaml
- docker_buildx_rm.yaml
- docker_buildx_stop.yaml
- docker_buildx_use.yaml
- docker_buildx_version.yaml
deprecated: false
experimental: false
experimentalcli: true
kubernetes: false
swarm: false
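Every entry in this new buildx reference is flagged `experimentalcli: true`, which matches buildx shipping as an experimental CLI plugin with Docker 19.03. As a reminder of how that gate is usually lifted (a sketch; the config path is the default CLI config location):

```bash
# Enable experimental CLI features for the current shell session
export DOCKER_CLI_EXPERIMENTAL=enabled

# Or enable them persistently in ~/.docker/config.json:
#   { "experimental": "enabled" }

# buildx subcommands should now be available
docker buildx version
```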

@@ -0,0 +1,70 @@
command: docker buildx bake
aliases: f
short: Build from a file
long: Build from a file
usage: docker buildx bake [OPTIONS] [TARGET...]
pname: docker buildx
plink: docker_buildx.yaml
options:
- option: file
shorthand: f
value_type: stringArray
default_value: '[]'
description: Build definition file
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: no-cache
value_type: bool
default_value: "false"
description: Do not use cache when building the image
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: print
value_type: bool
default_value: "false"
description: Print the options without building
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: progress
value_type: string
default_value: auto
description: |
Set type of progress output (auto, plain, tty). Use plain to show container output
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: pull
value_type: bool
default_value: "false"
description: Always attempt to pull a newer version of the image
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: set
value_type: stringArray
default_value: '[]'
description: 'Override target value (eg: target.key=value)'
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
deprecated: false
experimental: false
experimentalcli: true
kubernetes: false
swarm: false
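Putting the options above together, a typical invocation might look like this (a sketch; `docker-bake.hcl` is a hypothetical build definition file):

```bash
# Preview the fully resolved build configuration without building anything
docker buildx bake -f docker-bake.hcl --print

# Build all targets in the file, ignoring cached layers and pulling fresh base images
docker buildx bake -f docker-bake.hcl --no-cache --pull
```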

@@ -0,0 +1,340 @@
command: docker buildx build
aliases: b
short: Start a build
long: Start a build
usage: docker buildx build [OPTIONS] PATH | URL | -
pname: docker buildx
plink: docker_buildx.yaml
options:
- option: add-host
value_type: stringSlice
default_value: '[]'
description: Add a custom host-to-IP mapping (host:ip)
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: build-arg
value_type: stringArray
default_value: '[]'
description: Set build-time variables
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: cache-from
value_type: stringArray
default_value: '[]'
description: |
External cache sources (eg. user/app:cache, type=local,src=path/to/dir)
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: cache-to
value_type: stringArray
default_value: '[]'
description: |
Cache export destinations (eg. user/app:cache, type=local,dest=path/to/dir)
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: cgroup-parent
value_type: string
description: Optional parent cgroup for the container
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: compress
value_type: bool
default_value: "false"
description: Compress the build context using gzip
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: cpu-period
value_type: int64
default_value: "0"
description: Limit the CPU CFS (Completely Fair Scheduler) period
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: cpu-quota
value_type: int64
default_value: "0"
description: Limit the CPU CFS (Completely Fair Scheduler) quota
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: cpu-shares
shorthand: c
value_type: int64
default_value: "0"
description: CPU shares (relative weight)
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: cpuset-cpus
value_type: string
description: CPUs in which to allow execution (0-3, 0,1)
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: cpuset-mems
value_type: string
description: MEMs in which to allow execution (0-3, 0,1)
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: file
shorthand: f
value_type: string
description: Name of the Dockerfile (Default is 'PATH/Dockerfile')
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: force-rm
value_type: bool
default_value: "false"
description: Always remove intermediate containers
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: iidfile
value_type: string
description: Write the image ID to the file
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: isolation
value_type: string
description: Container isolation technology
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: label
value_type: stringArray
default_value: '[]'
description: Set metadata for an image
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: load
value_type: bool
default_value: "false"
description: Shorthand for --output=type=docker
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: memory
shorthand: m
value_type: string
description: Memory limit
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: memory-swap
value_type: string
description: |
Swap limit equal to memory plus swap: '-1' to enable unlimited swap
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: network
value_type: string
default_value: default
description: |
Set the networking mode for the RUN instructions during build
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: no-cache
value_type: bool
default_value: "false"
description: Do not use cache when building the image
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: output
shorthand: o
value_type: stringArray
default_value: '[]'
description: 'Output destination (format: type=local,dest=path)'
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: platform
value_type: stringArray
default_value: '[]'
description: Set target platform for build
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: progress
value_type: string
default_value: auto
description: |
Set type of progress output (auto, plain, tty). Use plain to show container output
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: pull
value_type: bool
default_value: "false"
description: Always attempt to pull a newer version of the image
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: push
value_type: bool
default_value: "false"
description: Shorthand for --output=type=registry
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: quiet
shorthand: q
value_type: bool
default_value: "false"
description: Suppress the build output and print image ID on success
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: rm
value_type: bool
default_value: "true"
description: Remove intermediate containers after a successful build
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: secret
value_type: stringArray
default_value: '[]'
description: |
Secret file to expose to the build: id=mysecret,src=/local/secret
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: security-opt
value_type: stringSlice
default_value: '[]'
description: Security options
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: shm-size
value_type: string
description: Size of /dev/shm
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: squash
value_type: bool
default_value: "false"
description: Squash newly built layers into a single new layer
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: ssh
value_type: stringArray
default_value: '[]'
description: |
SSH agent socket or keys to expose to the build (format: default|<id>[=<socket>|<key>[,<key>]])
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: tag
shorthand: t
value_type: stringArray
default_value: '[]'
description: Name and optionally a tag in the 'name:tag' format
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: target
value_type: string
description: Set the target build stage to build.
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: ulimit
value_type: string
description: Ulimit options
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
deprecated: false
experimental: false
experimentalcli: true
kubernetes: false
swarm: false
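Using only the options documented above, a multi-platform build that pushes straight to a registry could be sketched as follows (the image name and platform list are illustrative):

```bash
# Build for two platforms and push the resulting manifest list to a registry
docker buildx build \
  --platform linux/amd64,linux/arm64 \
  -t myorg/myapp:latest \
  --push .
```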

@@ -0,0 +1,73 @@
command: docker buildx create
short: Create a new builder instance
long: Create a new builder instance
usage: docker buildx create [OPTIONS] [CONTEXT|ENDPOINT]
pname: docker buildx
plink: docker_buildx.yaml
options:
- option: append
value_type: bool
default_value: "false"
description: Append a node to builder instead of changing it
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: driver
value_type: string
description: 'Driver to use (available: [])'
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: leave
value_type: bool
default_value: "false"
description: Remove a node from builder instead of changing it
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: name
value_type: string
description: Builder instance name
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: node
value_type: string
description: Create/modify node with given name
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: platform
value_type: stringArray
default_value: '[]'
description: Fixed platforms for current node
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: use
value_type: bool
default_value: "false"
description: Set the current builder instance
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
deprecated: false
experimental: false
experimentalcli: true
kubernetes: false
swarm: false
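A common sequence with these options (builder name is illustrative) is to create a builder, switch to it, and then boot it with the `inspect --bootstrap` command documented further down:

```bash
# Create a named builder instance and make it the current one
docker buildx create --name mybuilder --use

# Boot the builder and confirm it is ready
docker buildx inspect --bootstrap
```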

@@ -0,0 +1,17 @@
command: docker buildx imagetools
short: Commands to work on images in registry
long: Commands to work on images in registry
pname: docker buildx
plink: docker_buildx.yaml
cname:
- docker buildx imagetools create
- docker buildx imagetools inspect
clink:
- docker_buildx_imagetools_create.yaml
- docker_buildx_imagetools_inspect.yaml
deprecated: false
experimental: false
experimentalcli: true
kubernetes: false
swarm: false

@@ -0,0 +1,51 @@
command: docker buildx imagetools create
short: Create a new image based on source images
long: Create a new image based on source images
usage: docker buildx imagetools create [OPTIONS] [SOURCE] [SOURCE...]
pname: docker buildx imagetools
plink: docker_buildx_imagetools.yaml
options:
- option: append
value_type: bool
default_value: "false"
description: Append to existing manifest
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: dry-run
value_type: bool
default_value: "false"
description: Show final image instead of pushing
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: file
shorthand: f
value_type: stringArray
default_value: '[]'
description: Read source descriptor from file
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: tag
shorthand: t
value_type: stringArray
default_value: '[]'
description: Set reference for new image
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
deprecated: false
experimental: false
experimentalcli: true
kubernetes: false
swarm: false
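Given the options above, combining two per-architecture images into a single multi-arch reference might look like this (all image names are placeholders):

```bash
# Stitch two existing images into one manifest list and push it under a new tag
docker buildx imagetools create \
  -t myorg/myapp:latest \
  myorg/myapp:amd64 myorg/myapp:arm64
```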

@@ -0,0 +1,22 @@
command: docker buildx imagetools inspect
short: Show details of image in the registry
long: Show details of image in the registry
usage: docker buildx imagetools inspect [OPTIONS] NAME
pname: docker buildx imagetools
plink: docker_buildx_imagetools.yaml
options:
- option: raw
value_type: bool
default_value: "false"
description: Show original JSON manifest
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
deprecated: false
experimental: false
experimentalcli: true
kubernetes: false
swarm: false

@@ -0,0 +1,22 @@
command: docker buildx inspect
short: Inspect current builder instance
long: Inspect current builder instance
usage: docker buildx inspect [NAME]
pname: docker buildx
plink: docker_buildx.yaml
options:
- option: bootstrap
value_type: bool
default_value: "false"
description: Ensure builder has booted before inspecting
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
deprecated: false
experimental: false
experimentalcli: true
kubernetes: false
swarm: false

@@ -0,0 +1,12 @@
command: docker buildx ls
short: List builder instances
long: List builder instances
usage: docker buildx ls
pname: docker buildx
plink: docker_buildx.yaml
deprecated: false
experimental: false
experimentalcli: true
kubernetes: false
swarm: false

@@ -0,0 +1,12 @@
command: docker buildx rm
short: Remove a builder instance
long: Remove a builder instance
usage: docker buildx rm [NAME]
pname: docker buildx
plink: docker_buildx.yaml
deprecated: false
experimental: false
experimentalcli: true
kubernetes: false
swarm: false

@@ -0,0 +1,12 @@
command: docker buildx stop
short: Stop builder instance
long: Stop builder instance
usage: docker buildx stop [NAME]
pname: docker buildx
plink: docker_buildx.yaml
deprecated: false
experimental: false
experimentalcli: true
kubernetes: false
swarm: false

@@ -0,0 +1,31 @@
command: docker buildx use
short: Set the current builder instance
long: Set the current builder instance
usage: docker buildx use [OPTIONS] NAME
pname: docker buildx
plink: docker_buildx.yaml
options:
- option: default
value_type: bool
default_value: "false"
description: Set builder as default for current context
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: global
value_type: bool
default_value: "false"
description: Builder persists context changes
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
deprecated: false
experimental: false
experimentalcli: true
kubernetes: false
swarm: false

@@ -0,0 +1,12 @@
command: docker buildx version
short: 'Show buildx version information '
long: 'Show buildx version information '
usage: docker buildx version
pname: docker buildx
plink: docker_buildx.yaml
deprecated: false
experimental: false
experimentalcli: true
kubernetes: false
swarm: false

@@ -0,0 +1,49 @@
command: docker cluster
short: Docker Cluster
long: A tool to build and manage Docker Clusters.
pname: docker
plink: docker.yaml
cname:
- docker cluster backup
- docker cluster create
- docker cluster inspect
- docker cluster ls
- docker cluster restore
- docker cluster rm
- docker cluster update
- docker cluster version
clink:
- docker_cluster_backup.yaml
- docker_cluster_create.yaml
- docker_cluster_inspect.yaml
- docker_cluster_ls.yaml
- docker_cluster_restore.yaml
- docker_cluster_rm.yaml
- docker_cluster_update.yaml
- docker_cluster_version.yaml
options:
- option: dry-run
value_type: bool
default_value: "false"
description: Skip provisioning resources
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: log-level
value_type: string
default_value: warn
description: |
Set the logging level ("trace"|"debug"|"info"|"warn"|"error"|"fatal")
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false

@@ -0,0 +1,60 @@
command: docker cluster backup
short: Backup a running cluster
long: Backup a running cluster
usage: docker cluster backup [OPTIONS] cluster
pname: docker cluster
plink: docker_cluster.yaml
options:
- option: env
shorthand: e
value_type: stringSlice
default_value: '[]'
description: Set environment variables
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: file
value_type: string
default_value: backup.tar.gz
description: Cluster backup filename
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: passphrase
value_type: string
description: Cluster backup passphrase
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
inherited_options:
- option: dry-run
value_type: bool
default_value: "false"
description: Skip provisioning resources
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: log-level
value_type: string
default_value: warn
description: |
Set the logging level ("trace"|"debug"|"info"|"warn"|"error"|"fatal")
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
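With the flags listed above, a backup (and the matching restore documented further down) could be sketched as follows (cluster name, file name, and passphrase are placeholders):

```bash
# Back up a running cluster to an encrypted archive
docker cluster backup --file mycluster-backup.tar.gz --passphrase "example-passphrase" mycluster

# Later, restore the cluster from that archive
docker cluster restore --file mycluster-backup.tar.gz --passphrase "example-passphrase" mycluster
```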

@@ -0,0 +1,81 @@
command: docker cluster create
short: Create a new Docker Cluster
long: Create a new Docker Cluster
usage: docker cluster create [OPTIONS]
pname: docker cluster
plink: docker_cluster.yaml
options:
- option: env
shorthand: e
value_type: stringSlice
default_value: '[]'
description: Set environment variables
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: example
value_type: string
default_value: aws
description: Display an example cluster declaration
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: file
shorthand: f
value_type: string
default_value: cluster.yml
description: Cluster declaration
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: name
shorthand: "n"
value_type: string
description: Name for the cluster
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: switch-context
shorthand: s
value_type: bool
default_value: "false"
description: Switch context after cluster create.
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
inherited_options:
- option: dry-run
value_type: bool
default_value: "false"
description: Skip provisioning resources
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: log-level
value_type: string
default_value: warn
description: |
Set the logging level ("trace"|"debug"|"info"|"warn"|"error"|"fatal")
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
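A plausible workflow with these options (the file and cluster names are illustrative, and the behaviour of `--example` is assumed from its description above):

```bash
# Print a sample AWS cluster declaration to start from
docker cluster create --example aws > cluster.yml

# After editing cluster.yml, provision the cluster and switch to its context
docker cluster create -f cluster.yml --name mycluster --switch-context
```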

@@ -0,0 +1,43 @@
command: docker cluster inspect
short: Display detailed information about a cluster
long: Display detailed information about a cluster
usage: docker cluster inspect [OPTIONS] cluster
pname: docker cluster
plink: docker_cluster.yaml
options:
- option: all
shorthand: a
value_type: bool
default_value: "false"
description: Display complete info about cluster
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
inherited_options:
- option: dry-run
value_type: bool
default_value: "false"
description: Skip provisioning resources
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: log-level
value_type: string
default_value: warn
description: |
Set the logging level ("trace"|"debug"|"info"|"warn"|"error"|"fatal")
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false

@@ -0,0 +1,43 @@
command: docker cluster ls
short: List all available clusters
long: List all available clusters
usage: docker cluster ls [OPTIONS]
pname: docker cluster
plink: docker_cluster.yaml
options:
- option: quiet
shorthand: q
value_type: bool
default_value: "false"
description: Only display numeric IDs
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
inherited_options:
- option: dry-run
value_type: bool
default_value: "false"
description: Skip provisioning resources
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: log-level
value_type: string
default_value: warn
description: |
Set the logging level ("trace"|"debug"|"info"|"warn"|"error"|"fatal")
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false

@@ -0,0 +1,60 @@
command: docker cluster restore
short: Restore a cluster from a backup
long: Restore a cluster from a backup
usage: docker cluster restore [OPTIONS] cluster
pname: docker cluster
plink: docker_cluster.yaml
options:
- option: env
shorthand: e
value_type: stringSlice
default_value: '[]'
description: Set environment variables
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: file
value_type: string
default_value: backup.tar.gz
description: Cluster backup filename
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: passphrase
value_type: string
description: Cluster backup passphrase
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
inherited_options:
- option: dry-run
value_type: bool
default_value: "false"
description: Skip provisioning resources
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: log-level
value_type: string
default_value: warn
description: |
Set the logging level ("trace"|"debug"|"info"|"warn"|"error"|"fatal")
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false

@@ -0,0 +1,53 @@
command: docker cluster rm
short: Remove a cluster
long: Remove a cluster
usage: docker cluster rm [OPTIONS] cluster
pname: docker cluster
plink: docker_cluster.yaml
options:
- option: env
shorthand: e
value_type: stringSlice
default_value: '[]'
description: Set environment variables
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: force
shorthand: f
value_type: bool
default_value: "false"
description: Force removal of the cluster files
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
inherited_options:
- option: dry-run
value_type: bool
default_value: "false"
description: Skip provisioning resources
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: log-level
value_type: string
default_value: warn
description: |
Set the logging level ("trace"|"debug"|"info"|"warn"|"error"|"fatal")
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false

@@ -0,0 +1,52 @@
command: docker cluster update
short: Update a running cluster's desired state
long: Update a running cluster's desired state
usage: docker cluster update [OPTIONS] cluster
pname: docker cluster
plink: docker_cluster.yaml
options:
- option: env
shorthand: e
value_type: stringSlice
default_value: '[]'
description: Set environment variables
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: file
shorthand: f
value_type: string
description: Cluster definition
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
inherited_options:
- option: dry-run
value_type: bool
default_value: "false"
description: Skip provisioning resources
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: log-level
value_type: string
default_value: warn
description: |
Set the logging level ("trace"|"debug"|"info"|"warn"|"error"|"fatal")
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false

@@ -0,0 +1,42 @@
command: docker cluster version
short: Print Version, Commit, and Build type
long: Print Version, Commit, and Build type
usage: docker cluster version
pname: docker cluster
plink: docker_cluster.yaml
options:
- option: json
value_type: bool
default_value: "false"
description: Formats output as JSON. Implies '--log-level error'
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
inherited_options:
- option: dry-run
value_type: bool
default_value: "false"
description: Skip provisioning resources
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: log-level
value_type: string
default_value: warn
description: |
Set the logging level ("trace"|"debug"|"info"|"warn"|"error"|"fatal")
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false

@@ -12,7 +12,7 @@
   url: https://packages.docker.com/caas/ucp_images_win_2016_3.1.7.tar.gz
 - description: "3.1.7 Windows Server 1709"
   url: https://packages.docker.com/caas/ucp_images_win_1709_3.1.7.tar.gz
 - description: "3.1.7 Windows Server 1803"
   url: https://packages.docker.com/caas/ucp_images_win_1803_3.1.7.tar.gz
 - description: "3.1.7 Windows Server 2019 LTSC"
   url: https://packages.docker.com/caas/ucp_images_win_2019_3.1.7.tar.gz
@@ -288,6 +288,11 @@
   url: https://packages.docker.com/caas/ucp_images_s390x_2.2.0.tar.gz
 - description: "2.2.0 Windows"
   url: https://packages.docker.com/caas/ucp_images_win_2.2.0.tar.gz
+- product: "dtr"
+  version: "2.7"
+  tar-files:
+  - description: "DTR 2.7.0 Linux x86"
+    url: https://packages.docker.com/caas/dtr_images_2.7.0.tar.gz
 - product: "dtr"
   version: "2.6"
   tar-files:

@@ -39,7 +39,7 @@ clink:
 - docker_app_version.yaml
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -18,7 +18,7 @@ options:
 examples: $ docker app bundle myapp.dockerapp
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -18,7 +18,7 @@ plink: docker_app.yaml
 examples: $ . <(docker app completion bash)
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -45,7 +45,7 @@ options:
 examples: $ docker app init myapp --description "a useful description"
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -46,7 +46,7 @@ options:
 examples: $ docker app inspect myapp.dockerapp
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -109,7 +109,7 @@ examples: |-
 $ docker app install bundle.json --name myinstallation --credential-set=mycredentials.yml
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -16,7 +16,7 @@ options:
 swarm: false
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -17,7 +17,7 @@ options:
 examples: $ docker app merge myapp.dockerapp --output myapp-single.dockerapp
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -18,7 +18,7 @@ options:
 examples: $ docker app pull docker/app-example:0.1.0
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -38,7 +38,7 @@ options:
 examples: $ docker app push myapp --tag myrepo/myapp:mytag
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -66,7 +66,7 @@ options:
 examples: $ docker app render myapp.dockerapp --set key=value
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -17,7 +17,7 @@ options:
 examples: $ docker app split myapp.dockerapp --output myapp-directory.dockerapp
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -37,7 +37,7 @@ options:
 examples: $ docker app status myinstallation --target-context=mycontext
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -45,7 +45,7 @@ options:
 examples: $ docker app uninstall myinstallation --target-context=mycontext
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -82,7 +82,7 @@ options:
 examples: $ docker app upgrade myinstallation --target-context=mycontext --set key=value
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -26,7 +26,7 @@ options:
 swarm: false
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -6,7 +6,7 @@ pname: docker app
 plink: docker_app.yaml
 deprecated: false
 experimental: false
-experimentalcli: false
+experimentalcli: true
 kubernetes: false
 swarm: false

@@ -1,5 +1,5 @@
 - archive:
-    name: v18.09
+    name: v19.03
     image: docs/docker.github.io:latest
     current: true
 # When you make a new stable archive version, move the edge one to be second in
@@ -7,6 +7,9 @@
 - archive:
     name: edge
     image: docs/docker.github.io:latest
+- archive:
+    name: v18.09
+    image: docs/docker.github.io:v18.09
 - archive:
     name: v18.03
     image: docs/docker.github.io:v18.03

@@ -1,5 +1,5 @@
 command: docker context import
-short: Import a context from a tar file
+short: Import a context from a tar or zip file
 long: Imports a context previously exported with `docker context export`. To import
   from stdin, use a hyphen (`-`) as filename.
 usage: docker context import CONTEXT FILE|-
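The export/import round trip referred to in the description can be sketched as follows (context and file names are illustrative):

```bash
# Export an existing context to a file...
docker context export my-context my-context.dockercontext

# ...and import it under a new name; use "-" as the file to read from stdin
docker context import restored-context my-context.dockercontext
```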

View File

@ -27,7 +27,7 @@ cgroups: |
*Also known as : control groups* *Also known as : control groups*
collection: | collection: |
A collection is a group of swarm resources that Docker EE uses for role-based A collection is a group of swarm resources that Docker Engine - Enterprise uses for role-based
access control. Collections enable organizing permissions for resources like access control. Collections enable organizing permissions for resources like
nodes, services, containers, volumes, networks, and secrets. [Learn how to manage collections](/datacenter/ucp/2.2/guides/access-control/manage-access-with-collections/). nodes, services, containers, volumes, networks, and secrets. [Learn how to manage collections](/datacenter/ucp/2.2/guides/access-control/manage-access-with-collections/).
Compose: | Compose: |
@ -76,8 +76,8 @@ Docker: |
develop, ship, and run applications develop, ship, and run applications
- The docker daemon process running on the host which manages images and containers - The docker daemon process running on the host which manages images and containers
(also called Docker Engine) (also called Docker Engine)
Docker Enterprise Edition: | Docker Enterprise: |
Docker Enterprise Edition (Docker EE) is a platform to build, ship, and run Docker Enterprise is a platform to build, ship, and run
containerized applications, that you can deploy in the cloud or on-premise. It containerized applications, that you can deploy in the cloud or on-premise. It
includes a tested and certified version of Docker, web UIs for managing includes a tested and certified version of Docker, web UIs for managing
your app resources, and support. your app resources, and support.
@ -152,7 +152,7 @@ filesystem: |
- macOS : HFS+ - macOS : HFS+
grant: | grant: |
A grant enables role-based access control for managing how users and A grant enables role-based access control for managing how users and
organizations access Docker EE swarm resources. A grant is made up of a organizations access Docker Engine - Enterprise swarm resources. A grant is made up of a
subject, a role, and a collection. For more about grants and role-based access subject, a role, and a collection. For more about grants and role-based access
control, see [Grant permissions to users based on roles](/datacenter/ucp/2.2/guides/access-control/grant-permissions/). control, see [Grant permissions to users based on roles](/datacenter/ucp/2.2/guides/access-control/grant-permissions/).
image: | image: |
@ -233,12 +233,12 @@ repository: |
Here is an example of the shared [nginx repository](https://hub.docker.com/_/nginx/) Here is an example of the shared [nginx repository](https://hub.docker.com/_/nginx/)
and its [tags](https://hub.docker.com/r/library/nginx/tags/). and its [tags](https://hub.docker.com/r/library/nginx/tags/).
role: | role: |
A role is a set of permitted API operations on a collection of Docker EE swarm A role is a set of permitted API operations on a collection of Docker Engine - Enterprise swarm
resources. As part of a grant, a role is assigned to a subject (a user, team, or resources. As part of a grant, a role is assigned to a subject (a user, team, or
organization) and a collection. For more about roles, see [Roles and organization) and a collection. For more about roles, see [Roles and
permission levels](/datacenter/ucp/2.2/guides/access-control/permission-levels/). permission levels](/datacenter/ucp/2.2/guides/access-control/permission-levels/).
role-based access control: | role-based access control: |
Role-based access control enables managing how Docker EE users can access Role-based access control enables managing how Docker Engine - Enterprise users can access
swarm resources. UCP administrators create grants to control how users access swarm resources. UCP administrators create grants to control how users access
resource collections. A grant is made up of a subject, a role, and a collection. resource collections. A grant is made up of a subject, a role, and a collection.
A grant defines who (subject) has how much access (role) to a set of resources A grant defines who (subject) has how much access (role) to a set of resources
@ -271,7 +271,7 @@ service discovery: |
other services on the same overlay network. The swarm's internal load balancer other services on the same overlay network. The swarm's internal load balancer
automatically distributes requests to the service VIP among the active tasks. automatically distributes requests to the service VIP among the active tasks.
subject: | subject: |
A subject represents a user, team, or organization in Docker EE. A subject is A subject represents a user, team, or organization in Docker Enterprise. A subject is
granted a role for access to a collection of swarm resources. granted a role for access to a collection of swarm resources.
For more about role-based access, see [Authentication](/datacenter/ucp/2.2/guides/access-control/). For more about role-based access, see [Authentication](/datacenter/ucp/2.2/guides/access-control/).
swarm: | swarm: |

View File

@ -23,7 +23,7 @@ clink:
- docker_registry_ls.yaml - docker_registry_ls.yaml
- docker_registry_rmi.yaml - docker_registry_rmi.yaml
deprecated: false deprecated: false
experimental: true experimental: false
experimentalcli: true experimentalcli: true
kubernetes: false kubernetes: false
swarm: false swarm: false

View File

@ -50,7 +50,7 @@ options:
kubernetes: false kubernetes: false
swarm: false swarm: false
deprecated: false deprecated: false
experimental: true experimental: false
experimentalcli: true experimentalcli: true
kubernetes: false kubernetes: false
swarm: false swarm: false

View File

@ -33,7 +33,7 @@ options:
kubernetes: false kubernetes: false
swarm: false swarm: false
deprecated: false deprecated: false
experimental: true experimental: false
experimentalcli: true experimentalcli: true
kubernetes: false kubernetes: false
swarm: false swarm: false

View File

@ -15,7 +15,7 @@ options:
kubernetes: false kubernetes: false
swarm: false swarm: false
deprecated: false deprecated: false
experimental: true experimental: false
experimentalcli: true experimentalcli: true
kubernetes: false kubernetes: false
swarm: false swarm: false

View File

@ -14,7 +14,7 @@ options:
kubernetes: false kubernetes: false
swarm: false swarm: false
deprecated: false deprecated: false
experimental: true experimental: false
experimentalcli: true experimentalcli: true
kubernetes: false kubernetes: false
swarm: false swarm: false

View File

@ -14,7 +14,7 @@ options:
kubernetes: false kubernetes: false
swarm: false swarm: false
deprecated: false deprecated: false
experimental: true experimental: false
experimentalcli: true experimentalcli: true
kubernetes: false kubernetes: false
swarm: false swarm: false

View File

@ -42,7 +42,7 @@ options:
kubernetes: false kubernetes: false
swarm: false swarm: false
deprecated: false deprecated: false
experimental: true experimental: false
experimentalcli: true experimentalcli: true
kubernetes: false kubernetes: false
swarm: false swarm: false

View File

@ -42,7 +42,7 @@ options:
kubernetes: false kubernetes: false
swarm: false swarm: false
deprecated: false deprecated: false
experimental: true experimental: false
experimentalcli: true experimentalcli: true
kubernetes: false kubernetes: false
swarm: false swarm: false

View File

@ -5,7 +5,7 @@ usage: docker registry rmi REPOSITORY:TAG [OPTIONS]
pname: docker registry pname: docker registry
plink: docker_registry.yaml plink: docker_registry.yaml
deprecated: false deprecated: false
experimental: true experimental: false
experimentalcli: true experimentalcli: true
kubernetes: false kubernetes: false
swarm: false swarm: false

View File

@ -92,34 +92,7 @@ guides:
- title: Microsoft Windows - title: Microsoft Windows
path: /docker-for-windows/install/ path: /docker-for-windows/install/
nosync: true nosync: true
- sectiontitle: Docker Toolbox (legacy) - sectiontitle: Docker Enterprise
section:
- path: /toolbox/overview/
title: Toolbox overview
- path: /toolbox/toolbox_install_mac/
title: Install Toolbox on Mac
- path: /toolbox/toolbox_install_windows/
title: Install Toolbox on Windows
- sectiontitle: Kitematic
section:
- path: /kitematic/userguide/
title: "Kitematic user guide: intro &amp; overview"
- path: /kitematic/nginx-web-server/
title: Set up an Nginx web server
- path: /kitematic/minecraft-server/
title: Set up a Minecraft Server
- path: /kitematic/rethinkdb-dev-database/
title: Creating a local RethinkDB database for development
- path: /kitematic/faq/
title: Frequently asked questions
- path: /kitematic/known-issues/
title: Known issues
- path: /toolbox/faqs/troubleshoot/
title: Troubleshooting
- title: Release notes
path: /engine/release-notes/
nosync: true
- sectiontitle: Docker Enterprise Platform
section: section:
- title: About Docker Enterprise - title: About Docker Enterprise
path: /ee/supported-platforms/ path: /ee/supported-platforms/
@ -578,7 +551,7 @@ reference:
title: docker app upgrade title: docker app upgrade
- path: /engine/reference/commandline/app_validate/ - path: /engine/reference/commandline/app_validate/
title: docker app validate title: docker app validate
- path: /engine/reference/commandine/app_version/ - path: /engine/reference/commandline/app_version/
title: docker app version title: docker app version
- sectiontitle: docker assemble * - sectiontitle: docker assemble *
section: section:
@ -618,6 +591,34 @@ reference:
title: docker builder build title: docker builder build
- path: /engine/reference/commandline/builder_prune/ - path: /engine/reference/commandline/builder_prune/
title: docker builder prune title: docker builder prune
- sectiontitle: docker buildx *
section:
- path: /engine/reference/commandline/buildx/
title: docker buildx
- path: /engine/reference/commandline/buildx_bake/
title: docker buildx bake
- path: /engine/reference/commandline/buildx_build/
title: docker buildx build
- path: /engine/reference/commandline/buildx_create/
title: docker buildx create
- path: /engine/reference/commandline/buildx_imagetools/
title: docker buildx imagetools
- path: /engine/reference/commandline/buildx_imagetools_create/
title: docker buildx imagetools create
- path: /engine/reference/commandline/buildx_imagetools_inspect/
title: docker buildx imagetools inspect
- path: /engine/reference/commandline/buildx_inspect/
title: docker buildx inspect
- path: /engine/reference/commandline/buildx_ls/
title: docker buildx ls
- path: /engine/reference/commandline/buildx_rm/
title: docker buildx rm
- path: /engine/reference/commandline/buildx_stop/
title: docker buildx stop
- path: /engine/reference/commandline/buildx_use/
title: docker buildx use
- path: /engine/reference/commandline/buildx_version/
title: docker buildx version
- sectiontitle: docker checkpoint * - sectiontitle: docker checkpoint *
section: section:
- path: /engine/reference/commandline/checkpoint/ - path: /engine/reference/commandline/checkpoint/
@ -628,6 +629,27 @@ reference:
title: docker checkpoint ls title: docker checkpoint ls
- path: /engine/reference/commandline/checkpoint_rm/ - path: /engine/reference/commandline/checkpoint_rm/
title: docker checkpoint rm title: docker checkpoint rm
- sectiontitle: docker cluster *
section:
- path: /engine/reference/commandline/cluster/
title: docker cluster
- path: /engine/reference/commandline/cluster_backup/
title: docker cluster backup
- path: /engine/reference/commandline/cluster_create/
title: docker cluster create
- path: /engine/reference/commandline/cluster_inspect/
title: docker cluster inspect
- path: /engine/reference/commandline/cluster_ls/
title: docker cluster ls
- path: /engine/reference/commandline/cluster_restore/
title: docker cluster restore
- path: /engine/reference/commandline/cluster_rm/
title: docker cluster rm
- path: /engine/reference/commandline/cluster_update/
title: docker cluster update
- path: /engine/reference/commandline/cluster_version/
title: docker cluster version
- path: /engine/reference/commandline/commit/ - path: /engine/reference/commandline/commit/
title: docker commit title: docker commit
- sectiontitle: docker config * - sectiontitle: docker config *
@ -1065,7 +1087,7 @@ reference:
- path: /reference/dtr/2.7/cli/destroy/ - path: /reference/dtr/2.7/cli/destroy/
title: destroy title: destroy
- path: /reference/dtr/2.7/cli/emergency-repair/ - path: /reference/dtr/2.7/cli/emergency-repair/
title: emergency-repair7 title: emergency-repair
- path: /reference/dtr/2.7/cli/install/ - path: /reference/dtr/2.7/cli/install/
title: install title: install
- path: /reference/dtr/2.7/cli/join/ - path: /reference/dtr/2.7/cli/join/
@ -1123,6 +1145,12 @@ reference:
section: section:
- path: /engine/api/version-history/ - path: /engine/api/version-history/
title: Version history overview title: Version history overview
- path: /engine/api/v1.40/
title: v1.40 reference
- path: /engine/api/v1.39/
title: v1.39 reference
- path: /engine/api/v1.38/
title: v1.38 reference
- path: /engine/api/v1.37/ - path: /engine/api/v1.37/
title: v1.37 reference title: v1.37 reference
- path: /engine/api/v1.36/ - path: /engine/api/v1.36/
@ -1258,49 +1286,42 @@ samples:
title: SSHd title: SSHd
manuals: manuals:
- sectiontitle: Docker Enterprise Edition - sectiontitle: Docker Enterprise
section: section:
- path: /ee/ - path: /ee/
title: About Docker Enterprise title: Overview
- path: /ee/docker-ee-architecture/ - sectiontitle: Release notes
title: Docker Enterprise Architecture section:
- path: /ee/supported-platforms/ - path: /ee/release-notes/
title: Supported platforms title: Platform
nosync: true - path: /engine/release-notes/
- path: /ee/end-to-end-install/ title: Docker Engine - Enterprise and Engine - Community
title: Deploy Docker Enterprise nosync: true
- sectiontitle: Back up Docker Enterprise - path: /ee/ucp/release-notes/
title: Docker Universal Control Plane
nosync: true
- path: /ee/dtr/release-notes/
title: Docker Trusted Registry
nosync: true
- path: /ee/desktop/release-notes/
title: Docker Desktop Enterprise
nosync: true
- sectiontitle: Docker Cluster
section: section:
- path: /ee/admin/backup/ - path: /cluster/
title: Overview title: Overview
- path: /ee/admin/backup/back-up-swarm/ - path: /cluster/aws/
title: Back up Docker Swarm title: Docker Cluster on AWS
- path: /ee/admin/backup/back-up-ucp/ - path: /cluster/cluster-file/
title: Back up UCP title: Cluster file structure
- path: /ee/admin/backup/back-up-dtr/ - path: /cluster/reference/envvars/
title: Back up DTR title: Environment variables
- sectiontitle: Restore Docker Enterprise - path: /cluster/reference/
section: title: Subcommands
- path: /ee/admin/restore/
title: Overview
- path: /ee/admin/restore/restore-swarm/
title: Restore Docker Swarm
- path: /ee/admin/restore/restore-ucp/
title: Restore UCP
- path: /ee/admin/restore/restore-dtr/
title: Restore DTR
- sectiontitle: Disaster Recovery
section:
- path: /ee/admin/disaster-recovery/
title: Overview
- path: /ee/upgrade/
title: Upgrade Docker Enterprise
- path: /ee/telemetry/
title: Manage usage data collection
- sectiontitle: Docker Engine - Enterprise - sectiontitle: Docker Engine - Enterprise
section: section:
- path: /ee/supported-platforms/ - path: /ee/supported-platforms/
title: Install Docker Enterprise Engine title: Install Docker Engine - Enterprise
nosync: true nosync: true
- title: Release notes - title: Release notes
path: /engine/release-notes/ path: /engine/release-notes/
@ -1322,8 +1343,12 @@ manuals:
title: Install title: Install
- path: /ee/ucp/admin/install/install-offline/ - path: /ee/ucp/admin/install/install-offline/
title: Install offline title: Install offline
- path: /ee/ucp/admin/install/install-on-azure/ - sectiontitle: Cloud Providers
title: Install on Azure section:
- path: /ee/ucp/admin/install/cloudproviders/install-on-azure/
title: Install on Azure
- path: /ee/ucp/admin/install/cloudproviders/install-on-aws/
title: Install on AWS
- path: /ee/ucp/admin/install/upgrade/ - path: /ee/ucp/admin/install/upgrade/
title: Upgrade title: Upgrade
- path: /ee/ucp/admin/install/upgrade-offline/ - path: /ee/ucp/admin/install/upgrade-offline/
@ -1506,7 +1531,7 @@ manuals:
- title: Securing services with TLS - title: Securing services with TLS
path: /ee/ucp/interlock/usage/tls/ path: /ee/ucp/interlock/usage/tls/
- title: Configuring websockets - title: Configuring websockets
path: /ee/ucp/interlock/usage/websockets/ path: /ee/ucp/interlock/usage/websockets/
- sectiontitle: Deploy apps with Kubernetes - sectiontitle: Deploy apps with Kubernetes
section: section:
- title: Access Kubernetes Resources - title: Access Kubernetes Resources
@ -1515,16 +1540,14 @@ manuals:
path: /ee/ucp/kubernetes/ path: /ee/ucp/kubernetes/
- title: Deploy a Compose-based app - title: Deploy a Compose-based app
path: /ee/ucp/kubernetes/deploy-with-compose/ path: /ee/ucp/kubernetes/deploy-with-compose/
- title: Deploy an ingress controller - title: Using Pod Security Policies
path: /ee/ucp/kubernetes/layer-7-routing/ path: /ee/ucp/kubernetes/pod-security-policies/
- title: Create a service account for a Kubernetes app - title: Create a service account for a Kubernetes app
path: /ee/ucp/kubernetes/create-service-account/ path: /ee/ucp/kubernetes/create-service-account/
- title: Install an unmanaged CNI plugin - title: Install an unmanaged CNI plugin
path: /ee/ucp/kubernetes/install-cni-plugin/ path: /ee/ucp/kubernetes/install-cni-plugin/
- title: Kubernetes network encryption - title: Kubernetes network encryption
path: /ee/ucp/kubernetes/kubernetes-network-encryption/ path: /ee/ucp/kubernetes/kubernetes-network-encryption/
- title: Deploy a CSI plugin
path: /ee/ucp/kubernetes/use-csi/
- sectiontitle: Persistent Storage - sectiontitle: Persistent Storage
section: section:
- title: Use NFS Storage - title: Use NFS Storage
@ -1537,6 +1560,20 @@ manuals:
path: /ee/ucp/kubernetes/storage/configure-aws-storage/ path: /ee/ucp/kubernetes/storage/configure-aws-storage/
- title: Configure iSCSI - title: Configure iSCSI
path: /ee/ucp/kubernetes/storage/use-iscsi/ path: /ee/ucp/kubernetes/storage/use-iscsi/
- title: Deploy a CSI plugin
path: /ee/ucp/kubernetes/storage/use-csi/
- sectiontitle: Cluster Ingress
section:
- title: Overview
path: /ee/ucp/kubernetes/cluster-ingress/
- title: Install Ingress
path: /ee/ucp/kubernetes/cluster-ingress/install/
- title: Deploy Simple Application
path: /ee/ucp/kubernetes/cluster-ingress/ingress/
- title: Deploy a Canary Deployment
path: /ee/ucp/kubernetes/cluster-ingress/canary/
- title: Implementing Persistent (sticky) Sessions
path: /ee/ucp/kubernetes/cluster-ingress/sticky/
- title: API reference - title: API reference
path: /reference/ucp/3.2/api/ path: /reference/ucp/3.2/api/
nosync: true nosync: true
@ -2319,6 +2356,8 @@ manuals:
- path: /ee/dtr/admin/configure/use-your-own-tls-certificates/ - path: /ee/dtr/admin/configure/use-your-own-tls-certificates/
title: Use your own TLS certificates title: Use your own TLS certificates
- path: /ee/dtr/admin/configure/enable-single-sign-on/ - path: /ee/dtr/admin/configure/enable-single-sign-on/
title: Enable single sign-on
- path: /ee/dtr/admin/configure/disable-persistent-cookies/
title: Enable single sign-on title: Disable persistent cookies
- sectiontitle: External storage - sectiontitle: External storage
section: section:
@ -2459,6 +2498,8 @@ manuals:
path: /ee/dtr/user/audit-repository-events/ path: /ee/dtr/user/audit-repository-events/
- title: Auto-delete repository events - title: Auto-delete repository events
path: /ee/dtr/admin/configure/auto-delete-repo-events/ path: /ee/dtr/admin/configure/auto-delete-repo-events/
- title: Manage applications
path: /ee/dtr/user/manage-applications/
- title: Manage access tokens - title: Manage access tokens
path: /ee/dtr/user/access-tokens/ path: /ee/dtr/user/access-tokens/
- title: Tag pruning - title: Tag pruning
@ -3268,6 +3309,49 @@ manuals:
title: Troubleshoot DDE issues on Mac title: Troubleshoot DDE issues on Mac
- path: /ee/desktop/troubleshoot/windows-issues/ - path: /ee/desktop/troubleshoot/windows-issues/
title: Troubleshoot DDE issues on Windows title: Troubleshoot DDE issues on Windows
- sectiontitle: Manage Docker Enterprise
section:
- path: /ee/docker-ee-architecture/
title: Docker Enterprise Architecture
- path: /ee/supported-platforms/
title: Supported platforms
nosync: true
- path: /ee/end-to-end-install/
title: Deploy Docker Enterprise
- path: /ee/upgrade/
title: Upgrade Docker Enterprise
- sectiontitle: Back up Docker Enterprise
section:
- path: /ee/admin/backup/
title: Overview
- path: /ee/admin/backup/back-up-swarm/
title: Back up Docker Swarm
- path: /ee/admin/backup/back-up-ucp/
title: Back up UCP
- path: /ee/admin/backup/back-up-dtr/
title: Back up DTR
- path: /cluster/reference/backup/
title: Back up clusters with Docker Cluster
- sectiontitle: Restore Docker Enterprise
section:
- path: /ee/admin/restore/
title: Overview
- path: /ee/admin/restore/restore-swarm/
title: Restore Docker Swarm
- path: /ee/admin/restore/restore-ucp/
title: Restore UCP
- path: /ee/admin/restore/restore-dtr/
title: Restore DTR
- path: /cluster/reference/restore/
title: Restore clusters with Docker Cluster
- sectiontitle: Disaster Recovery
section:
- path: /ee/admin/disaster-recovery/
title: Overview
- path: /ee/enable-client-certificate-authentication/
title: Enable client certificate authentication with your PKI
- path: /ee/telemetry/
title: Manage usage data collection
- title: Get support - title: Get support
path: /ee/get-support/ path: /ee/get-support/
- sectiontitle: Docker Assemble - sectiontitle: Docker Assemble
@ -3300,6 +3384,12 @@ manuals:
title: API reference title: API reference
- path: /engine/reference/commandline/template/ - path: /engine/reference/commandline/template/
title: CLI reference title: CLI reference
- sectiontitle: Docker Buildx
section:
- path: /buildx/working-with-buildx/
title: Working with Docker Buildx
- path: /engine/reference/commandline/buildx/
title: CLI reference
- sectiontitle: Docker Compose - sectiontitle: Docker Compose
section: section:
- path: /compose/overview/ - path: /compose/overview/
@ -3458,110 +3548,6 @@ manuals:
title: Edge release notes title: Edge release notes
- title: Docker ID accounts - title: Docker ID accounts
path: /docker-id/ path: /docker-id/
- sectiontitle: Docker Machine
section:
- path: /machine/overview/
title: Machine overview
- path: /machine/install-machine/
title: Install Machine
- path: /machine/get-started/
title: Get started with a local VM
- path: /machine/get-started-cloud/
title: Provision hosts in the cloud
- sectiontitle: Learn by example
section:
- path: /machine/examples/ocean/
title: Provision Digital Ocean Droplets
- path: /machine/examples/aws/
title: Provision AWS EC2 instances
- path: /machine/concepts/
title: Machine concepts and help
- sectiontitle: Machine (docker-machine) CLI
section:
- path: /machine/reference/
title: Machine CLI overview
- path: /machine/completion/
title: Machine command-line completion
- path: /machine/reference/active/
title: active
- path: /machine/reference/config/
title: config
- path: /machine/reference/create/
title: create
- path: /machine/reference/env/
title: env
- path: /machine/reference/help/
title: help
- path: /machine/reference/inspect/
title: inspect
- path: /machine/reference/ip/
title: ip
- path: /machine/reference/kill/
title: kill
- path: /machine/reference/ls/
title: ls
- path: /machine/reference/mount/
title: mount
- path: /machine/reference/provision/
title: provision
- path: /machine/reference/regenerate-certs/
title: regenerate-certs
- path: /machine/reference/restart/
title: restart
- path: /machine/reference/rm/
title: rm
- path: /machine/reference/scp/
title: scp
- path: /machine/reference/ssh/
title: ssh
- path: /machine/reference/start/
title: start
- path: /machine/reference/status/
title: status
- path: /machine/reference/stop/
title: stop
- path: /machine/reference/upgrade/
title: upgrade
- path: /machine/reference/url/
title: url
- sectiontitle: Machine drivers
section:
- path: /machine/drivers/
title: Drivers overview
- path: /machine/drivers/os-base/
title: Driver options and operating system defaults
- path: /machine/drivers/aws/
title: Amazon Web Services
- path: /machine/drivers/digital-ocean/
title: Digital Ocean
- path: /machine/drivers/exoscale/
title: Exoscale
- path: /machine/drivers/generic/
title: Generic
- path: /machine/drivers/gce/
title: Google Compute Engine
- path: /machine/drivers/soft-layer/
title: IBM Softlayer
- path: /machine/drivers/azure/
title: Microsoft Azure
- path: /machine/drivers/hyper-v/
title: Microsoft Hyper-V
- path: /machine/drivers/openstack/
title: OpenStack
- path: /machine/drivers/virtualbox/
title: Oracle VirtualBox
- path: /machine/drivers/rackspace/
title: Rackspace
- path: /machine/drivers/vm-fusion/
title: VMware Fusion
- path: /machine/drivers/vm-cloud/
title: VMware vCloud Air
- path: /machine/drivers/vsphere/
title: VMware vSphere
- path: /machine/migrate-to-machine/
title: Migrate from Boot2Docker to Machine
- path: /release-notes/docker-machine/
title: Docker Machine release notes
- sectiontitle: Docker Hub - sectiontitle: Docker Hub
section: section:
- path: /docker-hub/ - path: /docker-hub/
@ -3594,6 +3580,8 @@ manuals:
section: section:
- path: /docker-hub/publish/ - path: /docker-hub/publish/
title: Overview title: Overview
- path: /docker-hub/publish/publish/
title: Submit a product for Docker Hub
- path: /docker-hub/publish/customer_faq/ - path: /docker-hub/publish/customer_faq/
title: User FAQs title: User FAQs
- path: /docker-hub/publish/publisher_faq/ - path: /docker-hub/publish/publisher_faq/
@ -3708,14 +3696,22 @@ manuals:
section: section:
- path: /release-notes/ - path: /release-notes/
title: Overview title: Overview
- path: /engine/release-notes/ - sectiontitle: Docker Enterprise Platform
title: Docker Enterprise and Engine - Community section:
nosync: true - path: /ee/release-notes/
- path: /cs-engine/1.13/release-notes/ title: Platform
title: CS Docker Engine - path: /engine/release-notes/
nosync: true title: Docker Engine - Enterprise and Engine - Community
- path: /release-notes/docker-engine/ nosync: true
title: Docker (1.13 and earlier) - path: /ee/ucp/release-notes/
title: Docker Universal Control Plane
nosync: true
- path: /ee/dtr/release-notes/
title: Docker Trusted Registry
nosync: true
- path: /ee/desktop/release-notes/
title: Docker Desktop Enterprise
nosync: true
- path: /docker-for-mac/release-notes/ - path: /docker-for-mac/release-notes/
title: Docker Desktop for Mac title: Docker Desktop for Mac
nosync: true nosync: true
@ -3731,14 +3727,119 @@ manuals:
- path: /docker-for-azure/release-notes/ - path: /docker-for-azure/release-notes/
title: Docker for Azure title: Docker for Azure
nosync: true nosync: true
- path: /release-notes/docker-machine/
title: Docker Machine release notes
nosync: true
- path: /release-notes/docker-swarm/ - path: /release-notes/docker-swarm/
title: Docker Swarm release notes title: Docker Swarm release notes
nosync: true nosync: true
- sectiontitle: Superseded products and tools - sectiontitle: Superseded products and tools
section: section:
- path: /cs-engine/1.13/release-notes/
title: CS Docker Engine
- path: /release-notes/docker-engine/
title: Docker (1.13 and earlier)
- sectiontitle: Docker Machine
section:
- path: /machine/overview/
title: Machine overview
- path: /machine/install-machine/
title: Install Machine
- path: /machine/get-started/
title: Get started with a local VM
- path: /machine/get-started-cloud/
title: Provision hosts in the cloud
- sectiontitle: Learn by example
section:
- path: /machine/examples/ocean/
title: Provision Digital Ocean Droplets
- path: /machine/examples/aws/
title: Provision AWS EC2 instances
- path: /machine/concepts/
title: Machine concepts and help
- sectiontitle: Machine (docker-machine) CLI
section:
- path: /machine/reference/
title: Machine CLI overview
- path: /machine/completion/
title: Machine command-line completion
- path: /machine/reference/active/
title: active
- path: /machine/reference/config/
title: config
- path: /machine/reference/create/
title: create
- path: /machine/reference/env/
title: env
- path: /machine/reference/help/
title: help
- path: /machine/reference/inspect/
title: inspect
- path: /machine/reference/ip/
title: ip
- path: /machine/reference/kill/
title: kill
- path: /machine/reference/ls/
title: ls
- path: /machine/reference/mount/
title: mount
- path: /machine/reference/provision/
title: provision
- path: /machine/reference/regenerate-certs/
title: regenerate-certs
- path: /machine/reference/restart/
title: restart
- path: /machine/reference/rm/
title: rm
- path: /machine/reference/scp/
title: scp
- path: /machine/reference/ssh/
title: ssh
- path: /machine/reference/start/
title: start
- path: /machine/reference/status/
title: status
- path: /machine/reference/stop/
title: stop
- path: /machine/reference/upgrade/
title: upgrade
- path: /machine/reference/url/
title: url
- sectiontitle: Machine drivers
section:
- path: /machine/drivers/
title: Drivers overview
- path: /machine/drivers/os-base/
title: Driver options and operating system defaults
- path: /machine/drivers/aws/
title: Amazon Web Services
- path: /machine/drivers/digital-ocean/
title: Digital Ocean
- path: /machine/drivers/exoscale/
title: Exoscale
- path: /machine/drivers/generic/
title: Generic
- path: /machine/drivers/gce/
title: Google Compute Engine
- path: /machine/drivers/soft-layer/
title: IBM Softlayer
- path: /machine/drivers/azure/
title: Microsoft Azure
- path: /machine/drivers/hyper-v/
title: Microsoft Hyper-V
- path: /machine/drivers/openstack/
title: OpenStack
- path: /machine/drivers/virtualbox/
title: Oracle VirtualBox
- path: /machine/drivers/rackspace/
title: Rackspace
- path: /machine/drivers/vm-fusion/
title: VMware Fusion
- path: /machine/drivers/vm-cloud/
title: VMware vCloud Air
- path: /machine/drivers/vsphere/
title: VMware vSphere
- path: /machine/migrate-to-machine/
title: Migrate from Boot2Docker to Machine
- path: /release-notes/docker-machine/
title: Docker Machine release notes
- sectiontitle: Docker Swarm (standalone) - sectiontitle: Docker Swarm (standalone)
section: section:
- path: /swarm/overview/ - path: /swarm/overview/
@ -3800,4 +3901,29 @@ manuals:
- path: /swarm/swarm-api/ - path: /swarm/swarm-api/
title: Docker Swarm API title: Docker Swarm API
- path: /release-notes/docker-swarm/ - path: /release-notes/docker-swarm/
title: Docker Swarm release notes title: Docker Swarm release notes
- sectiontitle: Docker Toolbox (legacy)
section:
- path: /toolbox/overview/
title: Toolbox overview
- path: /toolbox/toolbox_install_mac/
title: Install Toolbox on Mac
- path: /toolbox/toolbox_install_windows/
title: Install Toolbox on Windows
- sectiontitle: Kitematic
section:
- path: /kitematic/userguide/
title: "Kitematic user guide: intro &amp; overview"
- path: /kitematic/nginx-web-server/
title: Set up an Nginx web server
- path: /kitematic/minecraft-server/
title: Set up a Minecraft Server
- path: /kitematic/rethinkdb-dev-database/
title: Creating a local RethinkDB database for development
- path: /kitematic/faq/
title: Frequently asked questions
- path: /kitematic/known-issues/
title: Known issues
- path: /toolbox/faqs/troubleshoot/
title: Troubleshooting
nosync: true

View File

@ -1,6 +1,7 @@
| Docker version | Maximum API version | Change log | | Docker version | Maximum API version | Change log |
|:---------------|:---------------------------|:---------------------------------------------------------| |:---------------|:---------------------------|:---------------------------------------------------------|
| 19.03 | [1.40](/engine/api/v1.40/) | [changes](/engine/api/version-history/#v140-api-changes) |
| 18.09 | [1.39](/engine/api/v1.39/) | [changes](/engine/api/version-history/#v139-api-changes) | | 18.09 | [1.39](/engine/api/v1.39/) | [changes](/engine/api/version-history/#v139-api-changes) |
| 18.06 | [1.38](/engine/api/v1.38/) | [changes](/engine/api/version-history/#v138-api-changes) | | 18.06 | [1.38](/engine/api/v1.38/) | [changes](/engine/api/version-history/#v138-api-changes) |
| 18.05 | [1.37](/engine/api/v1.37/) | [changes](/engine/api/version-history/#v137-api-changes) | | 18.05 | [1.37](/engine/api/v1.37/) | [changes](/engine/api/version-history/#v137-api-changes) |
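Not part of the table above, but as a quick check: `docker version` reports the API version for both the client and the daemon, and the client can be pinned to an older API through the `DOCKER_API_VERSION` environment variable. A minimal sketch:

```bash
$ docker version                            # shows the API version for client and daemon
$ DOCKER_API_VERSION=1.39 docker version    # pin the client to the 18.09-era API
```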

View File

@ -37,6 +37,8 @@ your client and daemon API versions.
> To enable experimental features on the Docker daemon, edit the > To enable experimental features on the Docker daemon, edit the
> [daemon.json](/engine/reference/commandline/dockerd.md#daemon-configuration-file) > [daemon.json](/engine/reference/commandline/dockerd.md#daemon-configuration-file)
> and set `experimental` to `true`. > and set `experimental` to `true`.
>
> {% include experimental.md %}
{: .important } {: .important }
{% endif %} {% endif %}
@ -49,6 +51,8 @@ your client and daemon API versions.
> To enable experimental features in the Docker CLI, edit the > To enable experimental features in the Docker CLI, edit the
> [config.json](/engine/reference/commandline/cli.md#configuration-files) > [config.json](/engine/reference/commandline/cli.md#configuration-files)
> and set `experimental` to `enabled`. > and set `experimental` to `enabled`.
>
> {% include experimental.md %}
{: .important } {: .important }
{% endif %} {% endif %}

9
_includes/docker_ee.md Normal file
View File

@ -0,0 +1,9 @@
{% assign green-check = '![yes](/install/images/green-check.svg){: style="height: 14px; margin: 0 auto"}' %}
| Capabilities | Docker Engine - Enterprise | Docker Enterprise |
|:---------------------------------------------------------------------|:-------------------------:|:----------------------------:|
| Container engine and built-in orchestration, networking, and security | {{green-check}} | {{green-check}} |
| Certified infrastructure, plugins and ISV containers | {{green-check}} | {{green-check}} |
| Image management with Docker Trusted Registry security scanning | | {{green-check}} |
| Container app management with Universal Control Plane | | {{green-check}} |
| Developer solutions with Docker Desktop Enterprise | | {{green-check}} |

View File

@ -12,23 +12,23 @@ Usage: {% include ee-linux-install-reuse.md section="ee-install-intro" %}
{% if section == "ee-install-intro" %} {% if section == "ee-install-intro" %}
There are two ways to install and upgrade [Docker Enterprise Edition (Docker EE)](https://www.docker.com/enterprise-edition/){: target="_blank" class="_" } There are two ways to install and upgrade [Docker Enterprise](https://www.docker.com/enterprise-edition/){: target="_blank" class="_" }
on {{ linux-dist-long }}: on {{ linux-dist-long }}:
- [YUM repository](#repo-install-and-upgrade): Set up a Docker repository and install Docker EE from it. This is the recommended approach because installation and upgrades are managed with YUM and are easier to perform. - [YUM repository](#repo-install-and-upgrade): Set up a Docker repository and install Docker Engine - Enterprise from it. This is the recommended approach because installation and upgrades are managed with YUM and are easier to perform.
- [RPM package](#package-install-and-upgrade): Download the {{ package-format }} package, install it manually, and manage upgrades manually. This is useful when installing Docker EE on air-gapped systems with no access to the internet. - [RPM package](#package-install-and-upgrade): Download the {{ package-format }} package, install it manually, and manage upgrades manually. This is useful when installing Docker Engine - Enterprise on air-gapped systems with no access to the internet.
{% if linux-dist == "rhel" or linux-dist == "oraclelinux" %} {% if linux-dist == "rhel" or linux-dist == "oraclelinux" %}
Docker Community Edition (Docker CE) is _not_ supported on {{ linux-dist-long }}. Docker Engine - Community is _not_ supported on {{ linux-dist-long }}.
{% endif %} {% endif %}
{% if linux-dist == "centos" %} {% if linux-dist == "centos" %}
For Docker Community Edition on {{ linux-dist-cap }}, see [Get Docker CE for CentOS](/install/linux/docker-ce/centos.md). For Docker Community Edition on {{ linux-dist-cap }}, see [Get Docker Engine - Community for CentOS](/install/linux/docker-ce/centos.md).
{% endif %} {% endif %}
{% elsif section == "find-ee-repo-url" %} {% elsif section == "find-ee-repo-url" %}
To install Docker EE, you will need the URL of the Docker EE repository associated with your trial or subscription: To install Docker Enterprise, you will need the URL of the Docker Enterprise repository associated with your trial or subscription:
1. Go to [https://hub.docker.com/my-content](https://hub.docker.com/my-content){: target="_blank" class="_" }. All of your subscriptions and trials are listed. 1. Go to [https://hub.docker.com/my-content](https://hub.docker.com/my-content){: target="_blank" class="_" }. All of your subscriptions and trials are listed.
2. Click the **Setup** button for **Docker Enterprise Edition for {{ linux-dist-long }}**. 2. Click the **Setup** button for **Docker Enterprise Edition for {{ linux-dist-long }}**.
@ -39,11 +39,11 @@ You will use this URL in a later step to create a variable called, `DOCKERURL`.
{% elsif section == "using-yum-repo" %} {% elsif section == "using-yum-repo" %}
The advantage of using a repository from which to install Docker EE (or any software) is that it provides a certain level of automation. RPM-based distributions such as {{ linux-dist-long }} use a tool called YUM that works with your repositories to manage dependencies and provide automatic updates. The advantage of using a repository from which to install Docker Engine - Enterprise (or any software) is that it provides a certain level of automation. RPM-based distributions such as {{ linux-dist-long }} use a tool called YUM that works with your repositories to manage dependencies and provide automatic updates.
{% elsif section == "set-up-yum-repo" %} {% elsif section == "set-up-yum-repo" %}
You only need to set up the repository once, after which you can install Docker EE _from_ the repo and repeatedly upgrade as necessary. You only need to set up the repository once, after which you can install Docker Engine - Enterprise _from_ the repo and repeatedly upgrade as necessary.
1. Remove existing Docker repositories from `/etc/yum.repos.d/`: 1. Remove existing Docker repositories from `/etc/yum.repos.d/`:
@ -126,7 +126,7 @@ You only need to set up the repository once, after which you can install Docker
{% endif %} {% endif %}
6. Add the Docker EE **stable** repository: 6. Add the Docker Engine - Enterprise **stable** repository:
```bash ```bash
$ sudo -E yum-config-manager \ $ sudo -E yum-config-manager \
@ -137,8 +137,8 @@ You only need to set up the repository once, after which you can install Docker
{% elsif section == "install-using-yum-repo" %} {% elsif section == "install-using-yum-repo" %}
> **Note**: If you need to run Docker EE 2.0, please see the following instructions: > **Note**: If you need to run Docker Engine - Enterprise 2.0, please see the following instructions:
> * [18.03](https://docs.docker.com/v18.03/ee/supported-platforms/) - Older Docker EE Engine only release > * [18.03](https://docs.docker.com/v18.03/ee/supported-platforms/) - Older Docker Engine - Enterprise Engine only release
> * [17.06](https://docs.docker.com/v17.06/engine/installation/) - Docker Enterprise Edition 2.0 (Docker Engine, > * [17.06](https://docs.docker.com/v17.06/engine/installation/) - Docker Enterprise Edition 2.0 (Docker Engine,
> UCP, and DTR). > UCP, and DTR).
@ -151,7 +151,7 @@ You only need to set up the repository once, after which you can install Docker
If prompted to accept the GPG key, verify that the fingerprint matches `{{ gpg-fingerprint }}`, and if so, accept it. If prompted to accept the GPG key, verify that the fingerprint matches `{{ gpg-fingerprint }}`, and if so, accept it.
2. To install a _specific version_ of Docker EE (recommended in production), list versions and install: 2. To install a _specific version_ of Docker Engine - Enterprise (recommended in production), list versions and install:
a. List and sort the versions available in your repo. This example sorts results by version number, highest to lowest, and is truncated: a. List and sort the versions available in your repo. This example sorts results by version number, highest to lowest, and is truncated:
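The listing and install commands themselves are elided from this diff hunk. As a sketch, with `<VERSION_STRING>` left as a placeholder and package names assumed from the Docker Engine - Enterprise repositories:

```bash
$ sudo yum list docker-ee --showduplicates | sort -r
$ sudo yum -y install docker-ee-<VERSION_STRING> docker-ee-cli-<VERSION_STRING> containerd.io
```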
@ -185,7 +185,7 @@ You only need to set up the repository once, after which you can install Docker
$ sudo systemctl start docker $ sudo systemctl start docker
``` ```
4. Verify that Docker EE is installed correctly by running the `hello-world` 4. Verify that Docker Engine - Enterprise is installed correctly by running the `hello-world`
image. This command downloads a test image, runs it in a container, prints image. This command downloads a test image, runs it in a container, prints
an informational message, and exits: an informational message, and exits:
@ -193,7 +193,7 @@ You only need to set up the repository once, after which you can install Docker
$ sudo docker run hello-world $ sudo docker run hello-world
``` ```
Docker EE is installed and running. Use `sudo` to run Docker commands. See Docker Engine - Enterprise is installed and running. Use `sudo` to run Docker commands. See
[Linux postinstall](/install/linux/linux-postinstall.md){: target="_blank" class="_" } to allow [Linux postinstall](/install/linux/linux-postinstall.md){: target="_blank" class="_" } to allow
non-privileged users to run Docker commands. non-privileged users to run Docker commands.
@ -207,7 +207,7 @@ You only need to set up the repository once, after which you can install Docker
{% elsif section == "package-installation" %} {% elsif section == "package-installation" %}
To manually install Docker EE, download the `.{{ package-format | downcase }}` file for your release. You need to download a new file each time you want to upgrade Docker EE. To manually install Docker Enterprise, download the `.{{ package-format | downcase }}` file for your release. You need to download a new file each time you want to upgrade Docker Enterprise.
{% elsif section == "install-using-yum-package" %} {% elsif section == "install-using-yum-package" %}
@ -222,13 +222,13 @@ To manually install Docker EE, download the `.{{ package-format | downcase }}` f
{% endif %} {% endif %}
{% if linux-dist == "centos" %} {% if linux-dist == "centos" %}
1. Go to the Docker EE repository URL associated with your trial or subscription 1. Go to the Docker Engine - Enterprise repository URL associated with your trial or subscription
in your browser. Go to `{{ linux-dist-url-slug }}/7/x86_64/stable-<VERSION>/Packages` in your browser. Go to `{{ linux-dist-url-slug }}/7/x86_64/stable-<VERSION>/Packages`
and download the `.{{ package-format | downcase }}` file for the Docker version you want to install. and download the `.{{ package-format | downcase }}` file for the Docker version you want to install.
{% endif %} {% endif %}
{% if linux-dist == "rhel" or linux-dist == "oraclelinux" %} {% if linux-dist == "rhel" or linux-dist == "oraclelinux" %}
1. Go to the Docker EE repository URL associated with your 1. Go to the Docker Engine - Enterprise repository URL associated with your
trial or subscription in your browser. Go to trial or subscription in your browser. Go to
`{{ linux-dist-url-slug }}/`. Choose your {{ linux-dist-long }} version, `{{ linux-dist-url-slug }}/`. Choose your {{ linux-dist-long }} version,
architecture, and Docker version. Download the architecture, and Docker version. Download the
@ -240,7 +240,7 @@ To manually install Docker EE, download the `.{{ package-format | downcase }}` f
{% endif %} {% endif %}
{% endif %} {% endif %}
2. Install Docker EE, changing the path below to the path where you downloaded 2. Install Docker Enterprise, changing the path below to the path where you downloaded
the Docker package. the Docker package.
```bash ```bash
@ -258,7 +258,7 @@ To manually install Docker EE, download the `.{{ package-format | downcase }}` f
$ sudo systemctl start docker $ sudo systemctl start docker
``` ```
4. Verify that Docker EE is installed correctly by running the `hello-world` 4. Verify that Docker Engine - Enterprise is installed correctly by running the `hello-world`
image. This command downloads a test image, runs it in a container, prints image. This command downloads a test image, runs it in a container, prints
an informational message, and exits: an informational message, and exits:
@ -266,7 +266,7 @@ To manually install Docker EE, download the `.{{ package-format | downcase }}` f
$ sudo docker run hello-world $ sudo docker run hello-world
``` ```
Docker EE is installed and running. Use `sudo` to run Docker commands. See Docker Engine - Enterprise is installed and running. Use `sudo` to run Docker commands. See
[Linux postinstall](/install/linux/linux-postinstall.md){: target="_blank" class="_" } to allow [Linux postinstall](/install/linux/linux-postinstall.md){: target="_blank" class="_" } to allow
non-privileged users to run Docker commands. non-privileged users to run Docker commands.
@ -281,7 +281,7 @@ To manually install Docker EE, download the `.{{ package-format | downcase }}` f
{% elsif section == "yum-uninstall" %} {% elsif section == "yum-uninstall" %}
1. Uninstall the Docker EE package: 1. Uninstall the Docker Engine - Enterprise package:
```bash ```bash
$ sudo yum -y remove docker-ee $ sudo yum -y remove docker-ee
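# Note (added sketch, not in the original diff): removing the package does not
# delete images, containers, volumes, or configuration files. To remove those as
# well, delete /var/lib/docker:
$ sudo rm -rf /var/lib/docker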

View File

@ -0,0 +1,3 @@
<!-- This text will be included in the docs that are marked as experimental -->
Experimental features provide early access to future product functionality. These features are intended for testing and feedback only, because they may change between releases without warning or may be removed entirely in a future release. Experimental features must not be used in production environments. Docker does not offer support for experimental features.

View File

@ -0,0 +1,2 @@
<!-- This text will be included in the CLI plugin docs that are marked as experimental in Docker Enterprise 3.0 release -->
Experimental features provide early access to future product functionality. These features are intended for testing and feedback only, because they may change between releases without warning or may be removed entirely in a future release. Experimental features must not be used in production environments. Docker does not offer support for experimental features. For more information, see [Experimental features](https://success.docker.com/article/experimental-features).

View File

@ -1,10 +1,10 @@
<!-- This file is included in Docker CE or EE installation docs for Linux. --> <!-- This file is included in Docker Engine - Community or EE installation docs for Linux. -->
### Install using the convenience script ### Install using the convenience script
Docker provides convenience scripts at [get.docker.com](https://get.docker.com/) Docker provides convenience scripts at [get.docker.com](https://get.docker.com/)
and [test.docker.com](https://test.docker.com/) for installing edge and and [test.docker.com](https://test.docker.com/) for installing edge and
testing versions of Docker CE into development environments quickly and testing versions of Docker Engine - Community into development environments quickly and
non-interactively. The source code for the scripts is in the non-interactively. The source code for the scripts is in the
[`docker-install` repository](https://github.com/docker/docker-install). [`docker-install` repository](https://github.com/docker/docker-install).
**Using these scripts is not recommended for production **Using these scripts is not recommended for production
@ -27,7 +27,7 @@ them:
host machine using another mechanism. host machine using another mechanism.
This example uses the script at [get.docker.com](https://get.docker.com/) to This example uses the script at [get.docker.com](https://get.docker.com/) to
install the latest release of Docker CE on Linux. To install the latest install the latest release of Docker Engine - Community on Linux. To install the latest
testing version, use [test.docker.com](https://test.docker.com/) instead. In testing version, use [test.docker.com](https://test.docker.com/) instead. In
each of the commands below, replace each occurrence of `get` with `test`. each of the commands below, replace each occurrence of `get` with `test`.
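The commands themselves are elided from this diff. As a sketch, the usual invocation looks like this (inspect the downloaded script before running it):

```bash
$ curl -fsSL https://get.docker.com -o get-docker.sh
$ sudo sh get-docker.sh
```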
@ -61,7 +61,7 @@ Remember to log out and back in for this to take effect!
> for more information. > for more information.
{:.warning} {:.warning}
Docker CE is installed. It starts automatically on `DEB`-based distributions. On Docker Engine - Community is installed. It starts automatically on `DEB`-based distributions. On
`RPM`-based distributions, you need to start it manually using the appropriate `RPM`-based distributions, you need to start it manually using the appropriate
`systemctl` or `service` command. As the message indicates, non-root users can't `systemctl` or `service` command. As the message indicates, non-root users can't
run Docker commands by default. run Docker commands by default.

View File

@ -145,28 +145,23 @@ version: '3.3'
services: services:
web: web:
build: web image: dockersamples/k8s-wordsmith-web
image: dockerdemos/lab-web
volumes:
- "./web/static:/static"
ports: ports:
- "80:80" - "80:80"
words: words:
build: words image: dockersamples/k8s-wordsmith-api
image: dockerdemos/lab-words
deploy: deploy:
replicas: 5 replicas: 5
endpoint_mode: dnsrr endpoint_mode: dnsrr
resources: resources:
limits: limits:
memory: 16M memory: 50M
reservations: reservations:
memory: 16M memory: 50M
db: db:
build: db image: dockersamples/k8s-wordsmith-db
image: dockerdemos/lab-db
``` ```
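Not part of the original page, but a minimal sketch of deploying a stack file like the one above, assuming it is saved as `docker-compose.yml` and the stack is named `wordsmith`:

```bash
$ docker stack deploy --compose-file docker-compose.yml wordsmith
```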
If you already have a Kubernetes YAML file, you can deploy it using the If you already have a Kubernetes YAML file, you can deploy it using the

View File

@ -50,8 +50,8 @@ done < <(cat ./_config.yml |grep '_version:' |grep '^[a-z].*')
sedi "s/{{ site.latest_engine_api_version }}/$latest_engine_api_version/g" ./_data/toc.yaml sedi "s/{{ site.latest_engine_api_version }}/$latest_engine_api_version/g" ./_data/toc.yaml
# Engine stable # Engine stable
ENGINE_SVN_BRANCH="branches/18.09" ENGINE_SVN_BRANCH="branches/19.03"
ENGINE_BRANCH="18.09" ENGINE_BRANCH="19.03"
# Distribution # Distribution
DISTRIBUTION_SVN_BRANCH="branches/release/2.6" DISTRIBUTION_SVN_BRANCH="branches/release/2.6"
@ -83,6 +83,7 @@ wget --quiet --directory-prefix=./engine/api/v1.36/ https://raw.githubuserconten
wget --quiet --directory-prefix=./engine/api/v1.37/ https://raw.githubusercontent.com/docker/docker-ce/18.03/components/engine/api/swagger.yaml || (echo "Failed 1.37 swagger download" && exit 1) wget --quiet --directory-prefix=./engine/api/v1.37/ https://raw.githubusercontent.com/docker/docker-ce/18.03/components/engine/api/swagger.yaml || (echo "Failed 1.37 swagger download" && exit 1)
wget --quiet --directory-prefix=./engine/api/v1.38/ https://raw.githubusercontent.com/docker/docker-ce/18.06/components/engine/api/swagger.yaml || (echo "Failed 1.38 swagger download" && exit 1) wget --quiet --directory-prefix=./engine/api/v1.38/ https://raw.githubusercontent.com/docker/docker-ce/18.06/components/engine/api/swagger.yaml || (echo "Failed 1.38 swagger download" && exit 1)
wget --quiet --directory-prefix=./engine/api/v1.39/ https://raw.githubusercontent.com/docker/docker-ce/18.09/components/engine/api/swagger.yaml || (echo "Failed 1.39 swagger download" && exit 1) wget --quiet --directory-prefix=./engine/api/v1.39/ https://raw.githubusercontent.com/docker/docker-ce/18.09/components/engine/api/swagger.yaml || (echo "Failed 1.39 swagger download" && exit 1)
wget --quiet --directory-prefix=./engine/api/v1.40/ https://raw.githubusercontent.com/docker/docker-ce/19.03/components/engine/api/swagger.yaml || (echo "Failed 1.40 swagger download" && exit 1)
# Get a few one-off files that we use directly from upstream # Get a few one-off files that we use directly from upstream

View File

@ -1,9 +1,13 @@
--- ---
title: Working with Docker Template title: Working with Docker Template (experimental)
description: Working with Docker Application Template description: Working with Docker Application Template
keywords: Docker, application template, Application Designer keywords: Docker, application template, Application Designer
--- ---
>This is an experimental feature.
>
>{% include experimental.md %}
## Overview ## Overview
Docker Template is a CLI plugin that introduces a top-level `docker template` command that allows users to create new Docker applications by using a library of templates. There are two types of templates — service templates and application templates. Docker Template is a CLI plugin that introduces a top-level `docker template` command that allows users to create new Docker applications by using a library of templates. There are two types of templates — service templates and application templates.
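Once the plugin is installed, the quickest way to see which templates are available is the `docker template ls` command, which this page also refers to further down:

```bash
$ docker template ls
```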
@ -31,7 +35,7 @@ A service template provides the description required by Docker Template to scaff
1. `/run/configuration`, a JSON file which contains all settings such as parameters, image name, etc. For example: 1. `/run/configuration`, a JSON file which contains all settings such as parameters, image name, etc. For example:
``` ```json
{ {
"parameters": { "parameters": {
"externalPort": "80", "externalPort": "80",
@ -49,7 +53,7 @@ To create a basic service template, you need to create two files — a dockerfil
`docker-compose.yaml` `docker-compose.yaml`
``` ```yaml
version: "3.6" version: "3.6"
services: services:
mysql: mysql:
@ -58,7 +62,7 @@ services:
`Dockerfile` `Dockerfile`
``` ```conf
FROM alpine FROM alpine
COPY docker-compose.yaml . COPY docker-compose.yaml .
CMD cp docker-compose.yaml /project/ CMD cp docker-compose.yaml /project/
@ -76,7 +80,7 @@ Services that generate a template using code must contain the following files th
Here's an example of a simple NodeJS service: Here's an example of a simple NodeJS service:
``` ```bash
my-service my-service
├── Dockerfile # The Dockerfile of the service template ├── Dockerfile # The Dockerfile of the service template
└── assets └── assets
@ -88,7 +92,7 @@ The NodeJS service contains the following files:
`my-service/Dockerfile` `my-service/Dockerfile`
``` ```conf
FROM alpine FROM alpine
COPY assets /assets COPY assets /assets
CMD ["cp", "/assets", "/project"] CMD ["cp", "/assets", "/project"]
@ -98,7 +102,8 @@ COPY assets /assets
`my-service/assets/docker-compose.yaml` `my-service/assets/docker-compose.yaml`
``` {% raw %}
```yaml
version: "3.6" version: "3.6"
services: services:
{{ .Name }}: {{ .Name }}:
@ -106,10 +111,11 @@ services:
ports: ports:
- {{ .Parameters.externalPort }}:3000 - {{ .Parameters.externalPort }}:3000
``` ```
{% endraw %}
`my-service/assets/Dockerfile` `my-service/assets/Dockerfile`
``` ```conf
FROM node:9 FROM node:9
WORKDIR /app WORKDIR /app
COPY package.json . COPY package.json .
@ -122,7 +128,7 @@ CMD ["yarn", "run", "start"]
The next step is to build and push the service template image to a remote repository by running the following command: The next step is to build and push the service template image to a remote repository by running the following command:
``` ```bash
cd [...]/my-service cd [...]/my-service
docker build -t org/my-service . docker build -t org/my-service .
docker push org/my-service docker push org/my-service
@ -130,7 +136,7 @@ docker push org/my-service
To build and push the image to an instance of Docker Trusted Registry (DTR) or to an external registry, specify the name of the repository: To build and push the image to an instance of Docker Trusted Registry (DTR) or to an external registry, specify the name of the repository:
``` ```bash
cd [...]/my-service cd [...]/my-service
docker build -t myrepo:5000/my-service . docker build -t myrepo:5000/my-service .
docker push myrepo:5000/my-service docker push myrepo:5000/my-service
@ -145,7 +151,7 @@ Of all the available service and application definitions, Docker Template has ac
Here is an example of the Express service definition: Here is an example of the Express service definition:
``` ```yaml
- apiVersion: v1alpha1 # constant - apiVersion: v1alpha1 # constant
kind: ServiceTemplate # constant kind: ServiceTemplate # constant
metadata: metadata:
@ -174,7 +180,7 @@ To customize a service, you need to complete the following tasks:
Add the parameters available to the application. The following example adds the NodeJS version and the external port: Add the parameters available to the application. The following example adds the NodeJS version and the external port:
``` ```yaml
- [...] - [...]
spec: spec:
[...] [...]
@ -203,7 +209,7 @@ When you run the service template container, a volume is mounted making the serv
The file matches the following go struct: The file matches the following go struct:
``` ```golang
type TemplateContext struct {
ServiceID string `json:"serviceId,omitempty"`
Name string `json:"name,omitempty"`
@@ -218,7 +224,7 @@ type TemplateContext struct {
Where `ConfiguredService` is:
```go
type ConfiguredService struct {
ID string `json:"serviceId,omitempty"`
Name string `json:"name,omitempty"`
@@ -230,7 +236,7 @@ You can then use the file to obtain values for the parameters and use this infor
To use the `interpolator` image, update `my-service/Dockerfile` to use the following Dockerfile:
```conf
FROM dockertemplate/interpolator:v0.0.3-beta1
COPY assets .
```
@@ -239,7 +245,7 @@ COPY assets .
This places the interpolator image in the `/assets` folder and copies the folder to the target `/project` folder. If you prefer to do this manually, use a Dockerfile instead:
```conf
WORKDIR /assets
CMD ["/interpolator", "-config", "/run/configuration", "-source", "/assets", "-destination", "/project"]
```
@@ -250,7 +256,7 @@ When this is complete, use the newly added node option in `my-service/assets/Doc
with
{% raw %}`FROM node:{{ .Parameters.node }}`{% endraw %}
Now, build and push the image to your repository.
@@ -264,7 +270,7 @@ Create a local repository file called `library.yaml` anywhere on your local driv
`library.yaml`
```yaml
apiVersion: v1alpha1
generated: "2018-06-13T09:24:07.392654524Z"
kind: RepositoryContent
@@ -285,7 +291,7 @@ Now that you have created a local repository and added service definitions to it
1. Edit `~/.docker/dockertemplate/preferences.yaml` as follows:
```yaml
apiVersion: v1alpha1
channel: master
kind: Preferences
@@ -296,7 +302,7 @@ repositories:
2. Add your local repository:
```yaml
apiVersion: v1alpha1
channel: master
kind: Preferences
@@ -307,6 +313,13 @@ repositories:
url: https://docker-application-template.s3.amazonaws.com/master/library.yaml
```
When configuring a local repository on Windows, the `url` structure is slightly different:
```yaml
- name: custom-services
  url: file://c:/path/to/my/library.yaml
```
After updating the `preferences.yaml` file, run `docker template ls` or restart the Application Designer and select **Custom application**. The new service should now be visible in the list of available services.
### Share custom service templates
@@ -337,7 +350,7 @@ Before you create an application template definition, you must create a reposito
For example, to create an Express and MySQL application, the application definition must be similar to the following yaml file:
```yaml
apiVersion: v1alpha1 #constant
kind: ApplicationTemplate #constant
metadata:
@@ -360,7 +373,7 @@ Create a local repository file called `library.yaml` anywhere on your local driv
`library.yaml`
```yaml
apiVersion: v1alpha1
generated: "2018-06-13T09:24:07.392654524Z"
kind: RepositoryContent
@@ -385,7 +398,7 @@ Now that you have created a local repository and added application definitions,
1. Edit `~/.docker/dockertemplate/preferences.yaml` as follows:
```yaml
apiVersion: v1alpha1
channel: master
kind: Preferences
@@ -396,7 +409,7 @@ repositories:
2. Add your local repository:
```yaml
apiVersion: v1alpha1
channel: master
kind: Preferences
@@ -407,6 +420,13 @@ repositories:
url: https://docker-application-template.s3.amazonaws.com/master/library.yaml
```
When configuring a local repository on Windows, the `url` structure is slightly different:
```yaml
- name: custom-services
  url: file://c:/path/to/my/library.yaml
```
After updating the `preferences.yaml` file, run `docker template ls` or restart the Application Designer and select **Custom application**. The new template should now be visible in the list of available templates.
### Share the custom application template


@ -1,346 +1,463 @@
---
title: Working with Docker App (experimental)
description: Learn about Docker App
keywords: Docker App, applications, compose, orchestration
---
>This is an experimental feature.
>
>{% include experimental.md %}
## Overview
| Object | Config file | Build with | Execute with |
| ------------- |---------------| -------------------|-----------------------| Docker App is a CLI plug-in that introduces a top-level `docker app` command to bring
| Container | Dockerfile | docker image build | docker container run | the _container experience_ to applications. The following table compares Docker containers with Docker applications.
| App | bundle.json | docker app bundle | docker app install |
| Object | Config file | Build with | Execute with | Share with |
With Docker App, entire applications can now be managed as easily as images and containers. For example, Docker App lets you _build_, _validate_ and _deploy_ applications with the `docker app` command. You can even leverage secure supply-chain features such as signed `push` and `pull` operations. | ------------- |---------------| -------------------|-----------------------|-------------------|
| Container | Dockerfile | docker image build | docker container run | docker image push |
This guide will walk you through two scenarios: | App | App Package | docker app bundle | docker app install | docker app push |
1. Initialize and deploy a new Docker App project from scratch
2. Convert an existing Compose app into a Docker App project (Added later in the beta process) With Docker App, entire applications can now be managed as easily as images and containers. For example,
Docker App lets you _build_, _validate_ and _deploy_ applications with the `docker app` command. You can
The first scenario will familiarize you with the basic components of a Docker App and get you comfortable with the tools and workflow. even leverage secure supply-chain features such as signed `push` and `pull` operations.
## Initialize and deploy a new Docker App project from scratch > **NOTE**: `docker app` works with `Engine - Community 19.03` or higher and `Engine - Enterprise 19.03` or higher.
In this section, we'll walk through the process of creating a new Docker App project. By then end, you'll be familiar with the workflow and most important commands. This guide walks you through two scenarios:
We'll complete the following steps: 1. Initialize and deploy a new Docker App project from scratch.
1. Convert an existing Compose app into a Docker App project (added later in the beta process).
1. Pre-requisites
2. Initialize an empty new project The first scenario describes basic components of a Docker App with tools and workflow.
3. Populate the project
4. Validate the app ## Initialize and deploy a new Docker App project from scratch
5. Deploy the app
6. Push the app to Docker Hub This section describes the steps for creating a new Docker App project to familiarize you with the workflow and most important commands.
7. Install the app directly from Docker Hub
1. Prerequisites
### Pre-requisites 1. Initialize an empty new project
1. Populate the project
In order to follow along, you'll need at least one Docker node operating in Swarm mode. You will also need the latest build of the Docker CLI with the APP CLI plugin included. 1. Validate the app
1. Deploy the app
Depending on your Linux distribution and your security context, you may need to prepend commands with `sudo`. 1. Push the app to Docker Hub or Docker Trusted Registry
1. Install the app directly from Docker Hub
### Initialize a new empty project ### Prerequisites
The `docker app init` command is used to initialize a new Docker application project. If you run it on its own, it initializes a new empty project. If you point it to an existing `docker-compose.yml` file, it initializes a new project based on the Compose file. You need at least one Docker node operating in Swarm mode. You also need the latest build of the Docker CLI
with the App CLI plugin included.
Use the following command to initialize a new empty project called "hello-world".
Depending on your Linux distribution and your security context, you might need to prepend commands with `sudo`.
```
$ docker app init --single-file hello-world ### Initialize a new empty project
Created "hello-world.dockerapp"
``` The `docker app init` command is used to initialize a new Docker application project. If you run it on
its own, it initializes a new empty project. If you point it to an existing `docker-compose.yml` file,
The command will produce a single file in your current directory called `hello-world.dockerapp`. The format of the file name is <project-name> appended with `.dockerapp`. it initializes a new project based on the Compose file.
``` Use the following command to initialize a new empty project called "hello-world".
$ ls
hello-world.dockerapp ```
``` $ docker app init --single-file hello-world
Created "hello-world.dockerapp"
If you run `docker app init` without the `--single-file` flag you will get a new directory containing three YAML files. The name of the directory will the name of the project with `.dockerapp` appended, and the three YAML files will be: ```
- `docker-compose.yml` The command produces a single file in your current directory called `hello-world.dockerapp`.
- `metadata.yml` The format of the file name is <project-name> appended with `.dockerapp`.
- `parameters.yml`
```
However, the `--single-file` option merges the three YAML files into a single YAML file with three sections. Each of these sections relates to one of the three YAML files mentioned above --- `docker-compose.yml`, `metadata.yml`, and `parameters.yml`. Using the `--single-file` option is great for enabling you to share your application via a single configuration file. $ ls
hello-world.dockerapp
Inspect the YAML with the following command. ```
``` If you run `docker app init` without the `--single-file` flag, you get a new directory containing three YAML files.
$ cat hello-world.dockerapp The name of the directory is the name of the project with `.dockerapp` appended, and the three YAML files are:
# Application metadata - equivalent to metadata.yml.
version: 0.1.0 - `docker-compose.yml`
name: hello-world - `metadata.yml`
description: - `parameters.yml`
---
# Application services - equivalent to docker-compose.yml. However, the `--single-file` option merges the three YAML files into a single YAML file with three sections.
version: "3.6" Each of these sections relates to one of the three YAML files mentioned previously: `docker-compose.yml`,
services: {} `metadata.yml`, and `parameters.yml`. Using the `--single-file` option enables you to share your application
--- using a single configuration file.
# Default application parameters - equivalent to parameters.yml.
``` Inspect the YAML with the following command.
Your file may be more verbose. ```
$ cat hello-world.dockerapp
Notice that each of the three sections is separated by a set of three dashes ("---"). Let's quickly describe each section. # Application metadata - equivalent to metadata.yml.
version: 0.1.0
The first section of the file is where you specify identification metadata such as name, version, and description. It accepts key-value pairs. This part of the file can be a separate file called `metadata.yml` name: hello-world
description:
The second section of the file describes the application. It can be a separate file called `docker-compose.yml`. ---
# Application services - equivalent to docker-compose.yml.
The final section is where default values for application parameters can be expressed. It can be a separate file called `parameters.yml` version: "3.6"
services: {}
### Populate the project ---
# Default application parameters - equivalent to parameters.yml.
In this section, we'll edit the project YAML file so that it runs a simple web app. ```
Use your preferred editor to edit the `hello-world.dockerapp` YMAL file and update the application section to the following: Your file might be more verbose.
``` Notice that each of the three sections is separated by a set of three dashes ("---"). Let's quickly describe each section.
version: "3.6"
services: The first section of the file specifies identification metadata such as name, version,
hello: description and maintainers. It accepts key-value pairs. This part of the file can be a separate file called `metadata.yml`
image: hashicorp/http-echo
command: ["-text", "${text}"] The second section of the file describes the application. It can be a separate file called `docker-compose.yml`.
ports:
- ${port}:5678 The final section specifies default values for application parameters. It can be a separate file called `parameters.yml`
```
### Populate the project
Update the Parameters section to the following:
This section describes editing the project YAML file so that it runs a simple web app.
```
port: 8080 Use your preferred editor to edit the `hello-world.dockerapp` YAML file and update the application section with
text: Hello world! the following information:
```
```
The sections of the YAML file are currently order-based. This means it's important they remain in the order we've explained, with the _metadata_ section being first, the _app_ section being second, and the _parameters_ section being last. This may change to name-based sections in future releases. version: "3.6"
services:
Save the changes. hello:
image: hashicorp/http-echo
The application has been updated to run a single-container application based on the `hashicorp/http-echo` web server image. This image will have it execute a single command that displays some text and exposes itself on a network port. command: ["-text", "${hello.text}"]
ports:
Following best-practices, the configuration of the application has been decoupled form the application itself using variables. In this case, the text displayed by the app, and the port it will be published on, are controlled by two variables defined in the Parameters section of the file. - ${hello.port}:5678
```
Docker App provides the `inspect` sub-command to provide a prettified summary of the application configuration. It's important to note that the application is not running at this point, and that the `inspect` operation inspects the configuration file(s).
Update the `Parameters` section to the following:
```
$ docker app inspect hello-world.dockerapp ```
hello-world 0.1.0 hello:
port: 8080
Service (1) Replicas Ports Image text: Hello world!
----------- -------- ----- ----- ```
hello 1 8080 hashicorp/http-echo
The sections of the YAML file are currently order-based. This means it's important they remain in the order we've explained, with the _metadata_ section being first, the _app_ section being second, and the _parameters_ section being last. This may change to name-based sections in future releases.
Parameters (2) Value
-------------- ----- Save the changes.
port 8080
text Hello world! The application is updated to run a single-container application based on the `hashicorp/http-echo` web server image.
``` This image has it execute a single command that displays some text and exposes itself on a network port.
`docker app inspect` operations will fail if the parameters section doesn't specify a default value for every parameter expressed in the app section. Following best practices, the configuration of the application is decoupled from the application itself using variables.
In this case, the text displayed by the app and the port on which it will be published are controlled by two variables defined in the `Parameters` section of the file.
The application is ready to validated and rendered.
Docker App provides the `inspect` subcommand to provide a prettified summary of the application configuration.
### Validate the app It is a quick way to check how to configure the application before deployment, without having to read
Docker App provides the `validate` sub-command to check syntax and other aspects of the configuration. If validation passes, the command returns no arguments. the `Compose file`. It's important to note that the application is not running at this point, and that
the `inspect` operation inspects the configuration file(s).
```
$ docker app validate hello-world.dockerapp ```
Validated "hello-world.dockerapp" $ docker app inspect hello-world.dockerapp
``` hello-world 0.1.0
`docker app validate` operations will fail if the parameters section doesn't specify a default value for every parameter expressed in the app section. Service (1) Replicas Ports Image
----------- -------- ----- -----
hello 1 8080 hashicorp/http-echo
As the `validate` operation has returned no problems, the app is ready to be deployed.
Parameters (2) Value
### Deploy the app -------------- -----
hello.port 8080
There are several options for deploying a Docker App project. hello.text Hello world!
```
1. Deploy as a native Docker App application
2. Deploy as a Compose app application `docker app inspect` operations fail if the `Parameters` section doesn't specify a default value for
3. Deploy as a Docker Stack application every parameter expressed in the app section.
We'll look at all three options, starting with deploying as a native Dock App application. The application is ready to be validated and rendered.
#### Deploy as a native Docker App ### Validate the app
The process for deploying as a native Docker app is as follows. Docker App provides the `validate` subcommand to check syntax and other aspects of the configuration.
If the app passes validation, the command returns no arguments.
1. Use `docker app install` to deploy the application
```
Use the following command to deploy (install) the application. $ docker app validate hello-world.dockerapp
Validated "hello-world.dockerapp"
``` ```
$ docker app install hello-world.dockerapp --name my-app
Creating network my-app_default `docker app validate` operations fail if the `Parameters` section doesn't specify a default value for
Creating service my-app_hello every parameter expressed in the app section.
```
As the `validate` operation has returned no problems, the app is ready to be deployed.
The app will be deployed using the stack orchestrator. This means you can inspect it with regular `docker stack` commands.
### Deploy the app
```
$ docker stack ls There are several options for deploying a Docker App project.
NAME SERVICES ORCHESTRATOR
my-app 1 Swarm - Deploy as a native Docker App application
``` - Deploy as a Compose app application
- Deploy as a Docker Stack application
You can also check the status of the app with the `docker app status <app-name>` command.
All three options are discussed, starting with deploying as a native Dock App application.
```
$ docker app status my-app #### Deploy as a native Docker App
ID NAME MODE REPLICAS IMAGE PORTS
miqdk1v7j3zk my-app_hello replicated 1/1 hashicorp/http-echo:latest *:8080->5678/tcp The process for deploying as a native Docker app is as follows:
```
Use `docker app install` to deploy the application.
Now that the app is running, you can point a web browser at the DNS name or public IP of the Docker node on port 8080 and see the app in all its glory. You will need to ensure traffic to port 8080 is allowed on the connection form your browser to your Docker host.
Use the following command to deploy (install) the application.
You can uninstall the app with `docker app uninstall my-app`.
```
#### Deploy as a Docker Compose app $ docker app install hello-world.dockerapp --name my-app
Creating network my-app_default
The process for deploying a as a Compose app comprises two major steps: Creating service my-app_hello
Application "my-app" installed on context "default"
1. Render the Docker app project as a `docker-compose.yml` file. ```
2. Deploy the app using `docker-compose up`.
By default, `docker app` uses the [current context](/engine/context/working-with-contexts) to run the
You will need a recent version of Docker Compose tom complete these steps. installation container and as a target context to deploy the application. You can override the second context
using the flag `--target-context` or by using the environment variable `DOCKER_TARGET_CONTEXT`. This flag is also
Rendering is the process of reading the entire application configuration and outputting it as a single `docker-compose.yml` file. This will create a Compose file with hard-coded values wherever a parameter was specified as a variable. available for the commands `status`, `upgrade`, and `uninstall`.
Use the following command to render the app to a Compose file called `docker-compose.yml` in the current directory. ```
$ docker app install hello-world.dockerapp --name my-app --target-context=my-big-production-cluster
``` Creating network my-app_default
$ docker app render --output docker-compose.yml hello-world.dockerapp Creating service my-app_hello
``` Application "my-app" installed on context "my-big-production-cluster"
```
Check the contents of the resulting `docker-compose.yml` file.
> **Note**: Two applications deployed on the same target context cannot share the same name, but this is
``` valid if they are deployed on different target contexts.
$ cat docker-compose.yml
version: "3.6" You can check the status of the app with the `docker app status <app-name>` command.
services:
hello: ```
command: $ docker app status my-app
- -text INSTALLATION
- Hello world! ------------
image: hashicorp/http-echo Name: my-app
ports: Created: 35 seconds
- mode: ingress Modified: 31 seconds
target: 5678 Revision: 01DCMY7MWW67AY03B029QATXFF
published: 8080 Last Action: install
protocol: tcp Result: SUCCESS
``` Orchestrator: swarm
Notice that the file contains hard-coded values that were expanded based on the contents of the Parameters section of the project's YAML file. For example, ${text} has been expanded to "Hello world!". APPLICATION
-----------
Use `docker-compose up` to deploy the app. Name: hello-world
Version: 0.1.0
``` Reference:
$ docker-compose up --detach
WARNING: The Docker Engine you're using is running in swarm mode. PARAMETERS
<Snip> ----------
``` hello.port: 8080
hello.text: Hello, World!
The application is now running as a Docker compose app and should be reachable on port `8080` on your Docker host. You will need to ensure traffic to port 8080 is allowed on the connection form your browser to your Docker host.
STATUS
You can use `docker-compose down` to stop and remove the application. ------
ID NAME MODE REPLICAS IMAGE PORTS
#### Deploy as a Docker Stack miqdk1v7j3zk my-app_hello replicated 1/1 hashicorp/http-echo:latest *:8080->5678/tcp
```
Deploying the app as a Docker stack is a two-step process very similar to deploying it as a Docker compose app.
The app is deployed using the stack orchestrator. This means you can also inspect it using the regular `docker stack` commands.
1. Render the Docker app project as a `docker-compose.yml` file.
2. Deploy the app using `docker stack deploy`. ```
$ docker stack ls
We'll assume that you've followed the steps to render the Docker app project as a compose file (shown in the previous section) and that you're ready to deploy it as a Docker Stack. Your Docker host will need to be in Swarm mode. NAME SERVICES ORCHESTRATOR
my-app 1 Swarm
``` ```
$ docker stack deploy hello-world-app -c docker-compose.yml
Creating network hello-world-app_default Now that the app is running, you can point a web browser at the DNS name or public IP of the Docker node on
Creating service hello-world-app_hello port 8080 and see the app. You must ensure traffic to port 8080 is allowed on
``` the connection form your browser to your Docker host.
The app is now deployed as a Docker stack and can be reached on port `8080` on your Docker host. Now change the port of the application using `docker app upgrade <app-name>` command.
```
Use the `docker stack rm hello-world-app` command to stop and remove the stack. You will need to ensure traffic to port 8080 is allowed on the connection form your browser to your Docker host. $ docker app upgrade my-app --hello.port=8181
Upgrading service my-app_hello
### Push the app to Docker Hub Application "my-app" upgraded on context "default"
```
As mentioned in the intro, `docker app` lets you manage entire applications the same way that we currently manage container images. For example, you can push and pull entire applications from registries like Docker Hub with `docker app push` and `docker app pull`. Other `docker app` commands, such as `install`, `upgrade`, and `render` can be performed directly on applications while they are stored in a registry.
You can uninstall the app with `docker app uninstall my-app`.
Let's see some examples.
#### Deploy as a Docker Compose app
Push the application to Docker Hub. To complete this step, you'll need a valid Docker ID and you'll need to be logged in to the registry you are pushing the app to.
The process for deploying as a Compose app comprises two major steps:
Be sure to replace the registry ID in the example below with your own.
1. Render the Docker app project as a `docker-compose.yml` file.
``` 2. Deploy the app using `docker-compose up`.
$ docker app push my-app --tag nigelpoulton/app-test:0.1.0
docker app push hello-world.dockerapp --tag nigelpoulton/app-test:0.1.0 You need a recent version of Docker Compose to complete these steps.
docker.io/nigelpoulton/app-test:0.1.0-invoc
hashicorp/http-echo Rendering is the process of reading the entire application configuration and outputting it as a single `docker-compose.yml` file. This creates a Compose file with hard-coded values wherever a parameter was specified as a variable.
application/vnd.docker.distribution.manifest.v2+json [2/2] (sha256:ba27d460...)
<Snip> Use the following command to render the app to a Compose file called `docker-compose.yml` in the current directory.
```
```
The app is now stored in the container registry. $ docker app render --output docker-compose.yml hello-world.dockerapp
```
### Install the app directly from Docker Hub
Check the contents of the resulting `docker-compose.yml` file.
Now that the app is pushed to the registry, try an `inspect` and `install` command against it. The location of your app will be different to the one shown in the examples.
```
``` $ cat docker-compose.yml
$ docker app inspect nigelpoulton/app-test:0.1.0 version: "3.6"
hello-world 0.1.0 services:
hello:
Service (1) Replicas Ports Image command:
----------- -------- ----- ----- - -text
hello 1 8080 nigelpoulton/app-test@sha256:ba27d460cd1f22a1a4331bdf74f4fccbc025552357e8a3249c40ae216275de96 - Hello world!
image: hashicorp/http-echo
Parameters (2) Value ports:
-------------- ----- - mode: ingress
port 8080 target: 5678
text Hello world! published: 8080
``` protocol: tcp
```
This action was performed directly against the app in the registry.
Notice that the file contains hard-coded values that were expanded based on the contents of the `Parameters`
Now install it as a native Docker App by referencing the app in the registry. section of the project's YAML file. For example, `${hello.text}` has been expanded to "Hello world!".
``` > **Note**: Almost all the `docker app` commands propose the `--set key=value` flag to override a default parameter.
$ docker app install nigelpoulton/app-test:0.1.0
Creating network hello-world_default Try to render the application with a different text:
Creating service hello-world_hello
``` ```
$ docker app render hello-world.dockerapp --set hello.text="Hello whales!"
Test that the app is working. version: "3.6"
services:
The app used in these examples is a simple web server that displays the text "Hello world!" on port 8080, your app may be different. hello:
command:
``` - -text
$ curl http://localhost:8080 - Hello whales!
Hello world! image: hashicorp/http-echo
``` ports:
- mode: ingress
Uninstall the app. target: 5678
published: 8080
``` protocol: tcp
$ docker app uninstall hello-world ```
Removing service hello-world_hello
Removing network hello-world_default Use `docker-compose up` to deploy the app.
```
```
You can see the name of your Docker App with the `docker stack ls` command. $ docker-compose up --detach
WARNING: The Docker Engine you're using is running in swarm mode.
## Convert an existing Compose app into a Docker App project <Snip>
```
Content TBA
The application is now running as a Docker Compose app and should be reachable on port `8080` on your Docker host.
You must ensure traffic to port `8080` is allowed on the connection from your browser to your Docker host.
You can use `docker-compose down` to stop and remove the application.
#### Deploy as a Docker Stack
Deploying the app as a Docker stack is a two-step process very similar to deploying it as a Docker Compose app.
1. Render the Docker app project as a `docker-compose.yml` file.
2. Deploy the app using `docker stack deploy`.
Complete the steps in the previous section to render the Docker app project as a Compose file and make sure
you're ready to deploy it as a Docker Stack. Your Docker host must be in Swarm mode.
```
$ docker stack deploy hello-world-app -c docker-compose.yml
Creating network hello-world-app_default
Creating service hello-world-app_hello
```
The app is now deployed as a Docker stack and can be reached on port `8080` on your Docker host.
Use the `docker stack rm hello-world-app` command to stop and remove the stack. You must ensure traffic to port `8080` is allowed on the connection from your browser to your Docker host.
### Push the app to Docker Hub
As mentioned in the introduction, `docker app` lets you manage entire applications the same way that you
currently manage container images. For example, you can push and pull entire applications from registries like
Docker Hub with `docker app push` and `docker app pull`. Other `docker app` commands, such
as `install`, `upgrade`, `inspect` and `render` can be performed directly on applications while they are
stored in a registry.
The following section contains some examples.
Push the application to Docker Hub. To complete this step, you need a valid Docker ID and you must be
logged in to the registry to which you are pushing the app.
Be sure to replace the registry ID in the following example with your own.
```
$ docker app push my-app --tag nigelpoulton/app-test:0.1.0
docker app push hello-world.dockerapp --tag nigelpoulton/app-test:0.1.0
docker.io/nigelpoulton/app-test:0.1.0-invoc
hashicorp/http-echo
application/vnd.docker.distribution.manifest.v2+json [2/2] (sha256:ba27d460...)
<Snip>
```
The app is now stored in the container registry.
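Because the app now lives in an ordinary registry repository, it can also be pulled back down like an image. A minimal sketch, reusing the example tag from above (replace it with your own repository):

```
$ docker app pull nigelpoulton/app-test:0.1.0
```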
### Push the app to DTR
Pushing an app to Docker Trusted Registry (DTR) involves the same procedure as [pushing an app to Docker Hub](#push-the-app-to-docker-hub) except that you need your DTR user credentials and [your DTR repository information](/ee/dtr/user/manage-images/review-repository-info/). To use client certificates for DTR authentication, see [Enable Client Certificate Authentication](/ee/enable-client-certificate-authentication/).
```bash
$ docker app push my-app --tag <dtr-fqdn>/nigelpoulton/app-test:0.1.0
<dtr-fqdn>/nigelpoulton/app-test:0.1.0-invoc
hashicorp/http-echo
application/vnd.docker.distribution.manifest.v2+json [2/2] (sha256:bd1a813b...)
Successfully pushed bundle to <dtr-fqdn>/nigelpoulton/app-test:0.1.0.
Digest is sha256:bd1a813b6301939fa46e617f96711e0cca1e4065d2d724eb86abde6ef7b18e23.
```
The app is now stored in your DTR.
### Install the app directly from Docker Hub or DTR
Now that the app is pushed to the registry, try an `inspect` and `install` command against it.
The location of your app is different from the one provided in the examples.
```
$ docker app inspect nigelpoulton/app-test:0.1.0
hello-world 0.1.0
Service (1) Replicas Ports Image
----------- -------- ----- -----
hello 1 8080 nigelpoulton/app-test@sha256:ba27d460cd1f22a1a4331bdf74f4fccbc025552357e8a3249c40ae216275de96
Parameters (2) Value
-------------- -----
hello.port 8080
hello.text Hello world!
```
This action was performed directly against the app in the registry. Note that for DTR, the application will be prefixed with the Fully Qualified Domain Name (FQDN) of your trusted registry.
Now install it as a native Docker App by referencing the app in the registry, with a different port.
```
$ docker app install nigelpoulton/app-test:0.1.0 --set hello.port=8181
Creating network hello-world_default
Creating service hello-world_hello
Application "hello-world" installed on context "default"
```
Test that the app is working.
The app used in these examples is a simple web server that displays the text "Hello world!" on port 8181; your app might be different.
```
$ curl http://localhost:8181
Hello world!
```
Uninstall the app.
```
$ docker app uninstall hello-world
Removing service hello-world_hello
Removing network hello-world_default
Application "hello-world" uninstalled on context "default"
```
You can see the name of your Docker App with the `docker stack ls` command.


@ -1,9 +1,13 @@
---
title: Docker Assemble (experimental)
description: Installing Docker Assemble
keywords: Assemble, Docker Enterprise, plugin, Spring Boot, .NET, c#, F#
---
>This is an experimental feature.
>
>{% include experimental.md %}
## Overview
Docker Assemble (`docker assemble`) is a plugin which provides a language and framework-aware tool that enables users to build an application into an optimized Docker container. With Docker Assemble, users can quickly build Docker images without providing configuration information (like a Dockerfile) by auto-detecting the required information from existing framework configuration.


@ -0,0 +1,105 @@
---
title: Working with Docker Buildx (experimental)
description: Working with Docker Buildx
keywords: Docker, buildx, multi-arch
---
>This is an experimental feature.
>
>{% include experimental.md %}
## Overview
Docker Buildx is a CLI plugin that extends the `docker` command with the full feature set of the [Moby BuildKit](https://github.com/moby/buildkit) builder toolkit. It provides the same user experience as `docker build`, with many new features such as creating scoped builder instances and building against multiple nodes concurrently.
## Install
Docker Buildx is included in Docker 19.03 and is also bundled with the following Docker Desktop releases. Note that you must enable the 'Experimental features' option to use Docker Buildx.
- Docker Desktop Enterprise version 2.1.0
- Docker Desktop Edge version 2.0.4.0 or higher
You can also download the latest `buildx` binary from the [Docker buildx](https://github.com/docker/buildx/) repository.
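If you download the binary manually, it is installed as a CLI plugin (typically under `~/.docker/cli-plugins/`). Either way, a quick way to confirm that the plugin is available is to ask for its version; the exact output varies by release:

```
$ docker buildx version
```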
## Build with `buildx`
To start a new build, run the command `docker buildx build .`
```
$ docker buildx build .
[+] Building 8.4s (23/32)
=> ...
```
Buildx builds using the BuildKit engine and does not require the `DOCKER_BUILDKIT=1` environment variable to start the builds.
The `docker buildx build` command supports features available for `docker build`, including the new features in Docker 19.03 such as outputs configuration, inline build caching, and specifying target platform. In addition, Buildx also supports new features that are not yet available for regular `docker build` like building manifest lists, distributed caching, and exporting build results to OCI image tarballs.
You can run Buildx in different configurations that are exposed through a driver concept. Currently, Docker supports a "docker" driver that uses the BuildKit library bundled into the docker daemon binary, and a "docker-container" driver that automatically launches BuildKit inside a Docker container.
The user experience of using Buildx is very similar across drivers. However, there are some features that are not currently supported by the "docker" driver, because the BuildKit library bundled into the docker daemon uses a different storage component. In contrast, all images built with the "docker" driver are automatically added to the `docker images` view by default, whereas when using other drivers, the method for outputting an image needs to be selected with `--output`.
## Work with builder instances
By default, Buildx uses the "docker" driver if it is supported, providing a user experience very similar to the native docker build. Note that you must use a local shared daemon to build your applications.
Buildx allows you to create new instances of isolated builders. You can use this to get a scoped environment for your CI builds that does not change the state of the shared daemon, or for isolating builds for different projects. You can create a new instance for a set of remote nodes, forming a build farm, and quickly switch between them.
You can create new instances using the `docker buildx create` command. This creates a new builder instance with a single node based on your current configuration.
To use a remote node, you can specify the `DOCKER_HOST` or the remote context name while creating the new builder. After creating a new instance, you can manage its lifecycle using the `inspect`, `stop`, and `rm` commands. To list all available builders, use `ls`. After creating a new builder, you can also append new nodes to it.
To switch between different builders, use `docker buildx use <name>`. After running this command, build commands automatically use this builder.
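Putting those commands together, a minimal builder lifecycle might look like the following sketch (the builder name `mybuilder` is only an example):

```
$ docker buildx create --name mybuilder
mybuilder
$ docker buildx ls
$ docker buildx use mybuilder
$ docker buildx inspect mybuilder
$ docker buildx rm mybuilder
```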
Docker 19.03 also features a new docker context command that you can use to provide names for remote Docker API endpoints. Buildx integrates with docker context to ensure all the contexts automatically get a default builder instance. You can also set the context name as the target when you create a new builder instance or when you add a node to it.
## Build multi-platform images
BuildKit is designed to work well for building for multiple platforms and not only for the architecture and operating system that the user invoking the build happens to run.
When you invoke a build, you can set the `--platform` flag to specify the target platform for the build output (for example, `linux/amd64`, `linux/arm64`, or `darwin/amd64`).
When the current builder instance is backed by the "docker-container" driver, you can specify multiple platforms together. In this case, it builds a manifest list which contains images for all of the specified architectures. When you use this image in `docker run` or `docker service`, Docker picks the correct image based on the node's platform.
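For example, with a builder backed by the "docker-container" driver, a multi-platform build that pushes the resulting manifest list to a registry might look like the following sketch, where `<user>/<image>` is a placeholder for a repository you can push to:

```
$ docker buildx build --platform linux/amd64,linux/arm64 -t <user>/<image>:latest --push .
```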
You can build multi-platform images using three different strategies that are supported by Buildx and Dockerfiles:
1. Using the QEMU emulation support in the kernel
2. Building on multiple native nodes using the same builder instance
3. Using a stage in Dockerfile to cross-compile to different architectures
QEMU is the easiest way to get started if your node already supports it (for example, if you are using Docker Desktop). It requires no changes to your Dockerfile, and BuildKit automatically detects the secondary architectures that are available. When BuildKit needs to run a binary for a different architecture, it automatically loads it through a binary registered in the `binfmt_misc` handler.
Using multiple native nodes provides better support for more complicated cases that are not handled by QEMU, and generally offers better performance. You can add additional nodes to the builder instance using the `--append` flag.
```bash
# assuming contexts node-amd64 and node-arm64 exist in "docker context ls"
$ docker buildx create --use --name mybuild node-amd64
mybuild
$ docker buildx create --append --name mybuild node-arm64
$ docker buildx build --platform linux/amd64,linux/arm64 .
```
Finally, depending on your project, the language that you use may have good support for cross-compilation. In that case, multi-stage builds in Dockerfiles can be effectively used to build binaries for the platform specified with `--platform` using the native architecture of the build node. A list of build arguments like `BUILDPLATFORM` and `TARGETPLATFORM` is available automatically inside your Dockerfile and can be leveraged by the processes running as part of your build.
```
FROM --platform=$BUILDPLATFORM golang:alpine AS build
ARG TARGETPLATFORM
ARG BUILDPLATFORM
RUN echo "I am running on $BUILDPLATFORM, building for $TARGETPLATFORM" > /log
FROM alpine
COPY --from=build /log /log
```
## High-level build options
Buildx also aims to provide support for high-level build concepts that go beyond invoking a single build command.
BuildKit efficiently handles multiple concurrent build requests and de-duplicates work. Build commands can be combined with general-purpose command runners (for example, `make`). However, these tools generally invoke builds in sequence and therefore cannot leverage the full potential of BuildKit parallelization, or combine BuildKit's output for the user. For this use case, we have added a command called `docker buildx bake`.
The `bake` command supports building images from compose files, similar to a compose build, but allowing all the services to be built concurrently as part of a single request.
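As a sketch, assuming the current directory contains a `docker-compose.yml` whose services have `build` definitions:

```
$ docker buildx bake -f docker-compose.yml
```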
## Set `buildx` as the default builder
Running the command `docker buildx install` sets up the `docker builder` command as an alias to `docker buildx`. This lets `docker build` use the current Buildx builder.
To remove this alias, run `docker buildx uninstall`.


@ -398,5 +398,5 @@ All provisioned resources are destroyed and the context for the cluster is remov
## Where to go next
- View the quick start guide for [Azure](azure.md) or [vSphere](vsphere.md)
- [Explore the full list of Cluster commands](/engine/reference/commandline/cluster/)
- [Cluster configuration file reference](./cluster-file.md)

cluster/cluster-file.md Normal file

@ -0,0 +1,529 @@
---
description: Cluster file reference and guidelines
keywords: documentation, docs, docker, cluster, infrastructure, automation
title: Cluster file version 1 reference
toc_max: 5
toc_min: 1
---
This topic describes version 1 of the Cluster file format.
## Cluster file structure and examples
<div class="panel panel-default">
<div class="panel-heading collapsed" data-toggle="collapse" data-target="#collapseSample1" style="cursor: pointer">
Example Cluster file version 1
<i class="chevron fa fa-fw"></i></div>
<div class="collapse block" id="collapseSample1">
<pre><code>
variable:
domain: "YOUR DOMAIN, e.g. docker.com"
subdomain: "A SUBDOMAIN, e.g. cluster"
region: "THE AWS REGION TO DEPLOY, e.g. us-east-1"
email: "YOUR.EMAIL@COMPANY.COM"
ucp_password:
type: prompt
provider:
acme:
email: ${email}
server_url: https://acme-staging-v02.api.letsencrypt.org/directory
aws:
region: ${region}
cluster:
dtr:
version: docker/dtr:2.6.5
engine:
version: ee-stable-18.09.5
ucp:
username: admin
password: ${ucp_password}
version: docker/ucp:3.1.6
resource:
aws_instance:
managers:
instance_type: t2.xlarge
os: Ubuntu 16.04
quantity: 3
registry:
instance_type: t2.xlarge
os: Ubuntu 16.04
quantity: 3
workers:
instance_type: t2.xlarge
os: Ubuntu 16.04
quantity: 3
aws_lb:
apps:
domain: ${subdomain}.${domain}
instances:
- workers
ports:
- 80:8080
- 443:8443
dtr:
domain: ${subdomain}.${domain}
instances:
- registry
ports:
- 443:443
ucp:
domain: ${subdomain}.${domain}
instances:
- managers
ports:
- 443:443
- 6443:6443
aws_route53_zone:
dns:
domain: ${domain}
subdomain: ${subdomain}
</code></pre>
</div>
</div>
The topics on this reference page are organized alphabetically by top-level keys
to reflect the structure of the Cluster file. Top-level keys that define
a section in the configuration file, such as `cluster`, `provider`, and `resource`,
are listed with the options that support them as sub-topics. This information
maps to the indent structure of the Cluster file.
### cluster
Specifies components to install and configure for a cluster.
The following components are available:
- `subscription`: (Optional) A string value representing the subscription ID.
- `license`: (Optional) A path to the cluster's license file.
- `cloudstor`: (Optional) Configuration options for Docker CloudStor
- `dtr`: (Optional) Configuration options for Docker Trusted Registry
- `engine`: (Optional) Configuration options for Docker Engine
- `ucp`: (Optional) Configuration options for Docker Universal Control Plane
- `registry`: (Optional) Configuration options for authenticating nodes with a registry to pull Docker images.
#### cloudstor
Customizes the installation of Docker Cloudstor.
- `version`: (Optional) The version of Cloudstor to install. Default is `1.0`
- `use_efs`: (Optional) Specifies whether an Elastic File System should be provisioned. Defaults to `false`.
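For example, a `cloudstor` entry might look like the following sketch (the values shown are illustrative):

```yaml
cluster:
  cloudstor:
    version: "1.0"
    use_efs: true
```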
#### dtr
Customizes the installation of Docker Trusted Registry.
```yaml
cluster:
dtr:
version: "docker/dtr:2.6.5"
install_options:
- "--debug"
- "--enable-pprof"
```
The following optional elements can be specified:
- `version`: (Optional) The version of DTR to install. Defaults to `docker/dtr:2.6.5`.
- `ca`: (Optional) The path to a root CA public certificate.
- `key`: (Optional) The path to a TLS private key.
- `cert`: (Optional) The path to a public key certificate.
- `install_options`: (Optional) Additional [DTR install options](https://docs.docker.com/reference/dtr/2.6/cli/install/)
#### engine
Customizes the installation of Docker Enterprise Engine.
```yaml
cluster:
engine:
channel: "stable"
edition: "ee"
version: "19.03"
```
The following optional elements can be specified:
- `version`: (Optional) The version of the Docker Engine to install. Defaults to `19.03`.
- `edition`: (Optional) The family of Docker Engine to install. Defaults to `ee` for Enterprise edition.
- `channel`: (Optional) The channel on the repository to pull updated packages. Defaults to `stable`.
- `url`: (Optional) Defaults to "https://storebits.docker.com/ee".
- `storage_driver`: (Optional) The storage driver to use for the storage volume. Default
value is dependent on the operating system.
- Amazon Linux 2 is `overlay2`.
- CentOS is `overlay2`.
- Oracle Linux is `overlay2`.
- RedHat is `overlay2`.
- SLES is `btrfs`.
- Ubuntu is `overlay2`.
- `storage_fstype`: (Optional) File system to use for storage volume. Default value is dependent on the operating system.
- Amazon Linux 2 is `xfs`.
- CentOS is `xfs`.
- Oracle Linux is `xfs`.
- RedHat is `xfs`.
- SLES is `btrfs`.
- Ubuntu is `ext4`.
- `storage_volume`: (Optional) Docker storage volume path for `/var/lib/docker`. Default value is provider-dependent.
- AWS
- non-NVME is `/dev/xvdb`.
- NVME disks are one of `/dev/nvme[0-26]n1`.
- Azure is `/dev/disk/azure/scsi1/lun0`.
- `daemon`: (Optional) Provides docker daemon options. Defaults to "".
- `ca`: (dev) Defaults to "".
- `key`: (dev) Defaults to "".
- `enable_remote_tcp`: (dev) Enables direct access to docker engine. Defaults to `false`.
*dev indicates that the functionality is only for development and testing.
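As an illustration of the storage-related options, the following sketch pins them explicitly; the values shown match the documented defaults for CentOS on AWS (non-NVME):

```yaml
cluster:
  engine:
    version: "19.03"
    storage_driver: "overlay2"
    storage_fstype: "xfs"
    storage_volume: "/dev/xvdb"
```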
#### kubernetes
Enables provider-specific options for Kubernetes support.
##### AWS Kubernetes options
- `cloud_provider`: (Optional) Enables cloud provider support for Kubernetes. Defaults to `false`.
- `ebs_persistent_volumes`: (Optional) Enable persistent volume support with EBS volumes. Defaults to `false`.
- `efs_persistent_volumes`: (Optional) Enable persistent volume support with EFS. Defaults to `false`.
- `load_balancer`: (Optional) Enable Kubernetes pods to instantiate a load-balancer. Defaults to `false`.
- `nfs_storage`: (Optional) Install additional packages on node for NFS support. Defaults to `false`.
- `lifecycle`: (Optional) Defaults to `owned`.
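A sketch of what these options might look like, assuming they nest under `cluster` in the same way as the other components on this page (values are illustrative):

```yaml
cluster:
  kubernetes:
    cloud_provider: true
    ebs_persistent_volumes: true
    load_balancer: true
```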
#### registry
Customizes the registry from which the installation should pull images. By default, Docker Hub and credentials to access Docker Hub are used.
```yaml
cluster:
registry:
password: ${base64decode("TVJYeTNDQWpTSk5HTW1ZRzJQcE1kM0tVRlQ=")}
url: https://index.docker.io/v1/
username: user
```
The following optional elements can be specified:
- `username`: The username for logging in to the registry on each node. Default value is the current docker user.
- `url`: The registry to use for pulling Docker images. Defaults to "https://index.docker.io/v1/".
- `password`: The password for logging in to the registry on each node. Default value is the current docker user's password base64 encoded and wrapped in a call to base64decode.
#### ucp
- `version`: Specifies the version of UCP to install. Defaults to `docker/ucp:3.1.6`.
- `username`: Specifies the username of the first user to create in UCP. Defaults to `admin`.
- `password`: Specifies the password of the first user to create in UCP. Defaults to `dockerdocker`.
- `ca`: Specifies a path to a root CA public certificate.
- `key`: Specifies a path to a TLS private key.
- `cert`: Specifies a path to a public key certificate.
- `install_options`: Lists additional [UCP install options](https://docs.docker.com/reference/ucp/3.1/cli/install/)
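Combining these options, a `ucp` entry might look like the following sketch (the values are taken from the sample Cluster file at the top of this page):

```yaml
cluster:
  ucp:
    username: admin
    password: ${ucp_password}
    version: docker/ucp:3.1.6
```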
##### Additional UCP configuration options:
Docker Cluster also accepts all UCP configuration options and creates the initial UCP config on
installation. The following list provides supported options:
- `anonymize_tracking`: Anonymizes analytic data. Specify 'true' to hide the license ID. Defaults to 'false'.
- `audit_level`: Specifies the audit logging level. Leave empty for disabling audit logs (default).
Other valid values are 'metadata' and 'request'.
- `auto_refresh`: Specify 'true' to enable attempted automatic license renewal when the license
nears expiration. If disabled, you must manually upload renewed license after expiration. Defaults to 'true'.
- `azure_ip_count`: Sets the IP count for the Azure allocator to allocate IPs per Azure virtual machine.
- `backend`: Specifies the name of the authorization backend to use, either 'managed' or 'ldap'. Defaults to 'managed'.
- `calico_mtu`: Specifies the MTU (maximum transmission unit) size for the Calico plugin. Defaults to '1480'.
- `cloud_provider`: Specifies the cloud provider for the kubernetes cluster.
- `cluster_label`: Specifies a label to be included with analytics.
- `cni_installer_url`: Specifies the URL of a Kubernetes YAML file to be used for installing a CNI plugin.
Only applies during initial installation. If empty, the default CNI plugin is used.
- `controller_port`: Configures the port that the 'ucp-controller' listens to. Defaults to '443'.
- `custom_header_name`: Specifies the name of the custom header with 'name' = '*X-Custom-Header-Name*'.
- `custom_header_value`: Specifies the value of the custom header with 'value' = '*Custom Header Value*'.
- `default_new_user_role`: Specifies the role that new users get for their private resource sets.
Values are 'admin', 'viewonly', 'scheduler', 'restrictedcontrol', or 'fullcontrol'. Defaults to 'restrictedcontrol'.
- `default_node_orchestrator`: Specifies the type of orchestrator to use for new nodes that are
joined to the cluster. Can be 'swarm' or 'kubernetes'. Defaults to 'swarm'.
- `disable_tracking`: Specify 'true' to disable analytics of API call information. Defaults to 'false'.
- `disable_usageinfo`: Specify 'true' to disable analytics of usage information. Defaults to 'false'.
- `dns`: Specifies a CSV list of IP addresses to add as nameservers.
- `dns_opt`: Specifies a CSV list of options used by DNS resolvers.
- `dns_search`: Specifies a CSV list of domain names to search when a bare unqualified hostname is
used inside of a container.
- `enable_admin_ucp_scheduling`: Specify 'true' to allow admins to schedule on containers on manager nodes.
Defaults to 'false'.
- `external_service_lb`: Specifies an optional external load balancer for default links to services with
exposed ports in the web interface.
- `host_address`: Specifies the address for connecting to the DTR instance tied to this UCP cluster.
- `log_host`: Specifies a remote syslog server to send UCP controller logs to. If omitted, controller
logs are sent through the default docker daemon logging driver from the 'ucp-controller' container.
- `idpMetadataURL`: Specifies the Identity Provider Metadata URL.
- `image_repository`: Specifies the repository to use for UCP images.
- `install_args`: Specifies additional arguments to pass to the UCP installer.
- `ipip_mtu`: Specifies the IPIP MTU size for the calico IPIP tunnel interface.
- `kube_apiserver_port`: Configures the port to which the Kubernetes API server listens.
- `kv_snapshot_count`: Sets the key-value store snapshot count setting. Defaults to '20000'.
- `kv_timeout`: Sets the key-value store timeout setting, in milliseconds. Defaults to '5000'.
- `lifetime_minutes`: Specifies the initial session lifetime, in minutes. Defaults to `4320`, which is 72 hours.
- `local_volume_collection_mapping`: Stores data about collections for volumes in UCP's local KV store
instead of on the volume labels. This is used for enforcing access control on volumes.
- `log_level`: Specifies the logging level for UCP components. Values are syslog priority
levels (https://linux.die.net/man/5/syslog.conf): 'debug', 'info', 'notice', 'warning', 'err', 'crit', 'alert',
and 'emerg'.
- `managedPasswordDisabled`: Indicates if managed password is disabled. Defaults to false.
- `managedPasswordFallbackUser`: The fallback user when the managed password authentication is disabled. Defaults to "".
- `manager_kube_reserved_resources`: Specifies reserve resources for Docker UCP and Kubernetes components
that are running on manager nodes.
- `metrics_disk_usage_interval`: Specifies the interval for how frequently storage metrics are gathered.
This operation can impact performance when large volumes are present.
- `metrics_retention_time`: Adjusts the metrics retention time.
- `metrics_scrape_interval`: Specifies the interval for how frequently managers gather metrics from nodes in the cluster.
- `nodeport_range`: Specifies the port range in which Kubernetes services of type NodePort can be exposed.
Defaults to '32768-35535'.
- `per_user_limit`: Specifies the maximum number of sessions that a user can have active simultaneously. If
the creation of a new session would put a user over this limit, the least recently used session is deleted.
A value of zero disables limiting the number of sessions that users can have. Defaults to `5`.
- `pod_cidr`: Specifies the subnet pool from which the IP for the Pod should be allocated from the CNI ipam plugin.
- `profiling_enabled`: Specify 'true' to enable specialized debugging endpoints for profiling UCP performance.
Defaults to 'false'.
- `log_protocol`: Specifies the protocol to use for remote logging. Values are 'tcp' and 'udp'. Defaults to 'tcp'.
- `renewal_threshold_minutes`: Specifies the length of time, in minutes, before the expiration of a
session. When used, a session is extended by the current configured lifetime from that point in time. A zero value disables session extension. Defaults to `1440`, which is 24 hours.
- `require_content_trust`: Specify 'true' to require images be signed by content trust. Defaults to 'false'.
- `require_signature_from`: Specifies a csv list of users or teams required to sign images.
- `rethinkdb_cache_size`: Sets the size of the cache used by UCP's RethinkDB servers. Defaults to 1GB,
but leaving this field empty or specifying `auto` instructs RethinkDB to determine a cache size automatically.
- `rootCerts`: Defaults to empty.
- `samlEnabled`: Indicates if SAML is used.
- `samlLoginText`: Specifies the customized SAML login button text.
- `service_id`: Specifies the DTR instance's OpenID Connect Client ID, as registered with the Docker
authentication provider.
- `spHost`: Specifies the Service Provider Host.
- `storage_driver`: Specifies the UCP storage driver to install.
- `support_dump_include_audit_logs`: When set to `true`, support dumps include audit logs in the logs
of the 'ucp-controller' container of each manager node. Defaults to 'false'.
- `swarm_port`: Configures the port that the 'ucp-swarm-manager' listens to. Defaults to '2376'.
- `swarm_strategy`: Configures placement strategy for container scheduling.
This doesn't affect swarm-mode services. Values are 'spread', 'binpack', and 'random'.
- `tlsSkipVerify`: Specifies TLS Skip verify for IdP Metadata.
- `unmanaged_cni`: Defaults to 'false'.
- `worker_kube_reserved_resources`: Reserves resources for Docker UCP and Kubernetes components
that are running on worker nodes.
- `custom_kube_api_server_flags`: Specifies the configuration options for the Kubernetes API server. (dev)
- `custom_kube_controller_manager_flags`: Specifies the configuration options for the Kubernetes controller manager. (dev)
- `custom_kube_scheduler_flags`: Specifies the configuration options for the Kubernetes scheduler. (dev)
- `custom_kubelet_flags`: Specifies the configuration options for Kubelets. (dev)
*dev indicates that the functionality is only for development and testing. Arbitrary Kubernetes configuration parameters are not tested and supported under the Docker Enterprise Software Support Agreement.
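A sketch of how a few of these options might be combined with the basic `ucp` settings, assuming they are nested directly under `ucp` in the same way (the option values are illustrative):

```yaml
cluster:
  ucp:
    version: docker/ucp:3.1.6
    username: admin
    password: ${ucp_password}
    audit_level: metadata
    default_node_orchestrator: kubernetes
    per_user_limit: 10
```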
### provider
Defines where the cluster's resources are provisioned, as well as provider-specific configuration such as tags.
{% raw %}
```yaml
provider:
acme:
email: ${email}
server_url: https://acme-staging-v02.api.letsencrypt.org/directory
aws:
region: ${region}
```
{% endraw %}
#### acme
The Automated Certificate Management Environment (ACME) is an evolving standard for automating the issuance of domain-validated certificates. Docker Cluster uses the ACME provider to create SSL certificates that are signed by [Let's Encrypt](https://letsencrypt.org/).
Configuration for the ACME provider supports arguments that closely align with the [Terraform ACME provider](https://www.terraform.io/docs/providers/acme/index.html).
The following elements can be specified:
- `email`: (Required) The email to associate the certificates with.
- `server_url`: (Optional) The URL of the ACME endpoint's directory. Defaults to "https://acme-v02.api.letsencrypt.org/directory".
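For example, a minimal sketch that relies on the default production endpoint by omitting `server_url`; the email address is a placeholder:
```yaml
provider:
  acme:
    email: admin@example.com
```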
#### aws
Configuration for the AWS provider supports arguments that closely align with the [Terraform AWS provider](https://www.terraform.io/docs/providers/aws/index.html).
```yaml
aws:
region: "us-east-1"
tags:
Owner: "Infra"
Environment: "Test"
```
The following elements can be specified:
- `region` - (Required) This is the AWS region. It can be sourced from the `AWS_DEFAULT_REGION` environment variable, or
via a shared credentials file if `profile` is specified.
- `tags` - (Optional) Additional name value pairs to assign to every resource (which
supports tagging) in the cluster.
- `access_key` - (Required) This is the AWS access key. It can be sourced from
the `AWS_ACCESS_KEY_ID` environment variable, or via
a shared credentials file if `profile` is specified.
- `secret_key` - (Required) This is the AWS secret key. It can be sourced from
the `AWS_SECRET_ACCESS_KEY` environment variable, or
via a shared credentials file if `profile` is specified.
- `profile` - (Optional) This is the AWS profile name as set in the shared credentials
file.
- `assume_role` - (Optional) An `assume_role` block (documented below). Only one
`assume_role` block can be in the configuration.
- `endpoints` - (Optional) Configuration block for customizing service endpoints. See the
[Custom Service Endpoints Guide](https://www.terraform.io/docs/providers/aws/guides/custom-service-endpoints.html)
for more information about connecting to alternate AWS endpoints or AWS-compatible solutions.
- `shared_credentials_file` - (Optional) This is the path to the shared
credentials file. If this is not set and a profile is specified,
`~/.aws/credentials` is used.
- `token` - (Optional) Session token for validating temporary credentials.
Typically provided after successful identity federation or Multi-Factor
Authentication (MFA) login. With MFA login, this is the session token
provided afterwards, not the 6 digit MFA code used to get temporary
credentials. It can also be sourced from the `AWS_SESSION_TOKEN`
environment variable.
- `max_retries` - (Optional) This is the maximum number of times an API
call is retried, in the case where requests are being throttled or
experiencing transient failures. The delay between the subsequent API
calls increases exponentially.
- `allowed_account_ids` - (Optional) List of allowed AWS account IDs, used to prevent you
from mistakenly using an incorrect account (and potentially destroying a live environment).
Conflicts with `forbidden_account_ids`.
- `forbidden_account_ids` - (Optional) List of forbidden AWS account IDs, used to prevent you
from mistakenly using the wrong account (and potentially destroying a live environment).
Conflicts with `allowed_account_ids`.
- `insecure` - (Optional) Explicitly allows the provider to
perform "insecure" SSL requests. If omitted, defaults to `false`.
- `skip_credentials_validation` - (Optional) Skips the credentials
validation via the STS API. Useful for AWS API implementations that do
not have STS available or implemented.
- `skip_get_ec2_platforms` - (Optional) Skips getting the supported EC2
platforms. Used by users that don't have `ec2:DescribeAccountAttributes`
permissions.
- `skip_region_validation` - (Optional) Skips validation of provided region name.
Useful for AWS-like implementations that use their own region names
or to bypass the validation for regions that aren't publicly available yet.
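For example, a sketch of an `aws` provider block that relies on a named profile from a shared credentials file; the profile name and tag values are placeholders:
```yaml
provider:
  aws:
    region: "us-east-1"
    profile: "docker-cluster"
    shared_credentials_file: "~/.aws/credentials"
    max_retries: 5
    tags:
      Owner: "Infra"
      Environment: "Test"
```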
### resource
Resources to provision for a cluster. Resources are organized as shown in the following example:
```yaml
resource:
type:
name:
parameters
```
For a given `type`, there may be one or more named resources to provision.
For a given `name`, a resource may have one or more parameters.
#### aws_instance
```yaml
resource:
  aws_instance:
    workers:
      quantity: 3
      instance_type: t2.xlarge
      os: Ubuntu 16.04
```
- `quantity`: (Required) The number of instances to create.
- `os`: An alias that is expanded by `docker cluster` to the AMI owner and AMI name to install.
The following aliases are supported by `docker cluster`:
- `CentOS 7`
- `RHEL 7.1`
- `RHEL 7.2`
- `RHEL 7.3`
- `RHEL 7.4`
- `RHEL 7.5`
- `RHEL 7.6`
- `Oracle Linux 7.3`
- `Oracle Linux 7.4`
- `Oracle Linux 7.5`
- `SLES 12.2`
- `SLES 12.3`
- `SLES 15`
- `Ubuntu 14.04`
- `Ubuntu 16.04`
- `Ubuntu 18.04`
- `Windows Server 2016`
- `Windows Server 1709`
- `Windows Server 1803`
- `Windows Server 2019`
> Note: Make sure the OS you select is [compatible](https://success.docker.com/article/compatibility-matrix)
> with the product you're installing. Docker Cluster validates OS support during installation.
- `instance_type`: Specifies the [AWS instance type](https://aws.amazon.com/ec2/instance-types/) to provision.
- `key_name`: By default, Docker Cluster creates an [AWS EC2 Key Pair](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) and registers it with AWS for the cluster.
To use an existing AWS EC2 Key Pair, set this value to the name of the AWS EC2 Key Pair.
- `ssh_private_key`: By default, Docker Cluster creates an [AWS EC2 Key Pair](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) and registers it with AWS for the cluster. To use an existing AWS EC2 Key Pair, set this value to the path of the private SSH key.
- `username`: Specifies the username for the node with administrative privileges. By default, the `os` option
sets this to the well-known username for the AMI (which varies by distribution):
- Amazon Linux 2 is `ec2-user`.
- Centos is `centos`.
- Oracle Linux is `ec2-user`.
- RedHat is `ec2-user`.
- SLES is `ec2-user`.
- Ubuntu is `ubuntu`.
- Windows is `Administrator`.
- `password`: This value is only used by Windows nodes. By default, Windows nodes have a random password generated.
- `ami`: Specifies a custom AMI, or one that's not currently available as an `os` alias. Specify either the `id`, or
the `owner` and `name` to query for the latest matching AMI (see the sketch after this list).
- `id`: Specifies the ID of the AMI. For example, `ami-0510c89f1a2691cf2`.
- `owner`: Specifies the AWS account ID of the image owner. For example, `099720109477`.
- `name`: Specifies the name of the AMI that was provided during image creation. For example, `ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-*`.
- `platform`: Specify `windows` for Windows instances.
- `tags`: (Optional) Specifies additional name value pairs to assign to every instance.
- `swarm_labels`: (Optional) Specifies additional key value pairs that represent swarm labels to apply to every node.
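For illustration, the following sketch provisions workers from a custom AMI selected by owner and name, and applies tags and swarm labels. The `owner` and `name` values repeat the examples given above; the remaining values are hypothetical:
```yaml
resource:
  aws_instance:
    workers:
      quantity: 3
      instance_type: t2.xlarge
      username: ubuntu
      ami:
        owner: "099720109477"
        name: "ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-*"
      tags:
        Environment: "Test"
      swarm_labels:
        role: "worker"
```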
#### aws_spot_instance_request
Provisions a spot instance request in AWS to dramatically reduce the cost of instances. Spot instance
availability is not guaranteed, so use `aws_spot_instance_request` for
additional worker nodes rather than for mission-critical nodes such as managers and registry nodes.
```yaml
resource:
aws_spot_instance_request:
workers:
instance_type: t2.xlarge
price: 0.25
os: Ubuntu 16.04
quantity: 3
```
Supports the same set of parameters as [aws_instance](#aws_instance), with the addition of an optional `price` to limit the maximum bid for a spot instance.
- `price`: (Optional) Specifies a maximum price to bid on the spot instance.
#### aws_lb
Provisions an AWS Load Balancer.
```yaml
resource:
aws_lb:
ucp:
domain: "example.com"
instances:
- managers
ports:
- 443:443
- 6443:6443
```
The following options are supported:
- `instances`: (Required) Specifies a list of `aws_instance` and `aws_spot_instance_request` names to
attach to the load balancer.
- `ports`: (Required) Specifies a list of `listening port[/protocol]:target port[/protocol]` mappings
to define how the load balancer should route traffic. By default, the protocol is `tcp`.
- `domain`: Specifies the domain in which to create DNS records for this load balancer. The record name combines
the resource name and the domain. For example, if the resource is `ucp` and the domain is `example.com`,
the `A` record is `ucp.example.com`.
- `internal`: (Optional) Defaults to `false`.
- `type`: (Optional) Defaults to `network`.
- `enable_cross_zone_load_balancing`: (Optional) Defaults to `false`.
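As a further sketch, the following load balancer routes HTTP and HTTPS traffic to the `workers` instances using the `listening port[/protocol]:target port[/protocol]` syntax described above. The domain and ports are placeholders, and the explicit `/tcp` suffix only illustrates the optional protocol component (`tcp` is already the default):
```yaml
resource:
  aws_lb:
    apps:
      domain: "example.com"
      instances:
        - workers
      ports:
        - 80:8080/tcp
        - 443:8443/tcp
```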
#### aws_route53_zone
Creates a subdomain in an AWS route53 zone. The following example creates a public zone for `testing.example.com`:
```yaml
resource:
aws_route53_zone:
dns:
domain: example.com
subdomain: testing
```
The following elements are required:
- `domain`: (Required) Specifies the name of the hosted zone.
- `subdomain`: (Required) Specifies the subdomain to create in the `domain` hosted zone.
### variable
Docker Cluster supports basic parameterization. The `variable` section defines a map of keys and values. A key can have a sub-key named `type`, which changes the behavior of the variable.
```yaml
variable:
region: "us-east-1"
password:
type: prompt
```
Variables are referenced in the cluster definition as `${variable_name}`. For example, `${region}` is substituted with `us-east-1` throughout the cluster definition.
The type defines how the variable behaves. This is currently limited in scope to:
- `prompt`: Requests the value from the user and does not echo characters as the value is entered.
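To illustrate how variables are consumed, the following sketch references the prompted `password` and the `region` variable elsewhere in the file. The structure follows the examples in this reference, and the values are placeholders:
{% raw %}
```yaml
variable:
  region: "us-east-1"
  password:
    type: prompt

provider:
  aws:
    region: ${region}

cluster:
  ucp:
    username: admin
    password: ${password}
```
{% endraw %}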
@ -19,30 +19,34 @@ Using Docker Cluster is a three-step process:
A `cluster.yml` file resembles the following example:
{% raw %}
```yaml
variable:
  region: us-east-2
  ucp_password:
    type: prompt

provider:
  aws:
    region: ${region}

cluster:
  engine:
    version: "ee-stable-18.09.5"
  ucp:
    version: "docker/ucp:3.1.6"
    username: "admin"
    password: ${ucp_password}

resource:
  aws_instance:
    managers:
      quantity: 1
```
{% endraw %}
For more information about Cluster files, refer to the
[Cluster file reference](cluster-file.md).
@ -52,9 +56,9 @@ Docker Cluster has commands for managing the whole lifecycle of your cluster:
* View the status of clusters
* Backup and Restore clusters
## Cluster reference pages
- [Get started with Docker Cluster on AWS](aws.md)
- [Command line reference](/engine/reference/commandline/cluster/)
- [Cluster file reference](./cluster-file.md)
@ -1,21 +0,0 @@
---
description: Back up a running cluster
keywords: documentation, docs, docker, cluster, infrastructure, automation
title: docker cluster backup
notoc: true
---
## Usage
```
docker cluster backup [OPTIONS] cluster
```
Use the following options as needed to back up a running cluster:
- `--dry-run`: Skips resource provisioning.
- `--file string`: Specifies a cluster backup filename. Defaults to `backup.tar.gz`.
- `--log-level string`: Specifies the logging level. Valid values include: `trace`, `debug`, `info`, `warn`, `error`, and `fatal`. Defaults to `warn`.
- `--passphrase string`: Specifies a cluster backup passphrase.
The backup command performs a full Docker Cluster backup following the steps found in [Backup and Restore Best Practices](https://success.docker.com/article/backup-restore-best-practices).
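For illustration, an example invocation is sketched below; the cluster name `quickstart`, the backup filename, and the passphrase are placeholders:
```
docker cluster backup --file quickstart-backup.tar.gz --passphrase "my passphrase" quickstart
```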
---
description: Cluster CLI reference
keywords: documentation, docs, docker, cluster, infrastructure, automation
title: Cluster command-line reference
notoc: true
---
The following pages describe the usage information for the [docker cluster](overview) subcommands. You can also view this information by running `docker cluster [subcommand] --help` from the command line.
* [docker cluster](overview)
* [backup](backup)
* [create](create)
* [inspect](inspect)
* [logs](logs)
* [ls](ls)
* [restore](restore)
* [rm](rm)
* [update](update)
* [version](version)
## Where to go next
* [CLI environment variables](envvars)
* [docker cluster command](overview)
---
description: Inspect clusters
keywords: documentation, docs, docker, cluster, infrastructure, automation
title: docker cluster inspect
notoc: true
---
## Usage
```
docker cluster inspect [OPTIONS] cluster
```
Use the following options as needed to display detailed information about a cluster:
- `-a, --all`: Displays complete information about the cluster.
- `--dry-run`: Skips resource provisioning.
- `--log-level string`: Specifies the logging level. Valid values include: `trace`, `debug`, `info`, `warn`, `error`, and `fatal`. Defaults to `warn`.
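For illustration, an example invocation that displays complete information about a cluster; the cluster name `quickstart` is a placeholder:
```
docker cluster inspect --all quickstart
```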
---
description: List all available clusters
keywords: documentation, docs, docker, cluster, infrastructure, automation
title: docker cluster ls
notoc: true
---
## Usage
```
docker cluster ls [OPTIONS]
```
Use the following options as needed to list all available clusters:
- `--dry-run`: Skips resource provisioning.
- `--log-level string`: Specifies the logging level. Valid values include: `trace`, `debug`, `info`, `warn`, `error`, and `fatal`. Defaults to `warn`.
- `-q`, `--quiet`: Displays only numeric IDs.
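For illustration, the following example lists only the numeric IDs of the available clusters:
```
docker cluster ls --quiet
```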
---
description: Overview of docker cluster CLI
keywords: documentation, docs, docker, cluster, infrastructure, automation
title: Overview of docker cluster CLI
---
This page provides usage information for the `docker cluster` CLI plugin command options.
You can also view this information by running `docker cluster --help` from the
command line.
## Usage
```
docker cluster [Options] [Commands]
```
Options:
- `--dry-run`: Skips resource provisioning.
- `--log-level string`: Specifies the logging level. Valid values include: `trace`, `debug`, `info`, `warn`, `error`, and `fatal`. Defaults to `warn`.
Commands:
- `backup`: Backs up a running cluster.
- `begin`: Creates an example cluster declaration.
- `create`: Creates a new Docker cluster.
- `inspect`: Provides detailed information about a cluster.
- `logs`: Fetches cluster logs.
- `ls`: Lists all available clusters.
- `restore`: Restores a cluster from a backup.
- `rm`: Removes a cluster.
- `update`: Updates a running cluster's desired state.
- `version`: Displays Version, Commit, and Build type.
Run 'docker cluster [Command] --help' for more information about a command.
## Specify name and path of one or more cluster files
Use the `-f` flag to specify the location of a cluster configuration file.
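For example, a hypothetical invocation that creates a cluster from a file in the current directory, assuming the `create` subcommand accepts `-f` as described here; the file name is a placeholder:
```
docker cluster create -f my-cluster.yml
```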
## Set up environment variables
You can set [environment variables](envvars) for various
`docker cluster` options, including the `-f` and `-p` flags.
## Where to go next
* [CLI environment variables](envvars)