Merge pull request #1281 from docker/amberjack
Sync Amberjack with Master
@@ -22,7 +22,7 @@ exclude: ["_scripts", "apidocs/layouts", "Gemfile", "hooks", "index.html", "404.
latest_engine_api_version: "1.40"
docker_ce_version: "19.03"
docker_ee_version: "19.03"
compose_version: "1.25.0"
compose_version: "1.24.1"
compose_file_v3: "3.7"
compose_file_v2: "2.4"
machine_version: "0.16.1"

@@ -22,7 +22,7 @@ url: https://docs.docker.com
latest_engine_api_version: "1.40"
docker_ce_version: "19.03"
docker_ee_version: "19.03"
compose_version: "1.24.0"
compose_version: "1.24.1"
compose_file_v3: "3.7"
compose_file_v2: "2.4"
machine_version: "0.16.0"

@@ -0,0 +1,49 @@
command: docker cluster
short: Docker Cluster
long: A tool to build and manage Docker Clusters.
pname: docker
plink: docker.yaml
cname:
  - docker cluster backup
  - docker cluster create
  - docker cluster inspect
  - docker cluster ls
  - docker cluster restore
  - docker cluster rm
  - docker cluster update
  - docker cluster version
clink:
  - docker_cluster_backup.yaml
  - docker_cluster_create.yaml
  - docker_cluster_inspect.yaml
  - docker_cluster_ls.yaml
  - docker_cluster_restore.yaml
  - docker_cluster_rm.yaml
  - docker_cluster_update.yaml
  - docker_cluster_version.yaml
options:
  - option: dry-run
    value_type: bool
    default_value: "false"
    description: Skip provisioning resources
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
  - option: log-level
    value_type: string
    default_value: warn
    description: |
      Set the logging level ("trace"|"debug"|"info"|"warn"|"error"|"fatal")
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false

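As a rough usage sketch (cluster names and files here are placeholders, not from the diff), the two global options defined above apply to every `docker cluster` subcommand:

```bash
# Preview the work a command would do without provisioning anything:
docker cluster create --dry-run --file cluster.yml

# Raise log verbosity on any subcommand:
docker cluster ls --log-level debug
```
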
@@ -0,0 +1,60 @@
command: docker cluster backup
short: Backup a running cluster
long: Backup a running cluster
usage: docker cluster backup [OPTIONS] cluster
pname: docker cluster
plink: docker_cluster.yaml
options:
  - option: env
    shorthand: e
    value_type: stringSlice
    default_value: '[]'
    description: Set environment variables
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
  - option: file
    value_type: string
    default_value: backup.tar.gz
    description: Cluster backup filename
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
  - option: passphrase
    value_type: string
    description: Cluster backup passphrase
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
inherited_options:
  - option: dry-run
    value_type: bool
    default_value: "false"
    description: Skip provisioning resources
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
  - option: log-level
    value_type: string
    default_value: warn
    description: |
      Set the logging level ("trace"|"debug"|"info"|"warn"|"error"|"fatal")
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false

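A hedged sketch of the backup flags above in use (cluster name and passphrase are placeholders):

```bash
docker cluster backup mycluster \
    --file backup.tar.gz \
    --passphrase "my-secret-passphrase"
```
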
@@ -0,0 +1,81 @@
command: docker cluster create
short: Create a new Docker Cluster
long: Create a new Docker Cluster
usage: docker cluster create [OPTIONS]
pname: docker cluster
plink: docker_cluster.yaml
options:
  - option: env
    shorthand: e
    value_type: stringSlice
    default_value: '[]'
    description: Set environment variables
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
  - option: example
    value_type: string
    default_value: aws
    description: Display an example cluster declaration
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
  - option: file
    shorthand: f
    value_type: string
    default_value: cluster.yml
    description: Cluster declaration
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
  - option: name
    shorthand: "n"
    value_type: string
    description: Name for the cluster
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
  - option: switch-context
    shorthand: s
    value_type: bool
    default_value: "false"
    description: Switch context after cluster create.
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
inherited_options:
  - option: dry-run
    value_type: bool
    default_value: "false"
    description: Skip provisioning resources
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
  - option: log-level
    value_type: string
    default_value: warn
    description: |
      Set the logging level ("trace"|"debug"|"info"|"warn"|"error"|"fatal")
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false

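A sketch of how the create flags above might combine (the file and cluster name are placeholders, and the `--example` behavior is inferred from its description):

```bash
# Print a sample AWS cluster declaration to start from:
docker cluster create --example aws

# Create a cluster from a declaration and switch to its context:
docker cluster create --file cluster.yml --name mycluster --switch-context
```
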
@@ -0,0 +1,43 @@
command: docker cluster inspect
short: Display detailed information about a cluster
long: Display detailed information about a cluster
usage: docker cluster inspect [OPTIONS] cluster
pname: docker cluster
plink: docker_cluster.yaml
options:
  - option: all
    shorthand: a
    value_type: bool
    default_value: "false"
    description: Display complete info about cluster
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
inherited_options:
  - option: dry-run
    value_type: bool
    default_value: "false"
    description: Skip provisioning resources
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
  - option: log-level
    value_type: string
    default_value: warn
    description: |
      Set the logging level ("trace"|"debug"|"info"|"warn"|"error"|"fatal")
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false

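For illustration (cluster name is a placeholder):

```bash
# Show the complete cluster definition rather than the summary:
docker cluster inspect --all mycluster
```
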
@@ -0,0 +1,43 @@
command: docker cluster ls
short: List all available clusters
long: List all available clusters
usage: docker cluster ls [OPTIONS]
pname: docker cluster
plink: docker_cluster.yaml
options:
  - option: quiet
    shorthand: q
    value_type: bool
    default_value: "false"
    description: Only display numeric IDs
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
inherited_options:
  - option: dry-run
    value_type: bool
    default_value: "false"
    description: Skip provisioning resources
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
  - option: log-level
    value_type: string
    default_value: warn
    description: |
      Set the logging level ("trace"|"debug"|"info"|"warn"|"error"|"fatal")
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false

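A minimal sketch of the flag above:

```bash
# List only cluster IDs, for example when scripting:
docker cluster ls --quiet
```
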
@@ -0,0 +1,60 @@
command: docker cluster restore
short: Restore a cluster from a backup
long: Restore a cluster from a backup
usage: docker cluster restore [OPTIONS] cluster
pname: docker cluster
plink: docker_cluster.yaml
options:
  - option: env
    shorthand: e
    value_type: stringSlice
    default_value: '[]'
    description: Set environment variables
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
  - option: file
    value_type: string
    default_value: backup.tar.gz
    description: Cluster backup filename
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
  - option: passphrase
    value_type: string
    description: Cluster backup passphrase
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
inherited_options:
  - option: dry-run
    value_type: bool
    default_value: "false"
    description: Skip provisioning resources
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
  - option: log-level
    value_type: string
    default_value: warn
    description: |
      Set the logging level ("trace"|"debug"|"info"|"warn"|"error"|"fatal")
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false

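A sketch mirroring the backup example, with placeholder values:

```bash
docker cluster restore mycluster \
    --file backup.tar.gz \
    --passphrase "my-secret-passphrase"
```
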
@@ -0,0 +1,53 @@
command: docker cluster rm
short: Remove a cluster
long: Remove a cluster
usage: docker cluster rm [OPTIONS] cluster
pname: docker cluster
plink: docker_cluster.yaml
options:
  - option: env
    shorthand: e
    value_type: stringSlice
    default_value: '[]'
    description: Set environment variables
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
  - option: force
    shorthand: f
    value_type: bool
    default_value: "false"
    description: Force removal of the cluster files
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
inherited_options:
  - option: dry-run
    value_type: bool
    default_value: "false"
    description: Skip provisioning resources
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
  - option: log-level
    value_type: string
    default_value: warn
    description: |
      Set the logging level ("trace"|"debug"|"info"|"warn"|"error"|"fatal")
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false

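For illustration (cluster name is a placeholder):

```bash
# Remove a cluster, forcing removal of its local files:
docker cluster rm --force mycluster
```
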
@@ -0,0 +1,52 @@
command: docker cluster update
short: Update a running cluster's desired state
long: Update a running cluster's desired state
usage: docker cluster update [OPTIONS] cluster
pname: docker cluster
plink: docker_cluster.yaml
options:
  - option: env
    shorthand: e
    value_type: stringSlice
    default_value: '[]'
    description: Set environment variables
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
  - option: file
    shorthand: f
    value_type: string
    description: Cluster definition
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
inherited_options:
  - option: dry-run
    value_type: bool
    default_value: "false"
    description: Skip provisioning resources
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
  - option: log-level
    value_type: string
    default_value: warn
    description: |
      Set the logging level ("trace"|"debug"|"info"|"warn"|"error"|"fatal")
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false

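A sketch of applying an updated declaration (file and cluster name are placeholders):

```bash
docker cluster update mycluster --file cluster.yml
```
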
@@ -0,0 +1,42 @@
command: docker cluster version
short: Print Version, Commit, and Build type
long: Print Version, Commit, and Build type
usage: docker cluster version
pname: docker cluster
plink: docker_cluster.yaml
options:
  - option: json
    value_type: bool
    default_value: "false"
    description: Formats output as JSON. Implies '--log-level error'
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
inherited_options:
  - option: dry-run
    value_type: bool
    default_value: "false"
    description: Skip provisioning resources
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
  - option: log-level
    value_type: string
    default_value: warn
    description: |
      Set the logging level ("trace"|"debug"|"info"|"warn"|"error"|"fatal")
    deprecated: false
    experimental: false
    experimentalcli: false
    kubernetes: false
    swarm: false
deprecated: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false

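For illustration:

```bash
# Print version information as JSON (implies --log-level error):
docker cluster version --json
```
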
@@ -3,6 +3,15 @@
# environment that can't access the internet

# Used by _includes/components/ddc_url_list_2.html
- product: "ucp"
  version: "3.2"
  tar-files:
    - description: "3.2.0 Linux"
      url: https://packages.docker.com/caas/ucp_images_3.2.0.tar.gz
    - description: "3.2.0 Windows Server 2016 LTSC"
      url: https://packages.docker.com/caas/ucp_images_win_2016_3.2.0.tar.gz
    - description: "3.2.0 Windows Server 2019 LTSC"
      url: https://packages.docker.com/caas/ucp_images_win_2019_3.2.0.tar.gz
- product: "ucp"
  version: "3.1"
  tar-files:

@@ -1,5 +1,5 @@
command: docker context import
short: Import a context from a tar file
short: Import a context from a tar or zip file
long: Imports a context previously exported with `docker context export`. To import
  from stdin, use a hyphen (`-`) as filename.
usage: docker context import CONTEXT FILE|-

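A sketch of the export/import round trip described above (context and file names are placeholders):

```bash
# Export an existing context, then import it under a new name:
docker context export my-context my-context.dockercontext
docker context import my-new-context my-context.dockercontext

# Import from stdin, using "-" as the filename:
docker context import my-new-context - < my-context.dockercontext
```
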
@@ -1343,6 +1343,33 @@ examples: |-
> Windows containers. This option fails if the container isolation is `hyperv`
> or when running Linux Containers on Windows (LCOW).

### Access an NVIDIA GPU

The `--gpus` flag allows you to access NVIDIA GPU resources. First you need to
install [nvidia-container-runtime](https://nvidia.github.io/nvidia-container-runtime/).
Visit [Specify a container's resources](https://docs.docker.com/config/containers/resource_constraints/)
for more information.

To use `--gpus`, specify which GPUs (or all) to use. If no value is provided, all
available GPUs are used. The example below exposes all available GPUs.

```bash
$ docker run -it --rm --gpus all ubuntu nvidia-smi
```

Use the `device` option to specify GPUs. The example below exposes a specific
GPU.

```bash
$ docker run -it --rm --gpus device=GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a ubuntu nvidia-smi
```

The example below exposes the first and third GPUs.

```bash
$ docker run -it --rm --gpus '"device=0,2"' ubuntu nvidia-smi
```

### Restart policies (--restart)

Use Docker's `--restart` to specify a container's *restart policy*. A restart

@@ -1580,4 +1607,3 @@ experimental: false
experimentalcli: false
kubernetes: false
swarm: false

_data/toc.yaml

@@ -469,7 +469,7 @@ guides:
title: NIST ITL Bulletin October 2017
- sectiontitle: OSCAL
section:
- path: /compliance/oscal
- path: /compliance/oscal/
title: OSCAL compliance guidance
- sectiontitle: CIS Benchmarks
section:

@@ -629,6 +629,27 @@ reference:
title: docker checkpoint ls
- path: /engine/reference/commandline/checkpoint_rm/
title: docker checkpoint rm

- sectiontitle: docker cluster *
section:
- path: /engine/reference/commandline/cluster/
title: docker cluster
- path: /engine/reference/commandline/cluster_backup/
title: docker cluster backup
- path: /engine/reference/commandline/cluster_create/
title: docker cluster create
- path: /engine/reference/commandline/cluster_inspect/
title: docker cluster inspect
- path: /engine/reference/commandline/cluster_ls/
title: docker cluster ls
- path: /engine/reference/commandline/cluster_restore/
title: docker cluster restore
- path: /engine/reference/commandline/cluster_rm/
title: docker cluster rm
- path: /engine/reference/commandline/cluster_update/
title: docker cluster update
- path: /engine/reference/commandline/cluster_version/
title: docker cluster version
- path: /engine/reference/commandline/commit/
title: docker commit
- sectiontitle: docker config *

@@ -1066,7 +1087,7 @@ reference:
- path: /reference/dtr/2.7/cli/destroy/
title: destroy
- path: /reference/dtr/2.7/cli/emergency-repair/
title: emergency-repair7
title: emergency-repair
- path: /reference/dtr/2.7/cli/install/
title: install
- path: /reference/dtr/2.7/cli/join/

@@ -1268,102 +1289,25 @@ manuals:
- sectiontitle: Docker Enterprise
section:
- path: /ee/
title: About Docker Enterprise
- sectiontitle: Release Notes
section:
title: Overview
- path: /ee/release-notes/
title: Platform
- path: /engine/release-notes/
title: Docker Engine - Enterprise and Engine - Community
nosync: true
- path: /ee/ucp/release-notes/
title: Docker Universal Control Plane
nosync: true
- path: /ee/dtr/release-notes/
title: Docker Trusted Registry
nosync: true
- path: /ee/desktop/release-notes/
title: Docker Desktop Enterprise
nosync: true
- path: /ee/docker-ee-architecture/
title: Docker Enterprise Architecture
- path: /ee/supported-platforms/
title: Supported platforms
nosync: true
- sectiontitle: Deploy Docker Enterprise
section:
title: Release notes
- sectiontitle: Docker Cluster
section:
- path: /cluster/overview/
- path: /cluster/
title: Overview
- path: /cluster/aws/
title: Docker Cluster on AWS
- path: /cluster/cluster-file/
title: Cluster file structure
- path: /cluster/reference/
title: Subcommands
- path: /cluster/reference/envvars/
title: Environment variables
- path: /ee/end-to-end-install/
title: Install components individually
- sectiontitle: Back up Docker Enterprise
section:
- path: /ee/admin/backup/
title: Overview
- path: /ee/admin/backup/back-up-swarm/
title: Back up Docker Swarm
- path: /ee/admin/backup/back-up-ucp/
title: Back up UCP
- path: /ee/admin/backup/back-up-dtr/
title: Back up DTR
- path: /cluster/reference/backup/
title: Back up clusters with Docker Cluster
- sectiontitle: Restore Docker Enterprise
section:
- path: /ee/admin/restore/
title: Overview
- path: /ee/admin/restore/restore-swarm/
title: Restore Docker Swarm
- path: /ee/admin/restore/restore-ucp/
title: Restore UCP
- path: /ee/admin/restore/restore-dtr/
title: Restore DTR
- path: /cluster/reference/restore/
title: Restore clusters with Docker Cluster
- sectiontitle: Disaster Recovery
section:
- path: /ee/admin/disaster-recovery/
title: Overview
- path: /ee/upgrade/
title: Upgrade Docker Enterprise
- sectiontitle: Docker Cluster
section:
- path: /cluster/overview/
title: Overview
- path: /cluster/aws/
title: Docker Cluster on AWS
- path: /cluster/cluster-file/
title: Cluster file structure
- path: /cluster/reference/
title: Subcommands
- path: /cluster/reference/envvars/
title: Environment variables
- path: /cluster/reference/ls/
title: List clusters
- path: /cluster/reference/inspect/
title: Inspect clusters
- path: /cluster/reference/update/
title: Update clusters
- path: /cluster/reference/remove/
title: Remove clusters
- path: /cluster/reference/version/
title: Version information
- path: /ee/telemetry/
title: Manage usage data collection
- sectiontitle: Docker Engine - Enterprise
section:
- path: /ee/supported-platforms/
title: Install Docker Enterprise Engine
title: Install Docker Engine - Enterprise
nosync: true
- title: Release notes
path: /engine/release-notes/

@@ -1385,9 +1329,11 @@ manuals:
title: Install
- path: /ee/ucp/admin/install/install-offline/
title: Install offline
- path: /ee/ucp/admin/install/install-on-azure/
- sectiontitle: Cloud Providers
section:
- path: /ee/ucp/admin/install/cloudproviders/install-on-azure/
title: Install on Azure
- path: /ee/ucp/admin/install/install-on-aws/
- path: /ee/ucp/admin/install/cloudproviders/install-on-aws/
title: Install on AWS
- path: /ee/ucp/admin/install/upgrade/
title: Upgrade

@@ -1403,6 +1349,8 @@ manuals:
title: Add labels to cluster nodes
- path: /ee/ucp/admin/configure/add-sans-to-cluster/
title: Add SANs to cluster certificates
- path: /ee/ucp/admin/configure/admission-controllers
title: Admission Controllers
- path: /ee/ucp/admin/configure/collect-cluster-metrics/
title: Collect UCP cluster metrics with Prometheus
- path: /ee/ucp/admin/configure/metrics-descriptions/

@@ -1582,16 +1530,12 @@ manuals:
path: /ee/ucp/kubernetes/deploy-with-compose/
- title: Using Pod Security Policies
path: /ee/ucp/kubernetes/pod-security-policies/
- title: Deploy an ingress controller
path: /ee/ucp/kubernetes/layer-7-routing/
- title: Create a service account for a Kubernetes app
path: /ee/ucp/kubernetes/create-service-account/
- title: Install an unmanaged CNI plugin
path: /ee/ucp/kubernetes/install-cni-plugin/
- title: Kubernetes network encryption
path: /ee/ucp/kubernetes/kubernetes-network-encryption/
- title: Deploy a CSI plugin
path: /ee/ucp/kubernetes/use-csi/
- sectiontitle: Persistent Storage
section:
- title: Use NFS Storage

@@ -1604,6 +1548,20 @@ manuals:
path: /ee/ucp/kubernetes/storage/configure-aws-storage/
- title: Configure iSCSI
path: /ee/ucp/kubernetes/storage/use-iscsi/
- title: Deploy a CSI plugin
path: /ee/ucp/kubernetes/storage/use-csi/
- sectiontitle: Cluster Ingress
section:
- title: Overview
path: /ee/ucp/kubernetes/cluster-ingress/
- title: Install Ingress
path: /ee/ucp/kubernetes/cluster-ingress/install/
- title: Deploy Simple Application
path: /ee/ucp/kubernetes/cluster-ingress/ingress/
- title: Deploy a Canary Deployment
path: /ee/ucp/kubernetes/cluster-ingress/canary/
- title: Implementing Persistent (sticky) Sessions
path: /ee/ucp/kubernetes/cluster-ingress/sticky/
- title: API reference
path: /reference/ucp/3.2/api/
nosync: true

@@ -2386,6 +2344,8 @@ manuals:
- path: /ee/dtr/admin/configure/use-your-own-tls-certificates/
title: Use your own TLS certificates
- path: /ee/dtr/admin/configure/enable-single-sign-on/
title: Disable persistent cookies
- path: /ee/dtr/admin/configure/disable-persistent-cookies/
title: Enable single sign-on
- sectiontitle: External storage
section:

@@ -3337,6 +3297,49 @@ manuals:
title: Troubleshoot DDE issues on Mac
- path: /ee/desktop/troubleshoot/windows-issues/
title: Troubleshoot DDE issues on Windows
- sectiontitle: Manage Docker Enterprise
section:
- path: /ee/docker-ee-architecture/
title: Docker Enterprise Architecture
- path: /ee/supported-platforms/
title: Supported platforms
nosync: true
- path: /ee/end-to-end-install/
title: Deploy Docker Enterprise
- path: /ee/upgrade/
title: Upgrade Docker Enterprise
- sectiontitle: Back up Docker Enterprise
section:
- path: /ee/admin/backup/
title: Overview
- path: /ee/admin/backup/back-up-swarm/
title: Back up Docker Swarm
- path: /ee/admin/backup/back-up-ucp/
title: Back up UCP
- path: /ee/admin/backup/back-up-dtr/
title: Back up DTR
- path: /cluster/reference/backup/
title: Back up clusters with Docker Cluster
- sectiontitle: Restore Docker Enterprise
section:
- path: /ee/admin/restore/
title: Overview
- path: /ee/admin/restore/restore-swarm/
title: Restore Docker Swarm
- path: /ee/admin/restore/restore-ucp/
title: Restore UCP
- path: /ee/admin/restore/restore-dtr/
title: Restore DTR
- path: /cluster/reference/restore/
title: Restore clusters with Docker Cluster
- sectiontitle: Disaster Recovery
section:
- path: /ee/admin/disaster-recovery/
title: Overview
- path: /ee/enable-client-certificate-authentication/
title: Enable client certificate authentication with your PKI
- path: /ee/telemetry/
title: Manage usage data collection
- title: Get support
path: /ee/get-support/
- sectiontitle: Docker Assemble

@@ -3369,6 +3372,12 @@ manuals:
title: API reference
- path: /engine/reference/commandline/template/
title: CLI reference
- sectiontitle: Docker Buildx
section:
- path: /buildx/working-with-buildx/
title: Working with Docker Buildx
- path: /engine/reference/commandline/buildx/
title: CLI reference
- sectiontitle: Docker Compose
section:
- path: /compose/

@@ -3671,52 +3680,8 @@ manuals:
title: Token scope documentation
- path: /registry/spec/auth/token/
title: Token authentication specification
- sectiontitle: Release notes
section:
- path: /release-notes/
title: Overview
- sectiontitle: Docker Enterprise Platform
section:
- path: /ee/release-notes/
title: Platform
- path: /engine/release-notes/
title: Docker Engine - Enterprise and Engine - Community
nosync: true
- path: /ee/ucp/release-notes/
title: Docker Universal Control Plane
nosync: true
- path: /ee/dtr/release-notes/
title: Docker Trusted Registry
nosync: true
- path: /ee/desktop/release-notes/
title: Docker Desktop Enterprise
nosync: true
- path: /docker-for-mac/release-notes/
title: Docker Desktop for Mac
nosync: true
- path: /docker-for-windows/release-notes/
title: Docker Desktop for Windows
nosync: true
- path: /release-notes/docker-compose/
title: Docker Compose
nosync: true
- path: /docker-for-aws/release-notes/
title: Docker for AWS
nosync: true
- path: /docker-for-azure/release-notes/
title: Docker for Azure
nosync: true
- path: /release-notes/docker-swarm/
title: Docker Swarm release notes
nosync: true
- sectiontitle: Superseded products and tools
section:
- path: /cs-engine/1.13/release-notes/
title: CS Docker Engine
- path: /release-notes/docker-engine/
title: Docker (1.13 and earlier)
- path: /release-notes/docker-machine/
title: Docker Machine
title: Release notes
- sectiontitle: Superseded products and tools
section:
- path: /cs-engine/1.13/release-notes/

@@ -4,6 +4,6 @@
|:---------------------------------------------------------------------|:-------------------------:|:----------------------------:|
| Container engine and built in orchestration, networking, security | {{green-check}} | {{green-check}} |
| Certified infrastructure, plugins and ISV containers | {{green-check}} | {{green-check}} |
| Image management | | {{green-check}} |
| Container app management | | {{green-check}} |
| Image security scanning | | {{green-check}} |
| Image management with Docker Trusted Registry security scanning | | {{green-check}} |
| Container app management with Universal Control Plane | | {{green-check}} |
| Developer solutions with Docker Desktop Enterprise | | {{green-check}} |

@@ -35,7 +35,7 @@ A service template provides the description required by Docker Template to scaff

1. `/run/configuration`, a JSON file which contains all settings such as parameters, image name, etc. For example:

```
```json
{
  "parameters": {
    "externalPort": "80",

@@ -53,7 +53,7 @@ To create a basic service template, you need to create two files — a dockerfil

`docker-compose.yaml`

```
```yaml
version: "3.6"
services:
  mysql:

@@ -62,7 +62,7 @@ services:

`Dockerfile`

```
```conf
FROM alpine
COPY docker-compose.yaml .
CMD cp docker-compose.yaml /project/

@@ -80,7 +80,7 @@ Services that generate a template using code must contain the following files th

Here’s an example of a simple NodeJS service:

```
```bash
my-service
├── Dockerfile # The Dockerfile of the service template
└── assets

@@ -92,7 +92,7 @@ The NodeJS service contains the following files:

`my-service/Dockerfile`

```
```conf
FROM alpine
COPY assets /assets
CMD ["cp", "/assets", "/project"]

@@ -103,7 +103,7 @@ COPY assets /assets

`my-service/assets/docker-compose.yaml`

{% raw %}
```
```yaml
version: "3.6"
services:
  {{ .Name }}:

@@ -115,7 +115,7 @@ services:

`my-service/assets/Dockerfile`

```
```conf
FROM node:9
WORKDIR /app
COPY package.json .

@@ -128,7 +128,7 @@ CMD ["yarn", "run", "start"]

The next step is to build and push the service template image to a remote repository by running the following command:

```
```bash
cd [...]/my-service
docker build -t org/my-service .
docker push org/my-service

@@ -136,7 +136,7 @@ docker push org/my-service

To build and push the image to an instance of Docker Trusted Registry (DTR), or to an external registry, specify the name of the repository:

```
```bash
cd [...]/my-service
docker build -t myrepo:5000/my-service .
docker push myrepo:5000/my-service

@@ -151,7 +151,7 @@ Of all the available service and application definitions, Docker Template has ac

Here is an example of the Express service definition:

```
```yaml
- apiVersion: v1alpha1 # constant
  kind: ServiceTemplate # constant
  metadata:

@@ -180,7 +180,7 @@ To customize a service, you need to complete the following tasks:

Add the parameters available to the application. The following example adds the NodeJS version and the external port:

```
```yaml
- [...]
  spec:
    [...]

@@ -209,7 +209,7 @@ When you run the service template container, a volume is mounted making the serv

The file matches the following go struct:

```
```golang
type TemplateContext struct {
    ServiceID string `json:"serviceId,omitempty"`
    Name string `json:"name,omitempty"`

@@ -224,7 +224,7 @@ type TemplateContext struct {

Where `ConfiguredService` is:

```
```go
type ConfiguredService struct {
    ID string `json:"serviceId,omitempty"`
    Name string `json:"name,omitempty"`

@@ -236,7 +236,7 @@ You can then use the file to obtain values for the parameters and use this infor

To use the `interpolator` image, update `my-service/Dockerfile` to use the following Dockerfile:

```
```conf
FROM dockertemplate/interpolator:v0.0.3-beta1
COPY assets .
```

@@ -245,7 +245,7 @@ COPY assets .

This places the interpolator image in the `/assets` folder and copies the folder to the target `/project` folder. If you prefer to do this manually, use a Dockerfile instead:

```
```conf
WORKDIR /assets
CMD ["/interpolator", "-config", "/run/configuration", "-source", "/assets", "-destination", "/project"]
```

@@ -270,7 +270,7 @@ Create a local repository file called `library.yaml` anywhere on your local driv

`library.yaml`

```
```yaml
apiVersion: v1alpha1
generated: "2018-06-13T09:24:07.392654524Z"
kind: RepositoryContent

@@ -291,7 +291,7 @@ Now that you have created a local repository and added service definitions to it

1. Edit `~/.docker/dockertemplate/preferences.yaml` as follows:

```
```yaml
apiVersion: v1alpha1
channel: master
kind: Preferences

@@ -302,7 +302,7 @@ repositories:

2. Add your local repository:

```
```yaml
apiVersion: v1alpha1
channel: master
kind: Preferences

@@ -313,6 +313,13 @@ repositories:
url: https://docker-application-template.s3.amazonaws.com/master/library.yaml
```

When configuring a local repository on Windows, the `url` structure is slightly different:

```yaml
- name: custom-services
  url: file://c:/path/to/my/library.yaml
```

After updating the `preferences.yaml` file, run `docker template ls` or restart the Application Designer and select **Custom application**. The new service should now be visible in the list of available services.

### Share custom service templates

@@ -343,7 +350,7 @@ Before you create an application template definition, you must create a reposito

For example, to create an Express and MySQL application, the application definition must be similar to the following yaml file:

```
```yaml
apiVersion: v1alpha1 #constant
kind: ApplicationTemplate #constant
metadata:

@@ -366,7 +373,7 @@ Create a local repository file called `library.yaml` anywhere on your local driv

`library.yaml`

```
```yaml
apiVersion: v1alpha1
generated: "2018-06-13T09:24:07.392654524Z"
kind: RepositoryContent

@@ -391,7 +398,7 @@ Now that you have created a local repository and added application definitions,

1. Edit `~/.docker/dockertemplate/preferences.yaml` as follows:

```
```yaml
apiVersion: v1alpha1
channel: master
kind: Preferences

@@ -402,7 +409,7 @@ repositories:

2. Add your local repository:

```
```yaml
apiVersion: v1alpha1
channel: master
kind: Preferences

@@ -413,6 +420,13 @@ repositories:
url: https://docker-application-template.s3.amazonaws.com/master/library.yaml
```

When configuring a local repository on Windows, the `url` structure is slightly different:

```yaml
- name: custom-services
  url: file://c:/path/to/my/library.yaml
```

After updating the `preferences.yaml` file, run `docker template ls` or restart the Application Designer and select **Custom application**. The new template should now be visible in the list of available templates.

### Share the custom application template

@@ -10,7 +10,8 @@ keywords: Docker App, applications, compose, orchestration

## Overview

Docker App is a CLI plug-in that introduces a top-level `docker app` command that brings the _container experience_ to applications. The following table compares Docker containers with Docker applications.
Docker App is a CLI plug-in that introduces a top-level `docker app` command to bring
the _container experience_ to applications. The following table compares Docker containers with Docker applications.

| Object | Config file | Build with | Execute with | Share with |

@@ -19,20 +20,22 @@ Docker App is a CLI plug-in that introduces a top-level `docker app` command tha
| App | App Package | docker app bundle | docker app install | docker app push |

With Docker App, entire applications can now be managed as easily as images and containers. For example, Docker App lets you _build_, _validate_ and _deploy_ applications with the `docker app` command. You can even leverage secure supply-chain features such as signed `push` and `pull` operations.
With Docker App, entire applications can now be managed as easily as images and containers. For example,
Docker App lets you _build_, _validate_ and _deploy_ applications with the `docker app` command. You can
even leverage secure supply-chain features such as signed `push` and `pull` operations.

This guide will walk you through two scenarios:
> **NOTE**: `docker app` works with `Engine - Community 19.03` or higher and `Engine - Enterprise 19.03` or higher.

1. Initialize and deploy a new Docker App project from scratch
1. Convert an existing Compose app into a Docker App project (added later in the beta process)
This guide walks you through two scenarios:

The first scenario will familiarize you with the basic components of a Docker App and get you comfortable with the tools and workflow.
1. Initialize and deploy a new Docker App project from scratch.
1. Convert an existing Compose app into a Docker App project (added later in the beta process).

The first scenario describes basic components of a Docker App with tools and workflow.

## Initialize and deploy a new Docker App project from scratch

In this section, we'll walk through the process of creating a new Docker App project. By then end, you'll be familiar with the workflow and most important commands.

We'll complete the following steps:
This section describes the steps for creating a new Docker App project to familiarize you with the workflow and most important commands.

1. Prerequisites
1. Initialize an empty new project

@@ -44,14 +47,16 @@ We'll complete the following steps:

### Prerequisites

In order to follow along, you'll need at least one Docker node operating in Swarm mode. You will also need the latest build of the Docker CLI with the App CLI plugin included.

Depending on your Linux distribution and your security context, you may need to prepend commands with `sudo`.
You need at least one Docker node operating in Swarm mode. You also need the latest build of the Docker CLI
with the App CLI plugin included.

Depending on your Linux distribution and your security context, you might need to prepend commands with `sudo`.

### Initialize a new empty project

The `docker app init` command is used to initialize a new Docker application project. If you run it on its own, it initializes a new empty project. If you point it to an existing `docker-compose.yml` file, it initializes a new project based on the Compose file.
The `docker app init` command is used to initialize a new Docker application project. If you run it on
its own, it initializes a new empty project. If you point it to an existing `docker-compose.yml` file,
it initializes a new project based on the Compose file.

Use the following command to initialize a new empty project called "hello-world".

|
@ -60,20 +65,25 @@ $ docker app init --single-file hello-world
|
|||
Created "hello-world.dockerapp"
|
||||
```
|
||||
|
||||
The command will produce a single file in your current directory called `hello-world.dockerapp`. The format of the file name is <project-name> appended with `.dockerapp`.
|
||||
The command produces a single file in your current directory called `hello-world.dockerapp`.
|
||||
The format of the file name is <project-name> appended with `.dockerapp`.
|
||||
|
||||
```
|
||||
$ ls
|
||||
hello-world.dockerapp
|
||||
```
|
||||
|
||||
If you run `docker app init` without the `--single-file` flag you will get a new directory containing three YAML files. The name of the directory will be the name of the project with `.dockerapp` appended, and the three YAML files will be:
|
||||
If you run `docker app init` without the `--single-file` flag, you get a new directory containing three YAML files.
|
||||
The name of the directory is the name of the project with `.dockerapp` appended, and the three YAML files are:
|
||||
|
||||
- `docker-compose.yml`
|
||||
- `metadata.yml`
|
||||
- `parameters.yml`
|
||||
|
||||
However, the `--single-file` option merges the three YAML files into a single YAML file with three sections. Each of these sections relates to one of the three YAML files mentioned above:`docker-compose.yml`, `metadata.yml`, and `parameters.yml`. Using the `--single-file` option is great for enabling you to share your application via a single configuration file.
|
||||
However, the `--single-file` option merges the three YAML files into a single YAML file with three sections.
|
||||
Each of these sections relates to one of the three YAML files mentioned previously: `docker-compose.yml`,
|
||||
`metadata.yml`, and `parameters.yml`. Using the `--single-file` option enables you to share your application
|
||||
using a single configuration file.
|
||||
|
||||
Inspect the YAML with the following command.
|
||||
|
||||
|
@@ -91,21 +101,23 @@ services: {}
# Default application parameters - equivalent to parameters.yml.
```

Your file may be more verbose.
Your file might be more verbose.

Notice that each of the three sections is separated by a set of three dashes ("---"). Let's quickly describe each section.

The first section of the file is where you specify identification metadata such as name, version, description and maintainers. It accepts key-value pairs. This part of the file can be a separate file called `metadata.yml`
The first section of the file specifies identification metadata such as name, version,
description and maintainers. It accepts key-value pairs. This part of the file can be a separate file called `metadata.yml`

The second section of the file describes the application. It can be a separate file called `docker-compose.yml`.

The final section is where default values for application parameters can be expressed. It can be a separate file called `parameters.yml`
The final section specifies default values for application parameters. It can be a separate file called `parameters.yml`

### Populate the project

In this section, we'll edit the project YAML file so that it runs a simple web app.
This section describes editing the project YAML file so that it runs a simple web app.

Use your preferred editor to edit the `hello-world.dockerapp` YAML file and update the application section to the following:
Use your preferred editor to edit the `hello-world.dockerapp` YAML file and update the application section with
the following information:

```
version: "3.6"

@@ -129,11 +141,16 @@ The sections of the YAML file are currently order-based. This means it's importa

Save the changes.

The application has been updated to run a single-container application based on the `hashicorp/http-echo` web server image. This image will have it execute a single command that displays some text and exposes itself on a network port.
The application is updated to run a single-container application based on the `hashicorp/http-echo` web server image.
This image has it execute a single command that displays some text and exposes itself on a network port.

Following best practices, the configuration of the application has been decoupled form the application itself using variables. In this case, the text displayed by the app, and the port it will be published on, are controlled by two variables defined in the `Parameters` section of the file.
Following best practices, the configuration of the application is decoupled from the application itself using variables.
In this case, the text displayed by the app and the port on which it will be published are controlled by two variables defined in the `Parameters` section of the file.

Docker App provides the `inspect` subcommand to provide a prettified summary of the application configuration. It is a quick way to check how to configure the application before deployment, without having to read the `Compose file`. It's important to note that the application is not running at this point, and that the `inspect` operation inspects the configuration file(s).
Docker App provides the `inspect` subcommand to provide a prettified summary of the application configuration.
It is a quick way to check how to configure the application before deployment, without having to read
the `Compose file`. It's important to note that the application is not running at this point, and that
the `inspect` operation inspects the configuration file(s).

```
$ docker app inspect hello-world.dockerapp

@@ -149,20 +166,23 @@ hello.port 8080
hello.text Hello world!
```

`docker app inspect` operations will fail if the `Parameters` section doesn't specify a default value for every parameter expressed in the app section.
`docker app inspect` operations fail if the `Parameters` section doesn't specify a default value for
every parameter expressed in the app section.

The application is ready to be validated and rendered.

### Validate the app
Docker App provides the `validate` subcommand to check syntax and other aspects of the configuration. If the app passes validation, the command returns no arguments.

Docker App provides the `validate` subcommand to check syntax and other aspects of the configuration.
If the app passes validation, the command returns no arguments.

```
$ docker app validate hello-world.dockerapp
Validated "hello-world.dockerapp"
```

`docker app validate` operations will fail if the `Parameters` section doesn't specify a default value for every parameter expressed in the app section.

`docker app validate` operations fail if the `Parameters` section doesn't specify a default value for
every parameter expressed in the app section.

As the `validate` operation has returned no problems, the app is ready to be deployed.

@@ -170,17 +190,17 @@ As the `validate` operation has returned no problems, the app is ready to be dep

There are several options for deploying a Docker App project.

1. Deploy as a native Docker App application
1. Deploy as a Compose app application
1. Deploy as a Docker Stack application
- Deploy as a native Docker App application
- Deploy as a Compose app application
- Deploy as a Docker Stack application

We'll look at all three options, starting with deploying as a native Dock App application.
All three options are discussed, starting with deploying as a native Docker App application.

#### Deploy as a native Docker App

The process for deploying as a native Docker app is as follows.
The process for deploying as a native Docker app is as follows:

1. Use `docker app install` to deploy the application
Use `docker app install` to deploy the application.

Use the following command to deploy (install) the application.

|
@ -191,15 +211,22 @@ Creating service my-app_hello
|
|||
Application "my-app" installed on context "default"
|
||||
```
|
||||
|
||||
The app will be deployed using the stack orchestrator. This means you can inspect it with regular `docker stack` commands.
|
||||
By default, `docker app` uses the [current context](/engine/context/working-with-contexts) to run the
|
||||
installation container and as a target context to deploy the application. You can override the second context
|
||||
using the flag `--target-context` or by using the environment variable `DOCKER_TARGET_CONTEXT`. This flag is also
|
||||
available for the commands `status`, `upgrade`, and `uninstall`.
|
||||
|
||||
```
|
||||
$ docker stack ls
|
||||
NAME SERVICES ORCHESTRATOR
|
||||
my-app 1 Swarm
|
||||
$ docker app install hello-world.dockerapp --name my-app --target-context=my-big-production-cluster
|
||||
Creating network my-app_default
|
||||
Creating service my-app_hello
|
||||
Application "my-app" installed on context "my-big-production-cluster"
|
||||
```
|
||||
|
||||
You can also check the status of the app with the `docker app status <app-name>` command.
|
||||
> **Note**: Two applications deployed on the same target context cannot share the same name, but this is
|
||||
valid if they are deployed on different target contexts.
|
||||
|
||||
You can check the status of the app with the `docker app status <app-name>` command.
|
||||
|
||||
```
|
||||
$ docker app status my-app
|
||||
|
@@ -230,20 +257,37 @@ ID NAME MODE REPLICAS IMAGE PORT
miqdk1v7j3zk my-app_hello replicated 1/1 hashicorp/http-echo:latest *:8080->5678/tcp
```

Now that the app is running, you can point a web browser at the DNS name or public IP of the Docker node on port 8080 and see the app in all its glory. You will need to ensure traffic to port 8080 is allowed on the connection form your browser to your Docker host.
The app is deployed using the stack orchestrator. This means you can also inspect it using the regular `docker stack` commands.

```
$ docker stack ls
NAME                SERVICES            ORCHESTRATOR
my-app              1                   Swarm
```

Now that the app is running, you can point a web browser at the DNS name or public IP of the Docker node on
port 8080 and see the app. You must ensure traffic to port 8080 is allowed on
the connection from your browser to your Docker host.

Now change the port of the application using the `docker app upgrade <app-name>` command.

```
$ docker app upgrade my-app --hello.port=8181
Upgrading service my-app_hello
Application "my-app" upgraded on context "default"
```

You can uninstall the app with `docker app uninstall my-app`.

#### Deploy as a Docker Compose app

The process for deploying a as a Compose app comprises two major steps:
The process for deploying as a Compose app comprises two major steps:

1. Render the Docker app project as a `docker-compose.yml` file.
1. Deploy the app using `docker-compose up`.
2. Deploy the app using `docker-compose up`.

You will need a recent version of Docker Compose to complete these steps.
You need a recent version of Docker Compose to complete these steps.

Rendering is the process of reading the entire application configuration and outputting it as a single `docker-compose.yml` file. This will create a Compose file with hard-coded values wherever a parameter was specified as a variable.
Rendering is the process of reading the entire application configuration and outputting it as a single `docker-compose.yml` file. This creates a Compose file with hard-coded values wherever a parameter was specified as a variable.

Use the following command to render the app to a Compose file called `docker-compose.yml` in the current directory.

|
@ -269,7 +313,10 @@ services:
|
|||
protocol: tcp
|
||||
```
|
||||
|
||||
Notice that the file contains hard-coded values that were expanded based on the contents of the Parameters section of the project's YAML file. For example, `${hello.text}` has been expanded to "Hello world!". Almost all the `docker app` commands propose the `--set key=value` flag to override a default parameter.
|
||||
Notice that the file contains hard-coded values that were expanded based on the contents of the `Parameters`
|
||||
section of the project's YAML file. For example, `${hello.text}` has been expanded to "Hello world!".
|
||||
|
||||
> **Note**: Almost all the `docker app` commands propose the `--set key=value` flag to override a default parameter.
|
||||
|
||||
Try to render the application with a different text:
|
||||
|
||||
|
@ -297,18 +344,20 @@ WARNING: The Docker Engine you're using is running in swarm mode.
|
|||
<Snip>
|
||||
```
|
||||
|
||||
The application is now running as a Docker compose app and should be reachable on port `8080` on your Docker host. You will need to ensure traffic to port 8080 is allowed on the connection form your browser to your Docker host.
|
||||
The application is now running as a Docker Compose app and should be reachable on port `8080` on your Docker host.
|
||||
You must ensure traffic to port `8080` is allowed on the connection form your browser to your Docker host.
|
||||
|
||||
You can use `docker-compose down` to stop and remove the application.
|
||||
|
||||
#### Deploy as a Docker Stack
|
||||
|
||||
Deploying the app as a Docker stack is a two-step process very similar to deploying it as a Docker compose app.
|
||||
Deploying the app as a Docker stack is a two-step process very similar to deploying it as a Docker Compose app.
|
||||
|
||||
1. Render the Docker app project as a `docker-compose.yml` file.
|
||||
1. Deploy the app using `docker stack deploy`.
|
||||
2. Deploy the app using `docker stack deploy`.
|
||||
|
||||
We'll assume that you've followed the steps to render the Docker app project as a compose file (shown in the previous section) and that you're ready to deploy it as a Docker Stack. Your Docker host will need to be in Swarm mode.
|
||||
Complete the steps in the previous section to render the Docker app project as a Compose file and make sure
|
||||
you're ready to deploy it as a Docker Stack. Your Docker host must be in Swarm mode.
|
||||
|
||||
```
|
||||
$ docker stack deploy hello-world-app -c docker-compose.yml
|
||||
|
@ -318,17 +367,23 @@ Creating service hello-world-app_hello

The app is now deployed as a Docker stack and can be reached on port `8080` on your Docker host.

Use the `docker stack rm hello-world-app` command to stop and remove the stack. You must ensure traffic to
port `8080` is allowed on the connection from your browser to your Docker host.

### Push the app to Docker Hub

As mentioned in the introduction, `docker app` lets you manage entire applications the same way that you
currently manage container images. For example, you can push and pull entire applications from registries like
Docker Hub with `docker app push` and `docker app pull`. Other `docker app` commands, such
as `install`, `upgrade`, `inspect`, and `render`, can be performed directly on applications while they are
stored in a registry.

The following section contains some examples.

Push the application to Docker Hub. To complete this step, you need a valid Docker ID and you must be
logged in to the registry to which you are pushing the app.

Be sure to replace the registry ID in the following example with your own.

```
$ docker app push my-app --tag nigelpoulton/app-test:0.1.0
@ -358,7 +413,8 @@ The app is now stored in your DTR.

### Install the app directly from Docker Hub or DTR

Now that the app is pushed to the registry, try an `inspect` and `install` command against it.
The location of your app is different from the one provided in the examples.

```
$ docker app inspect nigelpoulton/app-test:0.1.0
@ -387,7 +443,8 @@ Application "hello-world" installed on context "default"

Test that the app is working.

The app used in these examples is a simple web server that displays the text "Hello world!" on port 8181;
your app might be different.

```
$ curl http://localhost:8181
@ -404,4 +461,3 @@ Application "hello-world" uninstalled on context "default"
```

You can see the name of your Docker App with the `docker stack ls` command.

@ -105,7 +105,7 @@ The values are substituted in the cluster definition, which makes it
easy to define a re-usable cluster definition and then change the variables
to create multiple instances of a cluster.
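For example, a `cluster.yml` can declare variables like the following minimal sketch (the names and values are illustrative):

```yaml
variable:
  region: "us-east-1"     # substituted wherever ${region} appears
  ucp_password:
    type: prompt          # requested interactively at create time
```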
Run `docker cluster create --file cluster.yml --name quickstart`.

    $ docker cluster create --file cluster.yml --name quickstart
    Please provide a value for ucp_password
@ -114,7 +114,7 @@ Run `docker cluster create --file cluster.yml --name quickstart`
    Planning cluster on aws    [OK]
    Creating: [===========================                           ] 44%

After approximately 10 minutes, resources are provisioned, and Docker Enterprise installation is started:

    $ docker cluster create --file cluster.yml --name quickstart
    Please provide a value for ucp_password

@ -156,7 +156,7 @@ To view an inventory of the clusters you created, run `docker cluster ls`:

    ID            NAME        PROVIDER   ENGINE             UCP               DTR               STATE
    911c882340b2  quickstart  acme, aws  ee-stable-18.09.5  docker/ucp:3.1.6  docker/dtr:2.6.5  running

For detailed information about the cluster, run `docker cluster inspect quickstart`.

    $ docker cluster inspect quickstart

```yaml
@ -398,5 +398,5 @@ All provisioned resources are destroyed and the context for the cluster is removed

## Where to go next

- View the quick start guide for [Azure](azure.md) or [vSphere](vsphere.md)
- [Explore the full list of Cluster commands](/engine/reference/commandline/cluster/)
- [Cluster configuration file reference](./cluster-file.md)

@ -0,0 +1,529 @@
---
description: Cluster file reference and guidelines
keywords: documentation, docs, docker, cluster, infrastructure, automation
title: Cluster file version 1 reference
toc_max: 5
toc_min: 1
---

This topic describes version 1 of the Cluster file format.

## Cluster file structure and examples

<div class="panel panel-default">
    <div class="panel-heading collapsed" data-toggle="collapse" data-target="#collapseSample1" style="cursor: pointer">
    Example Cluster file version 1
    <i class="chevron fa fa-fw"></i></div>
    <div class="collapse block" id="collapseSample1">
<pre><code>
variable:
  domain: "YOUR DOMAIN, e.g. docker.com"
  subdomain: "A SUBDOMAIN, e.g. cluster"
  region: "THE AWS REGION TO DEPLOY, e.g. us-east-1"
  email: "YOUR.EMAIL@COMPANY.COM"
  ucp_password:
    type: prompt
provider:
  acme:
    email: ${email}
    server_url: https://acme-staging-v02.api.letsencrypt.org/directory
  aws:
    region: ${region}
cluster:
  dtr:
    version: docker/dtr:2.6.5
  engine:
    version: ee-stable-18.09.5
  ucp:
    username: admin
    password: ${ucp_password}
    version: docker/ucp:3.1.6
resource:
  aws_instance:
    managers:
      instance_type: t2.xlarge
      os: Ubuntu 16.04
      quantity: 3
    registry:
      instance_type: t2.xlarge
      os: Ubuntu 16.04
      quantity: 3
    workers:
      instance_type: t2.xlarge
      os: Ubuntu 16.04
      quantity: 3
  aws_lb:
    apps:
      domain: ${subdomain}.${domain}
      instances:
        - workers
      ports:
        - 80:8080
        - 443:8443
    dtr:
      domain: ${subdomain}.${domain}
      instances:
        - registry
      ports:
        - 443:443
    ucp:
      domain: ${subdomain}.${domain}
      instances:
        - managers
      ports:
        - 443:443
        - 6443:6443
  aws_route53_zone:
    dns:
      domain: ${domain}
      subdomain: ${subdomain}
</code></pre>
    </div>
</div>

The topics on this reference page are organized alphabetically by top-level keys
to reflect the structure of the Cluster file. Top-level keys that define
a section in the configuration file, such as `cluster`, `provider`, and `resource`,
are listed with the options that support them as sub-topics. This information
maps to the indent structure of the Cluster file.

### cluster

Specifies components to install and configure for a cluster.

The following components are available (a combined sketch follows this list):

- `subscription`: (Optional) A string value representing the subscription ID.
- `license`: (Optional) A path to the cluster's license file.
- `cloudstor`: (Optional) Configuration options for Docker Cloudstor.
- `dtr`: (Optional) Configuration options for Docker Trusted Registry.
- `engine`: (Optional) Configuration options for Docker Engine.
- `ucp`: (Optional) Configuration options for Docker Universal Control Plane.
- `registry`: (Optional) Configuration options for authenticating nodes with a registry to pull Docker images.
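A combined sketch of a `cluster` section; the component versions mirror the sample Cluster file at the top of this page, and the license path is illustrative:

```yaml
cluster:
  license: "./docker-ee.lic"   # illustrative path to a license file
  engine:
    version: ee-stable-18.09.5
  ucp:
    username: admin
    password: ${ucp_password}
    version: docker/ucp:3.1.6
  dtr:
    version: docker/dtr:2.6.5
```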
#### cloudstor

Customizes the installation of Docker Cloudstor.

- `version`: (Optional) The version of Cloudstor to install. Default is `1.0`.
- `use_efs`: (Optional) Specifies whether an Elastic File System should be provisioned. Defaults to `false`.
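For example, a minimal sketch that pins the Cloudstor version and provisions EFS (the values are illustrative):

```yaml
cluster:
  cloudstor:
    version: "1.0"    # the documented default
    use_efs: true     # provision an Elastic File System
```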
#### dtr

Customizes the installation of Docker Trusted Registry.

```yaml
cluster:
  dtr:
    version: "docker/dtr:2.6.5"
    install_options:
      - "--debug"
      - "--enable-pprof"
```

The following optional elements can be specified:

- `version`: (Optional) The version of DTR to install. Defaults to `docker/dtr:2.6.5`.
- `ca`: (Optional) The path to a root CA public certificate.
- `key`: (Optional) The path to a TLS private key.
- `cert`: (Optional) The path to a public key certificate.
- `install_options`: (Optional) Additional [DTR install options](https://docs.docker.com/reference/dtr/2.6/cli/install/).

#### engine

Customizes the installation of Docker Enterprise Engine.

```yaml
cluster:
  engine:
    channel: "stable"
    edition: "ee"
    version: "19.03"
```

The following optional elements can be specified:

- `version`: (Optional) The version of the Docker Engine to install. Defaults to `19.03`.
- `edition`: (Optional) The family of Docker Engine to install. Defaults to `ee` for Enterprise edition.
- `channel`: (Optional) The channel on the repository to pull updated packages. Defaults to `stable`.
- `url`: (Optional) Defaults to "https://storebits.docker.com/ee".
- `storage_driver`: (Optional) The storage driver to use for the storage volume. Default value is dependent on the operating system.
  - Amazon Linux 2 is `overlay2`.
  - CentOS is `overlay2`.
  - Oracle Linux is `overlay2`.
  - RedHat is `overlay2`.
  - SLES is `btrfs`.
  - Ubuntu is `overlay2`.
- `storage_fstype`: (Optional) File system to use for the storage volume. Default value is dependent on the operating system.
  - Amazon Linux 2 is `xfs`.
  - CentOS is `xfs`.
  - Oracle Linux is `xfs`.
  - RedHat is `xfs`.
  - SLES is `btrfs`.
  - Ubuntu is `ext4`.
- `storage_volume`: (Optional) Docker storage volume path for `/var/lib/docker`. Default value is provider dependent.
  - AWS
    - non-NVMe is `/dev/xvdb`.
    - NVMe disks are one of `/dev/nvme[0-26]n1`.
  - Azure is `/dev/disk/azure/scsi1/lun0`.
- `daemon`: (Optional) Provides docker daemon options. Defaults to "".
- `ca`: (dev) Defaults to "".
- `key`: (dev) Defaults to "".
- `enable_remote_tcp`: (dev) Enables direct access to the docker engine. Defaults to `false`.

*dev indicates that the functionality is only for development and testing.

#### kubernetes

Enables provider-specific options for Kubernetes support.

##### AWS Kubernetes options

- `cloud_provider`: (Optional) Enable cloud provider support for Kubernetes. Defaults to `false`.
- `ebs_persistent_volumes`: (Optional) Enable persistent volume support with EBS volumes. Defaults to `false`.
- `efs_persistent_volumes`: (Optional) Enable persistent volume support with EFS. Defaults to `false`.
- `load_balancer`: (Optional) Enable Kubernetes pods to instantiate a load-balancer. Defaults to `false`.
- `nfs_storage`: (Optional) Install additional packages on the node for NFS support. Defaults to `false`.
- `lifecycle`: (Optional) Defaults to `owned`.
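A sketch enabling a few of these options on AWS; which combination you need depends on your workloads, and the values below are illustrative:

```yaml
cluster:
  kubernetes:
    cloud_provider: true          # AWS cloud provider integration
    ebs_persistent_volumes: true  # persistent volumes backed by EBS
    load_balancer: true           # allow pods to request a load balancer
```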
#### registry

Customizes the registry from which the installation should pull images. By default, Docker Hub and credentials to access Docker Hub are used.

```yaml
cluster:
  registry:
    password: ${base64decode("TVJYeTNDQWpTSk5HTW1ZRzJQcE1kM0tVRlQ=")}
    url: https://index.docker.io/v1/
    username: user
```

The following optional elements can be specified:

- `username`: The username for logging in to the registry on each node. Default value is the current docker user.
- `url`: The registry to use for pulling Docker images. Defaults to "https://index.docker.io/v1/".
- `password`: The password for logging in to the registry on each node. Default value is the current docker user's password, base64 encoded and wrapped in a call to `base64decode`.

#### ucp

- `version`: Specifies the version of UCP to install. Defaults to `docker/ucp:3.1.6`.
- `username`: Specifies the username of the first user to create in UCP. Defaults to `admin`.
- `password`: Specifies the password of the first user to create in UCP. Defaults to `dockerdocker`.
- `ca`: Specifies a path to a root CA public certificate.
- `key`: Specifies a path to a TLS private key.
- `cert`: Specifies a path to a public key certificate.
- `install_options`: Lists additional [UCP install options](https://docs.docker.com/reference/ucp/3.1/cli/install/).
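For example, a sketch that mirrors the `ucp` settings from the sample Cluster file at the top of this page:

```yaml
cluster:
  ucp:
    username: admin
    password: ${ucp_password}   # a prompt-type variable, as in the sample above
    version: docker/ucp:3.1.6
```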
##### Additional UCP configuration options

Docker Cluster also accepts all UCP configuration options and creates the initial UCP config on
installation. The following list provides supported options:

- `anonymize_tracking`: Anonymizes analytic data. Specify 'true' to hide the license ID. Defaults to 'false'.
- `audit_level`: Specifies the audit logging level. Leave empty to disable audit logs (default). Other valid values are 'metadata' and 'request'.
- `auto_refresh`: Specify 'true' to enable attempted automatic license renewal when the license nears expiration. If disabled, you must manually upload a renewed license after expiration. Defaults to 'true'.
- `azure_ip_count`: Sets the IP count for the Azure allocator to allocate IPs per Azure virtual machine.
- `backend`: Specifies the name of the authorization backend to use, either 'managed' or 'ldap'. Defaults to 'managed'.
- `calico_mtu`: Specifies the MTU (maximum transmission unit) size for the Calico plugin. Defaults to '1480'.
- `cloud_provider`: Specifies the cloud provider for the Kubernetes cluster.
- `cluster_label`: Specifies a label to be included with analytics.
- `cni_installer_url`: Specifies the URL of a Kubernetes YAML file to be used for installing a CNI plugin. Only applies during initial installation. If empty, the default CNI plugin is used.
- `controller_port`: Configures the port that the 'ucp-controller' listens to. Defaults to '443'.
- `custom_header_name`: Specifies the name of the custom header with 'name' = '*X-Custom-Header-Name*'.
- `custom_header_value`: Specifies the value of the custom header with 'value' = '*Custom Header Value*'.
- `default_new_user_role`: Specifies the role that new users get for their private resource sets. Values are 'admin', 'viewonly', 'scheduler', 'restrictedcontrol', or 'fullcontrol'. Defaults to 'restrictedcontrol'.
- `default_node_orchestrator`: Specifies the type of orchestrator to use for new nodes that are joined to the cluster. Can be 'swarm' or 'kubernetes'. Defaults to 'swarm'.
- `disable_tracking`: Specify 'true' to disable analytics of API call information. Defaults to 'false'.
- `disable_usageinfo`: Specify 'true' to disable analytics of usage information. Defaults to 'false'.
- `dns`: Specifies a CSV list of IP addresses to add as nameservers.
- `dns_opt`: Specifies a CSV list of options used by DNS resolvers.
- `dns_search`: Specifies a CSV list of domain names to search when a bare unqualified hostname is used inside of a container.
- `enable_admin_ucp_scheduling`: Specify 'true' to allow admins to schedule containers on manager nodes. Defaults to 'false'.
- `external_service_lb`: Specifies an optional external load balancer for default links to services with exposed ports in the web interface.
- `host_address`: Specifies the address for connecting to the DTR instance tied to this UCP cluster.
- `log_host`: Specifies a remote syslog server to send UCP controller logs to. If omitted, controller logs are sent through the default docker daemon logging driver from the 'ucp-controller' container.
- `idpMetadataURL`: Specifies the Identity Provider Metadata URL.
- `image_repository`: Specifies the repository to use for UCP images.
- `install_args`: Specifies additional arguments to pass to the UCP installer.
- `ipip_mtu`: Specifies the IPIP MTU size for the Calico IPIP tunnel interface.
- `kube_apiserver_port`: Configures the port to which the Kubernetes API server listens.
- `kv_snapshot_count`: Sets the key-value store snapshot count setting. Defaults to '20000'.
- `kv_timeout`: Sets the key-value store timeout setting, in milliseconds. Defaults to '5000'.
- `lifetime_minutes`: Specifies the initial session lifetime, in minutes. Defaults to `4320`, which is 72 hours.
- `local_volume_collection_mapping`: Stores data about collections for volumes in UCP's local KV store instead of on the volume labels. This is used for enforcing access control on volumes.
- `log_level`: Specifies the logging level for UCP components. Values are [syslog priority levels](https://linux.die.net/man/5/syslog.conf): 'debug', 'info', 'notice', 'warning', 'err', 'crit', 'alert', and 'emerg'.
- `managedPasswordDisabled`: Indicates whether the managed password is disabled. Defaults to false.
- `managedPasswordFallbackUser`: The fallback user when the managed password authentication is disabled. Defaults to "".
- `manager_kube_reserved_resources`: Reserves resources for Docker UCP and Kubernetes components that are running on manager nodes.
- `metrics_disk_usage_interval`: Specifies the interval for how frequently storage metrics are gathered. This operation can impact performance when large volumes are present.
- `metrics_retention_time`: Adjusts the metrics retention time.
- `metrics_scrape_interval`: Specifies the interval for how frequently managers gather metrics from nodes in the cluster.
- `nodeport_range`: Specifies the port range in which Kubernetes services of type NodePort can be exposed. Defaults to '32768-35535'.
- `per_user_limit`: Specifies the maximum number of sessions that a user can have active simultaneously. If the creation of a new session would put a user over this limit, the least recently used session is deleted. A value of zero disables limiting the number of sessions that users can have. Defaults to `5`.
- `pod_cidr`: Specifies the subnet pool from which the CNI IPAM plugin should allocate Pod IPs.
- `profiling_enabled`: Specify 'true' to enable specialized debugging endpoints for profiling UCP performance. Defaults to 'false'.
- `log_protocol`: Specifies the protocol to use for remote logging. Values are 'tcp' and 'udp'. Defaults to 'tcp'.
- `renewal_threshold_minutes`: Specifies the length of time, in minutes, before the expiration of a session. When used, a session is extended by the current configured lifetime from that point in time. A zero value disables session extension. Defaults to `1440`, which is 24 hours.
- `require_content_trust`: Specify 'true' to require that images are signed by content trust. Defaults to 'false'.
- `require_signature_from`: Specifies a CSV list of users or teams required to sign images.
- `rethinkdb_cache_size`: Sets the size of the cache used by UCP's RethinkDB servers. Defaults to 1GB, but leaving this field empty or specifying `auto` instructs RethinkDB to determine a cache size automatically.
- `rootCerts`: Defaults to empty.
- `samlEnabled`: Indicates if SAML is used.
- `samlLoginText`: Specifies the customized SAML login button text.
- `service_id`: Specifies the DTR instance's OpenID Connect Client ID, as registered with the Docker authentication provider.
- `spHost`: Specifies the Service Provider Host.
- `storage_driver`: Specifies the UCP storage driver to install.
- `support_dump_include_audit_logs`: When set to `true`, support dumps include audit logs in the logs of the 'ucp-controller' container of each manager node. Defaults to 'false'.
- `swarm_port`: Configures the port that the 'ucp-swarm-manager' listens to. Defaults to '2376'.
- `swarm_strategy`: Configures placement strategy for container scheduling. This doesn't affect swarm-mode services. Values are 'spread', 'binpack', and 'random'.
- `tlsSkipVerify`: Specifies whether to skip TLS verification for IdP metadata.
- `unmanaged_cni`: Defaults to 'false'.
- `worker_kube_reserved_resources`: Reserves resources for Docker UCP and Kubernetes components that are running on worker nodes.
- `custom_kube_api_server_flags`: Specifies the configuration options for the Kubernetes API server. (dev)
- `custom_kube_controller_manager_flags`: Specifies the configuration options for the Kubernetes controller manager. (dev)
- `custom_kube_scheduler_flags`: Specifies the configuration options for the Kubernetes scheduler. (dev)
- `custom_kubelet_flags`: Specifies the configuration options for Kubelets. (dev)

*dev indicates that the functionality is only for development and testing. Arbitrary Kubernetes configuration parameters are not tested and supported under the Docker Enterprise Software Support Agreement.
### provider

Defines where the cluster's resources are provisioned, as well as provider-specific configuration such as tags.

{% raw %}
```yaml
provider:
  acme:
    email: ${email}
    server_url: https://acme-staging-v02.api.letsencrypt.org/directory
  aws:
    region: ${region}
```
{% endraw %}

#### acme

The Automated Certificate Management Environment (ACME) is an evolving standard for the automation of a domain-validated certificate authority. Docker Cluster uses the ACME provider to create SSL certificates that are signed by [Let's Encrypt](https://letsencrypt.org/).

Configuration for the ACME provider supports arguments that closely align with the [Terraform ACME provider](https://www.terraform.io/docs/providers/acme/index.html).

The following elements can be specified:

- `email`: (Required) The email to associate the certificates with.
- `server_url`: (Optional) The URL to the ACME endpoint's directory. Default is "https://acme-v02.api.letsencrypt.org/directory".

#### aws

Configuration for the AWS provider supports arguments that closely align with the [Terraform AWS provider](https://www.terraform.io/docs/providers/aws/index.html).

```yaml
aws:
  region: "us-east-1"
  tags:
    Owner: "Infra"
    Environment: "Test"
```

The following elements can be specified:

- `region` - (Required) This is the AWS region. It can be sourced from the `AWS_DEFAULT_REGION` environment variable, or via a shared credentials file if `profile` is specified.
- `tags` - (Optional) Additional name-value pairs to assign to every resource (which supports tagging) in the cluster.
- `access_key` - (Required) This is the AWS access key. It can be sourced from the `AWS_ACCESS_KEY_ID` environment variable, or via a shared credentials file if `profile` is specified.
- `secret_key` - (Required) This is the AWS secret key. It can be sourced from the `AWS_SECRET_ACCESS_KEY` environment variable, or via a shared credentials file if `profile` is specified.
- `profile` - (Optional) This is the AWS profile name as set in the shared credentials file.
- `assume_role` - (Optional) An `assume_role` block. Only one `assume_role` block can be in the configuration.
- `endpoints` - (Optional) Configuration block for customizing service endpoints. See the [Custom Service Endpoints Guide](https://www.terraform.io/docs/providers/aws/guides/custom-service-endpoints.html) for more information about connecting to alternate AWS endpoints or AWS compatible solutions.
- `shared_credentials_file` - (Optional) This is the path to the shared credentials file. If this is not set and a profile is specified, `~/.aws/credentials` is used.
- `token` - (Optional) Session token for validating temporary credentials. Typically provided after successful identity federation or Multi-Factor Authentication (MFA) login. With MFA login, this is the session token provided afterwards, not the 6 digit MFA code used to get temporary credentials. It can also be sourced from the `AWS_SESSION_TOKEN` environment variable.
- `max_retries` - (Optional) This is the maximum number of times an API call is retried, in the case where requests are being throttled or experiencing transient failures. The delay between the subsequent API calls increases exponentially.
- `allowed_account_ids` - (Optional) List of allowed (whitelisted) AWS account IDs to prevent you from mistakenly using an incorrect one (and potentially destroying a live environment). Conflicts with `forbidden_account_ids`.
- `forbidden_account_ids` - (Optional) List of forbidden (blacklisted) AWS account IDs to prevent you from mistakenly using a wrong one (and potentially destroying a live environment). Conflicts with `allowed_account_ids`.
- `insecure` - (Optional) Explicitly allows the provider to perform "insecure" SSL requests. If omitted, defaults to `false`.
- `skip_credentials_validation` - (Optional) Skips the credentials validation via the STS API. Useful for AWS API implementations that do not have STS available or implemented.
- `skip_get_ec2_platforms` - (Optional) Skips getting the supported EC2 platforms. Used by users that don't have `ec2:DescribeAccountAttributes` permissions.
- `skip_region_validation` - (Optional) Skips validation of the provided region name. Useful for AWS-like implementations that use their own region names or to bypass the validation for regions that aren't publicly available yet.
### resource

Resources to provision for a cluster. Resources are organized as shown in the following example:

```yaml
resource:
  type:
    name:
      parameters
```

For a given `type`, there may be one or more named resources to provision.

For a given `name`, a resource may have one or more parameters.

#### aws_instance

```yaml
resource:
  aws_instance:
    workers:
      instance_type: t2.xlarge
      quantity: 3
      os: Ubuntu 16.04
```

- `quantity`: (Required) The number of instances to create.
- `os`: An alias that is expanded by `docker cluster` to the AMI owner and AMI name to install.
  The following aliases are supported by `docker cluster`:
  - `CentOS 7`
  - `RHEL 7.1`
  - `RHEL 7.2`
  - `RHEL 7.3`
  - `RHEL 7.4`
  - `RHEL 7.5`
  - `RHEL 7.6`
  - `Oracle Linux 7.3`
  - `Oracle Linux 7.4`
  - `Oracle Linux 7.5`
  - `SLES 12.2`
  - `SLES 12.3`
  - `SLES 15`
  - `Ubuntu 14.04`
  - `Ubuntu 16.04`
  - `Ubuntu 18.04`
  - `Windows Server 2016`
  - `Windows Server 1709`
  - `Windows Server 1803`
  - `Windows Server 2019`

  > Note: Make sure the OS you select is [compatible](https://success.docker.com/article/compatibility-matrix)
  > with the product you're installing. Docker Cluster validates the support during installation.
- `instance_type`: Specifies the [AWS instance type](https://aws.amazon.com/ec2/instance-types/) to provision.
- `key_name`: By default, Docker Cluster creates an [AWS EC2 Key Pair](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) and registers it with AWS for the cluster. To use an existing AWS EC2 Key Pair, set this value to the name of the AWS EC2 Key Pair (see the sketch after this list).
- `ssh_private_key`: By default, Docker Cluster creates an [AWS EC2 Key Pair](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) and registers it with AWS for the cluster. To use an existing AWS EC2 Key Pair, set this value to the path of the private SSH key.
- `username`: Specifies the username for the node with administrative privileges. By default, the `os` option sets this to the well-known username for the AMIs (which can vary by distribution):
  - Amazon Linux 2 is `ec2-user`.
  - CentOS is `centos`.
  - Oracle Linux is `ec2-user`.
  - RedHat is `ec2-user`.
  - SLES is `ec2-user`.
  - Ubuntu is `ubuntu`.
  - Windows is `Administrator`.
- `password`: This value is only used by Windows nodes. By default, Windows nodes have a random password generated.
- `ami`: Specifies a custom AMI, or one that's not currently available as an OS alias. Specify either the `id` or the `owner` and `name` to query for the latest.
  - `id`: Specifies the ID of the AMI. For example, `ami-0510c89f1a2691cf2`.
  - `owner`: Specifies the AWS account ID of the image owner. For example, `099720109477`.
  - `name`: Specifies the name of the AMI that was provided during image creation. For example, `ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-*`.
- `platform`: Specify `windows` for Windows instances.
- `tags`: (Optional) Specifies additional name-value pairs to assign to every instance.
- `swarm_labels`: (Optional) Specifies additional key-value pairs that represent swarm labels to apply to every node.
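For instance, a minimal sketch that reuses an existing EC2 Key Pair for the manager nodes (the key name and private key path are illustrative):

```yaml
resource:
  aws_instance:
    managers:
      instance_type: t2.xlarge
      os: Ubuntu 16.04
      quantity: 3
      key_name: my-existing-keypair                      # illustrative key pair name
      ssh_private_key: ~/.ssh/my-existing-keypair.pem    # illustrative path to its private key
```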
#### aws_spot_instance_request

Provisions a spot instance request in AWS to dramatically reduce the cost of instances. Spot instance
availability is not guaranteed. Therefore, it is recommended to use `aws_spot_instance_request` for
additional worker nodes and not for mission-critical nodes like managers and registry.

```yaml
resource:
  aws_spot_instance_request:
    workers:
      instance_type: t2.xlarge
      price: 0.25
      os: Ubuntu 16.04
      quantity: 3
```

Supports the same set of parameters as [aws_instance](#aws_instance), with the addition of an optional price to limit the maximum bid for a spot instance.

- `price`: (Optional) Specifies a maximum price to bid on the spot instance.

#### aws_lb

Provisions an AWS Load Balancer.

```yaml
resource:
  aws_lb:
    ucp:
      domain: "example.com"
      instances:
        - managers
      ports:
        - 443:443
        - 6443:6443
```

The following options are supported:

- `instances`: (Required) Specifies a list of `aws_instance` and `aws_spot_instance_request` names to attach to the load balancer.
- `ports`: (Required) Specifies a list of `listening port[/protocol]:target port[/protocol]` mappings to define how the load balancer should route traffic. By default, the protocol is `tcp` (see the sketch after this list).
- `domain`: Specifies the domain in which to create DNS records for this load balancer. The record is named the same as this resource, appended by the domain. For example, if the resource is `ucp` and the domain is `example.com`, the `A` record is `ucp.example.com`.
- `internal`: (Optional) Defaults to `false`.
- `type`: (Optional) Defaults to `network`.
- `enable_cross_zone_load_balancing`: (Optional) Defaults to `false`.
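As referenced in the `ports` option, a sketch with explicit protocol mappings following the documented `listening port[/protocol]:target port[/protocol]` grammar (the domain and port values are illustrative):

```yaml
resource:
  aws_lb:
    apps:
      domain: apps.example.com   # illustrative
      instances:
        - workers
      ports:
        - 80/tcp:8080/tcp        # listen on 80, forward to 8080, both tcp
        - 443/tcp:8443/tcp       # listen on 443, forward to 8443, both tcp
```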

#### aws_route53_zone

Creates a subdomain in an AWS Route 53 zone. The following example creates a public zone for `testing.example.com`:

```yaml
resource:
  aws_route53_zone:
    dns:
      domain: example.com
      subdomain: testing
```

The following elements are required:

- `domain`: (Required) Specifies the name of the hosted zone.
- `subdomain`: (Required) Specifies the subdomain to create in the `domain` hosted zone.

### variable

Docker Cluster supports basic parameterization. The variable section defines a map of keys and values. A key can have a sub-key named `type`, which changes the behavior of the variable.

```yaml
variable:
  region: "us-east-1"
  password:
    type: prompt
```

Variables are referenced in the cluster definition as `${variable_name}`. For example, `${region}` is substituted as `us-east-1` throughout the cluster definition.

The type defines how the variable behaves. This is currently limited in scope to:

- `prompt`: Requests the value from the user and does not echo characters as the value is entered.
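With a `prompt` variable like `password` above, the CLI requests the value at run time; a sketch of the interaction, mirroring the quickstart output earlier in this document:

```
$ docker cluster create --file cluster.yml --name quickstart
Please provide a value for password
```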
|
|
@ -1,527 +0,0 @@
|
|||
---
|
||||
description: Cluster file reference and guidelines
|
||||
keywords: documentation, docs, docker, cluster, infrastructure, automation
|
||||
title: Cluster file version 1 reference
|
||||
toc_max: 5
|
||||
toc_min: 1
|
||||
---
|
||||
|
||||
This topic describes version 1 of the Cluster file format.
|
||||
|
||||
## Cluster file structure and examples
|
||||
```
|
||||
<div class="panel panel-default">
|
||||
<div class="panel-heading collapsed" data-toggle="collapse" data-target="#collapseSample1" style="cursor: pointer">
|
||||
Example Cluster file version 1
|
||||
<i class="chevron fa fa-fw"></i></div>
|
||||
<div class="collapse block" id="collapseSample1">
|
||||
<pre><code>
|
||||
variable:
|
||||
domain: "YOUR DOMAIN, e.g. docker.com"
|
||||
subdomain: "A SUBDOMAIN, e.g. cluster"
|
||||
region: "THE AWS REGION TO DEPLOY, e.g. us-east-1"
|
||||
email: "YOUR.EMAIL@COMPANY.COM"
|
||||
ucp_password:
|
||||
type: prompt
|
||||
provider:
|
||||
acme:
|
||||
email: ${email}
|
||||
server_url: https://acme-staging-v02.api.letsencrypt.org/directory
|
||||
aws:
|
||||
region: ${region}
|
||||
cluster:
|
||||
dtr:
|
||||
version: docker/dtr:2.6.5
|
||||
engine:
|
||||
version: ee-stable-18.09.5
|
||||
ucp:
|
||||
username: admin
|
||||
password: ${ucp_password}
|
||||
version: docker/ucp:3.1.6
|
||||
resource:
|
||||
aws_instance:
|
||||
managers:
|
||||
instance_type: t2.xlarge
|
||||
os: Ubuntu 16.04
|
||||
quantity: 3
|
||||
registry:
|
||||
instance_type: t2.xlarge
|
||||
os: Ubuntu 16.04
|
||||
quantity: 3
|
||||
workers:
|
||||
instance_type: t2.xlarge
|
||||
os: Ubuntu 16.04
|
||||
quantity: 3
|
||||
aws_lb:
|
||||
apps:
|
||||
domain: ${subdomain}.${domain}
|
||||
instances:
|
||||
- workers
|
||||
ports:
|
||||
- 80:8080
|
||||
- 443:8443
|
||||
dtr:
|
||||
domain: ${subdomain}.${domain}
|
||||
instances:
|
||||
- registry
|
||||
ports:
|
||||
- 443:443
|
||||
ucp:
|
||||
domain: ${subdomain}.${domain}
|
||||
instances:
|
||||
- managers
|
||||
ports:
|
||||
- 443:443
|
||||
- 6443:6443
|
||||
aws_route53_zone:
|
||||
dns:
|
||||
domain: ${domain}
|
||||
subdomain: ${subdomain}
|
||||
</code></pre>
|
||||
</div>
|
||||
</div>
|
||||
```
|
||||
The topics on this reference page are organized alphabetically by top-level keys
|
||||
to reflect the structure of the Cluster file. Top-level keys that define
|
||||
a section in the configuration file, such as `cluster`, `provider`, and `resource`,
|
||||
are listed with the options that support them as sub-topics. This information
|
||||
maps to the indent structure of the Cluster file.
|
||||
|
||||
### cluster
|
||||
Specifies components to install and configure for a cluster.
|
||||
|
||||
The following components are available:
|
||||
|
||||
- `subscription`: (Optional) A string value representing the subscription ID.
|
||||
- `license`: (Optional) A path to the cluster's license file.
|
||||
- `cloudstor`: (Optional) Configuration options for Docker CloudStor
|
||||
- `dtr`: (Optional) Configuration options for Docker Trusted Registry
|
||||
- `engine`: (Optional) Configuration options for Docker Engine
|
||||
- `ucp`: (Optional) Configuration options for Docker Universal Control Plane
|
||||
- `registry`: (Optional) Configuration options for authenticating nodes with a registry to pull Docker images.
|
||||
|
||||
#### cloudstor
|
||||
Customizes the installation of Docker Cloudstor.
|
||||
|
||||
- `version`: (Optional) The version of Cloudstor to install. Default is `1.0`
|
||||
- `use_efs`: (Optional) Specifies whether an Elastic File System should be provisioned. Defaults to `false`.
|
||||
|
||||
#### dtr
|
||||
Customizes the installation of Docker Trusted Registry.
|
||||
```yaml
|
||||
cluster:
|
||||
dtr:
|
||||
version: "docker/dtr:2.6.5"
|
||||
install_options:
|
||||
- "--debug"
|
||||
- "--enable-pprof"
|
||||
```
|
||||
|
||||
The following optional elements can be specified:
|
||||
|
||||
- `version`: (Optional) The version of DTR to install. Defaults to `docker/dtr:2.6.5`.
|
||||
- `ca`: (Optional) The path to a root CA public certificate.
|
||||
- `key`: (Optional) The path to a TLS private key.
|
||||
- `cert`: (Optional) The path to a public key certificate.
|
||||
- `install_options`: (Optional) Additional [DTR install options](https://docs.docker.com/reference/dtr/2.6/cli/install/)
|
||||
|
||||
#### engine
|
||||
Customizes the installation of Docker Enterprise Engine.
|
||||
```yaml
|
||||
cluster:
|
||||
engine:
|
||||
channel: "stable"
|
||||
edition: "ee"
|
||||
version: "19.03"
|
||||
```
|
||||
|
||||
The following optional elements can be specified:
|
||||
- `version`: (Optional) The version of the Docker Engine to install. Defaults to `19.03`.
|
||||
- `edition`: (Optional) The family of Docker Engine to install. Defaults to `ee` for Enterprise edition.
|
||||
- `channel`: (Optional) The channel on the repository to pull updated packages. Defaults to `stable`.
|
||||
- `url`: (Optional) Defaults to "https://storebits.docker.com/ee".
|
||||
- `storage_driver`: (Optional) The storage driver to use for the storage volume. Default
|
||||
value is dependent on the operating system.
|
||||
- Amazon Linux 2 is `overlay2`.
|
||||
- Centos is `overlay2`.
|
||||
- Oracle Linux is `overlay2`.
|
||||
- RedHat is `overlay2`.
|
||||
- SLES is `btrfs`.
|
||||
- Ubuntu is `overlay2`.
|
||||
- `storage_fstype`: (Optional) File system to use for storage volume. Default value is dependent on the operating system.
|
||||
- Amazon Linux 2 is `xfs`.
|
||||
- Centos is `xfs`.
|
||||
- Oracle Linux is `xfs`.
|
||||
- RedHat is `xfs`.
|
||||
- SLES is `btrfs`.
|
||||
- Ubuntu is `ext4`.
|
||||
- `storage_volume`: (Optional) Docker storage volume path for `/var/lib/docker` Default value is provider dependent.
|
||||
- AWS
|
||||
- non-NVME is `/dev/xvdb`.
|
||||
- NVME disks are one of `/dev/nvme[0-26]n1`.
|
||||
- Azure is `/dev/disk/azure/scsi1/lun0`.
|
||||
- `daemon`: (Optional) Provides docker daemon options. Defaults to "".
|
||||
- `ca`: (dev) Defaults to "".
|
||||
- `key`: (dev) Defaults to "".
|
||||
- `enable_remote_tcp`: (dev) Enables direct access to docker engine. Defaults to `false`.
|
||||
|
||||
*dev indicates that the functionality is only for development and testing.
|
||||
|
||||
#### kubernetes
|
||||
Enables provider-specific options for Kubernetes support.
|
||||
|
||||
##### AWS Kubernetes options
|
||||
|
||||
- `cloud_provider`: (Optional)Enable cloud provider support for Kubernetes. Defaults to `false`.
|
||||
- `ebs_persistent_volumes`: (Optional) Enable persistent volume support with EBS volumes. Defaults to `false`.
|
||||
- `efs_persistent_volumes`: (Optional) Enable persistent volume support with EFS. Defaults to `false`.
|
||||
- `load_balancer`: (Optional) Enable Kubernetes pods to instantiate a load-balancer. Defaults to `false`.
|
||||
- `nfs_storage`: (Optional) Install additional packages on node for NFS support. Defaults to `false`.
|
||||
- `lifecycle`: (Optional) Defaults to `owned`.
|
||||
|
||||
#### registry
|
||||
Customizes the registry from which the installation should pull images. By default, Docker Hub and credentials to access Docker Hub are used.
|
||||
|
||||
```yaml
|
||||
cluster:
|
||||
registry:
|
||||
password: ${base64decode("TVJYeTNDQWpTSk5HTW1ZRzJQcE1kM0tVRlQ=")}
|
||||
url: https://index.docker.io/v1/
|
||||
username: user
|
||||
```
|
||||
|
||||
The following optional elements can be specified:
|
||||
- `username`: The username for logging in to the registry on each node. Default value is the current docker user.
|
||||
- `url`: The registry to use for pulling Docker images. Defaults to "https://index.docker.io/v1/".
|
||||
- `password`: The password for logging in to the registry on each node. Default value is the current docker user's password base64 encoded and wrapped in a call to base64decode.
|
||||
|
||||
#### ucp
|
||||
|
||||
- `version`: Specifies the version of UCP to install. Defaults to `docker/ucp:3.1.6`.
|
||||
- `username`: Specifies the username of the first user to create in UCP. Defaults to `admin`.
|
||||
- `password`: Specifies the password of the first user to create in UCP. Defaults to `dockerdocker`.
|
||||
- `ca`: Specifies a path to a root CA public certificate.
|
||||
- `key`: Specifies a path to a TLS private key.
|
||||
- `cert`: Specifies a path to a public key certificate.
|
||||
- `install_options`: Lists additional [UCP install options](https://docs.docker.com/reference/ucp/3.1/cli/install/)
|
||||
|
||||
##### Additional UCP configuration options:
|
||||
Docker Cluster also accepts all UCP configuration options and creates the initial UCP config on
|
||||
installation. The following list provides supported options:
|
||||
- `anonymize_tracking`: Anonymizes analytic data. Specify 'true' to hide the license ID. Defaults to 'false'.
|
||||
- `audit_level`: Specifies the audit logging level. Leave empty for disabling audit logs (default).
|
||||
Other valid values are 'metadata' and 'request'.
|
||||
- `auto_refresh`: Specify 'true' to enable attempted automatic license renewal when the license
|
||||
nears expiration. If disabled, you must manually upload renewed license after expiration. Defaults to 'true'.
|
||||
- `azure_ip_count`: Sets the IP count for azure allocator to allocate IPs per Azure virtual machine.
|
||||
- `backend`: Specifie the name of the authorization backend to use, either 'managed' or 'ldap'. Defaults to 'managed'.
|
||||
- `calico_mtu`: Specifies the MTU (maximum transmission unit) size for the Calico plugin. Defaults to '1480'.
|
||||
- `cloud_provider`: Specifies the cloud provider for the kubernetes cluster.
|
||||
- `cluster_label`: Specifies a label to be included with analytics/.
|
||||
- `cni_installer_url`: Specifies the URL of a Kubernetes YAML file to be used for installing a CNI plugin.
|
||||
Only applies during initial installation. If empty, the default CNI plugin is used.
|
||||
- `controller_port`: Configures the port that the 'ucp-controller' listens to. Defaults to '443'.
|
||||
- `custom_header_name`: Specifies the name of the custom header with 'name' = '*X-Custom-Header-Name*'.
|
||||
- `custom_header_value`: Specifies the value of the custom header with 'value' = '*Custom Header Value*'.
|
||||
- `default_new_user_role`: Specifies the role that new users get for their private resource sets.
|
||||
Values are 'admin', 'viewonly', 'scheduler', 'restrictedcontrol', or 'fullcontrol'. Defaults to 'restrictedcontrol'.
|
||||
- `default_node_orchestrator`: Specifies the type of orchestrator to use for new nodes that are
|
||||
joined to the cluster. Can be 'swarm' or 'kubernetes'. Defaults to 'swarm'.
|
||||
- `disable_tracking`: Specify 'true' to disable analytics of API call information. Defaults to 'false'.
|
||||
- `disable_usageinfo`: Specify 'true' to disable analytics of usage information. Defaults to 'false'.
|
||||
- `dns`: Specifies a CSV list of IP addresses to add as nameservers.
|
||||
- `dns_opt`: Specifies a CSV list of options used by DNS resolvers.
|
||||
- `dns_search`: Specifies a CSV list of domain names to search when a bare unqualified hostname is
|
||||
used inside of a container.
|
||||
- `enable_admin_ucp_scheduling`: Specify 'true' to allow admins to schedule on containers on manager nodes.
|
||||
Defaults to 'false'.
|
||||
- `external_service_lb`: Specifies an optional external load balancer for default links to services with
|
||||
exposed ports in the web interface.
|
||||
- `host_address`: Specifies the address for connecting to the DTR instance tied to this UCP cluster.
|
||||
- `log_host`: Specifies a remote syslog server to send UCP controller logs to. If omitted, controller
|
||||
logs are sent through the default docker daemon logging driver from the 'ucp-controller' container.
|
||||
- `idpMetadataURL`: Specifies the Identity Provider Metadata URL.
|
||||
- `image_repository`: Specifies the repository to use for UCP images.
|
||||
- `install_args`: Specifies additional arguments to pass to the UCP installer.
|
||||
- `ipip_mtu`: Specifies the IPIP MTU size for the calico IPIP tunnel interface.
|
||||
- `kube_apiserver_port`: Configures the port to which the Kubernetes API server listens.
|
||||
- `kv_snapshot_count`: Sets the key-value store snapshot count setting. Defaults to '20000'.
|
||||
- `kv_timeout`: Sets the key-value store timeout setting, in milliseconds. Defaults to '5000'.
|
||||
- `lifetime_minutes`: Specifies the initial session lifetime, in minutes. Defaults to `4320`, which is 72 hours.
|
||||
- `local_volume_collection_mapping`: Stores data about collections for volumes in UCP's local KV store
|
||||
instead of on the volume labels. This is used for enforcing access control on volumes.
|
||||
- `log_level`: Specifies the logging level for UCP components. Values are syslog priority
|
||||
levels (https://linux.die.net/man/5/syslog.conf): 'debug', 'info', 'notice', 'warning', 'err', 'crit', 'alert',
|
||||
and 'emerg'.
|
||||
- `managedPasswordDisabled`: Indicates if managed password is disabled. Defaults to false.
|
||||
- `managedPasswordFallbackUser`: The fallback user when the managed password authentication is disabled. Defaults to "".
|
||||
- `manager_kube_reserved_resources`: Specifies reserve resources for Docker UCP and Kubernetes components
|
||||
that are running on manager nodes.
|
||||
- `metrics_disk_usage_interval`: Specifies the interval for how frequently storage metrics are gathered.
|
||||
This operation can impact performance when large volumes are present.
|
||||
- `metrics_retention_time`: Adjusts the metrics retention time.
|
||||
- `metrics_scrape_interval`: Specifies the interval for how frequently managers gather metrics from nodes in the cluster.
|
||||
- `nodeport_range`: Specifies the port range that for Kubernetes services of type NodePort can be exposed in.
|
||||
Defaults to '32768-35535'.
|
||||
- `per_user_limit`: Specifies the maximum number of sessions that a user can have active simultaneously. If
|
||||
the creation of a new session would put a user over this limit, the least recently used session is deleted.
|
||||
A value of zero disables limiting the number of sessions that users can have. Defaults to `5`.
|
||||
- `pod_cidr`: Specifies the subnet pool from which the IP for the Pod should be allocated from the CNI ipam plugin.
|
||||
- `profiling_enabled`: Specify 'true' to enable specialized debugging endpoints for profiling UCP performance.
|
||||
Defaults to 'false'.
|
||||
- `log_protocol`: Specifies the protocol to use for remote logging. Values are 'tcp' and 'udp'. Defaults to 'tcp'.
|
||||
- `renewal_threshold_minutes`: Specifies the length of time, in minutes, before the expiration of a
|
||||
session. When used, a session is extended by the current configured lifetime from that point in time. A zero value disables session extension. Defaults to `1440`, which is 24 hours.
|
||||
- `require_content_trust`: Specify 'true' to require images be signed by content trust. Defaults to 'false'.
|
||||
- `require_signature_from`: Specifies a csv list of users or teams required to sign images.
|
||||
- `rethinkdb_cache_size`: Sets the size of the cache used by UCP's RethinkDB servers. TDefaults to 1GB,
|
||||
but leaving this field empty or specifying `auto` instructs RethinkDB to determine a cache size automatically.
|
||||
- `rootCerts`: Defaults to empty.
|
||||
- `samlEnabled`: Indicates if saml is used.
|
||||
- `samlLoginText`: Specifies the customized SAML login button text.
|
||||
- `service_id`: Specifies the DTR instance's OpenID Connect Client ID, as registered with the Docker
|
||||
authentication provider.
|
||||
- `spHost`: Specifies the Service Provider Host.
|
||||
- `storage_driver`: Specifies the UCP storage driver to install.
|
||||
- `support_dump_include_audit_logs`: When set to `true`, support dumps include audit logs in the logs
|
||||
of the 'ucp-controller' container of each manager node. Defaults to 'false'.
|
||||
- `swarm_port`: Configures the port that the 'ucp-swarm-manager' listens to. Defaults to '2376'.
|
||||
- `swarm_strategy`: Configures placement strategy for container scheduling.
|
||||
This doesn't affect swarm-mode services. Values are 'spread', 'binpack', and 'random'.
|
||||
- `tlsSkipVerify`: Specifies TLS Skip verify for IdP Metadata.
|
||||
- `unmanaged_cni`: Defaults to 'false'.
|
||||
- `worker_kube_reserved_resources`: Reserves resources for Docker UCP and Kubernetes components
|
||||
that are running on worker nodes.
|
||||
- `custom_kube_api_server_flags`: Specifies the configuration options for the Kubernetes API server. (dev)
|
||||
- `custom_kube_controller_manager_flags`: Specifies the configuration options for the Kubernetes controller manager. (dev)
|
||||
- `custom_kube_scheduler_flags`: Specifies the configuration options for the Kubernetes scheduler. (dev)
|
||||
- `custom_kubelet_flags`: Specifies the configuration options for Kubelets. (dev)
|
||||
|
||||
*dev indicates that the functionality is only for development and testing. Arbitrary Kubernetes configuration parameters are not tested and supported under the Docker Enterprise Software Support Agreement.
|
||||
|
||||
### provider
|
||||
Defines where the cluster's resources are provisioned, as well as provider-specific configuration such as tags.
|
||||
|
||||
```yaml
|
||||
provider:
|
||||
acme:
|
||||
email: ${email}
|
||||
server_url: https://acme-staging-v02.api.letsencrypt.org/directory
|
||||
aws:
|
||||
region: ${region}
|
||||
```
|
||||
|
||||
#### acme
|
||||
The Automated Certificate Management Environment (ACME) is an evolving standard for the automation of a domain-validated certificate authority. Docker Cluster uses the ACME provider to create SSL certificates that are signed by [Let's Encrypt](https://letsencrypt.org/).
|
||||
|
||||
The ACME provider Configuration for the ACME provider supports arguments that closely align with the [Terraform ACME provider](https://www.terraform.io/docs/providers/acme/index.html):
|
||||
|
||||
The following elements can be specified:
|
||||
- `email`: (Required) The email to associate the certificates with.
|
||||
- `server_url`: (Optional) The URL to the ACME endpoint's directory. Default is "https://acme-v02.api.letsencrypt.org/directory"
|
||||
|
||||
#### aws
|
||||
Configuration for the AWS provider supports arguments that closely align with the [Terraform AWS provider](https://www.terraform.io/docs/providers/aws/index.html).
|
||||
|
||||
```yaml
|
||||
aws:
|
||||
region: "us-east-1"
|
||||
tags:
|
||||
Owner: "Infra"
|
||||
Environment: "Test"
|
||||
```
|
||||
The following elements can be specified:
|
||||
- `region` - (Required) This is the AWS region. It can be sourced from the `AWS_DEFAULT_REGION` environment variables, or
|
||||
via a shared credentials file if `profile` is specified.
|
||||
- `tags` - (Optional) Additional name value pairs to assign to every resource (which
|
||||
supports tagging) in the cluster.
|
||||
- `access_key` - (Required) This is the AWS access key. It can be sourced from
|
||||
the `AWS_ACCESS_KEY_ID` environment variable, or via
|
||||
a shared credentials file if `profile` is specified.
|
||||
- `secret_key` - (Required) This is the AWS secret key. It can be sourced from
|
||||
the `AWS_SECRET_ACCESS_KEY` environment variable, or
|
||||
via a shared credentials file if `profile` is specified.
|
||||
- `profile` - (Optional) This is the AWS profile name as set in the shared credentials
|
||||
file.
|
||||
- `assume_role` - (Optional) An `assume_role` block (documented below). Only one
|
||||
`assume_role` block can be in the configuration.
|
||||
- `endpoints` - (Optional) Configuration block for customizing service endpoints. See the
|
||||
[Custom Service Endpoints Guide](/docs/providers/aws/guides/custom-service-endpoints.html)
|
||||
for more information about connecting to alternate AWS endpoints or AWS compatible solutions.
|
||||
- `shared_credentials_file` = (Optional) This is the path to the shared
|
||||
credentials file. If this is not set and a profile is specified,
|
||||
`~/.aws/credentials` is used.
|
||||
- `token` - (Optional) Session token for validating temporary credentials.
|
||||
Typically provided after successful identity federation or Multi-Factor
|
||||
Authentication (MFA) login. With MFA login, this is the session token
|
||||
provided afterwards, not the 6 digit MFA code used to get temporary
|
||||
credentials. It can also be sourced from the `AWS_SESSION_TOKEN`
|
||||
environment variable.
|
||||
- `max_retries` - (Optional) This is the maximum number of times an API
|
||||
call is retried, in the case where requests are being throttled or
|
||||
experiencing transient failures. The delay between the subsequent API
|
||||
calls increases exponentially.
|
||||
- `allowed_account_ids` - (Optional) List of allowed, white listed, AWS
|
||||
account IDs to prevent you from mistakenly using an incorrect one (and
|
||||
potentially end up destroying a live environment). Conflicts with
|
||||
`forbidden_account_ids`.
|
||||
- `forbidden_account_ids` - (Optional) List of forbidden, blacklisted,
|
||||
AWS account IDs to prevent you mistakenly using a wrong one (and
|
||||
potentially end up destroying a live environment). Conflicts with
|
||||
`allowed_account_ids`.
|
||||
- `insecure` - (Optional) Explicitly allows the provider to
|
||||
perform "insecure" SSL requests. If omitted, defaults to `false`.
|
||||
- `skip_credentials_validation` - (Optional) Skips the credentials
|
||||
validation via the STS API. Useful for AWS API implementations that do
|
||||
not have STS available or implemented.
|
||||
- `skip_get_ec2_platforms` - (Optional) Skips getting the supported EC2
|
||||
platforms. Used by users that don't have `ec2:DescribeAccountAttributes`
|
||||
permissions.
|
||||
- `skip_region_validation` - (Optional) Skips validation of provided region name.
|
||||
Useful for AWS-like implementations that use their own region names
|
||||
or to bypass the validation for regions that aren't publicly available yet.
|
||||
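As a hedged sketch, the credential-related elements above let you keep secrets out of the cluster file by pointing the provider at a named profile; the profile name is a placeholder:

```yaml
aws:
  region: "us-east-1"
  profile: "cluster-admin"
  shared_credentials_file: "~/.aws/credentials"
  max_retries: 5
```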
|
||||
### resource
|
||||
Resources to provision for a cluster. Resources are organized as shown in the following example:
|
||||
|
||||
```yaml
|
||||
resource:
|
||||
type:
|
||||
name:
|
||||
parameters
|
||||
```
|
||||
For a given `type`, there may be one or more named resources to provision.
|
||||
|
||||
For a given `name`, a resource may have one or more parameters.
|
||||
|
||||
#### aws_instance
|
||||
|
||||
```yaml
|
||||
resource:
|
||||
aws_instance:
|
||||
workers:
|
||||
instance_type: t2.xlarge
|
||||
      quantity: 3
|
||||
os: Ubuntu 16.04
|
||||
```
|
||||
- `quantity`: (Required) The number of instances to create.
|
||||
- `os`: An alias that is expanded by `docker cluster` to the AMI owner and AMI name to install.
|
||||
The following aliases are supported by `docker cluster`:
|
||||
- `CentOS 7`
|
||||
- `RHEL 7.1`
|
||||
- `RHEL 7.2`
|
||||
- `RHEL 7.3`
|
||||
- `RHEL 7.4`
|
||||
- `RHEL 7.5`
|
||||
- `RHEL 7.6`
|
||||
- `Oracle Linux 7.3`
|
||||
- `Oracle Linux 7.4`
|
||||
- `Oracle Linux 7.5`
|
||||
- `SLES 12.2`
|
||||
- `SLES 12.3`
|
||||
- `SLES 15`
|
||||
- `Ubuntu 14.04`
|
||||
- `Ubuntu 16.04`
|
||||
- `Ubuntu 18.04`
|
||||
- `Windows Server 2016`
|
||||
- `Windows Server 1709`
|
||||
- `Windows Server 1803`
|
||||
- `Windows Server 2019`
|
||||
> Note: Make sure the OS you select is [compatible](https://success.docker.com/article/compatibility-matrix)
|
||||
> with the product you're installing. Docker Cluster validates the support during installation.
|
||||
- `instance_type`: Specifies the [AWS instance type](https://aws.amazon.com/ec2/instance-types/) to provision.
|
||||
- `key_name`: By default, Docker Cluster creates an [AWS EC2 Key Pair](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) and registers it with AWS for the cluster.
|
||||
To use an existing AWS EC2 Key Pair, set this value to the name of the AWS EC2 Key Pair.
|
||||
- `ssh_private_key`: By default, Docker Cluster creates an [AWS EC2 Key Pair](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) and registers it with AWS for the cluster. To use an existing AWS EC2 Key Pair, set this value to the path of the private SSH key.
|
||||
- `username`: Specifies the username for the node with administrative privileges. By default, the `os` option
|
||||
sets this to the well-known username for the AMIs (which can vary by distribution):
|
||||
- Amazon Linux 2 is `ec2-user`.
|
||||
- CentOS is `centos`.
|
||||
- Oracle Linux is `ec2-user`.
|
||||
- RedHat is `ec2-user`.
|
||||
- SLES is `ec2-user`.
|
||||
- Ubuntu is `ubuntu`.
|
||||
- Windows is `Administrator`.
|
||||
- `password`: This value is only used by Windows nodes. By default, Windows nodes have a random password generated.
|
||||
- `ami`: Specifies a custom AMI, or one that's not currently available as an OS. Specify either the ID or
|
||||
the owner/name to query for the latest (see the sketch after this list).
|
||||
- `id`: Specifies the ID of the AMI. For example, `ami-0510c89f1a2691cf2`.
|
||||
- `owner`: Specifies the AWS account ID of the image owner. For example, `099720109477`.
|
||||
- `name`: Specifies the name of the AMI that was provided during image creation. For example, `ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-*`.
|
||||
- `platform`: Specify `windows` for Windows instances.
|
||||
- `tags`: (Optional) Specifies additional name-value pairs to assign to every instance.
|
||||
- `swarm_labels`: (Optional) Specifies additional key-value pairs that represent swarm labels to apply to every node.
|
||||
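For example, a sketch of a worker group that pins a custom AMI instead of an `os` alias; the AMI owner and name reuse the values above, and the swarm label is a placeholder:

```yaml
resource:
  aws_instance:
    workers:
      quantity: 3
      instance_type: t2.xlarge
      ami:
        owner: "099720109477"
        name: "ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-*"
      swarm_labels:
        com.example.role: "worker"
```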
|
||||
#### aws_spot_instance_request
|
||||
|
||||
Provisions a spot instance request in AWS to dramatically reduce the cost of instances. Spot instance
|
||||
availability is not guaranteed. Therefore, it is recommended to use `aws_spot_instance_request` for
|
||||
additional worker nodes and not for mission-critical nodes like managers and registry.
|
||||
|
||||
```yaml
|
||||
resource:
|
||||
aws_spot_instance_request:
|
||||
workers:
|
||||
instance_type: t2.xlarge
|
||||
price: 0.25
|
||||
os: Ubuntu 16.04
|
||||
quantity: 3
|
||||
```
|
||||
|
||||
Supports the same set of parameters as [aws_instance](index.md#aws_instance), with the addition of an optional price to limit the max bid for a spot instance.
|
||||
- `price`: (Optional) Specifies a maximum price to bid on the spot instance.
|
||||
|
||||
#### aws_lb
|
||||
Provisions an AWS Load Balancer.
|
||||
```yaml
|
||||
resource:
|
||||
aws_lb:
|
||||
ucp:
|
||||
domain: "example.com"
|
||||
instances:
|
||||
- managers
|
||||
ports:
|
||||
- 443:443
|
||||
- 6443:6443
|
||||
```
|
||||
The following options are supported:
|
||||
|
||||
- `instances`: (Required) Specifies a list of `aws_instance` and `aws_spot_instance_request` names to
|
||||
attach to the load balancer.
|
||||
- `ports`: (Required) Specifies a list of `listening port[/protocol]:target port[/protocol]` mappings
|
||||
to define how the load balancer should route traffic. By default, the protocol is `tcp` (see the sketch after this list).
|
||||
- `domain`: Specifies the domain in which to create DNS records for this load balancer. The record is named the
|
||||
same as this resource, with the domain appended. For example, if the resource is `ucp` and the domain is `example.com`,
|
||||
the `A` record is `ucp.example.com`.
|
||||
- `internal`: (Optional) Defaults to `false`.
|
||||
- `type`: (Optional) Defaults to `network`.
|
||||
- `enable_cross_zone_load_balancing`: (Optional) Defaults to `false`.
|
||||
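Because each `ports` mapping accepts an optional `/protocol` suffix, TCP and UDP listeners can be mixed on one load balancer. A sketch, with illustrative resource names and ports:

```yaml
resource:
  aws_lb:
    apps:
      instances:
        - workers
      ports:
        - 443:8443
        - 53/udp:53/udp
```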
|
||||
#### aws_route53_zone
|
||||
Creates a subdomain in an AWS route53 zone. The following example creates a public zone for `testing.example.com`:
|
||||
|
||||
```yaml
|
||||
resource:
|
||||
aws_route53_zone:
|
||||
dns:
|
||||
domain: example.com
|
||||
subdomain: testing
|
||||
```
|
||||
The following elements are required:
|
||||
- `domain`: (Required) Specifies the name of the hosted zone.
|
||||
- `subdomain`: (Required) Specifies the subdomain to create in the `domain` hosted zone.
|
||||
|
||||
### variable
|
||||
Docker Cluster supports basic parameterization. The `variable` section defines a map of keys and values. A key can have a sub-key named `type`, which changes the behavior of the variable.
|
||||
|
||||
```yaml
|
||||
variable:
|
||||
region: "us-east-1"
|
||||
password:
|
||||
type: prompt
|
||||
```
|
||||
|
||||
Variables are referenced in the cluster definition as `${variable_name}`. For example, `${region}` is substituted with `us-east-1` throughout the cluster definition.
|
||||
|
||||
The type defines how the variable behaves. This is currently limited in scope to:
|
||||
- `prompt`: Requests the value from the user and does not echo characters as the value is entered.
|
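As a sketch of how a prompted variable is consumed, the value can be referenced anywhere in the cluster definition; the `cluster` section below is illustrative:

```yaml
variable:
  password:
    type: prompt

cluster:
  ucp:
    username: admin
    password: ${password}
```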
|
@ -19,6 +19,8 @@ Using Docker Cluster is a three-step process:
|
|||
|
||||
A `cluster.yml` file resembles the following example:
|
||||
|
||||
{% raw %}
|
||||
```yaml
|
||||
variable:
|
||||
region: us-east-2
|
||||
ucp_password:
|
||||
|
@ -40,9 +42,11 @@ A `cluster.yml` file resembles the following example:
|
|||
aws_instance:
|
||||
managers:
|
||||
quantity: 1
|
||||
```
|
||||
{% endraw %}
|
||||
|
||||
For more information about Cluster files, refer to the
|
||||
[Cluster file reference](cluster-file/index.md).
|
||||
[Cluster file reference](cluster-file.md).
|
||||
|
||||
Docker Cluster has commands for managing the whole lifecycle of your cluster:
|
||||
|
||||
|
@ -52,9 +56,9 @@ Docker Cluster has commands for managing the whole lifecycle of your cluster:
|
|||
* View the status of clusters
|
||||
* Backup and Restore clusters
|
||||
|
||||
## Cluster documentation
|
||||
## Cluster reference pages
|
||||
|
||||
- [Get started with Docker Cluster on AWS](aws.md)
|
||||
- [Command line reference](./reference/index.md)
|
||||
- [Cluster file reference](./cluster-file/index.md)
|
||||
- [Command line reference](/engine/reference/commandline/cluster/)
|
||||
- [Cluster file reference](./cluster-file.md)
|
||||
|
|
@ -1,21 +0,0 @@
|
|||
---
|
||||
description: Back up a running cluster
|
||||
keywords: documentation, docs, docker, cluster, infrastructure, automation
|
||||
title: docker cluster backup
|
||||
notoc: true
|
||||
---
|
||||
|
||||
## Usage
|
||||
```
|
||||
docker cluster backup [OPTIONS] cluster
|
||||
```
|
||||
|
||||
Use the following options as needed to back up a running cluster:
|
||||
|
||||
- `--dry-run`: Skips resource provisioning.
|
||||
- `--file string`: Specifies a cluster backup filename. Defaults to `backup.tar.gz`.
|
||||
- `--log-level string`: Specifies the logging level. Valid values include: `trace`,`debug`,`info`,`warn`,`error`, and `fatal`.
|
||||
Defaults to `warn`.
|
||||
- `--passphrase string`: Specifies a cluster backup passphrase.
|
||||
|
||||
The backup command performs a full Docker Cluster backup following the steps found in [Backup and Restore Best Practices](https://success.docker.com/article/backup-restore-best-practices).
|
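For example, a hypothetical invocation that backs up a cluster named `mycluster` to an encrypted archive (the cluster name, filename, and passphrase are placeholders):

```
docker cluster backup --file mycluster-backup.tar.gz --passphrase "example-passphrase" mycluster
```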
|
@ -1,24 +0,0 @@
|
|||
---
|
||||
description: Cluster CLI reference
|
||||
keywords: documentation, docs, docker, cluster, infrastructure, automation
|
||||
title: Cluster command-line reference
|
||||
notoc: true
|
||||
---
|
||||
|
||||
The following pages describe the usage information for the [docker cluster](overview) subcommands. You can also view this information by running `docker cluster [subcommand] --help` from the command line.
|
||||
|
||||
* [docker cluster](overview)
|
||||
* [backup](backup)
|
||||
* [create](create)
|
||||
* [inspect](inspect)
|
||||
* [logs](logs)
|
||||
* [ls](ls)
|
||||
* [restore](restore)
|
||||
* [rm](rm)
|
||||
* [update](update)
|
||||
* [version](version)
|
||||
|
||||
## Where to go next
|
||||
|
||||
* [CLI environment variables](envvars)
|
||||
* [docker cluster command](overview)
|
|
@ -1,16 +0,0 @@
|
|||
---
|
||||
description: Inspect clusters
|
||||
keywords: documentation, docs, docker, cluster, infrastructure, automation
|
||||
title: docker cluster inspect
|
||||
notoc: true
|
||||
---
|
||||
|
||||
## Usage
|
||||
```
|
||||
docker cluster inspect [OPTIONS] cluster
|
||||
```
|
||||
Use the following options as needed to display detailed information about a cluster:
|
||||
|
||||
- `-a, --all`: Displays complete information about the cluster.
|
||||
- `--dry-run`: Skips resource provisioning.
|
||||
- `--log-level string`: Specifies the logging level. Valid values include: `trace`,`debug`,`info`,`warn`,`error`, and `fatal`. Defaults to `warn`.
|
|
@ -1,16 +0,0 @@
|
|||
---
|
||||
description: List all available clusters
|
||||
keywords: documentation, docs, docker, cluster, infrastructure, automation
|
||||
title: docker cluster ls
|
||||
notoc: true
|
||||
---
|
||||
|
||||
## Usage
|
||||
```
|
||||
docker cluster ls [OPTIONS]
|
||||
```
|
||||
Use the following options as needed to list all available clusters:
|
||||
|
||||
- `--dry-run`: Skips resource provisioning.
|
||||
- `--log-level string`: Specifies the logging level. Valid values include: `trace`,`debug`,`info`,`warn`,`error`, and `fatal`. Defaults to `warn`.
|
||||
- `-q`, `--quiet`: Displays only numeric IDs.
|
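For example, a hypothetical scripting-oriented invocation that prints only the cluster IDs:

```
docker cluster ls --quiet
```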
|
@ -1,49 +0,0 @@
|
|||
---
|
||||
description: Overview of docker cluster CLI
|
||||
keywords: documentation, docs, docker, cluster, infrastructure, automation
|
||||
title: Overview of docker cluster CLI
|
||||
---
|
||||
|
||||
This page provides usage information for the `docker cluster` CLI plugin command options.
|
||||
|
||||
You can also view this information by running `docker cluster --help` from the
|
||||
command line.
|
||||
|
||||
## Usage
|
||||
```
|
||||
docker cluster [Options] [Commands]
|
||||
```
|
||||
|
||||
Options:
|
||||
|
||||
- `--dry-run`: Skips resource provisioning.
|
||||
- `--log-level string`: Specifies the logging level. Valid values include: `trace`,`debug`,`info`,`warn`,`error`, and `fatal`. Defaults to `warn`.
|
||||
|
||||
Commands:
|
||||
|
||||
- `backup`: Backs up a running cluster.
|
||||
- `begin`: Creates an example cluster declaration.
|
||||
- `create`: Creates a new Docker cluster.
|
||||
- `inspect`: Provides detailed information about a cluster.
|
||||
- `logs`: Fetches cluster logs.
|
||||
- `ls`: Lists all available clusters.
|
||||
- `restore`: Restores a cluster from a backup.
|
||||
- `rm`: Removes a cluster.
|
||||
- `update`: Updates a running cluster's desired state.
|
||||
- `version`: Displays Version, Commit, and Build type.
|
||||
|
||||
Run `docker cluster [Command] --help` for more information about a command.
|
||||
|
||||
|
||||
## Specify name and path of one or more cluster files
|
||||
|
||||
Use the `-f` flag to specify the location of a cluster configuration file.
|
||||
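A hypothetical invocation — the subcommand and file path are illustrative, and `-f` support for any particular subcommand is an assumption here:

```
docker cluster create -f ./cluster.yml
```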
|
||||
## Set up environment variables
|
||||
|
||||
You can set [environment variables](envvars) for various
|
||||
`docker cluster` options, including the `-f` and `-p` flags.
|
||||
|
||||
## Where to go next
|
||||
|
||||
* [CLI environment variables](envvars)
|
|
@ -1,20 +0,0 @@
|
|||
---
|
||||
description: Restore to a running cluster
|
||||
keywords: documentation, docs, docker, cluster, infrastructure, automation
|
||||
title: docker cluster restore
|
||||
notoc: true
|
||||
---
|
||||
|
||||
## Usage
|
||||
```
|
||||
docker cluster restore [OPTIONS] cluster
|
||||
```
|
||||
Use the following options as needed to restore a cluster from a backup:
|
||||
|
||||
- `--dry-run`: Skips resource provisioning.
|
||||
- `--file string`: Specifies a cluster backup filename. Defaults to `backup.tar.gz`.
|
||||
- `--log-level string`: Specifies the logging level. Valid values include:
|
||||
`trace`,`debug`,`info`,`warn`,`error`, and `fatal`. Defaults to `warn`.
|
||||
- `--passphrase string`: Specifies a cluster backup passphrase.
|
||||
|
||||
The restore command performs a full Docker Cluster restore following the steps found in [Backup and Restore Best Practices](https://success.docker.com/article/backup-restore-best-practices).
|
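For example, a hypothetical invocation that restores the cluster `mycluster` from an encrypted archive (the names are placeholders and must match the values used when the backup was taken):

```
docker cluster restore --file mycluster-backup.tar.gz --passphrase "example-passphrase" mycluster
```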
|
@ -1,16 +0,0 @@
|
|||
---
|
||||
description: Remove a cluster
|
||||
keywords: documentation, docs, docker, cluster, infrastructure, automation
|
||||
title: docker cluster rm
|
||||
notoc: true
|
||||
---
|
||||
|
||||
## Usage
|
||||
```
|
||||
docker cluster rm [OPTIONS] cluster
|
||||
```
|
||||
Use the following options as needed when removing a cluster:
|
||||
|
||||
- `--dry-run`: Skips resource provisioning.
|
||||
- `-f`, `--force`: Forces removal of the cluster files.
|
||||
- `--log-level string`: Specifies the logging level. Valid values include: `trace`,`debug`,`info`,`warn`,`error`, and `fatal`. Defaults to `warn`.
|
|
@ -1,18 +0,0 @@
|
|||
---
|
||||
description: Update a cluster
|
||||
keywords: documentation, docs, docker, cluster, infrastructure, automation
|
||||
title: docker cluster update
|
||||
notoc: true
|
||||
---
|
||||
|
||||
## Usage
|
||||
```
|
||||
docker cluster update [Options] cluster
|
||||
```
|
||||
Use the following options as needed to update a running cluster's desired state:
|
||||
|
||||
Options:
|
||||
|
||||
- `--dry-run`: Skips resource provisioning.
|
||||
- `-f`, `--file string`: Specifies the cluster definition file.
|
||||
- `--log-level string`: Specifies the logging level. Valid values include: `trace`,`debug`,`info`,`warn`,`error`, and `fatal`. Defaults to `warn`.
|
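For example, a hypothetical invocation that applies an edited definition to the cluster `mycluster` (the filename and cluster name are placeholders):

```
docker cluster update -f cluster.yml mycluster
```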
|
@ -1,15 +0,0 @@
|
|||
---
|
||||
description: Print Version
|
||||
keywords: documentation, docs, docker, cluster, infrastructure, automation
|
||||
title: docker cluster version
|
||||
notoc: true
|
||||
---
|
||||
|
||||
## Usage
|
||||
```
|
||||
docker cluster version
|
||||
```
|
||||
Use the following options as needed for printing Version, Commit, and Build type:
|
||||
|
||||
- `--dry-run`: Skips resource provisioning.
|
||||
- `--log-level string`: Specifies the logging level. Valid values include: `trace`,`debug`,`info`,`warn`,`error`, and `fatal`. Defaults to `warn`.
|
|
@ -2,8 +2,8 @@
|
|||
redirect_from:
|
||||
- /engine/articles/systemd/
|
||||
- /engine/admin/resource_constraints/
|
||||
title: "Limit a container's resources"
|
||||
description: "Limiting the system resources a container can use"
|
||||
title: "Specify a container's resources"
|
||||
description: "Specify the system resources a container can use"
|
||||
keywords: "docker, daemon, configuration"
|
||||
---
|
||||
|
||||
|
@ -258,3 +258,98 @@ $ docker run -it --cpu-rt-runtime=950000 \
|
|||
```
|
||||
|
||||
If the kernel or Docker daemon is not configured correctly, an error occurs.
|
||||
|
||||
## GPU
|
||||
|
||||
### Access an NVIDIA GPU
|
||||
|
||||
#### Prerequisites
|
||||
|
||||
Visit the official [NVIDIA drivers page](https://www.nvidia.com/Download/index.aspx)
|
||||
to download and install the proper drivers. Reboot your system once you have
|
||||
done so.
|
||||
|
||||
Verify that your GPU is running and accessible.
|
||||
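One quick check, assuming the drivers installed correctly (`nvidia-smi` ships with the driver package):

```bash
$ lspci | grep -i nvidia
$ nvidia-smi
```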
|
||||
#### Install nvidia-container-runtime
|
||||
|
||||
Follow the instructions at [nvidia-container-runtime](https://nvidia.github.io/nvidia-container-runtime/)
|
||||
and then run this command:
|
||||
|
||||
```bash
|
||||
$ apt-get install nvidia-container-runtime
|
||||
```
|
||||
|
||||
Ensure the `nvidia-container-runtime-hook` is accessible from `$PATH`.
|
||||
|
||||
```bash
|
||||
$ which nvidia-container-runtime-hook
|
||||
```
|
||||
|
||||
Restart the Docker daemon.
|
||||
|
||||
#### Expose GPUs for use
|
||||
|
||||
Include the `--gpus` flag when you start a container to access GPU resources.
|
||||
Specify how many GPUs to use. For example:
|
||||
|
||||
```bash
|
||||
$ docker run -it --rm --gpus all ubuntu nvidia-smi
|
||||
```
|
||||
|
||||
This exposes all available GPUs and returns a result akin to the following:
|
||||
|
||||
```bash
|
||||
+-----------------------------------------------------------------------------+
|
||||
| NVIDIA-SMI 384.130 Driver Version: 384.130 |
|
||||
|-------------------------------+----------------------+----------------------+
|
||||
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
|
||||
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
|
||||
|===============================+======================+======================|
|
||||
| 0 GRID K520 Off | 00000000:00:03.0 Off | N/A |
|
||||
| N/A 36C P0 39W / 125W | 0MiB / 4036MiB | 0% Default |
|
||||
+-------------------------------+----------------------+----------------------+
|
||||
+-----------------------------------------------------------------------------+
|
||||
| Processes: GPU Memory |
|
||||
| GPU PID Type Process name Usage |
|
||||
|=============================================================================|
|
||||
| No running processes found |
|
||||
+-----------------------------------------------------------------------------+
|
||||
```
|
||||
|
||||
Use the `device` option to specify GPUs. For example:
|
||||
|
||||
```bash
|
||||
$ docker run -it --rm --gpus device=GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a ubuntu nvidia-smi
|
||||
```
|
||||
|
||||
This exposes that specific GPU.
|
||||
|
||||
```bash
|
||||
$ docker run -it --rm --gpus '"device=0,2"' ubuntu nvidia-smi
|
||||
```
|
||||
|
||||
This exposes the first and third GPUs.
|
||||
|
||||
> **Note**: NVIDIA GPUs can only be accessed by systems running a single engine.
|
||||
|
||||
#### Set NVIDIA capabilities
|
||||
|
||||
You can set capabilities manually. For example, on Ubuntu you can run the
|
||||
following:
|
||||
|
||||
```bash
|
||||
$ docker run --gpus 'all,capabilities=utility' --rm ubuntu nvidia-smi
|
||||
```
|
||||
|
||||
This enables the `utility` driver capability which adds the `nvidia-smi` tool to
|
||||
the container.
|
||||
|
||||
Capabilities as well as other configurations can be set in images via
|
||||
environment variables. More information on valid variables can be found at the
|
||||
[nvidia-container-runtime](https://github.com/NVIDIA/nvidia-container-runtime)
|
||||
GitHub page. These variables can be set in a Dockerfile.
|
||||
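As a sketch, the same variables can also be supplied at run time rather than baked into an image; `NVIDIA_DRIVER_CAPABILITIES` is one of the variables documented in that repository, and the capability set here is illustrative:

```bash
$ docker run -it --rm --gpus all \
    -e NVIDIA_DRIVER_CAPABILITIES=compute,utility \
    ubuntu nvidia-smi
```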
|
||||
You can also utilize CUDA images, which set these variables automatically. See
|
||||
the [CUDA images](https://github.com/NVIDIA/nvidia-docker/wiki/CUDA) GitHub page
|
||||
for more information.
|
||||
|
|
|
@ -16,7 +16,7 @@ designed to:
|
|||
starting point for the majority of users.
|
||||
|
||||
* Provide drop-in solutions for popular programming language runtimes, data
|
||||
stores, and other services, similar to what a Platform-as-a-Service (PAAS)
|
||||
stores, and other services, similar to what a Platform as a Service (PAAS)
|
||||
would offer.
|
||||
|
||||
* Exemplify [`Dockerfile` best practices](/engine/userguide/eng-image/dockerfile_best-practices/)
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
---
|
||||
description: Run certification tests against your images
|
||||
keywords: Docker, docker, store, certified content, images
|
||||
keywords: Docker, Docker Hub, store, certified content, images
|
||||
title: Certify Docker images
|
||||
redirect_from:
|
||||
- /docker-store/certify-images/
|
||||
|
@ -30,7 +30,7 @@ The `inspectDockerImage` tool does the following:
|
|||
|
||||
- Checks if a Linux Docker image is running `supervisord` to launch multiple services.
|
||||
|
||||
> Running `supervisord` in a container is not a best practice for images destined for Doctor Store. The recommended best practice is to split the multiple services into separate Docker images and run them in separate containers.
|
||||
> Running `supervisord` in a container is not a best practice for images destined for Docker Hub. The recommended best practice is to split the multiple services into separate Docker images and run them in separate containers.
|
||||
|
||||
- Attempts to start a container from the Docker image to ensure that the image is functional.
|
||||
|
||||
|
|
|
@ -89,7 +89,7 @@ You can also set the following properties:
|
|||
|
||||
For example:
|
||||
|
||||
msiexec /i DockerDesktop.msi /quiet AUTOSTART=no STARTMENUSHORTCUT=no INSTALLDIR="D:\Docker Desktop"
|
||||
msiexec /i DockerDesktop.msi /quiet STARTMENUSHORTCUT=no INSTALLDIR="D:\Docker Desktop"
|
||||
|
||||
Docker Desktop Enterprise includes a command line executable to install and uninstall version packs. When you install DDE, the command line tool is installed at the following location:
|
||||
|
||||
|
|
Before Width: | Height: | Size: 28 KiB After Width: | Height: | Size: 5.4 KiB |
Before Width: | Height: | Size: 28 KiB After Width: | Height: | Size: 4.1 KiB |
Before Width: | Height: | Size: 21 KiB After Width: | Height: | Size: 4.2 KiB |
Before Width: | Height: | Size: 58 KiB After Width: | Height: | Size: 84 KiB |
Before Width: | Height: | Size: 37 KiB After Width: | Height: | Size: 25 KiB |
Before Width: | Height: | Size: 142 KiB After Width: | Height: | Size: 40 KiB |
Before Width: | Height: | Size: 125 KiB After Width: | Height: | Size: 37 KiB |
Before Width: | Height: | Size: 133 KiB After Width: | Height: | Size: 37 KiB |
Before Width: | Height: | Size: 155 KiB After Width: | Height: | Size: 37 KiB |
Before Width: | Height: | Size: 137 KiB After Width: | Height: | Size: 35 KiB |
Before Width: | Height: | Size: 127 KiB After Width: | Height: | Size: 35 KiB |
Before Width: | Height: | Size: 94 KiB After Width: | Height: | Size: 30 KiB |
Before Width: | Height: | Size: 115 KiB After Width: | Height: | Size: 32 KiB |
Before Width: | Height: | Size: 36 KiB After Width: | Height: | Size: 6.4 KiB |
Before Width: | Height: | Size: 38 KiB After Width: | Height: | Size: 5.1 KiB |
|
@ -2,10 +2,11 @@
|
|||
title: Docker Desktop Enterprise release notes
|
||||
description: Release notes for Docker Desktop Enterprise
|
||||
keywords: Docker Desktop Enterprise, Windows, Mac, Docker Desktop, Enterprise,
|
||||
toc_min: 1
|
||||
toc_max: 2
|
||||
---
|
||||
|
||||
This topic contains information about the main improvements and issues, starting with the
|
||||
current release. The documentation is updated for each release.
|
||||
This page contains information about the new features, improvements, known issues, and bug fixes in the Docker Desktop Enterprise release. Documentation is updated for each release. We suggest that you regularly visit this page to learn about updates.
|
||||
|
||||
For information on system requirements, installation, and download, see:
|
||||
|
||||
|
@ -14,18 +15,39 @@ For information on system requirements, installation, and download, see:
|
|||
|
||||
For Docker Enterprise Engine release notes, see [Docker Engine release notes](/engine/release-notes).
|
||||
|
||||
## Docker Desktop Enterprise Releases of 2019
|
||||
## Version 2.1.0.1
|
||||
2019-07-22
|
||||
|
||||
### Docker Desktop Enterprise 2.0.0.6
|
||||
Docker Desktop Enterprise 2.1.0.1 contains a Kubernetes upgrade. Note that your local Kubernetes cluster will be reset after installing this release.
|
||||
|
||||
### Upgrades
|
||||
|
||||
- [Docker 19.03.0](https://docs.docker.com/engine/release-notes/) in Version Pack Enterprise 3.0
|
||||
- [Kubernetes 1.14.3](https://github.com/kubernetes/kubernetes/releases/tag/v1.14.3) in Version Pack Enterprise 3.0
|
||||
- [Compose on Kubernetes 0.4.23](https://github.com/docker/compose-on-kubernetes/releases/tag/v0.4.23) in Version Pack Enterprise 3.0
|
||||
- [Docker Compose 1.24.1](https://github.com/docker/compose/releases/tag/1.24.1)
|
||||
- [Docker 18.09.8](https://docs.docker.com/engine/release-notes/) in Version Pack Enterprise 2.1
|
||||
- [Docker 17.06.2-ee-23](https://docs.docker.com/engine/release-notes/) in Version Pack Enterprise 2.0
|
||||
- [Docker Credential Helpers 0.6.3](https://github.com/docker/docker-credential-helpers/releases/tag/v0.6.3)
|
||||
- [Application Designer 0.1.4](/ee/desktop/app-designer/)
|
||||
|
||||
### Known issue
|
||||
|
||||
When you sign out of Windows while Docker Desktop is still running, and then sign in and start Docker Desktop, attempts to run Docker commands that rely on network connections can fail. For example, the command `docker pull alpine` returns `Error response from daemon: Get https://registry-1.docker.io/v2/: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)`.
|
||||
|
||||
Note that this issue occurs only when you sign out of Windows and not when you shut down or restart Windows.
|
||||
|
||||
Workaround: After signing back into Windows, when Docker Desktop has started, right-click the Docker menu from the systray and then select Quit Docker Desktop. When this is complete, start Docker Desktop normally.
|
||||
|
||||
## Version 2.0.0.6
|
||||
2019-06-06
|
||||
|
||||
- Upgrades
|
||||
### Upgrades
|
||||
|
||||
- [Docker 19.03.0-rc2](https://docs.docker.com/engine/release-notes/) in Version Pack Enterprise 3.0
|
||||
- Application Designer 0.1.3
|
||||
|
||||
- Bug fixes and minor changes:
|
||||
### Bug fixes and minor changes
|
||||
|
||||
- Application Designer/ Docker Template CLI: The `DefaultServer` preference has been renamed to `DefaultRegistry`.
|
||||
- Windows: Docker Desktop now allows administrators to specify authorized named pipes that can be mounted on Windows containers.
|
||||
|
@ -33,17 +55,16 @@ For Docker Enterprise Engine release notes, see [Docker Engine release notes](/e
|
|||
- Mac: Docker Desktop does not send DNS queries for `docker-desktop.<domain>` every 10s. It relies on the host's DNS domain search order rather than trying to replicate it inside the VM.
|
||||
- Mac: Docker Desktop now uses a separate thread to move its storage to a different disk. This allows the UI to remain responsive during the disk move. Fixes [docker/for-mac#3592](https://github.com/docker/for-mac/issues/3592)
|
||||
|
||||
### Docker Desktop Enterprise 2.0.0.5
|
||||
|
||||
## Version 2.0.0.5
|
||||
2019-05-30
|
||||
|
||||
- Upgrades
|
||||
### Upgrades
|
||||
|
||||
- [Docker 19.03.0-rc1](https://docs.docker.com/engine/release-notes/) in Enterprise 3.0 version pack
|
||||
- Application Designer 0.1.2
|
||||
- [Qemu 4.0.0](https://github.com/docker/binfmt) to cross-compile ARM devices
|
||||
|
||||
- Bug fixes and minor changes
|
||||
### Bug fixes and minor changes
|
||||
|
||||
- Application Designer now allows users to copy and paste application logs.
|
||||
- Users can browse the scaffold logs when scaffolding a new application using the Application Designer.
|
||||
|
@ -52,17 +73,16 @@ For Docker Enterprise Engine release notes, see [Docker Engine release notes](/e
|
|||
- Security improvements: Docker Desktop now checks TLS certificates for the target endpoints when using `kubectl`.
|
||||
- Fixed an issue where Visual Studio Code IDE path was not detected properly.
|
||||
|
||||
### Docker Desktop Enterprise 2.0.0.4
|
||||
|
||||
## Version 2.0.0.4
|
||||
2019-05-16
|
||||
|
||||
- Upgrades
|
||||
### Upgrades
|
||||
|
||||
- [Docker 19.03.0-beta4](https://docs.docker.com/engine/release-notes/) in Enterprise 3.0 version pack
|
||||
- [Docker 18.09.6](https://docs.docker.com/engine/release-notes/), [Kubernetes 1.11.10](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.11.md#v11110) in Enterprise 2.1 version pack
|
||||
- [LinuxKit v0.7](https://github.com/linuxkit/linuxkit/releases/tag/v0.7)
|
||||
|
||||
- Bug fixes and minor changes
|
||||
### Bug fixes and minor changes
|
||||
|
||||
- Fixed a stability issue with the DNS resolver.
|
||||
- Fixed a race condition where Kubernetes sometimes failed to start after restarting the application.
|
||||
|
@ -74,84 +94,80 @@ For Docker Enterprise Engine release notes, see [Docker Engine release notes](/e
|
|||
- Added a dialog box during startup when a shared drive fails to mount. This allows users to retry mounting the drive or remove it from the shared drive list.
|
||||
- Removed the ability to log in using an email address as a username as this is not supported by the Docker command line.
|
||||
|
||||
### Docker Desktop Enterprise 2.0.0.3
|
||||
|
||||
## Version 2.0.0.3
|
||||
2019-04-26
|
||||
|
||||
- Upgrades
|
||||
### Upgrades
|
||||
|
||||
- [Docker Engine 19.03.0-beta2](https://docs.docker.com/engine/release-notes/) for Version Pack Enterprise 3.0.
|
||||
|
||||
### Docker Desktop Enterprise 2.0.0.2
|
||||
|
||||
## Version 2.0.0.2
|
||||
2019-04-19
|
||||
|
||||
**WARNING:** You must upgrade the previously installed Version Packs to the latest revision.
|
||||
|
||||
- New
|
||||
### New
|
||||
|
||||
- Version Pack Enterprise 3.0 with [Docker Engine 19.03.0-beta1](https://docs.docker.com/engine/release-notes/) and [Kubernetes 1.14.1](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.14.md#changelog-since-v1141)
|
||||
|
||||
- Application Designer now includes new templates for AngularJS and VueJS.
|
||||
|
||||
- Upgrades
|
||||
### Upgrades
|
||||
|
||||
- [Docker Compose 1.24.0](https://github.com/docker/compose/releases/tag/1.24.0)
|
||||
- [Docker Engine 18.09.5](https://docs.docker.com/engine/release-notes/), [Kubernetes 1.11.7](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.11.md#v1117) and [Compose on Kubernetes 0.4.22](https://github.com/docker/compose-on-kubernetes/releases/tag/v0.4.22) for Version Pack Enterprise 2.1
|
||||
- [Docker Engine 17.06.2-ee-21](https://docs.docker.com/engine/release-notes/) for Version Pack Enterprise 2.0
|
||||
|
||||
- Bug fixes and minor changes
|
||||
### Bug fixes and minor changes
|
||||
|
||||
- For security, only administrators can install or upgrade Version Packs using the `dockerdesktop-admin` tool.
|
||||
- Truncate UDP DNS responses which are over 512 bytes in size
|
||||
- Fixed airgap install of kubernetes in version pack enterprise-2.0
|
||||
- Reset to factory default now resets to admin defaults
|
||||
|
||||
- Known issues
|
||||
### Known issues
|
||||
|
||||
- The Docker Template CLI plugin included in this version is an outdated version of the plugin and will fail when scaffolding templates. Note that the Application Designer is not affected by this outdated version of the CLI plugin.
|
||||
|
||||
### Docker Desktop Enterprise 2.0.0.1
|
||||
|
||||
## Version 2.0.0.1
|
||||
2019-03-01
|
||||
|
||||
**WARNING:** You must upgrade the previously installed Version Packs to the latest revision.
|
||||
|
||||
#### Windows
|
||||
### Windows
|
||||
|
||||
Upgrades:
|
||||
#### Upgrades
|
||||
|
||||
- Docker 18.09.3 for Version Pack Enterprise 2.1, fixes [CVE-2019-5736](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-5736)
|
||||
- Docker 17.06.2-ee-20 for Version Pack Enterprise 2.0, fixes [CVE-2019-5736](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-5736)
|
||||
|
||||
Bug fixes and minor changes:
|
||||
#### Bug fixes and minor changes
|
||||
|
||||
- Fixed port 8080 that was used on localhost when starting Kubernetes.
|
||||
- Fixed Hub login through the desktop UI not syncing with login through the `docker login` command line.
|
||||
- Fixed a crash in the system tray menu when the Hub login fails or in air-gap mode.
|
||||
|
||||
#### Mac
|
||||
### Mac
|
||||
|
||||
New features:
|
||||
#### New features
|
||||
|
||||
- Added ability to list all installed version packs with the admin CLI command `dockerdesktop-admin version-pack list`.
|
||||
- `dockerdesktop-admin app uninstall` will also remove Docker Desktop user files.
|
||||
|
||||
Upgrades:
|
||||
#### Upgrades
|
||||
|
||||
- Docker 18.09.3 for Version Pack Enterprise 2.1, fixes [CVE-2019-5736](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-5736)
|
||||
- Docker 17.06.2-ee-20 for Version Pack Enterprise 2.0, fixes [CVE-2019-5736](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-5736)
|
||||
|
||||
Bug fixes and minor changes:
|
||||
#### Bug fixes and minor changes
|
||||
|
||||
- Fixed port 8080 that was used on localhost when starting Kubernetes.
|
||||
- Improved error messaging to suggest running diagnostics / resetting to factory default only when it is appropriate.
|
||||
|
||||
### Docker Desktop Enterprise 2.0.0.0
|
||||
|
||||
## Version 2.0.0.0
|
||||
2019-01-31
|
||||
|
||||
New features:
|
||||
### New features
|
||||
|
||||
- **Version selection**: Configurable version packs ensure the local
|
||||
instance of Docker Desktop Enterprise is a precise copy of the
|
||||
|
|
|
@ -187,7 +187,7 @@ You can configure options on the Docker daemon that determine how your
|
|||
containers run.
|
||||
|
||||
Select **Basic** to configure the daemon with interactive settings, or select
|
||||
**Advanced** to edit the JSON directly.
|
||||
**Advanced** to edit the JSON file directly.
|
||||
|
||||

|
||||
|
||||
|
@ -196,7 +196,7 @@ Select **Basic** to configure the daemon with interactive settings, or select
|
|||
Docker Desktop Enterprise has experimental features enabled
|
||||
on Docker Engine, as described in [Docker Experimental Features](https://github.com/docker/cli/blob/master/experimental/README.md) Readme. If you don't select **Experimental Features**, Docker Desktop Enterprise uses the current generally available release of Docker Engine.
|
||||
|
||||
> **Note:** Do not enable experimental features in production. Experimental features are not appropriate for production environments or workloads. They are meant to be sandbox experiments for new ideas.
|
||||
> {% include experimental.md %}
|
||||
|
||||
You can see whether you are running experimental mode at the command line. If
|
||||
`Experimental` is `true`, then Docker is running in experimental mode, as shown
|
||||
|
@ -224,9 +224,9 @@ For more information, see:
|
|||
Click the **Advanced** tab to configure the daemon from the JSON file. For a
|
||||
full list of options, see the Docker Engine [dockerd command line reference](https://docs.docker.com/engine/reference/commandline/dockerd).
|
||||
|
||||
Click **Apply & Restart** to save your settings and reboot Docker. Or, to cancel
|
||||
Click **Apply & Restart** to save your settings and reboot Docker. To cancel
|
||||
changes, click another preference tab, then choose to discard or not apply
|
||||
changes when asked.
|
||||
changes when prompted.
|
||||
|
||||

|
||||
|
||||
|
@ -277,7 +277,7 @@ Click on the Docker icon from the menu bar and then **Preferences**. Click **Res
|
|||
|
||||

|
||||
|
||||
### Diagnose and Feedback
|
||||
### Troubleshoot
|
||||
|
||||
The **Diagnose and Feedback** option allows you to troubleshoot any issues you may be experiencing with Docker Desktop Enterprise. For more information, see [Troubleshoot DDE issues on Mac](/ee/desktop/troubleshoot/mac-issues).
|
||||
|
||||
|
@ -316,7 +316,7 @@ an example.
|
|||
$ sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain ca.crt
|
||||
```
|
||||
|
||||
Or, if you prefer to add the certificate to your own local keychain only (rather
|
||||
If you prefer to add the certificate to your own local keychain only (rather
|
||||
than for all users), run this command instead:
|
||||
|
||||
```
|
||||
|
|
|
@ -251,6 +251,8 @@ The Linux VM restarts after changing the settings on the **Advanced** tab. This
|
|||
|
||||
- **Memory** - Change the amount of memory the Docker Desktop Enterprise Linux VM uses.
|
||||
|
||||
- **Swap** - Configure the swap file size.
|
||||
|
||||
#### Network
|
||||
|
||||
You can configure Docker Desktop Enterprise networking to work on a virtual private network (VPN).
|
||||
|
@ -314,8 +316,7 @@ The **Basic** mode lets you configure the more common daemon options with intera
|
|||
Docker Desktop Enterprise has the experimental version
|
||||
of Docker Engine enabled, described in the [Docker Experimental Features](https://github.com/docker/cli/blob/master/experimental/README.md) readme. If you don't select **Experimental Features**, Docker Desktop Enterprise uses the current generally available release of Docker Engine.
|
||||
|
||||
> **Note:** Do not enable experimental features in production. Experimental features are not appropriate for production environments or
|
||||
workloads. They are meant to be sandbox experiments for new ideas.
|
||||
> {% include experimental.md %}
|
||||
|
||||
Run `docker version` to see if you are in Experimental mode. Experimental mode
|
||||
is listed under `Server` data. If `Experimental` is `true`, then Docker is
|
||||
|
@ -438,7 +439,7 @@ To switch to a different version pack, simply click on the version pack you woul
|
|||
|
||||

|
||||
|
||||
### Diagnose and Feedback
|
||||
### Troubleshoot
|
||||
|
||||
The **Diagnose and Feedback** option allows you to troubleshoot any issues you may be experiencing with Docker Desktop Enterprise. For more information, see [Troubleshoot DDE issues on Windows](/ee/desktop/troubleshoot/windows-issues).
|
||||
|
||||
|
|
|
@ -0,0 +1,17 @@
|
|||
---
|
||||
title: Disable persistent cookies
|
||||
description: Learn how to disable persistent cookies for Docker Trusted Registry.
|
||||
keywords: dtr, browser cookies, sso
|
||||
---
|
||||
If you want your Docker Trusted Registry (DTR) to use session-based authentication cookies that expire when you close your browser, toggle "Disable persistent cookies".
|
||||
|
||||
{: .with-border}
|
||||
|
||||
## Verify your DTR cookies setting
|
||||
|
||||
You may need to disable Single Sign-On (SSO). From the DTR web UI in a Chrome browser, right-click on any page and click **Inspect**. With the Developer Tools open, select **Application > Storage > Cookies > `https://<dtr-external-url>`**. Verify that the cookies have "Session" as the setting for **Expires / Max-Age**.
|
||||
|
||||
## Where to go next
|
||||
|
||||
- [Use your own TLS certificates](use-your-own-tls-certificates)
|
||||
- [Enable authentication using client certificates](/ee/enable-client-certificate-authentication/)
|
|
@ -4,42 +4,62 @@ description: Learn how to set up single sign-on between UCP and DTR, so that you
|
|||
keywords: dtr, login, sso
|
||||
---
|
||||
|
||||
By default, users are shared between UCP and DTR, but you have to authenticate
|
||||
separately on the web UI of both applications.
|
||||
Users are shared between UCP and DTR by default, but the applications have separate browser-based interfaces which require authentication.
|
||||
|
||||
You can configure DTR to have single sign-on (SSO) with UCP, so that users only
|
||||
have to authenticate once.
|
||||
To only authenticate once, you can configure DTR to have single sign-on (SSO) with UCP.
|
||||
|
||||
> **Note**: After configuring single sign-on with DTR, users accessing DTR via
|
||||
> `docker login` should create an [access token](/ee/dtr/user/access-tokens/) and use it to authenticate.
|
||||
|
||||
## At installation time
|
||||
## At install time
|
||||
|
||||
When installing DTR, use the `docker/dtr install --dtr-external-url <url>`
|
||||
option to enable SSO. When accessing the DTR web UI, users are redirected to the
|
||||
UCP login page, and once they are authenticated, they're redirected to the URL
|
||||
you provided to `--dtr-external-url`.
|
||||
When [installing DTR](/reference/dtr/2.7/install/), pass `--dtr-external-url <url>` to enable SSO. [Specify the Fully Qualified Domain Name (FQDN)](/use-your-own-tls-certificates/) of your DTR, or a load balancer, to load-balance requests across multiple DTR replicas.
|
||||
|
||||
Use the domain name of DTR, or the domain name of a load balancer, if you're
|
||||
using one, to load-balance requests across multiple DTR replicas.
|
||||
|
||||
## After install
|
||||
```bash
|
||||
docker run --rm -it \
|
||||
{{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} install \
|
||||
--dtr-external-url dtr.example.com \
|
||||
--dtr-cert "$(cat cert.pem)" \
|
||||
--dtr-ca "$(cat dtr_ca.pem)" \
|
||||
--dtr-key "$(cat key.pem)" \
|
||||
--ucp-url ucp.example.com \
|
||||
--ucp-username admin \
|
||||
--ucp-ca "$(cat ucp_ca.pem)"
|
||||
```
|
||||
|
||||
In your browser, navigate to the DTR web UI, and choose **Settings**. In the
|
||||
**General** tab, scroll to **Domain & proxies**.
|
||||
With this flag set, when you access DTR's web user interface, you are redirected to the UCP login page for authentication. Upon successfully logging in, you are then redirected to the DTR external URL you specified during installation.
|
||||
|
||||
Update the **Load balancer / public address** field to the url where users
|
||||
should be redirected once they are logged in.
|
||||
Use the domain name of DTR, or the domain name of a load balancer, if you're
|
||||
using one, to load-balance requests across multiple DTR replicas.
|
||||
## Post-installation
|
||||
|
||||
Then enable **Use single sign-on**.
|
||||
### Web user interface
|
||||
|
||||
{: .with-border}
|
||||
1. Navigate to `https://<dtr-url>` and log in with your credentials.
|
||||
2. Select **System** from the left navigation pane, and scroll down to **Domain & Proxies**.
|
||||
3. Update the **Load balancer / Public Address** field with the external URL where users
|
||||
should be redirected once they are logged in. Click **Save** to apply your changes.
|
||||
4. Toggle **Single Sign-on** to automatically redirect users to UCP for logging in.
|
||||
{: .with-border}
|
||||
|
||||
Once you save, users are redirected to UCP for logging in, and redirected back to
|
||||
DTR once they are authenticated.
|
||||
|
||||
|
||||
### Command line interface
|
||||
|
||||
You can also enable single sign-on from the command line by reconfiguring your DTR. To do so, run the following:
|
||||
|
||||
```bash
|
||||
docker run --rm -it \
|
||||
{{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} reconfigure \
|
||||
--dtr-external-url dtr.example.com \
|
||||
--dtr-cert "$(cat cert.pem)" \
|
||||
--dtr-ca "$(cat dtr_ca.pem)" \
|
||||
--dtr-key "$(cat key.pem)" \
|
||||
--ucp-url ucp.example.com \
|
||||
--ucp-username admin \
|
||||
--ucp-ca "$(cat ucp_ca.pem)"
|
||||
```
|
||||
|
||||
## Where to go next
|
||||
|
||||
- [Use your own TLS certificates](use-your-own-tls-certificates.md)
|
||||
- [Use your own TLS certificates](use-your-own-tls-certificates)
|
||||
- [Enable authentication using client certificates](/ee/enable-authentication-via-client-certs/)
|
||||
|
|
|
@ -4,35 +4,33 @@ description: Learn how to license your Docker Trusted Registry installation.
|
|||
keywords: dtr, install, license
|
||||
---
|
||||
|
||||
By default, you don't need to license your Docker Trusted Registry. When
|
||||
installing DTR, it automatically starts using the same license file used on
|
||||
your Docker Universal Control Plane cluster.
|
||||
By default, Docker Trusted Registry (DTR) automatically uses the same license file applied to
|
||||
your Universal Control Plane (UCP). In the following scenarios, you need to
|
||||
manually apply a license to your DTR:
|
||||
|
||||
However, there are some situations when you have to manually license your
|
||||
DTR installation:
|
||||
|
||||
* When upgrading to a new major version,
|
||||
* When your current license expires.
|
||||
* Major version upgrade
|
||||
* License expiration
|
||||
|
||||
|
||||
## Download your license
|
||||
|
||||
Go to [Docker Hub](https://hub.docker.com/editions/enterprise/docker-ee-trial)
|
||||
to download a trial license.
|
||||
Visit Docker Hub's [Enterprise Trial page](https://hub.docker.com/editions/enterprise/docker-ee-trial) to start your one-month trial. After signing up, you should receive a confirmation email with a link to your subscription page. You can find your **License Key** in the **Resources** section of the Docker Enterprise Setup Instructions page.
|
||||
|
||||
{: .with-border}
|
||||
{: .with-border}
|
||||
|
||||
Click "License Key" to download your license.
|
||||
|
||||
## License your installation
|
||||
|
||||
Once you've downloaded the license file, you can apply it to your DTR
|
||||
installation. Navigate to the **DTR web UI**, and then go to the **Settings
|
||||
page**.
|
||||
After downloading your license key, navigate to `https://<dtr-url>` and log in with your credentials.
|
||||
Select **System** from the left navigation pane, and click **Apply new license** to upload your license
|
||||
key.
|
||||
|
||||
{: .with-border}
|
||||
{: .with-border}
|
||||
|
||||
Click the **Apply new license** button, and upload your new license file.
|
||||
Within **System > General** under the **License** section, you should see the tier, date of expiration, and ID for your license.
|
||||
|
||||
## Where to go next
|
||||
|
||||
- [Enable single sign-on](enable-single-sign-on.md)
|
||||
- [Use your own TLS certificates](use-your-own-tls-certificates)
|
||||
- [Enable single sign-on](enable-single-sign-on)
|
||||
|
|
|
@ -1,44 +1,45 @@
|
|||
---
|
||||
title: Use your own TLS certificates
|
||||
description: Learn how to configure Docker Trusted Registry with your own TLS certificates.
|
||||
keywords: dtr, tls
|
||||
keywords: dtr, tls, certificates, security
|
||||
---
|
||||
|
||||
By default the DTR services are exposed using HTTPS, to ensure all
|
||||
communications between clients and DTR is encrypted. Since DTR
|
||||
replicas use self-signed certificates for this, when a client accesses
|
||||
DTR, their browsers won't trust this certificate, so the browser displays a
|
||||
warning message.
|
||||
Docker Trusted Registry (DTR) services are exposed using HTTPS by default. This ensures encrypted communications between clients and your trusted registry. If you do not pass a PEM-encoded TLS certificate during installation, DTR will generate a [self-signed certificate](https://en.wikipedia.org/wiki/Self-signed_certificate). This leads to an insecure site warning when accessing DTR through a browser. Additionally, DTR includes an [HSTS (HTTP Strict-Transport-Security) header](https://en.wikipedia.org/wiki/HTTP_Strict_Transport_Security) in all API responses which can further lead to your browser refusing to load DTR's web interface.
|
||||
|
||||
You can configure DTR to use your own certificates, so that it is automatically
|
||||
trusted by your users' browser and client tools.
|
||||
You can configure DTR to use your own TLS certificates, so that it is automatically
|
||||
trusted by your users' browser and client tools. As of v2.7, you can also [enable user authentication via client certificates](/ee/enable-authentication-via-client-certs/) provided by your organization's public key infrastructure (PKI).
|
||||
|
||||
## Replace the server certificates
|
||||
|
||||
To configure DTR to use your own certificates and keys, go to the
|
||||
**DTR web UI**, navigate to the **Settings** page, and scroll down to the
|
||||
**Domain** section.
|
||||
You can upload your own TLS certificates and keys using the web interface, or pass them as CLI options when installing or reconfiguring your DTR instance.
|
||||
|
||||
{: .with-border}
|
||||
### Web interface
|
||||
|
||||
Navigate to `https://<dtr-url>` and log in with your credentials. Select **System** from the left navigation pane, and scroll down to **Domain & Proxies**.
|
||||
|
||||
Set the DTR domain name and upload the certificates and key:
|
||||
{: .with-border}
|
||||
|
||||
* Load balancer/public address, is the domain name clients will use to access DTR.
|
||||
* TLS certificate, is the server certificate and any intermediate CA public
|
||||
certificates. This certificate needs to be valid for the DTR public address,
|
||||
Enter your DTR domain name and upload or copy and paste the certificate details:
|
||||
|
||||
* ***Load balancer/public address.*** The domain name clients will use to access DTR.
|
||||
* ***TLS private key.*** The server private key.
|
||||
* ***TLS certificate chain.*** The server certificate and any intermediate public
|
||||
certificates from your certificate authority (CA). This certificate needs to be valid for the DTR public address,
|
||||
and have SANs for all addresses used to reach the DTR replicas, including load
|
||||
balancers.
|
||||
* TLS private key is the server private key.
|
||||
* TLS CA is the root CA public certificate.
|
||||
* ***TLS CA.*** The root CA public certificate.
|
||||
|
||||
Finally, click **Save** for the changes to take effect.
|
||||
Click **Save** to apply your changes.
|
||||
|
||||
If you're using certificates issued by a globally trusted certificate authority,
|
||||
If you've added certificates issued by a globally trusted CA,
|
||||
any web browser or client tool should now trust DTR. If you're using an internal
|
||||
certificate authority, you'll need to configure your system to trust that
|
||||
certificate authority.
|
||||
CA, you will need to configure the client systems to trust that
|
||||
CA.
|
||||
|
||||
### Command line interface
|
||||
|
||||
See [docker/dtr install](/reference/dtr/2.7/cli/install/) and [docker/dtr reconfigure](/reference/dtr/2.7/cli/reconfigure/) for TLS certificate options and usage.
|
||||
|
||||
## Where to go next
|
||||
|
||||
- [Set up external storage](external-storage/index.md)
|
||||
- [Enable single sign-on](enable-single-sign-on)
|
||||
- [Set up external storage](external-storage)
|
||||
|
|
|
@ -35,32 +35,43 @@ After you configure all the options, you should see a Docker CLI command that yo
|
|||
to install DTR.
|
||||
|
||||
```bash
|
||||
docker run -it --rm \
|
||||
{{ page.dtr_org }}/{{ page.dtr_repo }} install \
|
||||
--dtr-external-url dtr-example.com
|
||||
$ docker run -it --rm \
|
||||
{{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} install \
|
||||
--dtr-external-url <dtr.example.com> \
|
||||
--ucp-node <ucp-node-name> \
|
||||
--ucp-username admin \
|
||||
--ucp-url <ucp-url>
|
||||
```
|
||||
|
||||
You can run the DTR install command on any node where `docker` is installed. To verify, run `docker version`.
|
||||
Check that your DTR version is compatible with your Engine - Enterprise and UCP versions using the [compatibility matrix](https://success.docker.com/article/compatibility-matrix).
|
||||
You can run the DTR install command on any node with the Docker Engine
|
||||
installed. Ensure that this node also has connectivity to the UCP cluster. DTR will
|
||||
not be installed on the node you run the install command on. DTR will be
|
||||
installed on the UCP worker node defined by the `--ucp-node` flag.
|
||||
|
||||
As an example, you can SSH into a UCP node and install DTR from there. Running the installation command in interactive TTY or `-it` mode means you will be prompted for any required additional information.
|
||||
[Learn more about installing DTR](/reference/dtr/2.7/cli/install/).
|
||||
As an example, you could SSH into a UCP node and run the DTR install command
|
||||
from there. Running the installation command in interactive TTY or `-it` mode
|
||||
means you will be prompted for any required additional information. [Learn
|
||||
more about installing DTR](/reference/dtr/2.7/cli/install/).
|
||||
|
||||
To pull a specific version of DTR, run the following:
|
||||
To install a specific version of DTR, replace `{{ page.dtr_version }}` with your
|
||||
desired version in the [installation command](#step-3-install-dtr) above. Find
|
||||
all DTR versions in the [DTR release notes](/ee/dtr/release-notes/) page.
|
||||
|
||||
```bash
|
||||
docker pull {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }}
|
||||
```
|
||||
Replace `{{ page.dtr_version }}` with your desired version. Run the [installation command](#step-3-install-dtr) with the image you just pulled.
|
||||
DTR is deployed with self-signed certificates by default, so UCP might not be
|
||||
able to pull images from DTR. Using the `--dtr-external-url <dtr-domain>:<port>`
|
||||
optional flag during installation, or during a reconfiguration, so that UCP is
|
||||
automatically reconfigured to trust DTR.
|
||||
|
||||
DTR is deployed with self-signed certificates by default, so UCP might not be able to pull images from DTR.
|
||||
Use the `--dtr-external-url <dtr-domain>:<port>` optional flag during installation, or during a reconfiguration, so that DTR registers itself with UCP. To verify, see `https://<ucp-fqdn>/manage/settings/dtr` or navigate to **Admin Settings > Docker Trusted Registry** from the UCP web UI. Under the hood, UCP modifies `/etc/docker/certs.d` for each host and adds DTR's CA certificate. UCP can then pull images from DTR because the Docker Engine for each node in the UCP swarm has been configured to trust DTR.
|
||||
To verify, see `https://<ucp-fqdn>/manage/settings/dtr` or navigate to **Admin
|
||||
Settings > Docker Trusted Registry** from the UCP web UI. Under the hood, UCP
|
||||
modifies `/etc/docker/certs.d` for each host and adds DTR's CA certificate. UCP
|
||||
can then pull images from DTR because the Docker Engine for each node in the
|
||||
UCP swarm has been configured to trust DTR.
|
||||
|
||||
Additionally, with DTR 2.7, you can [enable browser authentication via client certificates](/ee/enable-authentication-via-client-certificates/)
|
||||
at install time. This bypasses the DTR login page and hides the logout button, thereby skipping the need for entering your username and password.
|
||||
Additionally, with DTR 2.7, you can [enable browser authentication via client
|
||||
certificates](/ee/enable-authentication-via-client-certificates/) at install
|
||||
time. This bypasses the DTR login page and hides the logout button, thereby
|
||||
skipping the need for entering your username and password.
|
||||
|
||||
## Step 4. Check that DTR is running
|
||||
|
||||
|
|
|
@ -15,7 +15,7 @@ all the images. Then you copy that package to the host where you’ll install DT
|
|||
|
||||
## Versions available
|
||||
|
||||
{% include components/ddc_url_list_2.html product="dtr" version="2.6" %}
|
||||
{% include components/ddc_url_list_2.html product="dtr" version="2.7" %}
|
||||
|
||||
## Download the offline package
|
||||
|
||||
|
|
|
@ -30,6 +30,8 @@ Note that Windows container images are typically larger than Linux ones and for
|
|||
this reason, you should consider provisioning more local storage for Windows
|
||||
nodes and for DTR setups that will store Windows container images.
|
||||
|
||||
When the image scanning feature is used, we recommend that you have at least 32 GB of RAM. As developers and teams push images into DTR, the repository grows over time, so you should inspect RAM, CPU, and disk usage on DTR nodes and increase resources when resource saturation is observed on a regular basis.
|
||||
|
||||
## Ports used
|
||||
|
||||
When installing DTR on a node, make sure the following ports are open on that
|
||||
|
@ -42,6 +44,23 @@ node:
|
|||
|
||||
These ports are configurable when installing DTR.
|
||||
|
||||
## UCP Configuration
|
||||
|
||||
When installing DTR on a UCP cluster, Administrators need to be able to deploy
|
||||
containers on "UCP manager nodes or nodes running DTR". This setting can be
|
||||
adjusted in the [UCP Settings
|
||||
menu](/ee/ucp/admin/configure/restrict-services-to-worker-nodes/). Once the
|
||||
installation has completed and all additional DTR replicas have been deployed,
|
||||
this UCP setting can be unchecked.
|
||||
|
||||
The DTR installation will fail with the following error message if
|
||||
Administrators are unable to deploy on "UCP manager nodes or nodes running
|
||||
DTR".
|
||||
|
||||
```
|
||||
Error response from daemon: {"message":"could not find any nodes on which the container could be created"}
|
||||
```
|
||||
|
||||
## Compatibility and maintenance lifecycle
|
||||
|
||||
Docker Enterprise Edition is a software subscription that includes three products:
|
||||
|
|
|
@ -4,7 +4,7 @@ description: Learn how to upgrade your Docker Trusted Registry
|
|||
keywords: dtr, upgrade, install
|
||||
---
|
||||
|
||||
{% assign previous_version="2.5" %}
|
||||
{% assign previous_version="2.6" %}
|
||||
|
||||
DTR uses [semantic versioning](http://semver.org/) and Docker aims to achieve specific
|
||||
guarantees while upgrading between versions. While downgrades are not supported, Docker supports upgrades according to the following rules:
|
||||
|
@ -40,7 +40,7 @@ to avoid any business impacts.
|
|||
|
||||
> Upgrade Best Practices
|
||||
>
|
||||
> There are [important changes to the upgrade process](/ee/upgrade) that, if not correctly followed, can have impact on the availability of applications running on the Swarm during upgrades. These constraints impact any upgrades coming from any version before `18.09` to version `18.09` or greater. See [Cluster Upgrade Best Practices](/ee/upgrade.md#cluster-upgrade-best-practices) for more details. Additionally, to ensure high availability during the DTR upgrade, you can also drain the DTR replicas and move their workloads to updated workers. To do this, you can join new workers as DTR replicas to your existing cluster and then remove the old replicas. See [docker/dtr join](/reference/dtr/2.6/cli/join) and [docker/dtr remove](/reference/dtr/2.6/cli/remove) for command options and details.
|
||||
> There are [important changes to the upgrade process](/ee/upgrade) that, if not correctly followed, can have impact on the availability of applications running on the Swarm during upgrades. These constraints impact any upgrades coming from any version before `18.09` to version `18.09` or greater. See [Cluster Upgrade Best Practices](/ee/upgrade.md#cluster-upgrade-best-practices) for more details. Additionally, to ensure high availability during the DTR upgrade, you can also drain the DTR replicas and move their workloads to updated workers. To do this, you can join new workers as DTR replicas to your existing cluster and then remove the old replicas. See [docker/dtr join](/reference/dtr/2.7/cli/join/) and [docker/dtr remove](/reference/dtr/2.7/cli/remove/) for command options and details.
|
||||
|
||||
## Minor upgrade
|
||||
|
||||
|
@ -73,13 +73,13 @@ nodes if upgrading offline), run the upgrade command:
|
|||
|
||||
```bash
|
||||
docker run -it --rm \
|
||||
{{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} upgrade \
|
||||
--ucp-insecure-tls
|
||||
{{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} upgrade
|
||||
```
|
||||
|
||||
By default, the upgrade command runs in interactive mode and prompts you for
|
||||
any necessary information. You can also check the
|
||||
[reference documentation](/reference/dtr/2.6/cli/index.md) for other existing flags.
|
||||
[upgrade reference page](/reference/dtr/2.7/cli/upgrade/) for other existing flags.
|
||||
If you are performing the upgrade on an existing replica, pass the `--existing-replica-id` flag.
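A minimal sketch of an upgrade against an existing replica; `<replica-id>` is a placeholder, which you can read, for example, from the DTR container names in `docker ps` output on that node:

```bash
# <replica-id> stands in for the 12-character DTR replica ID
docker run -it --rm \
  {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} upgrade \
  --existing-replica-id <replica-id>
```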
|
||||
|
||||
The upgrade command will start replacing every container in your DTR cluster,
|
||||
one replica at a time. It will also perform certain data migrations. If anything
|
||||
|
|
After Width: | Height: | Size: 25 KiB |
Before Width: | Height: | Size: 166 KiB After Width: | Height: | Size: 75 KiB |
Before Width: | Height: | Size: 105 KiB After Width: | Height: | Size: 25 KiB |
Before Width: | Height: | Size: 72 KiB |
After Width: | Height: | Size: 29 KiB |
Before Width: | Height: | Size: 117 KiB After Width: | Height: | Size: 125 KiB |
|
@ -20,10 +20,11 @@ to upgrade your installation to the latest release.
|
|||
* [Version 2.5](#version-25)
|
||||
* [Version 2.4](#version-24)
|
||||
|
||||
# Version 2.7
|
||||
## Version 2.7.0
|
||||
(2019-7-22)
|
||||
|
||||
## 2.7.0-beta4
|
||||
(2019-5-16)
|
||||
### Security
|
||||
Refer to [DTR image vulnerabilities](https://success.docker.com/article/dtr-image-vulnerabilities) for details regarding actions to be taken and any status updates, issues, and recommendations.
|
||||
|
||||
### New Features
|
||||
|
||||
|
@ -38,7 +39,6 @@ to upgrade your installation to the latest release.
|
|||
|
||||
* It is now possible to distribute [docker apps](https://github.com/docker/app) via DTR. This includes application pushes, pulls, and general management features like promotions, mirroring, and pruning.
|
||||
|
||||
|
||||
* **Registry CLI**
|
||||
|
||||
* The Docker CLI now includes a `docker registry` management command which lets you interact with Docker Hub and trusted registries.
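  For example (the registry address is a placeholder, and exact arguments may vary in this experimental release):

  ```bash
  # Print details about a trusted registry using the experimental management command
  docker registry info <dtr-external-url>
  ```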
|
||||
|
@ -55,18 +55,25 @@ to upgrade your installation to the latest release.
|
|||
|
||||
* Users can now edit mirroring policies. (docker/dhe-deploy #10157)
|
||||
* `docker run -it --rm docker/dtr:2.7.0-beta4` now includes a global option, `--version`, which prints the DTR version and associated commit hash. (docker/dhe-deploy #10144)
|
||||
* Users can now set up push and pull mirroring policies via the API using an authentication token instead of their credentials. (docker/dhe-deploy#10002)
|
||||
* Users can now set up push and pull mirroring policies through the API using an authentication token instead of their credentials. (docker/dhe-deploy#10002)
|
||||
* DTR is now on Golang `1.12.4`. (docker/dhe-deploy#10274)
|
||||
* For new mirroring policies, the **Mirror direction** now defaults to the Pull tab instead of Push. (docker/dhe-deploy#10004)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* Fixed an issue where a webhook notification was triggered twice on non-scanning image promotion events on a repository with scan on push enabled. (docker/dhe-deploy#9909)
|
||||
|
||||
|
||||
### Known issues
|
||||
|
||||
* Application mirroring to and from Docker Hub does not work, as experimental applications are not yet fully supported on Docker Hub.
* The time at which an application was pushed is displayed incorrectly.
* The Application Configuration in the UI is incorrectly labeled as an invocation image.
* When changing your password, if an incorrect password is entered, the UI does not give the appropriate error message, and the save button stays in a loading state.
|
||||
* Workaround: Refresh the page.
|
||||
* After a promotion policy is created, it cannot be edited through the UI.
|
||||
* Workaround: Delete the promotion policy and recreate it, or use the API to view and edit it.
|
||||
* Non-admin users cannot create promotion policies through the UI.
|
||||
|
||||
### Deprecations
|
||||
|
||||
|
@ -74,7 +81,6 @@ to upgrade your installation to the latest release.
|
|||
|
||||
* The `--no-image-check` flag has been removed from the `upgrade` command as image check is no longer a part of the upgrade process.
|
||||
|
||||
|
||||
# Version 2.6
|
||||
|
||||
## 2.6.8
|
||||
|
|
|
@ -0,0 +1,249 @@
|
|||
---
|
||||
title: Enable authentication using TLS client certificates
|
||||
description: Learn how to enable user authentication via client certificates from your own public key infrastructure (PKI).
|
||||
keywords: PKI, Client Certificates, Passwordless Authentication, Docker Enterprise
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
In many organizations, authenticating to systems with a username and password combination is either restricted or outright prohibited. With Docker Enterprise 3.0, UCP's [CLI client certificate-based authentication](/ee/ucp/user-access/cli/) has been extended to the web user interface (web UI). DTR has also been enhanced to work with UCP's internally generated client bundles for client certificate-based authentication. If you have an external public key infrastructure (PKI) system, you can manage user authentication using a pool of X.509 client certificates in lieu of usernames and passwords.
|
||||
|
||||
## Benefits
|
||||
|
||||
The following table outlines existing and added capabilities when using client certificates — both internal to UCP and issued by an external certificate authority (CA) — for authentication.
|
||||
|
||||
| Operation | Benefit |
|
||||
| ------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| [UCP browser authentication](#ucp--dtr-browser-authentication) | Previously, UCP client bundles enabled communication between a local Docker client and UCP without the need for a username and password. Importing your client certificates into the browser extends this capability to the UCP web UI. |
|
||||
| [DTR browser authentication](#ucp--dtr-browser-authentication) | You can bypass the login page for the DTR web UI when you use TLS client certificates as a DTR authentication method. |
|
||||
| [Image pulls and pushes to DTR](#image-pulls-and-pushes-to-dtr) | You can update Docker engine with a client certificate for image pulls and pushes to DTR without the need for `docker login`. |
|
||||
| [Image signing](#image-signing) | You can use client certificates to sign images that you push to DTR. Depending on which client you configure to talk to DTR, the certificate files need to be located in specific directories. Alternatively, you can enable system-wide trust of your custom root certificates. |
|
||||
| [DTR API access](#dtr-api-access) | You can use TLS client certificates in lieu of your user credentials to access the DTR API. |
|
||||
| [Notary CLI operations with DTR](#notary-cli-operations-with-dtr) | You can set your DTR as the remote trust server location and pass the certificate flags directly to the Notary CLI to access your DTR repositories. |
|
||||
|
||||
## Limitations
|
||||
|
||||
- The security of client certificates issued by your organization's PKI is outside of UCP’s control. UCP administrators are responsible for instructing their users on how to authenticate via client certificates.
|
||||
- Username and password authentication cannot be disabled.
|
||||
- If client certificates have been configured, they will be used for
|
||||
all `docker push` and `docker pull` operations for _all users_ of the same
|
||||
machine.
|
||||
- Docker Enterprise 3.0 does not check certificate revocation lists (CRLs) or Online Certificate Status Protocol (OCSP) for revoked certificates.
|
||||
|
||||
## UCP / DTR browser authentication
|
||||
|
||||
The following instructions apply to UCP and DTR administrators. For non-admin users, contact your administrator for details on your PKI's client certificate configuration.
|
||||
|
||||
To bypass the browser login pages and hide the logout buttons for both UCP and DTR, follow the steps below.
|
||||
|
||||
1. Add your organization's root CA certificates [via the UCP web UI](/ee/ucp/admin/configure/use-your-own-tls-certificates/#configure-ucp-to-use-your-own-tls-certificates-and-keys) or [via the CLI installation command](https://success.docker.com/article/how-do-i-provide-an-externally-generated-security-certificate-during-the-ucp-command-line-installation).
|
||||
|
||||
For testing purposes, you can download an [admin client bundle](/ee/ucp/user-access/cli/#download-client-certificates) from UCP and [convert the client certificates to `pkcs12`](#client-certificate-to-pkcs12-file-conversion).
|
||||
|
||||
1. Download UCP's `ca.pem` from `https://<ucp-url>/ca` either in the browser or via `curl`. When using `curl`, redirect the response output to a file.
|
||||
`curl -sk https://<ucp-url>/ca -o ca.pem`
|
||||
|
||||
1. Enable client certificate authentication for DTR. If previously installed, reconfigure DTR with your UCP hostname's root CA certificate. This will be your organization's root certificate(s) appended to UCP's internal root CA certificates.
|
||||
|
||||
```
|
||||
docker run --rm -it docker/dtr:2.7.0 reconfigure --debug --ucp-url \
|
||||
<ucp-url> --ucp-username <ucp_admin_user> --ucp-password <ucp_admin_password> \
|
||||
--enable-client-cert-auth --client-cert-auth-ca "$(cat ca.pem)"
|
||||
```
|
||||
|
||||
See [DTR installation](/reference/dtr/2.7/cli/install/) and [DTR reconfiguration](/reference/dtr/2.7/cli/reconfigure/) CLI reference pages for an explanation of the different options.
|
||||
|
||||
1. Import the PKCS12 file into [the browser](#pkcs12-file-browser-import) or [Keychain Access](https://www.digicert.com/ssl-support/p12-import-export-mac-mavericks-server.htm#import_certificate) if you're running macOS.
|
||||
|
||||
### Client certificate to PKCS12 file conversion
|
||||
|
||||
From the command line, switch to the directory of your client bundle and run the following command to convert the client bundle public and private key pair to a `.p12` file.
|
||||
|
||||
```bash
|
||||
openssl pkcs12 -export -out cert.p12 -inkey key.pem -in cert.pem
|
||||
```
|
||||
|
||||
Create the file with a simple password; you will be prompted for it when you import the certificate into the browser or macOS Keychain Access.
|
||||
|
||||
### PKCS12 file browser import
|
||||
|
||||
Instructions on how to import a certificate into a web browser vary according to your platform, OS, preferred browser, and browser version. As a general rule, refer to one of the following how-to articles:
|
||||
- ***Firefox***:
|
||||
https://www.sslsupportdesk.com/how-to-import-a-certificate-into-firefox/
|
||||
- ***Chrome***:
|
||||
https://www.comodo.com/support/products/authentication_certs/setup/win_chrome.php
|
||||
- ***Internet Explorer***:
|
||||
https://www.comodo.com/support/products/authentication_certs/setup/ie7.php
|
||||
|
||||
## Image pulls and pushes to DTR
|
||||
|
||||
To pull and push images to your DTR (with the client certificate authentication method enabled) without performing a `docker login`, do the following:
|
||||
|
||||
1. Create a directory for your DTR public address or FQDN (Fully Qualified Domain Name) within your operating system's TLS certificate directory.
|
||||
|
||||
1. As a [superuser](https://en.wikipedia.org/wiki/Superuser), copy the private key (`client.key`) and certificate (`client.cert`) into that directory on the machine you are using for pulling and pushing to DTR without doing a `docker login`. Note that the filenames must match.
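    As a sketch, assuming `cert.pem` and `key.pem` came from a UCP client bundle and `<dtrurl>` is your DTR address:

    ```bash
    # Inside certs.d, the filenames must be client.cert and client.key
    sudo mkdir -p /etc/docker/certs.d/<dtrurl>
    sudo cp key.pem /etc/docker/certs.d/<dtrurl>/client.key
    sudo cp cert.pem /etc/docker/certs.d/<dtrurl>/client.cert
    ```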
|
||||
|
||||
1. Obtain the CA certificate from your DTR server, `ca.crt` from `https://<dtrurl>/ca`, and copy `ca.crt` to your operating system's TLS certificate directory so that your machine's Docker Engine will trust DTR. For Linux, this is `/etc/docker/certs.d/<dtrurl>/`. On Docker for Mac, this is `/<home_directory>/.docker/certs.d/<dtr_fqdn>/`.
|
||||
|
||||
This is a convenient alternative to adding the DTR server certificate to the system trust store (for example, `/etc/ca-certs` on Ubuntu) and running `update-ca-certificates`.
|
||||
```curl
|
||||
curl -k https://<dtr>/ca -o ca.crt
|
||||
```
|
||||
|
||||
On Ubuntu:
|
||||
```bash
|
||||
cp ca.crt /etc/ca-certs
|
||||
```
|
||||
|
||||
1. Restart the Docker daemon for the changes to take effect. See [Configure your host](/ee/dtr/user/access-dtr/#configure-your-host) for different ways to restart the Docker daemon.
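    On most Linux distributions that use systemd, for example, this is:

    ```bash
    sudo systemctl restart docker
    ```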
|
||||
|
||||
### Add your DTR server CA certificate to system level
|
||||
|
||||
You have the option to add your DTR server CA certificate to your system's trusted root certificate pool: the Keychain on macOS or `/etc/ca-certificates/` on Ubuntu. Note that you will have to remove the certificate if your DTR public address changes.
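As a sketch, on Ubuntu this might look like the following (directory layouts can vary between releases):

```bash
# update-ca-certificates picks up *.crt files placed under this directory
sudo cp ca.crt /usr/local/share/ca-certificates/dtr-ca.crt
sudo update-ca-certificates
```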
|
||||
|
||||
### Reference guides
|
||||
|
||||
- [Docker Engine](https://docs.docker.com/engine/security/certificates/)
|
||||
- Docker Desktop
|
||||
- [Enterprise for Mac](/ee/desktop/user/mac-user/#add-tls-certificates)
|
||||
- [Enterprise for Windows](/ee/desktop/user/windows-user/#adding-tls-certificates)
|
||||
- [Community for Mac](/docker-for-mac/#add-tls-certificates)
|
||||
- [Community for Windows](/docker-for-windows/faqs/#certificates)
|
||||
|
||||
Note: The above configuration means that Docker Engine will use the same client certificate for all pulls and pushes to DTR for ***all users*** of the same machine.
|
||||
|
||||
## Image signing
|
||||
|
||||
DTR provides the Notary service for using Docker Content Trust (DCT) out of the box.
|
||||
|
||||
<table style="width:100%;">
|
||||
<colgroup>
|
||||
<col style="width: 35%" />
|
||||
<col style="width: 30%" />
|
||||
<col style="width: 35%" />
|
||||
</colgroup>
|
||||
<thead>
|
||||
<tr class="night">
|
||||
<th>Implementation</th>
|
||||
<th>Component Pairing</th>
|
||||
<th>Settings</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr class="odd">
|
||||
<td><a href="/engine/security/trust/content_trust/#signing-images-with-docker-content-trust">Sign with <code>docker trust sign</code></a></td>
|
||||
<td><ul>
|
||||
<li>Docker Engine - Enterprise 18.03 or higher</li>
|
||||
<li>Docker Engine - Community 17.12 or higher</li>
|
||||
</ul></td>
|
||||
<td>Copy <code>ca.crt</code> from <code>https://<dtr-external-url>/ca</code> to:
|
||||
<ul>
|
||||
<li>Linux: <code>/etc/docker/certs.d/</code></li>
|
||||
<li>Mac: <code><home_directory>/.docker/certs.d/</code></li>
|
||||
</ul></td>
|
||||
</tr>
|
||||
<tr class="even">
|
||||
<td><a href="/engine/security/trust/content_trust/#runtime-enforcement-with-docker-content-trust">Enforce signature or hash verification on the Docker client</a></td>
|
||||
<td><ul>
|
||||
<li>Docker Engine - Enterprise 17.06 or higher</li>
|
||||
<li>Docker Engine - Community 17.06 or higher</li>
|
||||
</ul></td>
|
||||
<td><code>export DOCKER_CONTENT_TRUST=1</code> to enable content trust on the Docker client. Copy <code>ca.crt</code> from <code>https://<dtr-external-url>/ca</code> to <code>/<home_directory>/.docker/tls/</code> on Linux and macOS. <code>docker push</code> will sign your images.</td>
|
||||
|
||||
</tr>
|
||||
<tr class="odd">
|
||||
<td><a href="/ee/dtr/user/manage-images/sign-images/">Sign images that UCP can trust</a></td>
|
||||
<td><ul>
|
||||
<li>Docker Engine - Enterprise 17.06 or higher</li>
|
||||
<li>Docker UCP 2.2 or higher</li>
|
||||
</ul></td>
|
||||
<td>Configure UCP to <a href="/ee/ucp/admin/configure/run-only-the-images-you-trust/#configure-ucp">run only signed images</a>. See <a href="/ee/dtr/user/manage-images/">Sign an image</a> for detailed steps.</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
## DTR API access
|
||||
|
||||
With `curl`, you can interact with the DTR
|
||||
API by passing a public certificate and private key pair instead of
|
||||
your DTR username and password/authentication token.
|
||||
|
||||
```bash
|
||||
curl --cert cert.pem --key key.pem -X GET \
|
||||
"https://<dtr-external-url>/api/v0/repositories?pageSize=10&count=false" \
|
||||
-H "accept:application/json"
|
||||
```
|
||||
|
||||
In the above example, `cert.pem` contains the public certificate and `key.pem`
|
||||
contains the private key. For non-admin users, you can generate a client bundle from UCP or contact your administrator for your public and private key pair.
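For example, a downloaded client bundle (the filename below is illustrative) unpacks to `cert.pem` and `key.pem` among other files:

```bash
unzip ucp-bundle-admin.zip
```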
|
||||
|
||||
For Mac-specific quirks, see [curl on certain macOS versions](#curl-on-certain-macos-versions).
|
||||
|
||||
## Notary CLI operations with DTR
|
||||
|
||||
For establishing mutual trust between the Notary client and your trusted registry (DTR) using the Notary CLI, place your TLS client certificates in `<home_directory>/.docker/tls/<dtr-external-url>/` as `client.cert` and `client.key`. Note that the filenames must match. Pass the FQDN or publicly accessible IP address of your registry along with the TLS client certificate options to the Notary client. To get started, see [Use the Notary client for advanced users](/notary/advanced_usage/).
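As a minimal sketch, listing the signed tags in a DTR repository might look like this, with the registry address and repository as placeholders:

```bash
# -s sets the remote trust server, -d the local trust directory
notary -s https://<dtr-external-url> -d ~/.docker/trust \
  list <dtr-external-url>/<account>/<repository>
```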
|
||||
|
||||
> ### Self-signed DTR server certificate
|
||||
>
|
||||
> Also place `ca.crt` in `<home_directory>/.docker/tls/<dtr-external-url>/` when you're using a self-signed server certificate for DTR.
|
||||
|
||||
## Troubleshooting tips
|
||||
|
||||
### DTR authentication via client certificates
|
||||
|
||||
Hit your DTR's `basic_info` endpoint via `curl`:
|
||||
|
||||
```curl
|
||||
curl --cert cert.pem --key key.pem -X GET "https://<dtr-external-url>/basic_info"
|
||||
```
|
||||
|
||||
If successfully configured, you should see `TLSClientCertificate` listed as the `AuthnMethod` in the JSON response.
|
||||
|
||||
#### Example Response
|
||||
|
||||
```json
|
||||
{
|
||||
"CurrentVersion": "2.7.0",
|
||||
"User": {
|
||||
"name": "admin",
|
||||
"id": "30f53dd2-763b-430d-bafb-dfa361279b9c",
|
||||
"fullName": "",
|
||||
"isOrg": false,
|
||||
"isAdmin": true,
|
||||
"isActive": true,
|
||||
"isImported": false
|
||||
},
|
||||
"IsAdmin": true,
|
||||
"AuthnMethod": "TLSClientCertificate"
|
||||
}
|
||||
```
|
||||
|
||||
### DTR as an insecure registry
|
||||
|
||||
Avoid adding DTR to Docker Engine's list of insecure registries as a workaround. This has the side effect of disabling the use of TLS certificates.
|
||||
|
||||
### DTR server certificate errors
|
||||
|
||||
#### Example Error
|
||||
|
||||
```bash
|
||||
Error response from daemon: Get https://35.165.223.150/v2/: x509: certificate is valid for 172.17.0.1, not 35.165.223.150
|
||||
```
|
||||
|
||||
- On the web UI, make sure to add the IP address or the FQDN associated with your custom TLS certificate under **System > General > Domains & Proxies**.
|
||||
|
||||
- From the command line interface, [reconfigure DTR](/reference/dtr/2.7/cli/reconfigure/) with the `--dtr-external-url` option and the associated PEM files for your certificate.
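As a sketch, assuming the certificate, key, and CA PEM files are in the current directory and the domain is a placeholder:

```bash
# dtr.example.com:443 and the PEM filenames are illustrative values
docker run -it --rm docker/dtr:2.7.0 reconfigure \
  --dtr-external-url dtr.example.com:443 \
  --dtr-cert "$(cat cert.pem)" \
  --dtr-key "$(cat key.pem)" \
  --dtr-ca "$(cat ca.pem)"
```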
|
||||
|
||||
### Intermediate certificates
|
||||
|
||||
For a chain of trust that includes intermediate certificates, you can optionally add those certificates when installing or reconfiguring DTR with `--enable-client-cert-auth` and `--client-cert-auth-ca`. To do so, combine all of the certificates into a single PEM file.
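For example, assuming hypothetical `intermediate-ca.pem` and `root-ca.pem` files:

```bash
# Concatenate the chain into one bundle, then pass it at install or reconfigure time
cat intermediate-ca.pem root-ca.pem > ca-bundle.pem
# ... --enable-client-cert-auth --client-cert-auth-ca "$(cat ca-bundle.pem)"
```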
|
||||
|
||||
### curl on certain macOS versions
|
||||
|
||||
Some versions of macOS include a `curl` build that only accepts `.p12` files and specifically requires a `./` prefix in front of the file name when running `curl` from the same directory as the `.p12` file:
|
||||
|
||||
```
|
||||
curl --cert ./client.p12 -X GET \
|
||||
"https://<dtr-external-url>/api/v0/repositories?pageSize=10&count=false" \
|
||||
-H "accept:application/json"
|
||||
```
|
After Width: | Height: | Size: 313 KiB |
Before Width: | Height: | Size: 776 KiB |
18
ee/index.md
|
@ -7,12 +7,12 @@ redirect_from:
|
|||
- /manuals/
|
||||
---
|
||||
|
||||
Docker Enterprise 2.1 is a Containers-as-a-Service (CaaS) platform that enables a secure software supply
|
||||
Docker Enterprise is a Containers as a Service (CaaS) platform that enables a secure software supply
|
||||
chain and deploys diverse applications for high availability across disparate
|
||||
infrastructure, both on-premises and in the cloud.
|
||||
|
||||
Docker Enterprise is a secure, scalable, and supported container
|
||||
platform for building and orchestrating applications across multi-tenant Linux, and Windows Server 2016.
|
||||
Docker Enterprise is a secure, scalable, and supported container platform for building and
|
||||
orchestrating applications across multi-tenant Linux, Windows Server 2016, and Windows Server 2019.
|
||||
|
||||
Docker Enterprise enables deploying your workloads for high availability (HA) onto the
|
||||
orchestrator of your choice. Docker Enterprise automates many of the tasks that
|
||||
|
@ -20,7 +20,7 @@ orchestration requires, like provisioning pods, containers, and cluster
|
|||
resources. Self-healing components ensure that Docker Enterprise clusters remain highly
|
||||
available.
|
||||
|
||||
Role-based access control applies to Kubernetes and Swarm orchestrators, and
|
||||
Role-based access control (RBAC) applies to Kubernetes and Swarm orchestrators, and
|
||||
communication within the cluster is secured with TLS.
|
||||
[Docker Content Trust](/engine/security/trust/content_trust/) is enforced
|
||||
for images on all of the orchestrators.
|
||||
|
@ -34,7 +34,7 @@ cluster and applications through a single interface.
|
|||
|
||||
## Docker Enterprise features
|
||||
|
||||
Docker Enterprise 2.1 provides multi-architecture orchestration for Kubernetes and
|
||||
Docker Enterprise provides multi-architecture orchestration for Kubernetes and
|
||||
Swarm workloads. Docker Enterprise enables a secure software supply chain, with image
|
||||
promotion, mirroring between registries, and signing/scanning enforcement for
|
||||
Kubernetes images.
|
||||
|
@ -75,7 +75,7 @@ to schedule Kubernetes or Swarm workloads.
|
|||
|
||||
### Orchestration platform features
|
||||
|
||||
{: .with-border}
|
||||
{: .with-border}
|
||||
|
||||
- Docker Enterprise manager nodes are both Swarm managers and Kubernetes masters,
|
||||
to enable high availability
|
||||
|
@ -112,10 +112,8 @@ You can also deploy and monitor your applications and services.
|
|||
|
||||
## Built-in security and access control
|
||||
|
||||
Docker Enterprise has its own built-in authentication mechanism with role-based access
|
||||
control (RBAC), so that you can control who can access and make changes to your
|
||||
swarm and applications. Also, Docker Enterprise authentication integrates with LDAP
|
||||
services.
|
||||
Docker Enterprise has its own built-in authentication mechanism with RBAC, so that you can control who can access and make changes to your
|
||||
swarm and applications. Also, Docker Enterprise authentication integrates with LDAP services.
|
||||
[Learn about role-based access control](access-control/index.md).
|
||||
|
||||
{: .with-border}
|
||||
|
|
|
@ -1,30 +1,35 @@
|
|||
---
|
||||
title: Docker Enterprise Platform release notes
|
||||
description: Learn about the new features, bug fixes, and breaking changes for Docker Enterprise Platform.
|
||||
title: Docker Enterprise release notes
|
||||
description: Learn about the new features, bug fixes, and breaking changes for Docker Enterprise.
|
||||
keywords: engine enterprise, ucp, dtr, desktop enterprise, whats new, release notes
|
||||
---
|
||||
|
||||
This page provides information about Docker Enterprise 3.0. For
|
||||
detailed information about each enterprise component, refer to the individual component release notes
|
||||
pages listed in the following **Docker Enterprise components install and upgrade** section.
|
||||
|
||||
## What’s New?
|
||||
|
||||
| Feature | Component | Component version |
|
||||
|---------|-----------|-------------------|
|
||||
| [Group Managed Service Accounts (gMSA)](#) | UCP | 3.2.0 |
|
||||
| [Open Security Controls Assessment Language (OSCAL)](#) | UCP | 3.2.0 |
|
||||
| [Container storage interface (CSI)](#) | UCP | 3.2.0 |
|
||||
| [Internet Small Computer System Interface (iSCSI)](#) | UCP | 3.2.0 |
|
||||
| [System for Cross-domain Identity Management (SCIM)](#) | UCP | 3.2.0 |
|
||||
| [Registry CLI](#) | DTR | 2.7.0 |
|
||||
| [App Distribution](#) | DTR | 2.7.0 |
|
||||
| [Client certificate-based Authentication](#) | DTR | 2.7.0 |
|
||||
| [Group Managed Service Accounts (gMSA)](/engine/swarm/services.md#gmsa-for-swarm) | UCP | 3.2.0 |
|
||||
| [Open Security Controls Assessment Language (OSCAL)](/compliance/oscal/) | UCP | 3.2.0 |
|
||||
| [Container storage interface (CSI)](/ee/ucp/kubernetes/storage/use-csi/) | UCP | 3.2.0 |
|
||||
| [Internet Small Computer System Interface (iSCSI)](/ee/ucp/kubernetes/storage/use-iscsi/) | UCP | 3.2.0 |
|
||||
| [System for Cross-domain Identity Management (SCIM)](/ee/ucp/admin/configure/integrate-scim/) | UCP | 3.2.0 |
|
||||
| [Pod Security Policies](/ee/ucp/kubernetes/pod-security-policies/) | UCP | 3.2.0 |
|
||||
| [Docker Registry CLI (Experimental)](/engine/reference/commandline/registry/) | DTR | 2.7.0 |
|
||||
| [App Distribution](/ee/dtr/user/manage-applications/) | DTR | 2.7.0 |
|
||||
| [Client certificate-based Authentication](/ee/enable-client-certificate-authentication/) | DTR and UCP|2.7.0 (DTR) and 3.2.0 (UCP)|
|
||||
| [Application Designer](/ee/desktop/app-designer/) | Docker Desktop Enterprise | 0.1.4 |
|
||||
| [Docker App (Experimental)](/app/working-with-app/) |CLI | 0.8.0 |
|
||||
| [Docker Assemble (Experimental)](/assemble/install/) | CLI | 0.36.0 |
|
||||
| [Docker Buildx (Experimental)](/buildx/working-with-buildx/)| CLI | 0.2.2 |
|
||||
| [Docker Cluster](/cluster/overview/) | CLI | 1.0.0 |
|
||||
| [Docker Cluster](/cluster/) | CLI | 1.0.0 |
|
||||
| [Docker Template CLI (Experimental)](/app-template/working-with-template/) | CLI | 0.1.4 |
|
||||
|
||||
|
||||
## Product install and upgrade
|
||||
## Docker Enterprise components install and upgrade
|
||||
|
||||
| Component Release Notes | Version | Install | Upgrade |
|
||||
|---------|-----------|-------------------|-------------- |
|
||||
|
@ -36,47 +41,5 @@ keywords: engine enterprise, ucp, dtr, desktop enterprise, whats new, release no
|
|||
Refer to the [Compatibility Matrix](https://success.docker.com/article/compatibility-matrix) and the [Maintenance Lifecycle](https://success.docker.com/article/maintenance-lifecycle) for compatibility and software maintenance details.
|
||||
|
||||
|
||||
## Known Issues
|
||||
|
||||
This is not an exhaustive list. For complete known issues information, refer to the individual component release notes page.
|
||||
<table>
|
||||
<colgroup>
|
||||
<col width="20%" />
|
||||
<col width="18%" />
|
||||
<col width="10%" />
|
||||
<col width="20%" />
|
||||
<col width="10%" />
|
||||
<col width="22%" />
|
||||
</colgroup>
|
||||
<thead class="night">
|
||||
<tr>
|
||||
<th>Issue Description</th>
|
||||
<th markdown="span">Issue Number</th>
|
||||
<th>Component</th>
|
||||
<th markdown="span">Affected Versions</th>
|
||||
<th>Fixed?</th>
|
||||
<th markdown="span">Version Fix - Pull Request</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td><code>docker registry info</code> authentication error (for example purposes)</td>
|
||||
<td>ENG-DTR #912</td>
|
||||
<td>DTR</td>
|
||||
<td>2.7.0-beta2</td>
|
||||
<td>Yes</td>
|
||||
<td>2.7.0</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Error when installing UCP with <code>"selinux-enabled": true</code></td>
|
||||
<td>???</td>
|
||||
<td>UCP</td>
|
||||
<td>UCP with Enterprise Engine 18.09 or 19.03</td>
|
||||
<td>No</td>
|
||||
<td>N/A</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
---
|
||||
title: About Docker Enterprise
|
||||
description: Information about Docker Enterprise 2.1
|
||||
description: Docker Enterprise product information
|
||||
keywords: Docker Enterprise, enterprise, enterprise edition, ee, docker ee, docker enterprise edition, lts, commercial, cs engine, commercially supported
|
||||
redirect_from:
|
||||
- /enterprise/supported-platforms/
|
||||
|
@ -13,12 +13,13 @@ green-check: '{: style="height: 14px; mar
|
|||
install-prefix-ee: '/install/linux/docker-ee'
|
||||
---
|
||||
|
||||
Docker Enterprise is designed for enterprise development as well as IT teams who
|
||||
build, ship, and run business-critical applications
|
||||
in production and at scale. Docker Enterprise is integrated, certified,
|
||||
and supported to provide enterprises with the most secure container platform
|
||||
in the industry. For more info about Docker Enterprise, including purchasing
|
||||
options, see [Docker Enterprise](https://www.docker.com/enterprise-edition/).
|
||||
Docker Enterprise is designed for enterprise development as well as IT teams who build, share, and run business-critical
|
||||
applications at scale in production. Docker Enterprise is an integrated container platform that includes
|
||||
Docker Desktop Enterprise, a secure image registry, advanced management control plane, and Docker Engine - Enterprise.
|
||||
Docker Engine - Enterprise is a certified and supported container runtime that is also available as a standalone
|
||||
solution to provide enterprises with the most secure container engine in the industry. For more information
|
||||
about Docker Enterprise and Docker Engine - Enterprise, including purchasing options,
|
||||
see [Docker Enterprise](https://www.docker.com/enterprise-edition/).
|
||||
|
||||
> Compatibility Matrix
|
||||
>
|
||||
|
@ -26,20 +27,19 @@ options, see [Docker Enterprise](https://www.docker.com/enterprise-edition/).
|
|||
> for the latest list of supported platforms.
|
||||
{: .important}
|
||||
|
||||
## Docker Enterprise tiers
|
||||
## Docker Enterprise products
|
||||
|
||||
{% include docker_ee.md %}
|
||||
|
||||
> Note
|
||||
>
|
||||
> Starting with Docker Enterprise 2.1, Docker Enterprise --- Basic, Docker Enterprise --- Standard,
|
||||
> and Docker Enterprise --- Advanced are all now called Docker Enterprise.
|
||||
> Starting with Docker Enterprise 2.1, Docker Enterprise - Basic is now Docker Engine - Enterprise,
|
||||
> and both Docker Enterprise - Standard and Docker Enterprise - Advanced are now called Docker Enterprise.
|
||||
|
||||
### Docker Enterprise
|
||||
|
||||
With Docker Enterprise, you can deploy Docker Engine --- Enterprise
|
||||
to manage your container workloads in a flexible way. You can manage workloads
|
||||
on Windows, Linux, on site, or on the cloud.
|
||||
With Docker Enterprise, you can manage container workloads on Windows, Linux, on site, or on the cloud
|
||||
in a flexible way.
|
||||
|
||||
Docker Enterprise has private image management, integrated image signing policies, and cluster
|
||||
management with support for Kubernetes and Swarm orchestrators. It allows you to implement
|
||||
|
@ -47,70 +47,60 @@ node-based RBAC policies, image promotion policies, image mirroring, and
|
|||
scan your images for vulnerabilities. It also has support with defined SLAs and extended
|
||||
maintenance cycles for patches for up to 24 months.
|
||||
|
||||
### New Licensing for Docker Enterprise
|
||||
### New licensing for Docker Enterprise
|
||||
|
||||
In version 18.09, the Docker Enterprise --- Engine is aware of the license
|
||||
applied on the system. The license summary is available in the `docker info`
|
||||
output on standalone or manager nodes.
|
||||
Starting in version 18.09, Docker Enterprise is aware of the license applied on
|
||||
the system. The license summary is available in the `docker info` output on
|
||||
standalone or manager nodes.
|
||||
|
||||
For EE platform customers, when you license UCP, this same license is applied to
|
||||
the underlying engines in the cluster. Docker recommends platform customers use
|
||||
UCP to manage their license.
|
||||
For Docker Enterprise customers, when you license Universal Control Plane
|
||||
(UCP), this same license is applied to the underlying engines in the cluster.
|
||||
Docker recommends that Enterprise customers use UCP to manage their license.
|
||||
|
||||
Standalone EE engines can be licensed using `docker engine activate`.
|
||||
Standalone Docker Engine - Enterprise users can license engines using `docker engine activate`.
|
||||
|
||||
Offline activation of standalone EE engines can be performed by downloading the
|
||||
license and using the command `docker engine activate --license filename.lic`.
|
||||
Offline activation of standalone enterprise engines can be performed by downloading the license and using the command `docker engine activate --license filename.lic`.
|
||||
|
||||
Additionally, Docker is now distributing the CLI as a separate installation
|
||||
package. This gives Enterprise users the ability to install as many CLI
|
||||
packages as needed without using the Engine node licenses for client-only
|
||||
systems.
|
||||
Additionally, Docker is now distributing the CLI as a separate installation package. This gives Docker Enterprise users the ability to install as many CLI packages as needed without using the Engine node licenses for client-only systems.
|
||||
|
||||
[Learn more about Docker Enterprise](/ee/index.md).
|
||||
|
||||
|
||||
> When using Docker Enterprise
|
||||
>
|
||||
> IBM Power is not supported as managers or workers.
|
||||
> Microsoft Windows Server is not supported as a manager. Microsoft Windows
|
||||
> Server 1803 is not supported as a worker.
|
||||
|
||||
### Docker Certified Infrastructure
|
||||
|
||||
Docker Certified Infrastructure is Docker’s prescriptive approach to deploying
|
||||
Docker Enterprise on a range of infrastructure choices. Each Docker
|
||||
Certified Infrastructure includes a reference architecture, automation templates,
|
||||
and third-party ecosystem solution briefs.
|
||||
|
||||
| Platform | Docker Enterprise Edition |
|
||||
|:----------------------------------------------------------------------------------------|:-------------------------:|
|
||||
| [VMware](https://success.docker.com/article/certified-infrastructures-vmware-vsphere) | {{ page.green-check }} |
|
||||
| [Amazon Web Services](https://success.docker.com/article/certified-infrastructures-aws) | {{ page.green-check }} |
|
||||
| [Microsoft Azure](https://success.docker.com/article/certified-infrastructures-azure) | {{ page.green-check }} |
|
||||
| IBM Cloud | Coming soon |
|
||||
Docker Certified Infrastructure is Docker’s prescriptive approach to deploying Docker Enterprise
|
||||
on a variety of infrastructures. Each Docker Certified Infrastructure option includes a reference architecture,
|
||||
a CLI plugin for automated deployment and configuration, and third-party ecosystem solution briefs.
|
||||
|
||||
| Platform | Docker Enterprise support |
|
||||
|:----------------------------------------------------------------------------------------|:-------------------------:|
|
||||
| [Amazon Web Services](../cluster/aws.md) | {{ page.green-check }} |
|
||||
| VMware | Coming soon |
| Microsoft Azure | Coming soon |
|
||||
|
||||
## Docker Enterprise release cycles
|
||||
|
||||
Each Docker Enterprise release is supported and maintained for 24 months, and
|
||||
receives security and critical bug fixes during this period.
|
||||
|
||||
The Docker API version is independent of the Docker platform version. We
|
||||
maintain careful API backward compatibility and deprecate APIs and features
|
||||
slowly and conservatively. We remove features after deprecating them for a
|
||||
period of three stable releases. Docker 1.13 introduced improved
|
||||
interoperability between clients and servers using different API versions,
|
||||
including dynamic feature negotiation.
|
||||
The Docker API version is independent of the Docker version. We maintain
|
||||
careful API backward compatibility and deprecate APIs and features slowly and
|
||||
conservatively. We remove features after deprecating them for a period of
|
||||
three stable releases. Docker 1.13 introduced improved interoperability
|
||||
between clients and servers using different API versions, including dynamic
|
||||
feature negotiation.
|
||||
|
||||
## Upgrades and support
|
||||
|
||||
If you're a Docker DDC or CS Engine customer, you don't need to upgrade to
|
||||
Docker Enterprise to continue to get support. We will continue to support
|
||||
customers with valid subscriptions whether the subscription covers Docker
|
||||
Enterprise or Commercially Supported Docker. You can choose to stay with your
|
||||
current deployed version, or you can upgrade to the latest Docker Enterprise
|
||||
version. For more info, see [Scope of Coverage and Maintenance Lifecycle](https://success.docker.com/Policies/Scope_of_Support).
|
||||
Docker supports Docker Enterprise minor releases for 24 months. Upgrades to the
|
||||
latest minor release of Docker Enterprise are not required, however we
|
||||
recommend staying on the latest maintenance release of the supported minor
|
||||
release you are on. Please see [Maintenance
|
||||
Lifecycle](https://success.docker.com/article/maintenance-lifecycle) for more
|
||||
details on EOL of minor and major versions of Docker Enterprise.
|
||||
|
||||
## Where to go next
|
||||
|
||||
|
|
|
@ -8,11 +8,11 @@ redirect_from:
|
|||
|
||||
Docker Engine - Enterprise version 17.06 and later includes a telemetry plugin.
|
||||
The plugin is enabled by default on Ubuntu starting with Docker Engine - Enterprise 17.06.0
|
||||
and on the rest of the EE-supported Linux distributions starting with version
|
||||
and on the rest of the Docker Engine - Enterprise supported Linux distributions starting with version
|
||||
17.06.2-ee-5. The telemetry plugin is not part of Docker Engine - Enterprise for Windows Server.
|
||||
|
||||
The telemetry plugin sends system information to Docker Inc. Docker uses this
|
||||
information to improve Docker EE. For details about the telemetry plugin and
|
||||
information to improve Docker Engine - Enterprise. For details about the telemetry plugin and
|
||||
the types of data it collects, see the
|
||||
[`telemetry` plugin documentation](https://hub.docker.com/community/images/docker/telemetry).
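Where the CLI method applies (see the UCP note below), the plugin can be inspected and toggled with the standard plugin commands; the exact plugin name shown by `docker plugin ls` may vary by release:

```bash
# List installed plugins, then disable the telemetry plugin by its listed name
docker plugin ls
docker plugin disable <telemetry-plugin-name>
```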
|
||||
|
||||
|
@ -27,7 +27,7 @@ plugin, either using the Docker CLI or using Universal Control Plane.
|
|||
|
||||
> UCP and CLI
|
||||
>
|
||||
> If you're using Docker EE Standard or Advanced with Universal Control Plane
|
||||
> If you're using Docker Engine - Enterprise with Universal Control Plane
|
||||
> (UCP), use UCP to enable and disable metrics. Use the CLI only if you don't
|
||||
> have UCP. UCP re-enables the telemetry plugin for hosts where it was
|
||||
> disabled with the CLI.
|
||||
|
@ -35,7 +35,7 @@ plugin, either using the Docker CLI or using Universal Control Plane.
|
|||
|
||||
### Use Universal Control Plane
|
||||
|
||||
If you use Universal Control Plane with Docker EE, do not use the Docker CLI to
|
||||
If you use Universal Control Plane with Docker Engine - Enterprise, do not use the Docker CLI to
|
||||
disable the telemetry plugin. Instead, you can manage the information sent to
|
||||
Docker by going to **Admin Settings** and choosing **Usage**.
|
||||
|
||||
|
|
|
@ -0,0 +1,55 @@
|
|||
---
|
||||
title: Admission controllers
|
||||
description: Learn about how admission controllers are used in docker.
|
||||
keywords: cluster, psp, security
|
||||
---
|
||||
|
||||
|
||||
This is the current list of admission controllers used by Docker:
|
||||
|
||||
### Default
|
||||
- [NamespaceLifecycle](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#namespacelifecycle)
|
||||
- [LimitRanger](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#limitranger)
|
||||
- [ServiceAccount](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#serviceaccount)
|
||||
- [PersistentVolumeLabel](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#persistentvolumelabel)
|
||||
- [DefaultStorageClass](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#defaultstorageclass)
|
||||
- [DefaultTolerationSeconds](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#defaulttolerationseconds)
|
||||
- [NodeRestriction](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#noderestriction)
|
||||
- [ResourceQuota](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#resourcequota)
|
||||
- [PodNodeSelector](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#podnodeselector)
|
||||
- [PodSecurityPolicy](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#podsecuritypolicy)
|
||||
|
||||
### Custom
|
||||
- **UCPAuthorization**
|
||||
This custom admission controller does several things:
|
||||
- Annotates Docker Compose-on-Kubernetes `Stack` resources with the identity
|
||||
of the user performing the request so that the Docker Compose-on-Kubernetes
|
||||
resource controller can manage `Stacks` with correct user authorization.
|
||||
- Detects when `ServiceAccount` resources are deleted so that they can be
|
||||
correctly removed from UCP's Node scheduling authorization backend.
|
||||
- Simplifies creation of `RoleBindings` and `ClusterRoleBindings` resources by
|
||||
automatically converting user, organization, and team Subject names into
|
||||
their corresponding unique identifiers.
|
||||
- Prevents users from deleting the built-in `cluster-admin` `ClusterRole` or
|
||||
`ClusterRoleBinding` resources.
|
||||
- Prevents under-privileged users from creating or updating `PersistentVolume`
|
||||
resources with host paths.
|
||||
- Works in conjunction with the built-in `PodSecurityPolicies` admission
|
||||
controller to prevent under-privileged users from creating `Pods` with
|
||||
privileged options.
|
||||
- **CheckImageSigning**
|
||||
Enforces UCP's Docker Content Trust policy which, if enabled, requires that all
|
||||
pods use container images which have been digitally signed by trusted and
|
||||
authorized users who are members of one or more teams in UCP.
|
||||
- **UCPNodeSelector**
|
||||
Adds a `com.docker.ucp.orchestrator.kubernetes:*` toleration to pods in the
|
||||
kube-system namespace and removes `com.docker.ucp.orchestrator.kubernetes`
|
||||
tolerations from pods in other namespaces. This ensures that user workloads do
|
||||
not run on swarm-only nodes, which UCP taints with
|
||||
`com.docker.ucp.orchestrator.kubernetes:NoExecute`. It also adds a node
|
||||
affinity to prevent pods from running on manager nodes depending on UCP's
|
||||
settings.
|
||||
|
||||
**Note**: You cannot enable or disable your own admission controllers. For more information, see [Supportability of custom Kubernetes flags in Universal Control Plane](https://success.docker.com/article/supportability-of-custom-kubernetes-flags-in-universal-control-plane).
|
||||
|
||||
For more information about pod security policies in Docker, see [Pod security policies](/ee/ucp/kubernetes/pod-security-policies.md).
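With a UCP client bundle sourced, the built-in policies backing the `PodSecurityPolicy` controller can be listed directly (a quick sanity check, not a configuration step):

```bash
kubectl get podsecuritypolicies
```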
|
|
@ -39,21 +39,27 @@ navigate to the **Admin Settings** page.
|
|||
|
||||
Upload your certificates and keys:
|
||||
|
||||
* A `ca.pem` file with the root CA public certificate.
|
||||
* A `ca.pem` file with the root CA (Certificate Authority) public certificate.
|
||||
* A `cert.pem` file with the TLS certificate for your domain and any intermediate public
|
||||
certificates, in this order.
|
||||
* A `key.pem` file with TLS private key. Make sure it is not encrypted with a password.
|
||||
Encrypted keys should have `ENCRYPTED` in the first line.
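A quick way to check for an encrypted key and strip the passphrase, assuming an RSA key:

```bash
# An encrypted key shows ENCRYPTED in its header; this writes a passphrase-free copy
head -1 key.pem
openssl rsa -in key.pem -out key-decrypted.pem
```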
|
||||
|
||||
After replacing the TLS certificates, your users will not be able to authenticate
|
||||
with their old client certificate bundles. Ask your users to access the UCP
|
||||
web UI and [download new client certificate bundles](../../user-access/cli.md).
|
||||
|
||||
As of UCP v3.2, the **Certificates** page includes a new text field,
|
||||
***Client CA***, that allows you to paste or upload one or more custom root CA certificates which the UCP Controller will use to
|
||||
verify the authenticity of client certificates issued by your corporate or
|
||||
trusted third-party CAs. Note that your custom root certificates will be appended to UCP's internal root CA certificates.
|
||||
|
||||
Finally, click **Save** for the changes to take effect.
|
||||
|
||||
After replacing the TLS certificates, your users won't be able to authenticate
|
||||
with their old client certificate bundles. Ask your users to go to the UCP
|
||||
web UI and [get new client certificate bundles](../../user-access/cli.md).
|
||||
|
||||
If you deployed Docker Trusted Registry, you'll also need to reconfigure it
|
||||
to trust the new UCP TLS certificates.
|
||||
[Learn how to configure DTR](/reference/dtr/2.6/cli/reconfigure.md).
|
||||
[Learn how to configure DTR](/reference/dtr/2.7/cli/reconfigure.md).
|
||||
|
||||
## Where to go next
|
||||
|
||||
|
|
|
@ -4,6 +4,14 @@ description: Learn how to install Docker Universal Control Plane in an Amazon We
|
|||
keywords: Universal Control Plane, UCP, install, Docker EE, AWS, Kubernetes
|
||||
---
|
||||
|
||||
Docker Universal Control Plane (UCP) can be installed on top of AWS without any
|
||||
customization by following the UCP [install documentation](./install/). This
document is therefore **optional**. However, if you are deploying Kubernetes
workloads with UCP and want to leverage the [AWS Kubernetes cloud
provider](https://github.com/kubernetes/cloud-provider-aws), which provides
dynamic volume and load balancer provisioning, you should follow this guide.
|
||||
This guide is not required if you are only deploying swarm workloads.
|
||||
|
||||
The requirements for installing UCP on AWS are included in the following sections:
|
||||
|
||||
## Instances
|
|
@ -2,6 +2,8 @@
|
|||
title: Install UCP on Azure
|
||||
description: Learn how to install Docker Universal Control Plane in a Microsoft Azure environment.
|
||||
keywords: Universal Control Plane, UCP, install, Docker EE, Azure, Kubernetes
|
||||
redirect_from:
|
||||
- /ee/ucp/admin/install/install-on-azure/
|
||||
---
|
||||
|
||||
Docker Universal Control Plane (UCP) closely integrates with Microsoft Azure for its Kubernetes Networking
|
|
@ -14,6 +14,14 @@ of the [requirements UCP needs to run](system-requirements.md).
|
|||
Also, you need to ensure that all nodes, physical and virtual, are running
|
||||
the same version of Docker Enterprise.
|
||||
|
||||
> Cloud Providers
|
||||
>
|
||||
> If you are installing on a public cloud platform, there is cloud-specific UCP
|
||||
> installation documentation. For [Microsoft
|
||||
> Azure](./cloudproviders/install-on-azure/), this is **mandatory**; for
|
||||
> [AWS](./cloudproviders/install-on-aws/) this is optional.
|
||||
{: .important}
|
||||
|
||||
## Step 2: Install Docker Enterprise on all nodes
|
||||
|
||||
UCP is a containerized application that requires the commercially supported
|
||||
|
@ -83,11 +91,6 @@ To install UCP:
|
|||
>[Install an unmanaged CNI plugin](/ee/ucp/kubernetes/install-cni-plugin/).
|
||||
{: important}
|
||||
|
||||
3. Turn off scheduling on UCP manager and DTR nodes, since it is on by default. Workloads cannot run on manager nodes.
   Make sure all options shown in the following screenshot are unchecked:
|
||||
|
||||
{: .with-border}
|
||||
|
||||
## Step 5: License your installation
|
||||
|
||||
Now that UCP is installed, you need to license it. To use UCP, you must have a Docker Enterprise Standard or Advanced subscription, or you can test the platform with a free trial license.
|
||||
|
|
|
@ -27,7 +27,7 @@ installation will fail.
|
|||
Use a computer with internet access to download the UCP package from the
|
||||
following links.
|
||||
|
||||
{% include components/ddc_url_list_2.html product="ucp" version="3.1" %}
|
||||
{% include components/ddc_url_list_2.html product="ucp" version="3.2" %}
|
||||
|
||||
## Download the offline package
|
||||
|
||||
|
|
|
@ -17,7 +17,7 @@ copy this package to the host where you upgrade UCP.
|
|||
Use a computer with internet access to download the UCP package from the
|
||||
following links.
|
||||
|
||||
{% include components/ddc_url_list_2.html product="ucp" version="3.1" %}
|
||||
{% include components/ddc_url_list_2.html product="ucp" version="3.2" %}
|
||||
|
||||
## Download the offline package
|
||||
|
||||
|
|
Before Width: | Height: | Size: 82 KiB After Width: | Height: | Size: 100 KiB |
Before Width: | Height: | Size: 176 KiB After Width: | Height: | Size: 344 KiB |
Before Width: | Height: | Size: 135 KiB After Width: | Height: | Size: 264 KiB |
Before Width: | Height: | Size: 72 KiB |
Before Width: | Height: | Size: 96 KiB After Width: | Height: | Size: 54 KiB |
Before Width: | Height: | Size: 127 KiB After Width: | Height: | Size: 282 KiB |
Before Width: | Height: | Size: 93 KiB After Width: | Height: | Size: 290 KiB |
|
@ -0,0 +1,114 @@
|
|||
---
|
||||
title: Deploy a Sample Application with a Canary release (Experimental)
|
||||
description: Stage a canary release using weight-based load balancing between multiple backend applications.
|
||||
keywords: ucp, cluster, ingress, kubernetes
|
||||
---
|
||||
|
||||
{% include experimental-feature.md %}
|
||||
|
||||
# Deploy a Sample Application with a Canary release
|
||||
|
||||
This example stages a canary release using weight-based load balancing between
|
||||
multiple backend applications.
|
||||
|
||||
> **Note**: This guide assumes the [Deploy Sample Application](./ingress/)
|
||||
> tutorial was followed, with the artifacts still running on the cluster. If
> they are not, complete that tutorial first.
|
||||
|
||||
The following schema is used for this tutorial:
|
||||
- 80% of client traffic is sent to the production v1 service.
|
||||
- 20% of client traffic is sent to the staging v2 service.
|
||||
- All test traffic using the header `stage=dev` is sent to the v3 service.
|
||||
|
||||
A new Kubernetes manifest file with updated ingress rules can be found
|
||||
[here](./yaml/ingress-weighted.yaml).
|
||||
|
||||
1) Source a [UCP Client Bundle](/ee/ucp/user-access/cli/) attached to a
|
||||
cluster with Cluster Ingress installed.
|
||||
|
||||
2) Download the sample Kubernetes manifest file
|
||||
|
||||
```
|
||||
$ wget https://raw.githubusercontent.com/docker/docker.github.io/master/ee/ucp/kubernetes/cluster-ingress/yaml/ingress-weighted.yaml
|
||||
```
|
||||
|
||||
3) Deploy the Kubernetes manifest file
|
||||
|
||||
```bash
|
||||
$ kubectl apply -f ingress-weighted.yaml
|
||||
|
||||
$ kubectl describe vs
|
||||
Hosts:
|
||||
demo.example.com
|
||||
Http:
|
||||
Match:
|
||||
Headers:
|
||||
Stage:
|
||||
Exact: dev
|
||||
Route:
|
||||
Destination:
|
||||
Host: demo-service
|
||||
Port:
|
||||
Number: 8080
|
||||
Subset: v3
|
||||
Route:
|
||||
Destination:
|
||||
Host: demo-service
|
||||
Port:
|
||||
Number: 8080
|
||||
Subset: v1
|
||||
Weight: 80
|
||||
Destination:
|
||||
Host: demo-service
|
||||
Port:
|
||||
Number: 8080
|
||||
Subset: v2
|
||||
Weight: 20
|
||||
```
|
||||
|
||||
This virtual service performs the following actions:
|
||||
|
||||
- Receives all traffic with host=demo.example.com.
|
||||
- If an exact match for HTTP header `stage=dev` is found, traffic is routed
|
||||
to v3.
|
||||
- All other traffic is routed to v1 and v2 in an 80:20 ratio.
|
||||
|
||||
Now we can send traffic to the application to view the applied load balancing
|
||||
algorithms.
|
||||
|
||||
```bash
|
||||
# Public IP Address of a Worker or Manager VM in the Cluster
|
||||
$ IPADDR=51.141.127.241
|
||||
|
||||
# Node Port
|
||||
$ PORT=$(kubectl get service demo-service --output jsonpath='{.spec.ports[?(@.name=="http")].nodePort}')
|
||||
|
||||
$ for i in {1..5}; do curl -H "Host: demo.example.com" http://$IPADDR:$PORT/ping; done
|
||||
{"instance":"demo-v1-7797b7c7c8-5vts2","version":"v1","metadata":"production","request_id":"d0671d32-48e7-41f7-a358-ddd7b47bba5f"}
|
||||
{"instance":"demo-v2-6c5b4c6f76-c6zhm","version":"v2","metadata":"staging","request_id":"ba6dcfd6-f62a-4c68-9dd2-b242179959e0"}
|
||||
{"instance":"demo-v1-7797b7c7c8-kw6gp","version":"v1","metadata":"production","request_id":"d87601c0-7935-4cfc-842c-37910e6cd573"}
|
||||
{"instance":"demo-v1-7797b7c7c8-5vts2","version":"v1","metadata":"production","request_id":"4c71ffab-8657-4d99-87b3-7a6933258990"}
|
||||
{"instance":"demo-v1-7797b7c7c8-gfwzj","version":"v1","metadata":"production","request_id":"c404471c-cc85-497e-9e5e-7bb666f4f309"}
|
||||
```
|
||||
|
||||
The split between v1 and v2 corresponds to the specified criteria. Within the
|
||||
v1 service, requests are load-balanced across the three backend replicas. The
v3 service does not appear in the responses.
|
||||
|
||||
To send traffic to the third service, we can add the HTTP header `stage=dev`.
|
||||
|
||||
```bash
|
||||
for i in {1..5}; do curl -H "Host: demo.example.com" -H "Stage: dev" http://$IPADDR:$PORT/ping; done
|
||||
{"instance":"demo-v3-d88dddb74-9k7qg","version":"v3","metadata":"dev","request_id":"52d7afe7-befb-4e17-a49c-ee63b96d0daf"}
|
||||
{"instance":"demo-v3-d88dddb74-9k7qg","version":"v3","metadata":"dev","request_id":"b2e664d2-5224-44b1-98d9-90b090578423"}
|
||||
{"instance":"demo-v3-d88dddb74-9k7qg","version":"v3","metadata":"dev","request_id":"5446c78e-8a77-4f7e-bf6a-63184db5350f"}
|
||||
{"instance":"demo-v3-d88dddb74-9k7qg","version":"v3","metadata":"dev","request_id":"657553c5-bc73-4a13-b320-f78f7e6c7457"}
|
||||
{"instance":"demo-v3-d88dddb74-9k7qg","version":"v3","metadata":"dev","request_id":"bae52f09-0510-42d9-aec0-ca6bbbaae168"}
|
||||
```
|
||||
|
||||
In this case, 100% of the traffic with the `stage=dev` header is sent to the v3
|
||||
service.
|
||||
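Header matching is exact, so any other value of the `Stage` header falls
through to the weighted v1/v2 route. For example (`prod` is a hypothetical
value, used here only for illustration):

```bash
# A non-matching header value is routed by the 80:20 weighting instead of v3
$ curl -H "Host: demo.example.com" -H "Stage: prod" http://$IPADDR:$PORT/ping
```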
|
||||
## Where to go next
|
||||
|
||||
- [Deploy the Sample Application with Sticky Sessions](./sticky/)
|
|
@ -0,0 +1,36 @@
|
|||
---
|
||||
title: Kubernetes Cluster Ingress (Experimental)
|
||||
description: Learn about Ingress host and path routing for Kubernetes applications.
|
||||
keywords: ucp, cluster, ingress, kubernetes
|
||||
redirect_from:
|
||||
- /ee/ucp/kubernetes/layer-7-routing/
|
||||
---
|
||||
|
||||
{% include experimental-feature.md %}
|
||||
|
||||
## Cluster Ingress capabilities
|
||||
|
||||
Cluster Ingress provides layer 7 services to traffic entering a Docker Enterprise cluster for a variety of use cases that help provide application resilience, security, and observability. Ingress provides dynamic control of L7 routing in a highly available, high-performance architecture.
|
||||
|
||||
UCP's Ingress for Kubernetes is based on the [Istio](https://istio.io/) control plane and is a simplified deployment focused on providing just ingress services with minimal complexity. This includes features such as:
|
||||
|
||||
- L7 host and path routing
|
||||
- Complex path matching and redirection rules
|
||||
- Weight-based load balancing
|
||||
- TLS termination
|
||||
- Persistent L7 sessions
|
||||
- Hot config reloads
|
||||
- Redundant and highly available design
|
||||
|
||||
For a detailed look at Istio Ingress architecture, refer to the [Istio Ingress docs](https://istio.io/docs/tasks/traffic-management/ingress/).
|
||||
|
||||
To get started with UCP Ingress, the following help topics are provided:
|
||||
|
||||
- [Install Cluster Ingress on to a UCP Cluster](./install/)
|
||||
- [Deploy a Sample Application with Ingress Rules](./ingress/)
|
||||
- [Deploy a Sample Application with a Canary release](./canary/)
|
||||
- [Deploy a Sample Application with Sticky Sessions](./sticky/)
|
||||
|
||||
## Where to go next
|
||||
|
||||
- [Install Cluster Ingress on to a UCP Cluster](./install/)
|
|
@ -0,0 +1,168 @@
|
|||
---
|
||||
title: Deploy a Sample Application with Ingress (Experimental)
|
||||
description: Learn how to deploy Ingress rules for Kubernetes applications.
|
||||
keywords: ucp, cluster, ingress, kubernetes
|
||||
---
|
||||
|
||||
{% include experimental-feature.md %}
|
||||
|
||||
# Deploy a Sample Application with Ingress
|
||||
|
||||
Cluster Ingress is capable of routing based on many HTTP attributes, but most
|
||||
commonly the HTTP host and path. The following example shows the basics of
|
||||
deploying Ingress rules for a Kubernetes application. An example application is
|
||||
deployed from this [deployment manifest](./yaml/demo-app.yaml) and L7 Ingress
|
||||
rules are applied.
|
||||
|
||||
## Deploy a Sample Application
|
||||
|
||||
In this example, three different versions of the docker-demo application are
|
||||
deployed. The docker-demo application is able to display the container hostname,
|
||||
environment variables, or labels in its HTTP responses, which makes it a good sample
|
||||
application for an Ingress controller.
|
||||
|
||||
The 3 versions of the sample application are:
|
||||
|
||||
- v1: a production version with 3 replicas running.
|
||||
- v2: a staging version with a single replica running.
|
||||
- v3: a development version also with a single replica.
|
||||
|
||||
An example Kubernetes manifest file containing all 3 deployments can be found [here](./yaml/demo-app.yaml).
|
||||
|
||||
1) Source a [UCP Client Bundle](/ee/ucp/user-access/cli/) attached to a
|
||||
cluster with Cluster Ingress installed.
|
||||
|
||||
2) Download the sample Kubernetes manifest file
|
||||
|
||||
```bash
|
||||
$ wget https://raw.githubusercontent.com/docker/docker.github.io/master/ee/ucp/kubernetes/cluster-ingress/yaml/demo-app.yaml
|
||||
```
|
||||
|
||||
3) Deploy the sample Kubernetes manifest file
|
||||
|
||||
```bash
|
||||
$ kubectl apply -f demo-app.yaml
|
||||
```
|
||||
|
||||
4) Verify the sample applications are running
|
||||
|
||||
```bash
|
||||
$ kubectl get pods -n default
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
demo-v1-7797b7c7c8-5vts2 1/1 Running 0 3h
|
||||
demo-v1-7797b7c7c8-gfwzj 1/1 Running 0 3h
|
||||
demo-v1-7797b7c7c8-kw6gp 1/1 Running 0 3h
|
||||
demo-v2-6c5b4c6f76-c6zhm 1/1 Running 0 3h
|
||||
demo-v3-d88dddb74-9k7qg 1/1 Running 0 3h
|
||||
|
||||
$ kubectl get services -o wide
|
||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
|
||||
demo-service NodePort 10.96.97.215 <none> 8080:33383/TCP 3h app=demo
|
||||
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 1d <none>
|
||||
```
|
||||
|
||||
This first part of the tutorial deployed the pods and a Kubernetes service.
|
||||
Using Kubernetes NodePorts, these pods can be accessed outside of the Cluster
|
||||
Ingress. This illustrates the standard L4 load balancing that a Kubernetes
|
||||
service applies.
|
||||
|
||||
```bash
|
||||
# Public IP Address of a Worker or Manager VM in the Cluster
|
||||
$ IPADDR=51.141.127.241
|
||||
|
||||
# Node Port
|
||||
$ PORT=$(kubectl get service demo-service --output jsonpath='{.spec.ports[?(@.name=="http")].nodePort}')
|
||||
|
||||
# Send traffic directly to the NodePort to bypass L7 Ingress
|
||||
|
||||
|
||||
$ for i in {1..5}; do curl http://$IPADDR:$PORT/ping; done
|
||||
{"instance":"demo-v3-d88dddb74-9k7qg","version":"v3","metadata":"dev"}
|
||||
{"instance":"demo-v3-d88dddb74-9k7qg","version":"v3","metadata":"dev"}
|
||||
{"instance":"demo-v2-6c5b4c6f76-c6zhm","version":"v2","metadata":"staging"}
|
||||
{"instance":"demo-v1-7797b7c7c8-gfwzj","version":"v1","metadata":"production"}
|
||||
{"instance":"demo-v1-7797b7c7c8-gfwzj","version":"v1","metadata":"production"}
|
||||
```
|
||||
|
||||
The L4 load balancing distributes requests across all replicas, so the traffic
|
||||
each version receives is proportional to its replica count. More complex
|
||||
scenarios require routing logic that is decoupled from the number of backend
|
||||
instances, which is what the L7 Ingress rules in the next section provide.
|
||||
|
||||
## Apply Ingress rules to Sample Application
|
||||
|
||||
To leverage the Cluster Ingress for the sample application, three custom resource types need to be deployed (a quick way to confirm they are registered follows the list):
|
||||
|
||||
- Gateway
|
||||
- VirtualService
|
||||
- DestinationRule
|
||||
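These resource types are Istio CRDs created by the Cluster Ingress
installation. A quick, optional check that they are registered in the cluster:

```bash
# List the Istio custom resource definitions used for ingress routing
$ kubectl get crds | grep networking.istio.io
```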
|
||||
For the sample application, an example manifest file with all 3 objects defined is [here](./yaml/ingress-simple.yaml).
|
||||
|
||||
1) Source a [UCP Client Bundle](/ee/ucp/user-access/cli/) attached to a
|
||||
cluster with Cluster Ingress installed.
|
||||
|
||||
2) Download the sample Kubernetes manifest file
|
||||
|
||||
```bash
|
||||
$ wget https://raw.githubusercontent.com/docker/docker.github.io/master/ee/ucp/kubernetes/cluster-ingress/yaml/ingress-simple.yaml
|
||||
```
|
||||
|
||||
3) Deploy the sample Kubernetes manifest file
|
||||
|
||||
```bash
|
||||
$ kubectl apply -f ingress-simple.yaml
|
||||
|
||||
$ kubectl describe virtualservice demo-vs
|
||||
...
|
||||
Spec:
|
||||
Gateways:
|
||||
cluster-gateway
|
||||
Hosts:
|
||||
demo.example.com
|
||||
Http:
|
||||
Match: <nil>
|
||||
Route:
|
||||
Destination:
|
||||
Host: demo-service
|
||||
Port:
|
||||
Number: 8080
|
||||
Subset: v1
|
||||
```
|
||||
|
||||
This configuration matches all traffic with `demo.example.com` and sends it to
|
||||
the backend `version=v1` deployment, regardless of the number of replicas in
|
||||
the backend.
|
||||
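You can also list the three objects to confirm they were created. The short
names `gw`, `vs`, and `dr` defined by the Istio CRDs work here too:

```bash
# Gateway, VirtualService, and DestinationRule created by ingress-simple.yaml
$ kubectl get gateway,virtualservice,destinationrule
```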
|
||||
Curl the service again using the port of the Ingress gateway. Because DNS is
|
||||
not set up, use the `--header` flag from curl to manually set the host header.
|
||||
|
||||
```bash
|
||||
# Find the Cluster Ingress Node Port
|
||||
$ PORT=$(kubectl get service -n istio-system istio-ingressgateway --output jsonpath='{.spec.ports[?(@.name=="http2")].nodePort}')
|
||||
|
||||
# Public IP Address of a Worker or Manager VM in the Cluster
|
||||
$ IPADDR=51.141.127.241
|
||||
|
||||
$ for i in {1..5}; do curl --header "Host: demo.example.com" http://$IPADDR:$PORT/ping; done
|
||||
{"instance":"demo-v1-7797b7c7c8-5vts2","version":"v1","metadata":"production","request_id":"2558fdd1-0cbd-4ba9-b104-0d4d0b1cef85"}
|
||||
{"instance":"demo-v1-7797b7c7c8-kw6gp","version":"v1","metadata":"production","request_id":"59f865f5-15fb-4f49-900e-40ab0c44c9e4"}
|
||||
{"instance":"demo-v1-7797b7c7c8-5vts2","version":"v1","metadata":"production","request_id":"fe233ca3-838b-4670-b6a0-3a02cdb91624"}
|
||||
{"instance":"demo-v1-7797b7c7c8-5vts2","version":"v1","metadata":"production","request_id":"842b8d03-8f8a-4b4b-b7f4-543f080c3097"}
|
||||
{"instance":"demo-v1-7797b7c7c8-kw6gp","version":"v1","metadata":"production","request_id":"197cbb1d-5381-4e40-bc6f-cccec22eccbc"}
|
||||
```
|
||||
|
||||
To have SNI (Server Name Indication) work with TLS services, use curl's
|
||||
`--resolve` flag, whose argument format is `HOST:PORT:ADDRESS`.
|
||||
|
||||
```bash
|
||||
$ curl --resolve demo.example.com:$PORT:$IPADDR http://demo.example.com:$PORT/ping
|
||||
```
|
||||
|
||||
In this instance, the three backend v1 replicas are load balanced and no
|
||||
requests are sent to the other versions.
|
||||
|
||||
## Where to go next
|
||||
|
||||
- [Deploy a Sample Application with a Canary release](./canary/)
|
||||
- [Deploy a Sample Application with Sticky Sessions](./sticky/)
|
|
@ -0,0 +1,132 @@
|
|||
---
|
||||
title: Install Cluster Ingress (Experimental)
|
||||
description: Learn how to deploy ingress rules using Kubernetes manifests.
|
||||
keywords: ucp, cluster, ingress, kubernetes
|
||||
---
|
||||
|
||||
{% include experimental-feature.md %}
|
||||
|
||||
# Install Cluster Ingress
|
||||
|
||||
Cluster Ingress for Kubernetes is currently deployed manually outside of UCP.
|
||||
Future plans for UCP include managing the full lifecycle of the Ingress
|
||||
components themselves. This guide describes how to manually deploy Ingress using
|
||||
Kubernetes deployment manifests.
|
||||
|
||||
## Offline Installation
|
||||
|
||||
If you are installing Cluster Ingress on a UCP cluster that does not have access
|
||||
to the Docker Hub, you will need to pre-pull the Ingress container images. If
|
||||
your cluster has access to the Docker Hub, you can move on to [deploying cluster
|
||||
ingress](#deploy-cluster-ingress).
|
||||
|
||||
Without access to the Docker Hub, you will need to download the container images
|
||||
on a workstation with access to the internet. Container images are distributed
|
||||
in a `.tar.gz` archive and can be downloaded from the following
|
||||
[URL](https://s3.amazonaws.com/docker-istio/istio-ingress-1.1.2.tgz).
|
||||
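For example, on a workstation with internet access:

```bash
# Download the Ingress container image bundle
$ wget https://s3.amazonaws.com/docker-istio/istio-ingress-1.1.2.tgz
```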
|
||||
Once the container images have been downloaded, they need to be
|
||||
copied onto the hosts in your UCP cluster and then side-loaded into Docker.
|
||||
Images can be side-loaded with:
|
||||
|
||||
```bash
|
||||
$ docker load -i istio-ingress-1.1.2.tgz
|
||||
```
|
||||
|
||||
These images should now be present on your nodes:
|
||||
|
||||
```bash
|
||||
$ docker images
|
||||
REPOSITORY TAG IMAGE ID CREATED SIZE
|
||||
docker/node-agent-k8s 1.1.2 4ddd06d05d5d 6 days ago 243MB
|
||||
docker/proxy_init 1.1.2 ff9628f32621 6 days ago 145MB
|
||||
docker/proxyv2 1.1.2 bebabbe114a4 6 days ago 360MB
|
||||
docker/pilot 1.1.2 58b6e18f3545 6 days ago 299MB
|
||||
```
|
||||
|
||||
## Deploy Cluster Ingress
|
||||
|
||||
This step deploys the Ingress controller components `istio-pilot` and
|
||||
`istio-ingressgateway`. Together, these components act as the control plane and
|
||||
data plane for ingress traffic. These components are a simplified deployment of
|
||||
Istio cluster Ingress functionality. Several custom Kubernetes resource definitions (CRDs) are
|
||||
also created that aid in the Ingress functionality.
|
||||
|
||||
> **Note**: This does not deploy the service mesh capabilities of Istio,
|
||||
> because its function in UCP is limited to Ingress.
|
||||
|
||||
As Cluster Ingress is not built into UCP in this release, a Cluster Admin will
|
||||
need to manually download and apply the following Kubernetes Manifest
|
||||
[file](https://s3.amazonaws.com/docker-istio/istio-ingress-1.1.2.yaml).
|
||||
|
||||
1) Download the Kubernetes manifest YAML
|
||||
|
||||
```bash
|
||||
$ wget https://s3.amazonaws.com/docker-istio/istio-ingress-1.1.2.yaml
|
||||
```
|
||||
2) Source a [UCP Client Bundle](/ee/ucp/user-access/cli/)
|
||||
|
||||
3) Deploy the Kubernetes manifest file
|
||||
|
||||
```bash
|
||||
$ kubectl apply -f istio-ingress-1.1.2.yaml
|
||||
```
|
||||
|
||||
4) Check that the installation has completed successfully. It may take a minute
|
||||
or two for all pods to become ready.
|
||||
|
||||
```bash
|
||||
$ kubectl get pods -n istio-system -o wide
|
||||
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
|
||||
istio-ingressgateway-747bc6b4cb-fkt6k 2/2 Running 0 44s 172.0.1.23 manager-02 <none> <none>
|
||||
istio-ingressgateway-747bc6b4cb-gr8f7 2/2 Running 0 61s 172.0.1.25 manager-02 <none> <none>
|
||||
istio-pilot-7b74c7568b-ntbjd 1/1 Running 0 61s 172.0.1.22 manager-02 <none> <none>
|
||||
istio-pilot-7b74c7568b-p5skc 1/1 Running 0 44s 172.0.1.24 manager-02 <none> <none>
|
||||
|
||||
$ kubectl get services -n istio-system -o wide
|
||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
|
||||
istio-ingressgateway NodePort 10.96.32.197 <none> 80:33000/TCP,443:33001/TCP,31400:33002/TCP,15030:34420/TCP,15443:34368/TCP,15020:34300/TCP 86s app=istio-ingressgateway,istio=ingressgateway,release=istio
|
||||
istio-pilot ClusterIP 10.96.199.152 <none> 15010/TCP,15011/TCP,8080/TCP,15014/TCP 85s istio=pilot
|
||||
```
|
||||
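If some pods are still starting, you can watch the namespace until every pod
reports `Running`:

```bash
# Watch pod status in istio-system; press Ctrl+C to stop watching
$ kubectl get pods -n istio-system --watch
```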
|
||||
5) Test the Ingress Deployment
|
||||
|
||||
To test that the Envoy proxy is working correctly in the Istio Gateway pods,
|
||||
there is a status port configured on internal port 15020. The output above
|
||||
shows that port 15020 is exposed as a Kubernetes NodePort; in this example the
|
||||
NodePort is 34300, but it could be different in each
|
||||
environment.
|
||||
|
||||
To check the Envoy proxy's status, use the health endpoint at
|
||||
`/healthz/ready`.
|
||||
|
||||
```bash
|
||||
# Node Port
|
||||
$ PORT=$(kubectl get service -n istio-system istio-ingressgateway --output jsonpath='{.spec.ports[?(@.name=="status-port")].nodePort}')
|
||||
|
||||
# Public IP Address of a Worker or Manager VM in the Cluster
|
||||
$ IPADDR=51.141.127.241
|
||||
|
||||
# Use Curl to check the status port is available
|
||||
$ curl -vvv http://$IPADDR:$PORT/healthz/ready
|
||||
* Trying 51.141.127.241...
|
||||
* TCP_NODELAY set
|
||||
* Connected to 51.141.127.241 (51.141.127.241) port 34300 (#0)
|
||||
> GET /healthz/ready HTTP/1.1
|
||||
> Host: 51.141.127.241:34300
|
||||
> User-Agent: curl/7.58.0
|
||||
> Accept: */*
|
||||
>
|
||||
< HTTP/1.1 200 OK
|
||||
< Date: Wed, 19 Jun 2019 13:31:53 GMT
|
||||
< Content-Length: 0
|
||||
<
|
||||
* Connection #0 to host 51.141.127.241 left intact
|
||||
```
|
||||
|
||||
If the output is `HTTP/1.1 200 OK`, Envoy is running correctly and is ready to
|
||||
serve applications.
|
||||
|
||||
## Where to go next
|
||||
|
||||
- [Deploy a Sample Application](./ingress/)
|
|
@ -0,0 +1,102 @@
|
|||
---
|
||||
title: Deploy a Sample Application with Sticky Sessions (Experimental)
|
||||
description: Learn how to use cookies with Ingress host and path routing.
|
||||
keywords: ucp, cluster, ingress, kubernetes
|
||||
---
|
||||
|
||||
{% include experimental-feature.md %}
|
||||
|
||||
# Deploy a Sample Application with Sticky Sessions
|
||||
|
||||
With persistent sessions, the Ingress controller can use a predetermined header
|
||||
or dynamically generate an HTTP cookie for a client session to use, so that a
|
||||
client's requests are always sent to the same backend.
|
||||
|
||||
> **Note**: This guide assumes the [Deploy Sample Application](./ingress/)
|
||||
> tutorial was followed, with the artifacts still running on the cluster. If
|
||||
> they are not, go back and complete that tutorial first.
|
||||
|
||||
This is specified within the Istio object `DestinationRule` via a
|
||||
`TrafficPolicy` for a given host. In the following example configuration,
|
||||
`consistentHash` is chosen as the load balancing method and a cookie named
|
||||
`session` is used to determine the consistent hash. If incoming requests do not
|
||||
have the `session` cookie set, the Ingress proxy sets it for use in future
|
||||
requests.
|
||||
|
||||
A Kubernetes manifest file with an updated DestinationRule can be found [here](./yaml/ingress-sticky.yaml).
|
||||
|
||||
1) Source a [UCP Client Bundle](/ee/ucp/user-access/cli/) attached to a
|
||||
cluster with Cluster Ingress installed.
|
||||
|
||||
2) Download the sample Kubernetes manifest file
|
||||
|
||||
```bash
|
||||
$ wget https://raw.githubusercontent.com/docker/docker.github.io/master/ee/ucp/kubernetes/cluster-ingress/yaml/ingress-sticky.yaml
|
||||
```
|
||||
|
||||
3) Deploy the Kubernetes manifest file with the new DestinationRule, which has
|
||||
the `consistentHash` load balancer policy set.
|
||||
|
||||
```bash
|
||||
$ kubectl apply -f ingress-sticky.yaml
|
||||
```
|
||||
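To confirm the policy was applied, inspect the DestinationRule. The v1 subset
should now carry the consistent-hash traffic policy (output abridged):

```bash
$ kubectl get destinationrule demo-destinationrule -o yaml
...
  subsets:
  - name: v1
    labels:
      version: v1
    trafficPolicy:
      loadBalancer:
        consistentHash:
          httpCookie:
            name: session
            ttl: 60s
...
```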
|
||||
4) Curl the service to view how requests are load balanced without using
|
||||
cookies. In this example, requests bounce between the different v1
|
||||
replicas.
|
||||
|
||||
```bash
|
||||
# Public IP Address of a Worker or Manager VM in the Cluster
|
||||
$ IPADDR=51.141.127.241
|
||||
|
||||
# Node Port
|
||||
$ PORT=$(kubectl get service demo-service --output jsonpath='{.spec.ports[?(@.name=="http")].nodePort}')
|
||||
|
||||
$ for i in {1..5}; do curl -H "Host: demo.example.com" http://$IPADDR:$PORT/ping; done
|
||||
{"instance":"demo-v1-7797b7c7c8-gfwzj","version":"v1","metadata":"production","request_id":"b40a0294-2629-413b-b876-76b59d72189b"}
|
||||
{"instance":"demo-v1-7797b7c7c8-kw6gp","version":"v1","metadata":"production","request_id":"721fe4ba-a785-484a-bba0-627ee6e47188"}
|
||||
{"instance":"demo-v1-7797b7c7c8-gfwzj","version":"v1","metadata":"production","request_id":"77ed801b-81aa-4c02-8cc9-7e3bd3244807"}
|
||||
{"instance":"demo-v1-7797b7c7c8-gfwzj","version":"v1","metadata":"production","request_id":"36d8aaed-fcdf-4489-a85e-76ea96949d6c"}
|
||||
{"instance":"demo-v1-7797b7c7c8-kw6gp","version":"v1","metadata":"production","request_id":"4693b6ad-286b-4470-9eea-c8656f6801ae"}
|
||||
```
|
||||
|
||||
5) Curl again and inspect the headers returned from the proxy.
|
||||
|
||||
```bash
|
||||
$ curl -i -H "Host: demo.example.com" http://$IPADDR:$PORT/ping
|
||||
HTTP/1.1 200 OK
|
||||
set-cookie: session=1555389679134464956; Path=/; Expires=Wed, 17 Apr 2019 04:41:19 GMT; Max-Age=86400
|
||||
date: Tue, 16 Apr 2019 04:41:18 GMT
|
||||
content-length: 131
|
||||
content-type: text/plain; charset=utf-8
|
||||
x-envoy-upstream-service-time: 0
|
||||
set-cookie: session="d7227d32eeb0524b"; Max-Age=60; HttpOnly
|
||||
server: istio-envoy
|
||||
|
||||
{"instance":"demo-v1-7797b7c7c8-kw6gp","version":"v1","metadata":"production","request_id":"011d5fdf-2285-4ce7-8644-c2df6481c584"}
|
||||
```
|
||||
|
||||
The Ingress proxy sets a 60-second TTL cookie named `session` on this HTTP
|
||||
request. A browser or other client application can use that value in future
|
||||
requests.
|
||||
|
||||
6) Curl the service again using the flags that save cookies persistently
|
||||
across sessions. The header information shows the session is being set,
|
||||
persisted across requests, and that for a given session header, the
|
||||
responses are coming from the same backend.
|
||||
|
||||
```bash
|
||||
$ for i in {1..5}; do curl -c cookie.txt -b cookie.txt -H "Host: demo.example.com" http://$IPADDR:$PORT/ping; done
|
||||
{"instance":"demo-v1-7797b7c7c8-kw6gp","version":"v1","metadata":"production","request_id":"72b35296-d6bd-462a-9e62-0bd0249923d7"}
|
||||
{"instance":"demo-v1-7797b7c7c8-kw6gp","version":"v1","metadata":"production","request_id":"c8872f6c-f77c-4411-aed2-d7aa6d1d92e9"}
|
||||
{"instance":"demo-v1-7797b7c7c8-kw6gp","version":"v1","metadata":"production","request_id":"0e7b8725-c550-4923-acea-db94df1eb0e4"}
|
||||
{"instance":"demo-v1-7797b7c7c8-kw6gp","version":"v1","metadata":"production","request_id":"9996fe77-8260-4225-89df-0eaf7581e961"}
|
||||
{"instance":"demo-v1-7797b7c7c8-kw6gp","version":"v1","metadata":"production","request_id":"d35c380e-31d6-44ce-a5d0-f9f6179715ab"}
|
||||
```
|
||||
|
||||
When the HTTP client uses the cookie set by the Ingress proxy, all requests
|
||||
are sent to the same backend, `demo-v1-7797b7c7c8-kw6gp`.
|
||||
|
||||
## Where to go next
|
||||
|
||||
- [Cluster Ingress Overview](./)
|
|
@ -0,0 +1,106 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: demo-service
|
||||
labels:
|
||||
app: demo
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 8080
|
||||
name: http
|
||||
selector:
|
||||
app: demo
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: demo-v1
|
||||
labels:
|
||||
app: demo
|
||||
version: v1
|
||||
spec:
|
||||
replicas: 3
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: demo
|
||||
version: v1
|
||||
spec:
|
||||
containers:
|
||||
- name: webserver
|
||||
image: ehazlett/docker-demo
|
||||
resources:
|
||||
requests:
|
||||
cpu: "100m"
|
||||
imagePullPolicy: IfNotPresent #Always
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
env:
|
||||
- name: VERSION
|
||||
value: "v1"
|
||||
- name: METADATA
|
||||
value: "production"
|
||||
|
||||
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: demo-v2
|
||||
labels:
|
||||
app: demo
|
||||
version: v2
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: demo
|
||||
version: v2
|
||||
spec:
|
||||
containers:
|
||||
- name: webserver
|
||||
image: ehazlett/docker-demo
|
||||
resources:
|
||||
requests:
|
||||
cpu: "100m"
|
||||
imagePullPolicy: IfNotPresent #Always
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
env:
|
||||
- name: VERSION
|
||||
value: "v2"
|
||||
- name: METADATA
|
||||
value: "staging"
|
||||
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: demo-v3
|
||||
labels:
|
||||
app: demo
|
||||
version: v3
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: demo
|
||||
version: v3
|
||||
spec:
|
||||
containers:
|
||||
- name: webserver
|
||||
image: ehazlett/docker-demo
|
||||
resources:
|
||||
requests:
|
||||
cpu: "100m"
|
||||
imagePullPolicy: IfNotPresent #Always
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
env:
|
||||
- name: VERSION
|
||||
value: "v3"
|
||||
- name: METADATA
|
||||
value: "dev"
|
|
@ -0,0 +1,47 @@
|
|||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: Gateway
|
||||
metadata:
|
||||
name: cluster-gateway
|
||||
spec:
|
||||
selector:
|
||||
istio: ingressgateway # use istio default controller
|
||||
servers:
|
||||
- port:
|
||||
number: 80
|
||||
name: http
|
||||
protocol: HTTP
|
||||
hosts:
|
||||
- "*"
|
||||
|
||||
---
|
||||
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: VirtualService
|
||||
metadata:
|
||||
name: demo-vs
|
||||
spec:
|
||||
hosts:
|
||||
- "demo.example.com"
|
||||
gateways:
|
||||
- cluster-gateway
|
||||
http:
|
||||
- match:
|
||||
route:
|
||||
- destination:
|
||||
host: demo-service
|
||||
subset: v1
|
||||
port:
|
||||
number: 8080
|
||||
|
||||
---
|
||||
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: DestinationRule
|
||||
metadata:
|
||||
name: demo-destinationrule
|
||||
spec:
|
||||
host: demo-service
|
||||
subsets:
|
||||
- name: v1
|
||||
labels:
|
||||
version: v1
|
|
@ -0,0 +1,77 @@
|
|||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: Gateway
|
||||
metadata:
|
||||
name: cluster-gateway
|
||||
spec:
|
||||
selector:
|
||||
istio: ingressgateway # use istio default controller
|
||||
servers:
|
||||
- port:
|
||||
number: 80
|
||||
name: http
|
||||
protocol: HTTP
|
||||
hosts:
|
||||
- "*"
|
||||
|
||||
---
|
||||
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: VirtualService
|
||||
metadata:
|
||||
name: demo-vs
|
||||
spec:
|
||||
hosts:
|
||||
- "demo.example.com"
|
||||
gateways:
|
||||
- cluster-gateway
|
||||
http:
|
||||
- match:
|
||||
- headers:
|
||||
stage:
|
||||
exact: dev
|
||||
route:
|
||||
- destination:
|
||||
host: demo-service
|
||||
subset: v3
|
||||
port:
|
||||
number: 8080
|
||||
- match:
|
||||
route:
|
||||
- destination:
|
||||
host: demo-service
|
||||
subset: v1
|
||||
port:
|
||||
number: 8080
|
||||
weight: 100
|
||||
- destination:
|
||||
host: demo-service
|
||||
subset: v2
|
||||
port:
|
||||
number: 8080
|
||||
weight: 0
|
||||
|
||||
---
|
||||
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: DestinationRule
|
||||
metadata:
|
||||
name: demo-destinationrule
|
||||
spec:
|
||||
host: demo-service
|
||||
|
||||
subsets:
|
||||
- name: v1
|
||||
labels:
|
||||
version: v1
|
||||
trafficPolicy:
|
||||
loadBalancer:
|
||||
consistentHash:
|
||||
httpCookie:
|
||||
name: session
|
||||
ttl: 60s
|
||||
- name: v2
|
||||
labels:
|
||||
version: v2
|
||||
- name: v3
|
||||
labels:
|
||||
version: v3
|
|
@ -0,0 +1,72 @@
|
|||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: Gateway
|
||||
metadata:
|
||||
name: cluster-gateway
|
||||
spec:
|
||||
selector:
|
||||
istio: ingressgateway # use istio default controller
|
||||
servers:
|
||||
- port:
|
||||
number: 80
|
||||
name: http
|
||||
protocol: HTTP
|
||||
hosts:
|
||||
- "*"
|
||||
|
||||
---
|
||||
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: VirtualService
|
||||
metadata:
|
||||
name: demo-vs
|
||||
spec:
|
||||
hosts:
|
||||
- "demo.example.com"
|
||||
gateways:
|
||||
- cluster-gateway
|
||||
http:
|
||||
- match:
|
||||
- headers:
|
||||
stage:
|
||||
exact: dev
|
||||
route:
|
||||
- destination:
|
||||
host: demo-service
|
||||
subset: v3
|
||||
port:
|
||||
number: 8080
|
||||
- match:
|
||||
route:
|
||||
- destination:
|
||||
host: demo-service
|
||||
subset: v1
|
||||
port:
|
||||
number: 8080
|
||||
weight: 80
|
||||
- destination:
|
||||
host: demo-service
|
||||
subset: v2
|
||||
port:
|
||||
number: 8080
|
||||
weight: 20
|
||||
|
||||
|
||||
|
||||
---
|
||||
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: DestinationRule
|
||||
metadata:
|
||||
name: demo-destinationrule
|
||||
spec:
|
||||
host: demo-service
|
||||
subsets:
|
||||
- name: v1
|
||||
labels:
|
||||
version: v1
|
||||
- name: v2
|
||||
labels:
|
||||
version: v2
|
||||
- name: v3
|
||||
labels:
|
||||
version: v3
|
|
@ -1,25 +0,0 @@
|
|||
---
|
||||
title: Layer 7 routing
|
||||
description: Learn how to route traffic to your Kubernetes workloads in Docker Enterprise Edition.
|
||||
keywords: UCP, Kubernetes, ingress, routing
|
||||
redirect_from:
|
||||
- /ee/ucp/kubernetes/deploy-ingress-controller/
|
||||
---
|
||||
|
||||
When you deploy a Kubernetes application, you may want to make it accessible
|
||||
to users using hostnames instead of IP addresses.
|
||||
|
||||
Kubernetes provides **ingress controllers** for this. This functionality is
|
||||
specific to Kubernetes. If you're trying to route traffic to Swarm-based
|
||||
applications, check [layer 7 routing with Swarm](../interlock/index.md).
|
||||
|
||||
Use an ingress controller when you want to:
|
||||
|
||||
* Give your Kubernetes app an externally-reachable URL.
|
||||
* Load-balance traffic to your app.
|
||||
|
||||
A popular ingress controller within the Kubernetes Community is the [NGINX controller](https://github.com/kubernetes/ingress-nginx), and can be used in Docker Enterprise Edition, but it is not directly supported by Docker, Inc.
|
||||
|
||||
Learn about [ingress in Kubernetes](https://v1-11.docs.kubernetes.io/docs/concepts/services-networking/ingress/).
|
||||
|
||||
For an example of a YAML NGINX kube ingress deployment, refer to <https://success.docker.com/article/how-to-configure-a-default-tls-certificate-for-the-kubernetes-nginx-ingress-controller>.
|
|
@ -2,25 +2,26 @@
|
|||
title: Use Pod Security Policies in UCP
|
||||
description: Learn how to use Pod Security Policies to lock down Kubernetes as part of Universal Control Plane.
|
||||
keywords: UCP, Kubernetes, psps, pod security policies
|
||||
redirect_from:
|
||||
---
|
||||
Pod Security Policies (PSPs) are cluster-level resources which are enabled by default in Docker Universal Control Plane (UCP) 3.2. See [Pod Security Policy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) for an explanation of this Kubernetes concept.
|
||||
|
||||
Pod Security Policies (PSPs) are cluster-level resources which are enabled by
|
||||
default in Docker Universal Control Plane (UCP) 3.2. See [Pod Security
|
||||
Policy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) for an
|
||||
explanation of this Kubernetes concept.
|
||||
|
||||
There are two default PSPs in UCP: a `privileged` policy
|
||||
and an `unprivileged` policy. Administrators of the cluster can enforce additional
|
||||
policies and apply them to users and teams for further control of what runs in the
|
||||
Kubernetes cluster. This guide describes the two default policies, and
|
||||
provides two example use cases for custom policies.
|
||||
There are two default PSPs in UCP: a `privileged` policy and an `unprivileged`
|
||||
policy. Administrators of the cluster can enforce additional policies and apply
|
||||
them to users and teams for further control of what runs in the Kubernetes
|
||||
cluster. This guide describes the two default policies, and provides two
|
||||
example use cases for custom policies.
|
||||
|
||||
## Kubernetes Role Based Access Control (RBAC)
|
||||
|
||||
To interact with PSPs, a user will need to be granted access to
|
||||
the `PodSecurityPolicy` object in Kubernetes RBAC. If the user is a `UCP Admin`,
|
||||
or has been granted the `ClusterRole`: `cluster-admin` for all namespaces, then
|
||||
the user can already manipulate PSPs. Additionally, a normal
|
||||
user can interact with policies if a UCP admin creates the following
|
||||
`ClusterRole` and `ClusterRoleBinding`:
|
||||
To interact with PSPs, a user will need to be granted access to the
|
||||
`PodSecurityPolicy` object in Kubernetes RBAC. If the user is a `UCP Admin`,
|
||||
then the user can already manipulate PSPs. A normal user can interact with
|
||||
policies if a UCP admin creates the following `ClusterRole` and
|
||||
`ClusterRoleBinding`:
|
||||
|
||||
```
|
||||
$ cat <<EOF | kubectl create -f -
|
||||
|
@ -145,7 +146,9 @@ admin $ kubectl delete clusterrolebindings ucp:all:privileged-psp-role
|
|||
When the `ClusterRoleBinding` is removed, cluster admins can still deploy pods,
|
||||
and these pods are deployed with the `privileged` policy. But users or service
|
||||
accounts are unable to deploy pods, because Kubernetes does not know what pod
|
||||
security policy to apply.
|
||||
security policy to apply. Note cluster admins would not be able to deploy
|
||||
deployments, see [using the unprivileged policy in a
|
||||
deployment](#using-the-unprivileged-policy-in-a-deployment) for more details.
|
||||
|
||||
```bash
|
||||
user $ kubectl apply -f pod.yaml
|
||||
|
@ -188,7 +191,7 @@ In the following example, when user "jeff" deploys a basic `nginx` pod, the `unp
|
|||
then gets applied.
|
||||
|
||||
```bash
|
||||
user $ cat <<EOF | user create -f -
|
||||
user $ cat <<EOF | kubectl create -f -
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
|
@ -209,8 +212,201 @@ view of the pod spec using the `-o yaml` or `-o json` syntax with `kubectl`. You
|
|||
can parse JSON output with [jq](https://stedolan.github.io/jq/).
|
||||
|
||||
```bash
|
||||
user $ kubectl get pods demopod -o json | jq '.metadata.annotations."kubernetes.io/psp"'
|
||||
"unprivileged"
|
||||
user $ kubectl get pods demopod -o json | jq -r '.metadata.annotations."kubernetes.io/psp"'
|
||||
unprivileged
|
||||
```
|
||||
|
||||
### Using the unprivileged policy in a deployment
|
||||
|
||||
> Note: In most use cases, a Pod is not actually scheduled by a user. When
|
||||
> creating Kubernetes objects such as Deployments or DaemonSets, the pods are
|
||||
> scheduled by a service account or a controller.
|
||||
|
||||
If you have disabled the `privileged` PSP policy, and created a `RoleBinding`
|
||||
to map a user to a new PSP policy, Kubernetes objects like Deployments and
|
||||
DaemonSets will not be able to deploy pods. This is because Kubernetes objects,
|
||||
like Deployments, use a `Service Account` to schedule pods, instead of the user
|
||||
that created the Deployment.
|
||||
|
||||
```bash
|
||||
user $ kubectl get deployments
|
||||
NAME READY UP-TO-DATE AVAILABLE AGE
|
||||
nginx 0/1 0 0 88s
|
||||
|
||||
user $ kubectl get replicasets
|
||||
NAME DESIRED CURRENT READY AGE
|
||||
nginx-cdcdd9f5c 1 0 0 92s
|
||||
|
||||
user $ kubectl describe replicasets nginx-cdcdd9f5c
|
||||
...
|
||||
Warning FailedCreate 48s (x15 over 2m10s) replicaset-controller Error creating: pods "nginx-cdcdd9f5c-" is forbidden: unable to validate against any pod security policy: []
|
||||
```
|
||||
|
||||
For this deployment to be able to schedule pods, the service account defined
|
||||
within the deployment specification needs to be associated with a PSP policy.
|
||||
If a service account is not defined within a deployment spec, the default
|
||||
service account in a namespace is used.
|
||||
|
||||
This is the case in the deployment output above: no service account is
|
||||
defined, so a `RoleBinding` is needed to grant the default service account in
|
||||
the `default` namespace access to a PSP policy.
|
||||
|
||||
An example `RoleBinding` to associate the `unprivileged` PSP policy in UCP with
|
||||
the default service account in the `default` namespace is:
|
||||
|
||||
```bash
|
||||
admin $ cat <<EOF | kubectl create -f -
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: unprivileged-psp-role:defaultsa
|
||||
namespace: default
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: unprivileged-psp-role
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: default
|
||||
namespace: default
|
||||
EOF
|
||||
```
|
||||
|
||||
This should allow the replica set to schedule pods within the cluster:
|
||||
|
||||
```bash
|
||||
user $ kubectl get deployments
|
||||
NAME READY UP-TO-DATE AVAILABLE AGE
|
||||
nginx 1/1 1 1 6m11s
|
||||
|
||||
user $ kubectl get replicasets
|
||||
NAME DESIRED CURRENT READY AGE
|
||||
nginx-cdcdd9f5c 1 1 1 6m16s
|
||||
|
||||
user $ kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
nginx-cdcdd9f5c-9kknc 1/1 Running 0 6m17s
|
||||
|
||||
user $ kubectl get pod nginx-cdcdd9f5c-9kknc -o json | jq -r '.metadata.annotations."kubernetes.io/psp"'
|
||||
unprivileged
|
||||
```
|
||||
|
||||
### Applying the unprivileged PSP policy to a namespace
|
||||
|
||||
A common use case when using PSPs is to apply a particular policy to one
|
||||
namespace, but not the rest. For example, an admin
|
||||
might want to keep the `privileged` policy for all of the
|
||||
infrastructure namespaces but apply the `unprivileged` policy to the
|
||||
end-user namespaces. This can be done as follows:
|
||||
|
||||
In this demonstration cluster, infrastructure workloads are deployed in the
|
||||
`kube-system` and `monitoring` namespaces. End-user workloads are deployed
|
||||
in the `default` namespace.
|
||||
|
||||
```bash
|
||||
admin $ kubectl get namespaces
|
||||
NAME STATUS AGE
|
||||
default Active 3d
|
||||
kube-node-lease Active 3d
|
||||
kube-public Active 3d
|
||||
kube-system Active 3d
|
||||
monitoring Active 3d
|
||||
```
|
||||
|
||||
First, delete the `ClusterRoleBinding` that is applied by default in UCP.
|
||||
|
||||
```bash
|
||||
admin $ kubectl delete clusterrolebindings ucp:all:privileged-psp-role
|
||||
```
|
||||
|
||||
Next, create a new `ClusterRoleBinding` that will enforce the `privileged` PSP
|
||||
policy for all users and service accounts in the `kube-system` and `monitoring`
|
||||
namespaces, where the infrastructure workloads in this example cluster are
|
||||
deployed.
|
||||
|
||||
```bash
|
||||
admin $ cat <<EOF | kubectl create -f -
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: ucp:infrastructure:privileged-psp-role
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: privileged-psp-role
|
||||
subjects:
|
||||
- kind: Group
|
||||
name: system:authenticated:kube-system
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
- kind: Group
|
||||
name: system:authenticated:monitoring
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
- kind: Group
|
||||
name: system:serviceaccounts:kube-system
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
- kind: Group
|
||||
name: system:serviceaccounts:monitoring
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
EOF
|
||||
```
|
||||
|
||||
Finally, create a `ClusterRoleBinding` to allow all users who deploy pods and
|
||||
deployments in the `default` namespace to use the `unprivileged` policy.
|
||||
|
||||
```bash
|
||||
admin $ cat <<EOF | kubectl create -f -
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: ucp:default:unprivileged-psp-role
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: unprivileged-psp-role
|
||||
subjects:
|
||||
- kind: Group
|
||||
name: system:authenticated:default
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
- kind: Group
|
||||
name: system:serviceaccounts:default
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
EOF
|
||||
```
|
||||
|
||||
Now when the user deploys in the `default` namespace, they get the
|
||||
`unprivileged` policy, but when they deploy in the `monitoring` namespace, they
|
||||
get the `privileged` policy.
|
||||
|
||||
```bash
|
||||
user $ cat <<EOF | kubectl create -f -
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: demopod
|
||||
namespace: monitoring
|
||||
spec:
|
||||
containers:
|
||||
- name: demopod
|
||||
image: nginx
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: demopod
|
||||
namespace: default
|
||||
spec:
|
||||
containers:
|
||||
- name: demopod
|
||||
image: nginx
|
||||
EOF
|
||||
```
|
||||
|
||||
```bash
|
||||
user $ kubectl get pods demopod -n monitoring -o json | jq -r '.metadata.annotations."kubernetes.io/psp"'
|
||||
privileged
|
||||
|
||||
user $ kubectl get pods demopod -n default -o json | jq -r '.metadata.annotations."kubernetes.io/psp"'
|
||||
unprivileged
|
||||
```
|
||||
|
||||
## Reenable the privileged PSP for all users
|
||||
|
@ -220,7 +416,7 @@ accounts use the `privileged` PSP, recreate the default
|
|||
`ClusterRoleBinding`:
|
||||
|
||||
```bash
|
||||
admin $ cat <<EOF | user create -f -
|
||||
admin $ cat <<EOF | kubectl create -f -
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
|
@ -244,9 +440,9 @@ EOF
|
|||
UCP admins or users with the [correct
|
||||
permissions](#kubernetes-role-based-access-control) can create their own custom
|
||||
policies and attach them to UCP users or teams. This section highlights two
|
||||
potential use cases for custom PSPs. These two uses cases can
|
||||
be combined into the same policy. Note there are many more use cases with PSPs not covered
|
||||
in this document.
|
||||
potential use cases for custom PSPs. These two use cases can be combined into
|
||||
the same policy. Note there are many more use cases with PSPs not covered in
|
||||
this document.
|
||||
|
||||
- Preventing containers that start as the Root User
|
||||
|
||||
|
@ -298,7 +494,7 @@ to link a user to the new `norootcontainers` policy.
|
|||
admin $ kubectl delete clusterrolebindings ucp:all:privileged-psp-role
|
||||
|
||||
# Create a ClusterRole Granting Access to the Policy
|
||||
admin $ cat <<EOF | admin create -f -
|
||||
admin $ cat <<EOF | kubectl create -f -
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
|
@ -318,7 +514,7 @@ EOF
|
|||
admin $ USER=jeff
|
||||
|
||||
# Create a RoleBinding attaching the User to the ClusterRole
|
||||
admin $ cat <<EOF | admin create -f -
|
||||
admin $ cat <<EOF | kubectl create -f -
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
|
@ -339,7 +535,7 @@ If a user tries to deploy a pod that runs as a root user, such as the upstream
|
|||
`nginx` image, this should fail with a `ConfigError`.
|
||||
|
||||
```bash
|
||||
user $ cat <<EOF | user create -f -
|
||||
user $ cat <<EOF | kubectl create -f -
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
|
|