diff --git a/_config.yml b/_config.yml index b5db4c4aa0..d685ea0e6b 100644 --- a/_config.yml +++ b/_config.yml @@ -22,7 +22,7 @@ exclude: ["_scripts", "apidocs/layouts", "Gemfile", "hooks", "index.html", "404. latest_engine_api_version: "1.40" docker_ce_version: "19.03" docker_ee_version: "19.03" -compose_version: "1.25.0" +compose_version: "1.24.1" compose_file_v3: "3.7" compose_file_v2: "2.4" machine_version: "0.16.1" diff --git a/_config_authoring.yml b/_config_authoring.yml index 83b59812cd..509ec13ffa 100644 --- a/_config_authoring.yml +++ b/_config_authoring.yml @@ -22,7 +22,7 @@ url: https://docs.docker.com latest_engine_api_version: "1.40" docker_ce_version: "19.03" docker_ee_version: "19.03" -compose_version: "1.24.0" +compose_version: "1.24.1" compose_file_v3: "3.7" compose_file_v2: "2.4" machine_version: "0.16.0" diff --git a/_data/cluster/docker_cluster.yaml b/_data/cluster/docker_cluster.yaml new file mode 100644 index 0000000000..6f28780aa0 --- /dev/null +++ b/_data/cluster/docker_cluster.yaml @@ -0,0 +1,49 @@ +command: docker cluster +short: Docker Cluster +long: A tool to build and manage Docker Clusters. +pname: docker +plink: docker.yaml +cname: +- docker cluster backup +- docker cluster create +- docker cluster inspect +- docker cluster ls +- docker cluster restore +- docker cluster rm +- docker cluster update +- docker cluster version +clink: +- docker_cluster_backup.yaml +- docker_cluster_create.yaml +- docker_cluster_inspect.yaml +- docker_cluster_ls.yaml +- docker_cluster_restore.yaml +- docker_cluster_rm.yaml +- docker_cluster_update.yaml +- docker_cluster_version.yaml +options: +- option: dry-run + value_type: bool + default_value: "false" + description: Skip provisioning resources + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +- option: log-level + value_type: string + default_value: warn + description: | + Set the logging level ("trace"|"debug"|"info"|"warn"|"error"|"fatal") + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_data/cluster/docker_cluster_backup.yaml b/_data/cluster/docker_cluster_backup.yaml new file mode 100644 index 0000000000..b7f6c5502d --- /dev/null +++ b/_data/cluster/docker_cluster_backup.yaml @@ -0,0 +1,60 @@ +command: docker cluster backup +short: Backup a running cluster +long: Backup a running cluster +usage: docker cluster backup [OPTIONS] cluster +pname: docker cluster +plink: docker_cluster.yaml +options: +- option: env + shorthand: e + value_type: stringSlice + default_value: '[]' + description: Set environment variables + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +- option: file + value_type: string + default_value: backup.tar.gz + description: Cluster backup filename + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +- option: passphrase + value_type: string + description: Cluster backup passphrase + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +inherited_options: +- option: dry-run + value_type: bool + default_value: "false" + description: Skip provisioning resources + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +- option: log-level + value_type: string + default_value: warn + description: | + Set 
the logging level ("trace"|"debug"|"info"|"warn"|"error"|"fatal") + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_data/cluster/docker_cluster_create.yaml b/_data/cluster/docker_cluster_create.yaml new file mode 100644 index 0000000000..f6de1d9d73 --- /dev/null +++ b/_data/cluster/docker_cluster_create.yaml @@ -0,0 +1,81 @@ +command: docker cluster create +short: Create a new Docker Cluster +long: Create a new Docker Cluster +usage: docker cluster create [OPTIONS] +pname: docker cluster +plink: docker_cluster.yaml +options: +- option: env + shorthand: e + value_type: stringSlice + default_value: '[]' + description: Set environment variables + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +- option: example + value_type: string + default_value: aws + description: Display an example cluster declaration + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +- option: file + shorthand: f + value_type: string + default_value: cluster.yml + description: Cluster declaration + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +- option: name + shorthand: "n" + value_type: string + description: Name for the cluster + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +- option: switch-context + shorthand: s + value_type: bool + default_value: "false" + description: Switch context after cluster create. + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +inherited_options: +- option: dry-run + value_type: bool + default_value: "false" + description: Skip provisioning resources + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +- option: log-level + value_type: string + default_value: warn + description: | + Set the logging level ("trace"|"debug"|"info"|"warn"|"error"|"fatal") + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_data/cluster/docker_cluster_inspect.yaml b/_data/cluster/docker_cluster_inspect.yaml new file mode 100644 index 0000000000..a0aa2f3bcf --- /dev/null +++ b/_data/cluster/docker_cluster_inspect.yaml @@ -0,0 +1,43 @@ +command: docker cluster inspect +short: Display detailed information about a cluster +long: Display detailed information about a cluster +usage: docker cluster inspect [OPTIONS] cluster +pname: docker cluster +plink: docker_cluster.yaml +options: +- option: all + shorthand: a + value_type: bool + default_value: "false" + description: Display complete info about cluster + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +inherited_options: +- option: dry-run + value_type: bool + default_value: "false" + description: Skip provisioning resources + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +- option: log-level + value_type: string + default_value: warn + description: | + Set the logging level ("trace"|"debug"|"info"|"warn"|"error"|"fatal") + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false 
+deprecated: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_data/cluster/docker_cluster_ls.yaml b/_data/cluster/docker_cluster_ls.yaml new file mode 100644 index 0000000000..7f542825c4 --- /dev/null +++ b/_data/cluster/docker_cluster_ls.yaml @@ -0,0 +1,43 @@ +command: docker cluster ls +short: List all available clusters +long: List all available clusters +usage: docker cluster ls [OPTIONS] +pname: docker cluster +plink: docker_cluster.yaml +options: +- option: quiet + shorthand: q + value_type: bool + default_value: "false" + description: Only display numeric IDs + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +inherited_options: +- option: dry-run + value_type: bool + default_value: "false" + description: Skip provisioning resources + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +- option: log-level + value_type: string + default_value: warn + description: | + Set the logging level ("trace"|"debug"|"info"|"warn"|"error"|"fatal") + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_data/cluster/docker_cluster_restore.yaml b/_data/cluster/docker_cluster_restore.yaml new file mode 100644 index 0000000000..5f54e07ae3 --- /dev/null +++ b/_data/cluster/docker_cluster_restore.yaml @@ -0,0 +1,60 @@ +command: docker cluster restore +short: Restore a cluster from a backup +long: Restore a cluster from a backup +usage: docker cluster restore [OPTIONS] cluster +pname: docker cluster +plink: docker_cluster.yaml +options: +- option: env + shorthand: e + value_type: stringSlice + default_value: '[]' + description: Set environment variables + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +- option: file + value_type: string + default_value: backup.tar.gz + description: Cluster backup filename + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +- option: passphrase + value_type: string + description: Cluster backup passphrase + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +inherited_options: +- option: dry-run + value_type: bool + default_value: "false" + description: Skip provisioning resources + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +- option: log-level + value_type: string + default_value: warn + description: | + Set the logging level ("trace"|"debug"|"info"|"warn"|"error"|"fatal") + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_data/cluster/docker_cluster_rm.yaml b/_data/cluster/docker_cluster_rm.yaml new file mode 100644 index 0000000000..1ff30f38e9 --- /dev/null +++ b/_data/cluster/docker_cluster_rm.yaml @@ -0,0 +1,53 @@ +command: docker cluster rm +short: Remove a cluster +long: Remove a cluster +usage: docker cluster rm [OPTIONS] cluster +pname: docker cluster +plink: docker_cluster.yaml +options: +- option: env + shorthand: e + value_type: stringSlice + default_value: '[]' + description: Set environment variables + deprecated: false + experimental: false + experimentalcli: false + kubernetes: 
false + swarm: false +- option: force + shorthand: f + value_type: bool + default_value: "false" + description: Force removal of the cluster files + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +inherited_options: +- option: dry-run + value_type: bool + default_value: "false" + description: Skip provisioning resources + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +- option: log-level + value_type: string + default_value: warn + description: | + Set the logging level ("trace"|"debug"|"info"|"warn"|"error"|"fatal") + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_data/cluster/docker_cluster_update.yaml b/_data/cluster/docker_cluster_update.yaml new file mode 100644 index 0000000000..90e59fd516 --- /dev/null +++ b/_data/cluster/docker_cluster_update.yaml @@ -0,0 +1,52 @@ +command: docker cluster update +short: Update a running cluster's desired state +long: Update a running cluster's desired state +usage: docker cluster update [OPTIONS] cluster +pname: docker cluster +plink: docker_cluster.yaml +options: +- option: env + shorthand: e + value_type: stringSlice + default_value: '[]' + description: Set environment variables + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +- option: file + shorthand: f + value_type: string + description: Cluster definition + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +inherited_options: +- option: dry-run + value_type: bool + default_value: "false" + description: Skip provisioning resources + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +- option: log-level + value_type: string + default_value: warn + description: | + Set the logging level ("trace"|"debug"|"info"|"warn"|"error"|"fatal") + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_data/cluster/docker_cluster_version.yaml b/_data/cluster/docker_cluster_version.yaml new file mode 100644 index 0000000000..facf61c08b --- /dev/null +++ b/_data/cluster/docker_cluster_version.yaml @@ -0,0 +1,42 @@ +command: docker cluster version +short: Print Version, Commit, and Build type +long: Print Version, Commit, and Build type +usage: docker cluster version +pname: docker cluster +plink: docker_cluster.yaml +options: +- option: json + value_type: bool + default_value: "false" + description: Formats output as JSON. 
Implies '--log-level error' + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +inherited_options: +- option: dry-run + value_type: bool + default_value: "false" + description: Skip provisioning resources + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +- option: log-level + value_type: string + default_value: warn + description: | + Set the logging level ("trace"|"debug"|"info"|"warn"|"error"|"fatal") + deprecated: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_data/ddc_offline_files_2.yaml b/_data/ddc_offline_files_2.yaml index 1a10b42f3a..464a898942 100644 --- a/_data/ddc_offline_files_2.yaml +++ b/_data/ddc_offline_files_2.yaml @@ -3,6 +3,15 @@ # environment that can't access the internet # Used by _includes/components/ddc_url_list_2.html +- product: "ucp" + version: "3.2" + tar-files: + - description: "3.2.0 Linux" + url: https://packages.docker.com/caas/ucp_images_3.2.0.tar.gz + - description: "3.2.0 Windows Server 2016 LTSC" + url: https://packages.docker.com/caas/ucp_images_win_2016_3.2.0.tar.gz + - description: "3.2.0 Windows Server 2019 LTSC" + url: https://packages.docker.com/caas/ucp_images_win_2019_3.2.0.tar.gz - product: "ucp" version: "3.1" tar-files: diff --git a/_data/engine-cli/docker_context_import.yaml b/_data/engine-cli/docker_context_import.yaml index cfdbfc1a4d..6c4f666f84 100644 --- a/_data/engine-cli/docker_context_import.yaml +++ b/_data/engine-cli/docker_context_import.yaml @@ -1,5 +1,5 @@ command: docker context import -short: Import a context from a tar file +short: Import a context from a tar or zip file long: Imports a context previously exported with `docker context export`. To import from stdin, use a hyphen (`-`) as filename. usage: docker context import CONTEXT FILE|- diff --git a/_data/engine-cli/docker_run.yaml b/_data/engine-cli/docker_run.yaml index b7b3214086..5ef06961f2 100644 --- a/_data/engine-cli/docker_run.yaml +++ b/_data/engine-cli/docker_run.yaml @@ -1343,6 +1343,33 @@ examples: |- > Windows containers. This option fails if the container isolation is `hyperv` > or when running Linux Containers on Windows (LCOW). + ### Access an NVIDIA GPU + + The `--gpus` flag allows you to access NVIDIA GPU resources. First you need to + install [nvidia-container-runtime](https://nvidia.github.io/nvidia-container-runtime/). + Visit [Specify a container's resources](https://docs.docker.com/config/containers/resource_constraints/) + for more information. + + To use `--gpus`, specify which GPUs (or all) to use. If no value is provided, all + available GPUs are used. The example below exposes all available GPUs. + + ```bash + $ docker run -it --rm --gpus all ubuntu nvidia-smi + ``` + + Use the `device` option to specify GPUs. The example below exposes a specific + GPU. + + ```bash + $ docker run -it --rm --gpus device=GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a ubuntu nvidia-smi + ``` + + The example below exposes the first and third GPUs. + + ```bash + $ docker run -it --rm --gpus device=0,2 ubuntu nvidia-smi + ``` + ### Restart policies (--restart) Use Docker's `--restart` to specify a container's *restart policy*.
A restart @@ -1580,4 +1607,3 @@ experimental: false experimentalcli: false kubernetes: false swarm: false - diff --git a/_data/toc.yaml b/_data/toc.yaml index 71b3b04abc..d947eb6d63 100644 --- a/_data/toc.yaml +++ b/_data/toc.yaml @@ -469,7 +469,7 @@ guides: title: NIST ITL Bulletin October 2017 - sectiontitle: OSCAL section: - - path: /compliance/oscal + - path: /compliance/oscal/ title: OSCAL compliance guidance - sectiontitle: CIS Benchmarks section: @@ -629,6 +629,27 @@ reference: title: docker checkpoint ls - path: /engine/reference/commandline/checkpoint_rm/ title: docker checkpoint rm + + - sectiontitle: docker cluster * + section: + - path: /engine/reference/commandline/cluster/ + title: docker cluster + - path: /engine/reference/commandline/cluster_backup/ + title: docker cluster backup + - path: /engine/reference/commandline/cluster_create/ + title: docker cluster create + - path: /engine/reference/commandline/cluster_inspect/ + title: docker cluster inspect + - path: /engine/reference/commandline/cluster_ls/ + title: docker cluster ls + - path: /engine/reference/commandline/cluster_restore/ + title: docker cluster restore + - path: /engine/reference/commandline/cluster_rm/ + title: docker cluster rm + - path: /engine/reference/commandline/cluster_update/ + title: docker cluster update + - path: /engine/reference/commandline/cluster_version/ + title: docker cluster version - path: /engine/reference/commandline/commit/ title: docker commit - sectiontitle: docker config * @@ -1066,7 +1087,7 @@ reference: - path: /reference/dtr/2.7/cli/destroy/ title: destroy - path: /reference/dtr/2.7/cli/emergency-repair/ - title: emergency-repair7 + title: emergency-repair - path: /reference/dtr/2.7/cli/install/ title: install - path: /reference/dtr/2.7/cli/join/ @@ -1268,102 +1289,25 @@ manuals: - sectiontitle: Docker Enterprise section: - path: /ee/ - title: About Docker Enterprise - - sectiontitle: Release Notes - section: - - path: /ee/release-notes/ - title: Platform - - path: /engine/release-notes/ - title: Docker Engine - Enterprise and Engine - Community - nosync: true - - path: /ee/ucp/release-notes/ - title: Docker Universal Control Plane - nosync: true - - path: /ee/dtr/release-notes/ - title: Docker Trusted Registry - nosync: true - - path: /ee/desktop/release-notes/ - title: Docker Desktop Enterprise - nosync: true - - path: /ee/docker-ee-architecture/ - title: Docker Enterprise Architecture - - path: /ee/supported-platforms/ - title: Supported platforms - nosync: true - - sectiontitle: Deploy Docker Enterprise - section: - - sectiontitle: Docker Cluster - section: - - path: /cluster/overview/ - title: Overview - - path: /cluster/aws/ - title: Docker Cluster on AWS - - path: /cluster/cluster-file/ - title: Cluster file structure - - path: /cluster/reference/ - title: Subcommands - - path: /cluster/reference/envvars/ - title: Environment variables - - path: /ee/end-to-end-install/ - title: Install components individually - - sectiontitle: Back up Docker Enterprise - section: - - path: /ee/admin/backup/ - title: Overview - - path: /ee/admin/backup/back-up-swarm/ - title: Back up Docker Swarm - - path: /ee/admin/backup/back-up-ucp/ - title: Back up UCP - - path: /ee/admin/backup/back-up-dtr/ - title: Back up DTR - - path: /cluster/reference/backup/ - title: Back up clusters with Docker Cluster - - sectiontitle: Restore Docker Enterprise - section: - - path: /ee/admin/restore/ - title: Overview - - path: /ee/admin/restore/restore-swarm/ - title: Restore Docker Swarm - - path: 
/ee/admin/restore/restore-ucp/ - title: Restore UCP - - path: /ee/admin/restore/restore-dtr/ - title: Restore DTR - - path: /cluster/reference/restore/ - title: Restore clusters with Docker Cluster - - sectiontitle: Disaster Recovery - section: - - path: /ee/admin/disaster-recovery/ - title: Overview - - path: /ee/upgrade/ - title: Upgrade Docker Enterprise + title: Overview + - path: /ee/release-notes/ + title: Release notes - sectiontitle: Docker Cluster section: - - path: /cluster/overview/ + - path: /cluster/ title: Overview - path: /cluster/aws/ title: Docker Cluster on AWS - path: /cluster/cluster-file/ title: Cluster file structure - - path: /cluster/reference/ - title: Subcommands - path: /cluster/reference/envvars/ title: Environment variables - - path: /cluster/reference/ls/ - title: List clusters - - path: /cluster/reference/inspect/ - title: Inspect clusters - - path: /cluster/reference/update/ - title: Update clusters - - path: /cluster/reference/remove/ - title: Remove clusters - - path: /cluster/reference/version/ - title: Version information - - path: /ee/telemetry/ - title: Manage usage data collection + - path: /cluster/reference/ + title: Subcommands - sectiontitle: Docker Engine - Enterprise section: - path: /ee/supported-platforms/ - title: Install Docker Enterprise Engine + title: Install Docker Engine - Enterprise nosync: true - title: Release notes path: /engine/release-notes/ @@ -1385,10 +1329,12 @@ manuals: title: Install - path: /ee/ucp/admin/install/install-offline/ title: Install offline - - path: /ee/ucp/admin/install/install-on-azure/ - title: Install on Azure - - path: /ee/ucp/admin/install/install-on-aws/ - title: Install on AWS + - sectiontitle: Cloud Providers + section: + - path: /ee/ucp/admin/install/cloudproviders/install-on-azure/ + title: Install on Azure + - path: /ee/ucp/admin/install/cloudproviders/install-on-aws/ + title: Install on AWS - path: /ee/ucp/admin/install/upgrade/ title: Upgrade - path: /ee/ucp/admin/install/upgrade-offline/ @@ -1403,6 +1349,8 @@ manuals: title: Add labels to cluster nodes - path: /ee/ucp/admin/configure/add-sans-to-cluster/ title: Add SANs to cluster certificates + - path: /ee/ucp/admin/configure/admission-controllers + title: Admission Controllers - path: /ee/ucp/admin/configure/collect-cluster-metrics/ title: Collect UCP cluster metrics with Prometheus - path: /ee/ucp/admin/configure/metrics-descriptions/ @@ -1571,7 +1519,7 @@ manuals: - title: Securing services with TLS path: /ee/ucp/interlock/usage/tls/ - title: Configuring websockets - path: /ee/ucp/interlock/usage/websockets/ + path: /ee/ucp/interlock/usage/websockets/ - sectiontitle: Deploy apps with Kubernetes section: - title: Access Kubernetes Resources @@ -1582,16 +1530,12 @@ manuals: path: /ee/ucp/kubernetes/deploy-with-compose/ - title: Using Pod Security Policies path: /ee/ucp/kubernetes/pod-security-policies/ - - title: Deploy an ingress controller - path: /ee/ucp/kubernetes/layer-7-routing/ - title: Create a service account for a Kubernetes app path: /ee/ucp/kubernetes/create-service-account/ - title: Install an unmanaged CNI plugin path: /ee/ucp/kubernetes/install-cni-plugin/ - title: Kubernetes network encryption path: /ee/ucp/kubernetes/kubernetes-network-encryption/ - - title: Deploy a CSI plugin - path: /ee/ucp/kubernetes/use-csi/ - sectiontitle: Persistent Storage section: - title: Use NFS Storage @@ -1604,6 +1548,20 @@ manuals: path: /ee/ucp/kubernetes/storage/configure-aws-storage/ - title: Configure iSCSI path: 
/ee/ucp/kubernetes/storage/use-iscsi/ + - title: Deploy a CSI plugin + path: /ee/ucp/kubernetes/storage/use-csi/ + - sectiontitle: Cluster Ingress + section: + - title: Overview + path: /ee/ucp/kubernetes/cluster-ingress/ + - title: Install Ingress + path: /ee/ucp/kubernetes/cluster-ingress/install/ + - title: Deploy Simple Application + path: /ee/ucp/kubernetes/cluster-ingress/ingress/ + - title: Deploy a Canary Deployment + path: /ee/ucp/kubernetes/cluster-ingress/canary/ + - title: Implementing Persistent (sticky) Sessions + path: /ee/ucp/kubernetes/cluster-ingress/sticky/ - title: API reference path: /reference/ucp/3.2/api/ nosync: true @@ -2386,6 +2344,8 @@ manuals: - path: /ee/dtr/admin/configure/use-your-own-tls-certificates/ title: Use your own TLS certificates - path: /ee/dtr/admin/configure/enable-single-sign-on/ title: Enable single sign-on + - path: /ee/dtr/admin/configure/disable-persistent-cookies/ + title: Disable persistent cookies - sectiontitle: External storage section: @@ -3337,6 +3297,49 @@ manuals: title: Troubleshoot DDE issues on Mac - path: /ee/desktop/troubleshoot/windows-issues/ title: Troubleshoot DDE issues on Windows + - sectiontitle: Manage Docker Enterprise + section: + - path: /ee/docker-ee-architecture/ + title: Docker Enterprise Architecture + - path: /ee/supported-platforms/ + title: Supported platforms + nosync: true + - path: /ee/end-to-end-install/ + title: Deploy Docker Enterprise + - path: /ee/upgrade/ + title: Upgrade Docker Enterprise + - sectiontitle: Back up Docker Enterprise + section: + - path: /ee/admin/backup/ + title: Overview + - path: /ee/admin/backup/back-up-swarm/ + title: Back up Docker Swarm + - path: /ee/admin/backup/back-up-ucp/ + title: Back up UCP + - path: /ee/admin/backup/back-up-dtr/ + title: Back up DTR + - path: /cluster/reference/backup/ + title: Back up clusters with Docker Cluster + - sectiontitle: Restore Docker Enterprise + section: + - path: /ee/admin/restore/ + title: Overview + - path: /ee/admin/restore/restore-swarm/ + title: Restore Docker Swarm + - path: /ee/admin/restore/restore-ucp/ + title: Restore UCP + - path: /ee/admin/restore/restore-dtr/ + title: Restore DTR + - path: /cluster/reference/restore/ + title: Restore clusters with Docker Cluster + - sectiontitle: Disaster Recovery + section: + - path: /ee/admin/disaster-recovery/ + title: Overview + - path: /ee/enable-client-certificate-authentication/ + title: Enable client certificate authentication with your PKI + - path: /ee/telemetry/ + title: Manage usage data collection - title: Get support path: /ee/get-support/ - sectiontitle: Docker Assemble section: @@ -3369,6 +3372,12 @@ manuals: title: API reference - path: /engine/reference/commandline/template/ title: CLI reference +- sectiontitle: Docker Buildx + section: + - path: /buildx/working-with-buildx/ + title: Working with Docker Buildx + - path: /engine/reference/commandline/buildx/ + title: CLI reference - sectiontitle: Docker Compose section: - path: /compose/ @@ -3671,52 +3680,8 @@ manuals: title: Token scope documentation - path: /registry/spec/auth/token/ title: Token authentication specification -- sectiontitle: Release notes - section: - - path: /release-notes/ - title: Overview - - sectiontitle: Docker Enterprise Platform - section: - - path: /ee/release-notes/ - title: Platform - - path: /engine/release-notes/ - title: Docker Engine - Enterprise and Engine - Community - nosync: true - - path: /ee/ucp/release-notes/ - title: Docker Universal Control Plane - nosync: true - - path: /ee/dtr/release-notes/ - 
title: Docker Trusted Registry - nosync: true - - path: /ee/desktop/release-notes/ - title: Docker Desktop Enterprise - nosync: true - - path: /docker-for-mac/release-notes/ - title: Docker Desktop for Mac - nosync: true - - path: /docker-for-windows/release-notes/ - title: Docker Desktop for Windows - nosync: true - - path: /release-notes/docker-compose/ - title: Docker Compose - nosync: true - - path: /docker-for-aws/release-notes/ - title: Docker for AWS - nosync: true - - path: /docker-for-azure/release-notes/ - title: Docker for Azure - nosync: true - - path: /release-notes/docker-swarm/ - title: Docker Swarm release notes - nosync: true -- sectiontitle: Superseded products and tools - section: - - path: /cs-engine/1.13/release-notes/ - title: CS Docker Engine - - path: /release-notes/docker-engine/ - title: Docker (1.13 and earlier) - - path: /release-notes/docker-machine/ - title: Docker Machine +- path: /release-notes/ + title: Release notes - sectiontitle: Superseded products and tools section: - path: /cs-engine/1.13/release-notes/ diff --git a/_includes/docker_ee.md b/_includes/docker_ee.md index 9be2e3877f..35aa177ad9 100644 --- a/_includes/docker_ee.md +++ b/_includes/docker_ee.md @@ -4,6 +4,6 @@ |:---------------------------------------------------------------------|:-------------------------:|:----------------------------:| | Container engine and built in orchestration, networking, security | {{green-check}} | {{green-check}} | | Certified infrastructure, plugins and ISV containers | {{green-check}} | {{green-check}} | -| Image management | | {{green-check}} | -| Container app management | | {{green-check}} | -| Image security scanning | | {{green-check}} | +| Image management with Docker Trusted Registry security scanning | | {{green-check}} | +| Container app management with Universal Control Plane | | {{green-check}} | +| Developer solutions with Docker Desktop Enterprise | | {{green-check}} | diff --git a/app-template/working-with-template.md b/app-template/working-with-template.md index fcf6508715..a408231bd7 100644 --- a/app-template/working-with-template.md +++ b/app-template/working-with-template.md @@ -35,7 +35,7 @@ A service template provides the description required by Docker Template to scaff 1. `/run/configuration`, a JSON file which contains all settings such as parameters, image name, etc. For example: -``` +```json { "parameters": { "externalPort": "80", @@ -53,7 +53,7 @@ To create a basic service template, you need to create two files — a dockerfil `docker-compose.yaml` -``` +```yaml version: "3.6" services: mysql: @@ -62,7 +62,7 @@ services: `Dockerfile` -``` +```conf FROM alpine COPY docker-compose.yaml . CMD cp docker-compose.yaml /project/ @@ -80,7 +80,7 @@ Services that generate a template using code must contain the following files th Here’s an example of a simple NodeJS service: -``` +```bash my-service ├── Dockerfile # The Dockerfile of the service template └── assets @@ -92,7 +92,7 @@ The NodeJS service contains the following files: `my-service/Dockerfile` -``` +```conf FROM alpine COPY assets /assets CMD ["cp", "/assets", "/project"] @@ -103,7 +103,7 @@ COPY assets /assets `my-service/assets/docker-compose.yaml` {% raw %} -``` +```yaml version: "3.6" services: {{ .Name }}: @@ -115,7 +115,7 @@ services: `my-service/assets/Dockerfile` -``` +```conf FROM NODE:9 WORKDIR /app COPY package.json . 
@@ -128,7 +128,7 @@ CMD ["yarn", "run", "start"] The next step is to build and push the service template image to a remote repository by running the following command: -``` +```bash cd [...]/my-service docker build -t org/my-service . docker push org/my-service @@ -136,7 +136,7 @@ docker push org/my-service To build and push the image to an instance of Docker Trusted Registry(DTR), or to an external registry, specify the name of the repository: -``` +```bash cd [...]/my-service docker build -t myrepo:5000/my-service . docker push myrepo:5000/my-service @@ -151,7 +151,7 @@ Of all the available service and application definitions, Docker Template has ac Here is an example of the Express service definition: -``` +```yaml - apiVersion: v1alpha1 # constant kind: ServiceTemplate # constant metadata: @@ -180,7 +180,7 @@ To customize a service, you need to complete the following tasks: Add the parameters available to the application. The following example adds the NodeJS version and the external port: -``` +```yaml - [...] spec: [...] @@ -209,7 +209,7 @@ When you run the service template container, a volume is mounted making the serv The file matches the following go struct: -``` +```golang type TemplateContext struct { ServiceID string `json:"serviceId,omitempty"` Name string `json:"name,omitempty"` @@ -224,7 +224,7 @@ type TemplateContext struct { Where `ConfiguredService` is: -``` +```go type ConfiguredService struct { ID string `json:"serviceId,omitempty"` Name string `json:"name,omitempty"` @@ -236,7 +236,7 @@ You can then use the file to obtain values for the parameters and use this infor To use the `interpolator` image, update `my-service/Dockerfile` to use the following Dockerfile: -``` +```conf FROM dockertemplate/interpolator:v0.0.3-beta1 COPY assets . ``` @@ -245,7 +245,7 @@ COPY assets . This places the interpolator image in the `/assets` folder and copies the folder to the target `/project` folder. If you prefer to do this manually, use a Dockerfile instead: -``` +```conf WORKDIR /assets CMD ["/interpolator", "-config", "/run/configuration", "-source", "/assets", "-destination", "/project"] ``` @@ -270,7 +270,7 @@ Create a local repository file called `library.yaml` anywhere on your local driv `library.yaml` -``` +```yaml apiVersion: v1alpha1 generated: "2018-06-13T09:24:07.392654524Z" kind: RepositoryContent @@ -291,7 +291,7 @@ Now that you have created a local repository and added service definitions to it 1. Edit `~/.docker/dockertemplate/preferences.yaml` as follows: -``` +```yaml apiVersion: v1alpha1 channel: master kind: Preferences @@ -302,7 +302,7 @@ repositories: 2. Add your local repository: -``` +```yaml apiVersion: v1alpha1 channel: master kind: Preferences @@ -313,6 +313,13 @@ repositories: url: https://docker-application-template.s3.amazonaws.com/master/library.yaml ``` +When configuring a local repository on Windows, the `url` structure is slightly different: + +```yaml +- name: custom-services + url: file://c:/path/to/my/library.yaml +``` + After updating the `preferences.yaml` file, run `docker template ls` or restart the Application Designer and select **Custom application**. The new service should now be visible in the list of available services. 
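+Earlier, the `/run/configuration` settings file was described as matching the `TemplateContext` Go struct. The following minimal Go sketch shows how a service template binary might consume that file. It assumes only the Go standard library, and it models `parameters` as the flat string map from the earlier JSON example; the trimmed-down struct below is illustrative, not the full `TemplateContext`.
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"os"
+)
+
+// Hypothetical, trimmed-down view of /run/configuration; the full
+// TemplateContext struct is shown earlier on this page.
+type templateContext struct {
+	Name       string            `json:"name,omitempty"`
+	Parameters map[string]string `json:"parameters,omitempty"`
+}
+
+func main() {
+	// The settings volume is mounted at /run/configuration.
+	raw, err := os.ReadFile("/run/configuration")
+	if err != nil {
+		log.Fatalf("reading configuration: %v", err)
+	}
+
+	var ctx templateContext
+	if err := json.Unmarshal(raw, &ctx); err != nil {
+		log.Fatalf("parsing configuration: %v", err)
+	}
+
+	// Use a parameter value, for example when generating service files.
+	fmt.Printf("service %s publishes port %s\n", ctx.Name, ctx.Parameters["externalPort"])
+}
+```
+
+In a real template, the values read here would be substituted into the files the container copies to `/project`, which is the step the `interpolator` image automates.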
### Share custom service templates @@ -343,7 +350,7 @@ Before you create an application template definition, you must create a reposito For example, to create an Express and MySQL application, the application definition must be similar to the following yaml file: -``` +```yaml apiVersion: v1alpha1 #constant kind: ApplicationTemplate #constant metadata: @@ -366,7 +373,7 @@ Create a local repository file called `library.yaml` anywhere on your local driv `library.yaml` -``` +```yaml apiVersion: v1alpha1 generated: "2018-06-13T09:24:07.392654524Z" kind: RepositoryContent @@ -391,7 +398,7 @@ Now that you have created a local repository and added application definitions, 1. Edit `~/.docker/dockertemplate/preferences.yaml` as follows: -``` +```yaml apiVersion: v1alpha1 channel: master kind: Preferences @@ -402,7 +409,7 @@ repositories: 2. Add your local repository: -``` +```yaml apiVersion: v1alpha1 channel: master kind: Preferences @@ -413,6 +420,13 @@ repositories: url: https://docker-application-template.s3.amazonaws.com/master/library.yaml ``` +When configuring a local repository on Windows, the `url` structure is slightly different: + +```yaml +- name: custom-services + url: file://c:/path/to/my/library.yaml +``` + After updating the `preferences.yaml` file, run `docker template ls` or restart the Application Designer and select **Custom application**. The new template should now be visible in the list of available templates. ### Share the custom application template diff --git a/app/working-with-app.md b/app/working-with-app.md index 87b5d49939..87a83eabfe 100644 --- a/app/working-with-app.md +++ b/app/working-with-app.md @@ -1,407 +1,463 @@ ---- -title: Working with Docker App (experimental) -description: Learn about Docker App -keywords: Docker App, applications, compose, orchestration ---- - ->This is an experimental feature. -> ->{% include experimental.md %} - -## Overview - -Docker App is a CLI plug-in that introduces a top-level `docker app` command that brings the _container experience_ to applications. The following table compares Docker containers with Docker applications. - - -| Object | Config file | Build with | Execute with | Share with | -| ------------- |---------------| -------------------|-----------------------|-------------------| -| Container | Dockerfile | docker image build | docker container run | docker image push | -| App | App Package | docker app bundle | docker app install | docker app push | - - -With Docker App, entire applications can now be managed as easily as images and containers. For example, Docker App lets you _build_, _validate_ and _deploy_ applications with the `docker app` command. You can even leverage secure supply-chain features such as signed `push` and `pull` operations. - -This guide will walk you through two scenarios: - -1. Initialize and deploy a new Docker App project from scratch -1. Convert an existing Compose app into a Docker App project (added later in the beta process) - -The first scenario will familiarize you with the basic components of a Docker App and get you comfortable with the tools and workflow. - -## Initialize and deploy a new Docker App project from scratch - -In this section, we'll walk through the process of creating a new Docker App project. By then end, you'll be familiar with the workflow and most important commands. - -We'll complete the following steps: - -1. Prerequisites -1. Initialize an empty new project -1. Populate the project -1. Validate the app -1. Deploy the app -1. 
Push the app to Docker Hub or Docker Trusted Registry -1. Install the app directly from Docker Hub - -### Prerequisites - -In order to follow along, you'll need at least one Docker node operating in Swarm mode. You will also need the latest build of the Docker CLI with the App CLI plugin included. - -Depending on your Linux distribution and your security context, you may need to prepend commands with `sudo`. - - -### Initialize a new empty project - -The `docker app init` command is used to initialize a new Docker application project. If you run it on its own, it initializes a new empty project. If you point it to an existing `docker-compose.yml` file, it initializes a new project based on the Compose file. - -Use the following command to initialize a new empty project called "hello-world". - -``` -$ docker app init --single-file hello-world -Created "hello-world.dockerapp" -``` - -The command will produce a single file in your current directory called `hello-world.dockerapp`. The format of the file name is appended with `.dockerapp`. - -``` -$ ls -hello-world.dockerapp -``` - -If you run `docker app init` without the `--single-file` flag you will get a new directory containing three YAML files. The name of the directory will be the name of the project with `.dockerapp` appended, and the three YAML files will be: - -- `docker-compose.yml` -- `metadata.yml` -- `parameters.yml` - -However, the `--single-file` option merges the three YAML files into a single YAML file with three sections. Each of these sections relates to one of the three YAML files mentioned above:`docker-compose.yml`, `metadata.yml`, and `parameters.yml`. Using the `--single-file` option is great for enabling you to share your application via a single configuration file. - -Inspect the YAML with the following command. - -``` -$ cat hello-world.dockerapp -# Application metadata - equivalent to metadata.yml. -version: 0.1.0 -name: hello-world -description: ---- -# Application services - equivalent to docker-compose.yml. -version: "3.6" -services: {} ---- -# Default application parameters - equivalent to parameters.yml. -``` - -Your file may be more verbose. - -Notice that each of the three sections is separated by a set of three dashes ("---"). Let's quickly describe each section. - -The first section of the file is where you specify identification metadata such as name, version, description and maintainers. It accepts key-value pairs. This part of the file can be a separate file called `metadata.yml` - -The second section of the file describes the application. It can be a separate file called `docker-compose.yml`. - -The final section is where default values for application parameters can be expressed. It can be a separate file called `parameters.yml` - -### Populate the project - -In this section, we'll edit the project YAML file so that it runs a simple web app. - -Use your preferred editor to edit the `hello-world.dockerapp` YAML file and update the application section to the following: - -``` -version: "3.6" -services: - hello: - image: hashicorp/http-echo - command: ["-text", "${hello.text}"] - ports: - - ${hello.port}:5678 -``` - -Update the `Parameters` section to the following: - -``` -hello: - port: 8080 - text: Hello world! -``` - -The sections of the YAML file are currently order-based. This means it's important they remain in the order we've explained, with the _metadata_ section being first, the _app_ section being second, and the _parameters_ section being last. 
This may change to name-based sections in future releases. - -Save the changes. - -The application has been updated to run a single-container application based on the `hashicorp/http-echo` web server image. This image will have it execute a single command that displays some text and exposes itself on a network port. - -Following best practices, the configuration of the application has been decoupled form the application itself using variables. In this case, the text displayed by the app, and the port it will be published on, are controlled by two variables defined in the `Parameters` section of the file. - -Docker App provides the `inspect` subcommand to provide a prettified summary of the application configuration. It is a quick way to check how to configure the application before deployment, without having to read the `Compose file`. It's important to note that the application is not running at this point, and that the `inspect` operation inspects the configuration file(s). - -``` -$ docker app inspect hello-world.dockerapp -hello-world 0.1.0 - -Service (1) Replicas Ports Image ------------ -------- ----- ----- -hello 1 8080 hashicorp/http-echo - -Parameters (2) Value --------------- ----- -hello.port 8080 -hello.text Hello world! -``` - -`docker app inspect` operations will fail if the `Parameters` section doesn't specify a default value for every parameter expressed in the app section. - -The application is ready to be validated and rendered. - -### Validate the app -Docker App provides the `validate` subcommand to check syntax and other aspects of the configuration. If the app passes validation, the command returns no arguments. - -``` -$ docker app validate hello-world.dockerapp -Validated "hello-world.dockerapp" -``` - -`docker app validate` operations will fail if the `Parameters` section doesn't specify a default value for every parameter expressed in the app section. - - -As the `validate` operation has returned no problems, the app is ready to be deployed. - -### Deploy the app - -There are several options for deploying a Docker App project. - -1. Deploy as a native Docker App application -1. Deploy as a Compose app application -1. Deploy as a Docker Stack application - -We'll look at all three options, starting with deploying as a native Dock App application. - -#### Deploy as a native Docker App - -The process for deploying as a native Docker app is as follows. - -1. Use `docker app install` to deploy the application - -Use the following command to deploy (install) the application. - -``` -$ docker app install hello-world.dockerapp --name my-app -Creating network my-app_default -Creating service my-app_hello -Application "my-app" installed on context "default" -``` - -The app will be deployed using the stack orchestrator. This means you can inspect it with regular `docker stack` commands. - -``` -$ docker stack ls -NAME SERVICES ORCHESTRATOR -my-app 1 Swarm -``` - -You can also check the status of the app with the `docker app status ` command. - -``` -$ docker app status my-app -INSTALLATION ------------- -Name: my-app -Created: 35 seconds -Modified: 31 seconds -Revision: 01DCMY7MWW67AY03B029QATXFF -Last Action: install -Result: SUCCESS -Orchestrator: swarm - -APPLICATION ------------ -Name: hello-world -Version: 0.1.0 -Reference: - -PARAMETERS ----------- -hello.port: 8080 -hello.text: Hello, World! 
- -STATUS ------- -ID NAME MODE REPLICAS IMAGE PORTS -miqdk1v7j3zk my-app_hello replicated 1/1 hashicorp/http-echo:latest *:8080->5678/tcp -``` - -Now that the app is running, you can point a web browser at the DNS name or public IP of the Docker node on port 8080 and see the app in all its glory. You will need to ensure traffic to port 8080 is allowed on the connection form your browser to your Docker host. - -You can uninstall the app with `docker app uninstall my-app`. - -#### Deploy as a Docker Compose app - -The process for deploying a as a Compose app comprises two major steps: - -1. Render the Docker app project as a `docker-compose.yml` file. -1. Deploy the app using `docker-compose up`. - -You will need a recent version of Docker Compose to complete these steps. - -Rendering is the process of reading the entire application configuration and outputting it as a single `docker-compose.yml` file. This will create a Compose file with hard-coded values wherever a parameter was specified as a variable. - -Use the following command to render the app to a Compose file called `docker-compose.yml` in the current directory. - -``` -$ docker app render --output docker-compose.yml hello-world.dockerapp -``` - -Check the contents of the resulting `docker-compose.yml` file. - -``` -$ cat docker-compose.yml -version: "3.6" -services: - hello: - command: - - -text - - Hello world! - image: hashicorp/http-echo - ports: - - mode: ingress - target: 5678 - published: 8080 - protocol: tcp -``` - -Notice that the file contains hard-coded values that were expanded based on the contents of the Parameters section of the project's YAML file. For example, `${hello.text}` has been expanded to "Hello world!". Almost all the `docker app` commands propose the `--set key=value` flag to override a default parameter. - -Try to render the application with a different text: - -``` -$ docker app render hello-world.dockerapp --set hello.text="Hello whales!" -version: "3.6" -services: - hello: - command: - - -text - - Hello whales! - image: hashicorp/http-echo - ports: - - mode: ingress - target: 5678 - published: 8080 - protocol: tcp -``` - -Use `docker-compose up` to deploy the app. - -``` -$ docker-compose up --detach -WARNING: The Docker Engine you're using is running in swarm mode. - -``` - -The application is now running as a Docker compose app and should be reachable on port `8080` on your Docker host. You will need to ensure traffic to port 8080 is allowed on the connection form your browser to your Docker host. - -You can use `docker-compose down` to stop and remove the application. - -#### Deploy as a Docker Stack - -Deploying the app as a Docker stack is a two-step process very similar to deploying it as a Docker compose app. - -1. Render the Docker app project as a `docker-compose.yml` file. -1. Deploy the app using `docker stack deploy`. - -We'll assume that you've followed the steps to render the Docker app project as a compose file (shown in the previous section) and that you're ready to deploy it as a Docker Stack. Your Docker host will need to be in Swarm mode. - -``` -$ docker stack deploy hello-world-app -c docker-compose.yml -Creating network hello-world-app_default -Creating service hello-world-app_hello -``` - -The app is now deployed as a Docker stack and can be reached on port `8080` on your Docker host. - -Use the `docker stack rm hello-world-app` command to stop and remove the stack. You will need to ensure traffic to port 8080 is allowed on the connection form your browser to your Docker host. 
- -### Push the app to Docker Hub - -As mentioned in the intro, `docker app` lets you manage entire applications the same way that we currently manage container images. For example, you can push and pull entire applications from registries like Docker Hub with `docker app push` and `docker app pull`. Other `docker app` commands, such as `install`, `upgrade`, `inspect` and `render` can be performed directly on applications while they are stored in a registry. - -Let's see some examples. - -Push the application to Docker Hub. To complete this step, you'll need a valid Docker ID and you'll need to be logged in to the registry you are pushing the app to. - -Be sure to replace the registry ID in the example below with your own. - -``` -$ docker app push my-app --tag nigelpoulton/app-test:0.1.0 -docker app push hello-world.dockerapp --tag nigelpoulton/app-test:0.1.0 -docker.io/nigelpoulton/app-test:0.1.0-invoc -hashicorp/http-echo - application/vnd.docker.distribution.manifest.v2+json [2/2] (sha256:ba27d460...) - -``` - -The app is now stored in the container registry. - -### Push the app to DTR - -Pushing an app to Docker Trusted Registry (DTR) involves the same procedure as [pushing an app to Docker Hub](#push-the-app-to-docker-hub) except that you need your DTR user credentials and [your DTR repository information](/ee/dtr/user/manage-images/review-repository-info/). To use client certificates for DTR authentication, see [Enable Client Certificate Authentication](/ee/enable-client-certificate-authentication/). - -```bash -$ docker app push my-app --tag /nigelpoulton/app-test:0.1.0 -/nigelpoulton/app-test:0.1.0-invoc -hashicorp/http-echo - application/vnd.docker.distribution.manifest.v2+json [2/2] (sha256:bd1a813b...) -Successfully pushed bundle to /nigelpoulton/app-test:0.1.0. -Digest is sha256:bd1a813b6301939fa46e617f96711e0cca1e4065d2d724eb86abde6ef7b18e23. -``` - -The app is now stored in your DTR. - -### Install the app directly from Docker Hub or DTR - -Now that the app is pushed to the registry, try an `inspect` and `install` command against it. The location of your app will be different to the one shown in the examples. - -``` -$ docker app inspect nigelpoulton/app-test:0.1.0 -hello-world 0.1.0 - -Service (1) Replicas Ports Image ------------ -------- ----- ----- -hello 1 8080 nigelpoulton/app-test@sha256:ba27d460cd1f22a1a4331bdf74f4fccbc025552357e8a3249c40ae216275de96 - -Parameters (2) Value --------------- ----- -hello.port 8080 -hello.text Hello world! -``` - -This action was performed directly against the app in the registry. Note that for DTR, the application will be prefixed with the Fully Qualified Domain Name (FQDN) of your trusted registry. - -Now install it as a native Docker App by referencing the app in the registry, with a different port. - -``` -$ docker app install nigelpoulton/app-test:0.1.0 --set hello.port=8181 -Creating network hello-world_default -Creating service hello-world_hello -Application "hello-world" installed on context "default" -``` - -Test that the app is working. - -The app used in these examples is a simple web server that displays the text "Hello world!" on port 8181, your app may be different. - -``` -$ curl http://localhost:8181 -Hello world! -``` - -Uninstall the app. - -``` -$ docker app uninstall hello-world -Removing service hello-world_hello -Removing network hello-world_default -Application "hello-world" uninstalled on context "default" -``` - -You can see the name of your Docker App with the `docker stack ls` command. 
- +--- +title: Working with Docker App (experimental) +description: Learn about Docker App +keywords: Docker App, applications, compose, orchestration +--- + +>This is an experimental feature. +> +>{% include experimental.md %} + +## Overview + +Docker App is a CLI plug-in that introduces a top-level `docker app` command to bring +the _container experience_ to applications. The following table compares Docker containers with Docker applications. + + +| Object | Config file | Build with | Execute with | Share with | +| ------------- |---------------| -------------------|-----------------------|-------------------| +| Container | Dockerfile | docker image build | docker container run | docker image push | +| App | App Package | docker app bundle | docker app install | docker app push | + + +With Docker App, entire applications can now be managed as easily as images and containers. For example, +Docker App lets you _build_, _validate_ and _deploy_ applications with the `docker app` command. You can +even leverage secure supply-chain features such as signed `push` and `pull` operations. + +> **NOTE**: `docker app` works with `Engine - Community 19.03` or higher and `Engine - Enterprise 19.03` or higher. + +This guide walks you through two scenarios: + +1. Initialize and deploy a new Docker App project from scratch. +1. Convert an existing Compose app into a Docker App project (added later in the beta process). + +The first scenario introduces the basic components of a Docker App, along with the tools and workflow. + +## Initialize and deploy a new Docker App project from scratch + +This section describes the steps for creating a new Docker App project to familiarize you with the workflow and most important commands. + +1. Prerequisites +1. Initialize a new empty project +1. Populate the project +1. Validate the app +1. Deploy the app +1. Push the app to Docker Hub or Docker Trusted Registry +1. Install the app directly from Docker Hub + +### Prerequisites + +You need at least one Docker node operating in Swarm mode. You also need the latest build of the Docker CLI +with the App CLI plugin included. + +Depending on your Linux distribution and your security context, you might need to prepend commands with `sudo`. + +### Initialize a new empty project + +The `docker app init` command is used to initialize a new Docker application project. If you run it on +its own, it initializes a new empty project. If you point it to an existing `docker-compose.yml` file, +it initializes a new project based on the Compose file. + +Use the following command to initialize a new empty project called "hello-world". + +``` +$ docker app init --single-file hello-world +Created "hello-world.dockerapp" +``` + +The command produces a single file in your current directory called `hello-world.dockerapp`. +The file name is the project name with `.dockerapp` appended. + +``` +$ ls +hello-world.dockerapp +``` + +If you run `docker app init` without the `--single-file` flag, you get a new directory containing three YAML files. +The name of the directory is the name of the project with `.dockerapp` appended, and the three YAML files are: + +- `docker-compose.yml` +- `metadata.yml` +- `parameters.yml` + +However, the `--single-file` option merges the three YAML files into a single YAML file with three sections. +Each of these sections relates to one of the three YAML files mentioned previously: `docker-compose.yml`, +`metadata.yml`, and `parameters.yml`.
Using the `--single-file` option enables you to share your application +using a single configuration file. + +Inspect the YAML with the following command. + +``` +$ cat hello-world.dockerapp +# Application metadata - equivalent to metadata.yml. +version: 0.1.0 +name: hello-world +description: +--- +# Application services - equivalent to docker-compose.yml. +version: "3.6" +services: {} +--- +# Default application parameters - equivalent to parameters.yml. +``` + +Your file might be more verbose. + +Notice that each of the three sections is separated by a set of three dashes ("---"). Let's quickly describe each section. + +The first section of the file specifies identification metadata such as name, version, +description and maintainers. It accepts key-value pairs. This part of the file can be a separate file called `metadata.yml`. + +The second section of the file describes the application. It can be a separate file called `docker-compose.yml`. + +The final section specifies default values for application parameters. It can be a separate file called `parameters.yml`. + +### Populate the project + +This section describes editing the project YAML file so that it runs a simple web app. + +Use your preferred editor to edit the `hello-world.dockerapp` YAML file and update the application section with +the following information: + +``` +version: "3.6" +services: + hello: + image: hashicorp/http-echo + command: ["-text", "${hello.text}"] + ports: + - ${hello.port}:5678 +``` + +Update the `Parameters` section to the following: + +``` +hello: + port: 8080 + text: Hello world! +``` + +The sections of the YAML file are currently order-based. This means it's important they remain in the order we've explained, with the _metadata_ section being first, the _app_ section being second, and the _parameters_ section being last. This may change to name-based sections in future releases. + +Save the changes. + +The application is updated to run a single-container application based on the `hashicorp/http-echo` web server image. +This image runs a single command that displays some text and listens on a network port. + +Following best practices, the configuration of the application is decoupled from the application itself using variables. +In this case, the text displayed by the app and the port on which it will be published are controlled by two variables defined in the `Parameters` section of the file. + +Docker App provides the `inspect` subcommand to display a prettified summary of the application configuration. +It is a quick way to check how to configure the application before deployment, without having to read +the `Compose file`. It's important to note that the application is not running at this point, and that +the `inspect` operation inspects the configuration file(s). + +``` +$ docker app inspect hello-world.dockerapp +hello-world 0.1.0 + +Service (1) Replicas Ports Image +----------- -------- ----- ----- +hello 1 8080 hashicorp/http-echo + +Parameters (2) Value +-------------- ----- +hello.port 8080 +hello.text Hello world! +``` + +`docker app inspect` operations fail if the `Parameters` section doesn't specify a default value for +every parameter expressed in the app section. + +The application is ready to be validated and rendered. + +### Validate the app + +Docker App provides the `validate` subcommand to check syntax and other aspects of the configuration. +If the app passes validation, the command reports no errors.
+
+### Validate the app
+
+Docker App provides the `validate` subcommand to check syntax and other aspects of the configuration.
+If the app passes validation, the command confirms it and returns no errors.
+
+```
+$ docker app validate hello-world.dockerapp
+Validated "hello-world.dockerapp"
+```
+
+`docker app validate` operations fail if the `Parameters` section doesn't specify a default value for
+every parameter expressed in the app section.
+
+As the `validate` operation has returned no problems, the app is ready to be deployed.
+
+### Deploy the app
+
+There are several options for deploying a Docker App project.
+
+- Deploy as a native Docker App application
+- Deploy as a Docker Compose application
+- Deploy as a Docker Stack application
+
+All three options are discussed, starting with deploying as a native Docker App application.
+
+#### Deploy as a native Docker App
+
+Use the `docker app install` command to deploy the application.
+
+```
+$ docker app install hello-world.dockerapp --name my-app
+Creating network my-app_default
+Creating service my-app_hello
+Application "my-app" installed on context "default"
+```
+
+By default, `docker app` uses the [current context](/engine/context/working-with-contexts) to run the
+installation container and as a target context to deploy the application. You can override the second context
+using the flag `--target-context` or by using the environment variable `DOCKER_TARGET_CONTEXT`. This flag is also
+available for the commands `status`, `upgrade`, and `uninstall`.
+
+```
+$ docker app install hello-world.dockerapp --name my-app --target-context=my-big-production-cluster
+Creating network my-app_default
+Creating service my-app_hello
+Application "my-app" installed on context "my-big-production-cluster"
+```
+
+> **Note**: Two applications deployed on the same target context cannot share the same name, but this is
+valid if they are deployed on different target contexts.
+
+You can check the status of the app with the `docker app status <app-name>` command.
+
+```
+$ docker app status my-app
+INSTALLATION
+------------
+Name:         my-app
+Created:      35 seconds
+Modified:     31 seconds
+Revision:     01DCMY7MWW67AY03B029QATXFF
+Last Action:  install
+Result:       SUCCESS
+Orchestrator: swarm
+
+APPLICATION
+-----------
+Name:      hello-world
+Version:   0.1.0
+Reference:
+
+PARAMETERS
+----------
+hello.port: 8080
+hello.text: Hello world!
+
+STATUS
+------
+ID            NAME          MODE        REPLICAS  IMAGE                       PORTS
+miqdk1v7j3zk  my-app_hello  replicated  1/1       hashicorp/http-echo:latest  *:8080->5678/tcp
+```
+
+The app is deployed using the stack orchestrator. This means you can also inspect it using the regular `docker stack` commands.
+
+```
+$ docker stack ls
+NAME                SERVICES            ORCHESTRATOR
+my-app              1                   Swarm
+```
+
+Now that the app is running, you can point a web browser at the DNS name or public IP of the Docker node on
+port 8080 and see the app. You must ensure traffic to port 8080 is allowed on
+the connection from your browser to your Docker host.
+
+Now change the port of the application using the `docker app upgrade <app-name>` command.
+```
+$ docker app upgrade my-app --hello.port=8181
+Upgrading service my-app_hello
+Application "my-app" upgraded on context "default"
+```
+
+You can uninstall the app with `docker app uninstall my-app`.
+
+#### Deploy as a Docker Compose app
+
+The process for deploying as a Compose app comprises two major steps:
+
+1. Render the Docker App project as a `docker-compose.yml` file.
+2. Deploy the app using `docker-compose up`.
+
+You need a recent version of Docker Compose to complete these steps.
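+
+You can verify which version is installed with `docker-compose version`; the
+output below is illustrative:
+
+```
+$ docker-compose version
+docker-compose version 1.24.1, build 4667896b
+```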
+
+Rendering is the process of reading the entire application configuration and outputting it as a single `docker-compose.yml` file. This creates a Compose file with hard-coded values wherever a parameter was specified as a variable.
+
+Use the following command to render the app to a Compose file called `docker-compose.yml` in the current directory.
+
+```
+$ docker app render --output docker-compose.yml hello-world.dockerapp
+```
+
+Check the contents of the resulting `docker-compose.yml` file.
+
+```
+$ cat docker-compose.yml
+version: "3.6"
+services:
+  hello:
+    command:
+    - -text
+    - Hello world!
+    image: hashicorp/http-echo
+    ports:
+    - mode: ingress
+      target: 5678
+      published: 8080
+      protocol: tcp
+```
+
+Notice that the file contains hard-coded values that were expanded based on the contents of the `Parameters`
+section of the project's YAML file. For example, `${hello.text}` has been expanded to "Hello world!".
+
+> **Note**: Almost all the `docker app` commands support the `--set key=value` flag to override a default parameter.
+
+Try to render the application with a different text:
+
+```
+$ docker app render hello-world.dockerapp --set hello.text="Hello whales!"
+version: "3.6"
+services:
+  hello:
+    command:
+    - -text
+    - Hello whales!
+    image: hashicorp/http-echo
+    ports:
+    - mode: ingress
+      target: 5678
+      published: 8080
+      protocol: tcp
+```
+
+Use `docker-compose up` to deploy the app.
+
+```
+$ docker-compose up --detach
+WARNING: The Docker Engine you're using is running in swarm mode.
+
+```
+
+The application is now running as a Docker Compose app and should be reachable on port `8080` on your Docker host.
+You must ensure traffic to port `8080` is allowed on the connection from your browser to your Docker host.
+
+You can use `docker-compose down` to stop and remove the application.
+
+#### Deploy as a Docker Stack
+
+Deploying the app as a Docker stack is a two-step process very similar to deploying it as a Docker Compose app.
+
+1. Render the Docker App project as a `docker-compose.yml` file.
+2. Deploy the app using `docker stack deploy`.
+
+Complete the steps in the previous section to render the Docker App project as a Compose file. Your Docker
+host must be in Swarm mode to deploy it as a Docker Stack.
+
+```
+$ docker stack deploy hello-world-app -c docker-compose.yml
+Creating network hello-world-app_default
+Creating service hello-world-app_hello
+```
+
+The app is now deployed as a Docker stack and can be reached on port `8080` on your Docker host. You must
+ensure traffic to port `8080` is allowed on the connection from your browser to your Docker host.
+
+Use the `docker stack rm hello-world-app` command to stop and remove the stack.
+
+### Push the app to Docker Hub
+
+As mentioned in the introduction, `docker app` lets you manage entire applications the same way that you
+currently manage container images. For example, you can push and pull entire applications from registries like
+Docker Hub with `docker app push` and `docker app pull`. Other `docker app` commands, such
+as `install`, `upgrade`, `inspect`, and `render` can be performed directly on applications while they are
+stored in a registry.
+
+The following section contains some examples.
+
+Push the application to Docker Hub. To complete this step, you need a valid Docker ID and you must be
+logged in to the registry to which you are pushing the app.
+
+Be sure to replace the registry ID in the following example with your own.
+
+```
+$ docker app push my-app --tag nigelpoulton/app-test:0.1.0
+docker.io/nigelpoulton/app-test:0.1.0-invoc
+hashicorp/http-echo
+application/vnd.docker.distribution.manifest.v2+json [2/2] (sha256:ba27d460...)
+
+```
+
+The app is now stored in the container registry.
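+
+Once pushed, the app can also be retrieved on another machine with
+`docker app pull` (a sketch; replace the reference with your own):
+
+```
+$ docker app pull nigelpoulton/app-test:0.1.0
+```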
+
+### Push the app to DTR
+
+Pushing an app to Docker Trusted Registry (DTR) involves the same procedure as [pushing an app to Docker Hub](#push-the-app-to-docker-hub) except that you need your DTR user credentials and [your DTR repository information](/ee/dtr/user/manage-images/review-repository-info/). To use client certificates for DTR authentication, see [Enable Client Certificate Authentication](/ee/enable-client-certificate-authentication/).
+
+```bash
+$ docker app push my-app --tag <dtr-fqdn>/nigelpoulton/app-test:0.1.0
+<dtr-fqdn>/nigelpoulton/app-test:0.1.0-invoc
+hashicorp/http-echo
+application/vnd.docker.distribution.manifest.v2+json [2/2] (sha256:bd1a813b...)
+Successfully pushed bundle to <dtr-fqdn>/nigelpoulton/app-test:0.1.0.
+Digest is sha256:bd1a813b6301939fa46e617f96711e0cca1e4065d2d724eb86abde6ef7b18e23.
+```
+
+The app is now stored in your DTR.
+
+### Install the app directly from Docker Hub or DTR
+
+Now that the app is pushed to the registry, try an `inspect` and `install` command against it.
+The location of your app is different from the one provided in the examples.
+
+```
+$ docker app inspect nigelpoulton/app-test:0.1.0
+hello-world 0.1.0
+
+Service (1) Replicas Ports Image
+----------- -------- ----- -----
+hello       1        8080  nigelpoulton/app-test@sha256:ba27d460cd1f22a1a4331bdf74f4fccbc025552357e8a3249c40ae216275de96
+
+Parameters (2) Value
+-------------- -----
+hello.port     8080
+hello.text     Hello world!
+```
+
+This action was performed directly against the app in the registry. Note that for DTR, the application is prefixed with the Fully Qualified Domain Name (FQDN) of your trusted registry.
+
+Now install it as a native Docker App by referencing the app in the registry, with a different port.
+
+```
+$ docker app install nigelpoulton/app-test:0.1.0 --set hello.port=8181
+Creating network hello-world_default
+Creating service hello-world_hello
+Application "hello-world" installed on context "default"
+```
+
+Test that the app is working.
+
+The app used in these examples is a simple web server that displays the text "Hello world!" on port 8181;
+your app might be different.
+
+```
+$ curl http://localhost:8181
+Hello world!
+```
+
+Uninstall the app.
+
+```
+$ docker app uninstall hello-world
+Removing service hello-world_hello
+Removing network hello-world_default
+Application "hello-world" uninstalled on context "default"
+```
+
+You can see the name of your Docker App with the `docker stack ls` command.
diff --git a/cluster/aws.md b/cluster/aws.md
index 2a3a6ab87f..f67f5de43a 100644
--- a/cluster/aws.md
+++ b/cluster/aws.md
@@ -105,7 +105,7 @@ The values are substituted in the cluster definition, which makes it easy
 to define a re-usable cluster definition and then change the variables
 to create multiple instances of a cluster.
 
-Run `docker cluster create --file cluster.yml --name quickstart`
+Run `docker cluster create --file cluster.yml --name quickstart`.
$ docker cluster create --file cluster.yml --name quickstart Please provide a value for ucp_password @@ -114,7 +114,7 @@ Run `docker cluster create --file cluster.yml --name quickstart` Planning cluster on aws [OK] Creating: [=========================== ] 44% -After approximately 10 minutes, resources are provisioned Docker Enterprise installation is started: +After approximately 10 minutes, resources are provisioned, and Docker Enterprise installation is started: $ docker cluster create --file cluster.yml --name quickstart Please provide a value for ucp_password @@ -156,7 +156,7 @@ To view an inventory of the clusters you created, run `docker cluster ls`: ID NAME PROVIDER ENGINE UCP DTR STATE 911c882340b2 quickstart acme, aws ee-stable-18.09.5 docker/ucp:3.1.6 docker/dtr:2.6.5 running -For detailed information about the cluster, run `docker cluster inspect quickstart` +For detailed information about the cluster, run `docker cluster inspect quickstart`. $ docker cluster inspect quickstart ```yaml @@ -398,5 +398,5 @@ All provisioned resources are destroyed and the context for the cluster is remov ## Where to go next - View the quick start guide for [Azure](azure.md) or [vSphere](vsphere.md) -- [Explore the full list of Cluster commands](./reference/index.md) -- [Cluster configuration file reference](./cluster-file/index.md) +- [Explore the full list of Cluster commands](/engine/reference/commandline/cluster/) +- [Cluster configuration file reference](./cluster-file.md) diff --git a/cluster/cluster-file.md b/cluster/cluster-file.md new file mode 100644 index 0000000000..f628c61ee0 --- /dev/null +++ b/cluster/cluster-file.md @@ -0,0 +1,529 @@ +--- +description: Cluster file reference and guidelines +keywords: documentation, docs, docker, cluster, infrastructure, automation +title: Cluster file version 1 reference +toc_max: 5 +toc_min: 1 +--- + +This topic describes version 1 of the Cluster file format. + +## Cluster file structure and examples + +
+ +
+{% raw %}
+```yaml

+variable:
+  domain: "YOUR DOMAIN, e.g. docker.com"
+  subdomain: "A SUBDOMAIN, e.g. cluster"
+  region: "THE AWS REGION TO DEPLOY, e.g. us-east-1"
+  email: "YOUR.EMAIL@COMPANY.COM"
+  ucp_password:
+    type: prompt
+provider:
+  acme:
+    email: ${email}
+    server_url: https://acme-staging-v02.api.letsencrypt.org/directory
+  aws:
+    region: ${region}
+cluster:
+  dtr:
+    version: docker/dtr:2.6.5
+  engine:
+    version: ee-stable-18.09.5
+  ucp:
+    username: admin
+    password: ${ucp_password}
+    version: docker/ucp:3.1.6
+resource:
+  aws_instance:
+    managers:
+      instance_type: t2.xlarge
+      os: Ubuntu 16.04
+      quantity: 3
+    registry:
+      instance_type: t2.xlarge
+      os: Ubuntu 16.04
+      quantity: 3
+    workers:
+      instance_type: t2.xlarge
+      os: Ubuntu 16.04
+      quantity: 3
+  aws_lb:
+    apps:
+      domain: ${subdomain}.${domain}
+      instances:
+      - workers
+      ports:
+      - 80:8080
+      - 443:8443
+    dtr:
+      domain: ${subdomain}.${domain}
+      instances:
+      - registry
+      ports:
+      - 443:443
+    ucp:
+      domain: ${subdomain}.${domain}
+      instances:
+      - managers
+      ports:
+      - 443:443
+      - 6443:6443
+  aws_route53_zone:
+    dns:
+      domain: ${domain}
+      subdomain: ${subdomain}
+```
+{% endraw %}
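+
+A cluster file like this is typically passed to `docker cluster create`. For
+example:
+
+```
+$ docker cluster create --file cluster.yml --name quickstart
+```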
+
+
+
+The topics on this reference page are organized alphabetically by top-level keys
+to reflect the structure of the Cluster file. Top-level keys that define
+a section in the configuration file, such as `cluster`, `provider`, and `resource`,
+are listed with the options that support them as sub-topics. This information
+maps to the indent structure of the Cluster file.
+
+### cluster
+Specifies components to install and configure for a cluster.
+
+The following components are available:
+
+- `subscription`: (Optional) A string value representing the subscription ID.
+- `license`: (Optional) A path to the cluster's license file.
+- `cloudstor`: (Optional) Configuration options for Docker CloudStor.
+- `dtr`: (Optional) Configuration options for Docker Trusted Registry.
+- `engine`: (Optional) Configuration options for Docker Engine.
+- `ucp`: (Optional) Configuration options for Docker Universal Control Plane.
+- `registry`: (Optional) Configuration options for authenticating nodes with a registry to pull Docker images.
+
+#### cloudstor
+Customizes the installation of Docker CloudStor.
+
+- `version`: (Optional) The version of CloudStor to install. Default is `1.0`.
+- `use_efs`: (Optional) Specifies whether an Elastic File System should be provisioned. Defaults to `false`.
+
+#### dtr
+Customizes the installation of Docker Trusted Registry.
+```yaml
+cluster:
+  dtr:
+    version: "docker/dtr:2.6.5"
+    install_options:
+    - "--debug"
+    - "--enable-pprof"
+```
+
+The following optional elements can be specified:
+
+- `version`: (Optional) The version of DTR to install. Defaults to `docker/dtr:2.6.5`.
+- `ca`: (Optional) The path to a root CA public certificate.
+- `key`: (Optional) The path to a TLS private key.
+- `cert`: (Optional) The path to a public key certificate.
+- `install_options`: (Optional) Additional [DTR install options](https://docs.docker.com/reference/dtr/2.6/cli/install/).
+
+#### engine
+Customizes the installation of Docker Enterprise Engine.
+```yaml
+cluster:
+  engine:
+    channel: "stable"
+    edition: "ee"
+    version: "19.03"
+```
+
+The following optional elements can be specified:
+- `version`: (Optional) The version of the Docker Engine to install. Defaults to `19.03`.
+- `edition`: (Optional) The family of Docker Engine to install. Defaults to `ee` for Enterprise edition.
+- `channel`: (Optional) The channel on the repository to pull updated packages. Defaults to `stable`.
+- `url`: (Optional) Defaults to "https://storebits.docker.com/ee".
+- `storage_driver`: (Optional) The storage driver to use for the storage volume. Default
+value is dependent on the operating system.
+  - Amazon Linux 2 is `overlay2`.
+  - Centos is `overlay2`.
+  - Oracle Linux is `overlay2`.
+  - RedHat is `overlay2`.
+  - SLES is `btrfs`.
+  - Ubuntu is `overlay2`.
+- `storage_fstype`: (Optional) File system to use for the storage volume. Default value is dependent on the operating system.
+  - Amazon Linux 2 is `xfs`.
+  - Centos is `xfs`.
+  - Oracle Linux is `xfs`.
+  - RedHat is `xfs`.
+  - SLES is `btrfs`.
+  - Ubuntu is `ext4`.
+- `storage_volume`: (Optional) Docker storage volume path for `/var/lib/docker`. Default value is provider dependent.
+  - AWS
+    - non-NVME is `/dev/xvdb`.
+    - NVME disks are one of `/dev/nvme[0-26]n1`.
+  - Azure is `/dev/disk/azure/scsi1/lun0`.
+- `daemon`: (Optional) Provides docker daemon options. Defaults to "".
+- `ca`: (dev) Defaults to "".
+- `key`: (dev) Defaults to "".
+- `enable_remote_tcp`: (dev) Enables direct access to the docker engine. Defaults to `false`.
+
+*dev indicates that the functionality is only for development and testing.
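+
+As a sketch, the storage-related engine options can be combined as follows
+(values are illustrative; the documented defaults depend on the operating
+system and provider):
+
+```yaml
+cluster:
+  engine:
+    version: "19.03"
+    storage_driver: overlay2
+    storage_fstype: xfs
+    storage_volume: /dev/xvdb
+```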
+
+#### kubernetes
+Enables provider-specific options for Kubernetes support.
+
+##### AWS Kubernetes options
+
+- `cloud_provider`: (Optional) Enables cloud provider support for Kubernetes. Defaults to `false`.
+- `ebs_persistent_volumes`: (Optional) Enables persistent volume support with EBS volumes. Defaults to `false`.
+- `efs_persistent_volumes`: (Optional) Enables persistent volume support with EFS. Defaults to `false`.
+- `load_balancer`: (Optional) Enables Kubernetes pods to instantiate a load-balancer. Defaults to `false`.
+- `nfs_storage`: (Optional) Installs additional packages on the node for NFS support. Defaults to `false`.
+- `lifecycle`: (Optional) Defaults to `owned`.
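+
+For example, to enable the AWS cloud provider with EBS-backed persistent
+volumes and load-balancer support (a sketch using the options above):
+
+```yaml
+cluster:
+  kubernetes:
+    cloud_provider: true
+    ebs_persistent_volumes: true
+    load_balancer: true
+```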
+
+#### registry
+Customizes the registry from which the installation should pull images. By default, Docker Hub and credentials to access Docker Hub are used.
+
+```yaml
+cluster:
+  registry:
+    password: ${base64decode("TVJYeTNDQWpTSk5HTW1ZRzJQcE1kM0tVRlQ=")}
+    url: https://index.docker.io/v1/
+    username: user
+```
+
+The following optional elements can be specified:
+- `username`: The username for logging in to the registry on each node. Default value is the current docker user.
+- `url`: The registry to use for pulling Docker images. Defaults to "https://index.docker.io/v1/".
+- `password`: The password for logging in to the registry on each node. Default value is the current docker user's password base64 encoded and wrapped in a call to base64decode.
+
+#### ucp
+
+- `version`: Specifies the version of UCP to install. Defaults to `docker/ucp:3.1.6`.
+- `username`: Specifies the username of the first user to create in UCP. Defaults to `admin`.
+- `password`: Specifies the password of the first user to create in UCP. Defaults to `dockerdocker`.
+- `ca`: Specifies a path to a root CA public certificate.
+- `key`: Specifies a path to a TLS private key.
+- `cert`: Specifies a path to a public key certificate.
+- `install_options`: Lists additional [UCP install options](https://docs.docker.com/reference/ucp/3.1/cli/install/).
+
+##### Additional UCP configuration options
+Docker Cluster also accepts all UCP configuration options and creates the initial UCP config on
+installation. The following list provides supported options:
+- `anonymize_tracking`: Anonymizes analytic data. Specify 'true' to hide the license ID. Defaults to 'false'.
+- `audit_level`: Specifies the audit logging level. Leave empty to disable audit logs (default).
+Other valid values are 'metadata' and 'request'.
+- `auto_refresh`: Specify 'true' to enable attempted automatic license renewal when the license
+nears expiration. If disabled, you must manually upload the renewed license after expiration. Defaults to 'true'.
+- `azure_ip_count`: Sets the IP count for the Azure allocator to allocate IPs per Azure virtual machine.
+- `backend`: Specifies the name of the authorization backend to use, either 'managed' or 'ldap'. Defaults to 'managed'.
+- `calico_mtu`: Specifies the MTU (maximum transmission unit) size for the Calico plugin. Defaults to '1480'.
+- `cloud_provider`: Specifies the cloud provider for the Kubernetes cluster.
+- `cluster_label`: Specifies a label to be included with analytics.
+- `cni_installer_url`: Specifies the URL of a Kubernetes YAML file to be used for installing a CNI plugin.
+Only applies during initial installation. If empty, the default CNI plugin is used.
+- `controller_port`: Configures the port that the 'ucp-controller' listens to. Defaults to '443'.
+- `custom_header_name`: Specifies the name of the custom header with 'name' = '*X-Custom-Header-Name*'.
+- `custom_header_value`: Specifies the value of the custom header with 'value' = '*Custom Header Value*'.
+- `default_new_user_role`: Specifies the role that new users get for their private resource sets.
+Values are 'admin', 'viewonly', 'scheduler', 'restrictedcontrol', or 'fullcontrol'. Defaults to 'restrictedcontrol'.
+- `default_node_orchestrator`: Specifies the type of orchestrator to use for new nodes that are
+joined to the cluster. Can be 'swarm' or 'kubernetes'. Defaults to 'swarm'.
+- `disable_tracking`: Specify 'true' to disable analytics of API call information. Defaults to 'false'.
+- `disable_usageinfo`: Specify 'true' to disable analytics of usage information. Defaults to 'false'.
+- `dns`: Specifies a CSV list of IP addresses to add as nameservers.
+- `dns_opt`: Specifies a CSV list of options used by DNS resolvers.
+- `dns_search`: Specifies a CSV list of domain names to search when a bare unqualified hostname is
+used inside of a container.
+- `enable_admin_ucp_scheduling`: Specify 'true' to allow admins to schedule containers on manager nodes.
+Defaults to 'false'.
+- `external_service_lb`: Specifies an optional external load balancer for default links to services with
+exposed ports in the web interface.
+- `host_address`: Specifies the address for connecting to the DTR instance tied to this UCP cluster.
+- `log_host`: Specifies a remote syslog server to send UCP controller logs to. If omitted, controller
+logs are sent through the default docker daemon logging driver from the 'ucp-controller' container.
+- `idpMetadataURL`: Specifies the Identity Provider Metadata URL.
+- `image_repository`: Specifies the repository to use for UCP images.
+- `install_args`: Specifies additional arguments to pass to the UCP installer.
+- `ipip_mtu`: Specifies the IPIP MTU size for the calico IPIP tunnel interface.
+- `kube_apiserver_port`: Configures the port to which the Kubernetes API server listens.
+- `kv_snapshot_count`: Sets the key-value store snapshot count setting. Defaults to '20000'.
+- `kv_timeout`: Sets the key-value store timeout setting, in milliseconds. Defaults to '5000'.
+- `lifetime_minutes`: Specifies the initial session lifetime, in minutes. Defaults to `4320`, which is 72 hours.
+- `local_volume_collection_mapping`: Stores data about collections for volumes in UCP's local KV store
+instead of on the volume labels. This is used for enforcing access control on volumes.
+- `log_level`: Specifies the logging level for UCP components. Values are syslog priority
+levels (https://linux.die.net/man/5/syslog.conf): 'debug', 'info', 'notice', 'warning', 'err', 'crit', 'alert',
+and 'emerg'.
+- `managedPasswordDisabled`: Indicates if managed password is disabled. Defaults to false.
+- `managedPasswordFallbackUser`: The fallback user when the managed password authentication is disabled. Defaults to "".
+- `manager_kube_reserved_resources`: Reserves resources for Docker UCP and Kubernetes components
+that are running on manager nodes.
+- `metrics_disk_usage_interval`: Specifies how frequently storage metrics are gathered.
+This operation can impact performance when large volumes are present.
+- `metrics_retention_time`: Adjusts the metrics retention time.
+- `metrics_scrape_interval`: Specifies how frequently managers gather metrics from nodes in the cluster.
+- `nodeport_range`: Specifies the port range in which Kubernetes services of type NodePort can be exposed.
+Defaults to '32768-35535'.
+- `per_user_limit`: Specifies the maximum number of sessions that a user can have active simultaneously. If
+the creation of a new session would put a user over this limit, the least recently used session is deleted.
+A value of zero disables limiting the number of sessions that users can have. Defaults to `5`.
+- `pod_cidr`: Specifies the subnet pool from which Pod IPs are allocated by the CNI IPAM plugin.
+- `profiling_enabled`: Specify 'true' to enable specialized debugging endpoints for profiling UCP performance.
+Defaults to 'false'.
+- `log_protocol`: Specifies the protocol to use for remote logging. Values are 'tcp' and 'udp'. Defaults to 'tcp'.
+- `renewal_threshold_minutes`: Specifies the length of time, in minutes, before the expiration of a
+session. When used, a session is extended by the current configured lifetime from that point in time. A zero value disables session extension. Defaults to `1440`, which is 24 hours.
+- `require_content_trust`: Specify 'true' to require that images be signed by content trust. Defaults to 'false'.
+- `require_signature_from`: Specifies a CSV list of users or teams required to sign images.
+- `rethinkdb_cache_size`: Sets the size of the cache used by UCP's RethinkDB servers. Defaults to 1GB,
+but leaving this field empty or specifying `auto` instructs RethinkDB to determine a cache size automatically.
+- `rootCerts`: Defaults to empty.
+- `samlEnabled`: Indicates if SAML is used.
+- `samlLoginText`: Specifies the customized SAML login button text.
+- `service_id`: Specifies the DTR instance's OpenID Connect Client ID, as registered with the Docker
+authentication provider.
+- `spHost`: Specifies the Service Provider Host.
+- `storage_driver`: Specifies the UCP storage driver to install.
+- `support_dump_include_audit_logs`: When set to `true`, support dumps include audit logs in the logs
+of the 'ucp-controller' container of each manager node. Defaults to 'false'.
+- `swarm_port`: Configures the port that the 'ucp-swarm-manager' listens to. Defaults to '2376'.
+- `swarm_strategy`: Configures the placement strategy for container scheduling.
+This doesn't affect swarm-mode services. Values are 'spread', 'binpack', and 'random'.
+- `tlsSkipVerify`: Specifies whether to skip TLS verification for IdP metadata.
+- `unmanaged_cni`: Defaults to 'false'.
+- `worker_kube_reserved_resources`: Reserves resources for Docker UCP and Kubernetes components
+that are running on worker nodes.
+- `custom_kube_api_server_flags`: Specifies the configuration options for the Kubernetes API server. (dev)
+- `custom_kube_controller_manager_flags`: Specifies the configuration options for the Kubernetes controller manager. (dev)
+- `custom_kube_scheduler_flags`: Specifies the configuration options for the Kubernetes scheduler. (dev)
+- `custom_kubelet_flags`: Specifies the configuration options for Kubelets. (dev)
+
+*dev indicates that the functionality is only for development and testing. Arbitrary Kubernetes configuration parameters are not tested and supported under the Docker Enterprise Software Support Agreement.
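+
+As a sketch, these options sit directly under `ucp` in the cluster file
+alongside the installation options (values are illustrative):
+
+```yaml
+cluster:
+  ucp:
+    version: docker/ucp:3.1.6
+    audit_level: metadata
+    default_node_orchestrator: kubernetes
+    per_user_limit: 10
+```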
+
+### provider
+Defines where the cluster's resources are provisioned, as well as provider-specific configuration such as tags.
+
+{% raw %}
+```yaml
+provider:
+  acme:
+    email: ${email}
+    server_url: https://acme-staging-v02.api.letsencrypt.org/directory
+  aws:
+    region: ${region}
+```
+{% endraw %}
+
+#### acme
+The Automated Certificate Management Environment (ACME) is an evolving standard for the automation of a domain-validated certificate authority. Docker Cluster uses the ACME provider to create SSL certificates that are signed by [Let's Encrypt](https://letsencrypt.org/).
+
+Configuration for the ACME provider supports arguments that closely align with the [Terraform ACME provider](https://www.terraform.io/docs/providers/acme/index.html).
+
+The following elements can be specified:
+- `email`: (Required) The email to associate the certificates with.
+- `server_url`: (Optional) The URL to the ACME endpoint's directory. Default is "https://acme-v02.api.letsencrypt.org/directory".
+
+#### aws
+Configuration for the AWS provider supports arguments that closely align with the [Terraform AWS provider](https://www.terraform.io/docs/providers/aws/index.html).
+
+```yaml
+aws:
+  region: "us-east-1"
+  tags:
+    Owner: "Infra"
+    Environment: "Test"
+```
+The following elements can be specified:
+- `region` - (Required) This is the AWS region. It can be sourced from the `AWS_DEFAULT_REGION` environment variable, or
+  via a shared credentials file if `profile` is specified.
+- `tags` - (Optional) Additional name value pairs to assign to every resource (which
+  supports tagging) in the cluster.
+- `access_key` - (Required) This is the AWS access key. It can be sourced from
+the `AWS_ACCESS_KEY_ID` environment variable, or via
+  a shared credentials file if `profile` is specified.
+- `secret_key` - (Required) This is the AWS secret key. It can be sourced from
+the `AWS_SECRET_ACCESS_KEY` environment variable, or
+  via a shared credentials file if `profile` is specified.
+- `profile` - (Optional) This is the AWS profile name as set in the shared credentials
+  file.
+- `assume_role` - (Optional) An `assume_role` block (documented below). Only one
+  `assume_role` block can be in the configuration.
+- `endpoints` - (Optional) Configuration block for customizing service endpoints. See the
+[Custom Service Endpoints Guide](https://www.terraform.io/docs/providers/aws/guides/custom-service-endpoints.html)
+for more information about connecting to alternate AWS endpoints or AWS compatible solutions.
+- `shared_credentials_file` - (Optional) This is the path to the shared
+  credentials file. If this is not set and a profile is specified,
+  `~/.aws/credentials` is used.
+- `token` - (Optional) Session token for validating temporary credentials.
+Typically provided after successful identity federation or Multi-Factor
+Authentication (MFA) login. With MFA login, this is the session token
+provided afterwards, not the 6 digit MFA code used to get temporary
+credentials. It can also be sourced from the `AWS_SESSION_TOKEN`
+environment variable.
+- `max_retries` - (Optional) This is the maximum number of times an API
+  call is retried, in the case where requests are being throttled or
+  experiencing transient failures. The delay between the subsequent API
+  calls increases exponentially.
+- `allowed_account_ids` - (Optional) List of allowed AWS
+  account IDs to prevent you from mistakenly using an incorrect one (and
+  potentially destroying a live environment). Conflicts with
+  `forbidden_account_ids`.
+- `forbidden_account_ids` - (Optional) List of forbidden
+  AWS account IDs to prevent you from mistakenly using the wrong one (and
+  potentially destroying a live environment). Conflicts with
+  `allowed_account_ids`.
+- `insecure` - (Optional) Explicitly allows the provider to
+  perform "insecure" SSL requests. If omitted, defaults to `false`.
+- `skip_credentials_validation` - (Optional) Skips the credentials
+  validation via the STS API. Useful for AWS API implementations that do
+  not have STS available or implemented.
+- `skip_get_ec2_platforms` - (Optional) Skips getting the supported EC2
+  platforms. Used by users that don't have `ec2:DescribeAccountAttributes`
+  permissions.
+- `skip_region_validation` - (Optional) Skips validation of the provided region name.
+  Useful for AWS-like implementations that use their own region names
+  or to bypass the validation for regions that aren't publicly available yet.
+
+### resource
+Resources to provision for a cluster. Resources are organized as shown in the following example:
+
+```yaml
+resource:
+  type:
+    name:
+      parameters
+```
+For a given `type`, there may be one or more named resources to provision.
+
+For a given `name`, a resource may have one or more parameters.
+
+#### aws_instance
+
+```yaml
+resource:
+  aws_instance:
+    workers:
+      instance_type: t2.xlarge
+      quantity: 3
+      os: Ubuntu 16.04
+```
+- `quantity`: (Required) The number of instances to create.
+- `os`: An alias that is expanded by `docker cluster` to the AMI owner and AMI name to install.
+The following aliases are supported by `docker cluster`:
+  - `CentOS 7`
+  - `RHEL 7.1`
+  - `RHEL 7.2`
+  - `RHEL 7.3`
+  - `RHEL 7.4`
+  - `RHEL 7.5`
+  - `RHEL 7.6`
+  - `Oracle Linux 7.3`
+  - `Oracle Linux 7.4`
+  - `Oracle Linux 7.5`
+  - `SLES 12.2`
+  - `SLES 12.3`
+  - `SLES 15`
+  - `Ubuntu 14.04`
+  - `Ubuntu 16.04`
+  - `Ubuntu 18.04`
+  - `Windows Server 2016`
+  - `Windows Server 1709`
+  - `Windows Server 1803`
+  - `Windows Server 2019`
+  > Note: Make sure the OS you select is [compatible](https://success.docker.com/article/compatibility-matrix)
+  with the product you're installing. Docker Cluster validates the support during installation.
+- `instance_type`: Specifies the [AWS instance type](https://aws.amazon.com/ec2/instance-types/) to provision.
+- `key_name`: By default, Docker Cluster creates an [AWS EC2 Key Pair](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) and registers it with AWS for the cluster.
+To use an existing AWS EC2 Key Pair, set this value to the name of the AWS EC2 Key Pair.
+- `ssh_private_key`: By default, Docker Cluster creates an [AWS EC2 Key Pair](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) and registers it with AWS for the cluster. To use an existing AWS EC2 Key Pair, set this value to the path of the private SSH key.
+- `username`: Specifies the username for the node with Administrative privileges. By default, the `os` option
+sets this to the well-known username for the AMIs (which can change by distribution):
+  - Amazon Linux 2 is `ec2-user`.
+  - Centos is `centos`.
+  - Oracle Linux is `ec2-user`.
+  - RedHat is `ec2-user`.
+  - SLES is `ec2-user`.
+  - Ubuntu is `ubuntu`.
+  - Windows is `Administrator`.
+- `password`: This value is only used by Windows nodes. By default, Windows nodes have a random password generated.
+- `ami`: Specifies a custom AMI, or one that's not currently available as an OS alias. Specify either the `id`, or
+the `owner` and `name` to query for the latest matching AMI.
+  - `id`: Specifies the ID of the AMI. For example, `ami-0510c89f1a2691cf2`.
+  - `owner`: Specifies the AWS account ID of the image owner. For example, `099720109477`.
+  - `name`: Specifies the name of the AMI that was provided during image creation. For example, `ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-*`.
+  - `platform`: Specify `windows` for Windows instances.
+- `tags`: (Optional) Specifies additional name value pairs to assign to every instance.
+- `swarm_labels`: (Optional) Specifies additional key value pairs that represent swarm labels to apply to every node.
+
+#### aws_spot_instance_request
+
+Provisions a spot instance request in AWS to dramatically reduce the cost of instances. Spot instance
+availability is not guaranteed. Therefore, it is recommended to use `aws_spot_instance_request` for
+additional worker nodes and not for mission-critical nodes like managers and registry.
+
+```yaml
+resource:
+  aws_spot_instance_request:
+    workers:
+      instance_type: t2.xlarge
+      price: 0.25
+      os: Ubuntu 16.04
+      quantity: 3
+```
+
+Supports the same set of parameters as [aws_instance](#aws_instance), with the addition of an optional `price` to limit the maximum bid for a spot instance.
+- `price`: (Optional) Specifies a maximum price to bid on the spot instance.
+
+#### aws_lb
+Provisions an AWS Load Balancer.
+```yaml
+resource:
+  aws_lb:
+    ucp:
+      domain: "example.com"
+      instances:
+      - managers
+      ports:
+      - 443:443
+      - 6443:6443
+```
+The following options are supported:
+
+- `instances`: (Required) Specifies a list of `aws_instance` and `aws_spot_instance_request` names to
+attach to the load balancer.
+- `ports`: (Required) Specifies a list of `listening port[/protocol]:target port[/protocol]` mappings
+to define how the load balancer should route traffic. By default, the protocol is `tcp`.
+- `domain`: Specifies the domain in which to create DNS records for this load balancer. The record is named
+after this resource, with the domain appended. For example, if the resource is `ucp` and the domain is `example.com`,
+the `A` record is `ucp.example.com`.
+- `internal`: (Optional) Defaults to `false`.
+- `type`: (Optional) Defaults to `network`.
+- `enable_cross_zone_load_balancing`: (Optional) Defaults to `false`.
+
+#### aws_route53_zone
+Creates a subdomain in an AWS route53 zone. The following example creates a public zone for `testing.example.com`:
+
+```yaml
+resource:
+  aws_route53_zone:
+    dns:
+      domain: example.com
+      subdomain: testing
+```
+The following elements are required:
+- `domain`: (Required) Specifies the name of the hosted zone.
+- `subdomain`: (Required) Specifies the subdomain to create in the `domain` hosted zone.
+
+### variable
+Docker Cluster supports basic parameterization. The variable section defines a map of keys and values. A key can have a sub-key named `type`, which changes the behavior of the variable.
+
+```yaml
+variable:
+  region: "us-east-1"
+  password:
+    type: prompt
+```
+
+Variables are referenced in the cluster definition as `${variable_name}`. For example, `${region}` is substituted as `us-east-1` throughout the cluster definition.
+
+The type defines how the variable behaves. This is currently limited in scope to:
+- `prompt`: Requests the value from the user and does not echo the characters as the value is entered.
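+
+When a variable has type `prompt`, `docker cluster` asks for the value at
+create time without echoing it, as shown in the quickstart:
+
+```
+$ docker cluster create --file cluster.yml --name quickstart
+Please provide a value for ucp_password
+```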
diff --git a/cluster/cluster-file/index.md b/cluster/cluster-file/index.md deleted file mode 100644 index b712d97ec0..0000000000 --- a/cluster/cluster-file/index.md +++ /dev/null @@ -1,527 +0,0 @@ ---- -description: Cluster file reference and guidelines -keywords: documentation, docs, docker, cluster, infrastructure, automation -title: Cluster file version 1 reference -toc_max: 5 -toc_min: 1 ---- - -This topic describes version 1 of the Cluster file format. - -## Cluster file structure and examples -``` -
- -
-

-    variable:
-      domain: "YOUR DOMAIN, e.g. docker.com"
-      subdomain: "A SUBDOMAIN, e.g. cluster"
-      region: "THE AWS REGION TO DEPLOY, e.g. us-east-1"
-      email: "YOUR.EMAIL@COMPANY.COM"
-      ucp_password:
-        type: prompt
-    provider:
-      acme:
-        email: ${email}
-        server_url: https://acme-staging-v02.api.letsencrypt.org/directory
-      aws:
-        region: ${region}
-    cluster:
-      dtr:
-        version: docker/dtr:2.6.5
-      engine:
-        version: ee-stable-18.09.5
-      ucp:
-        username: admin
-        password: ${ucp_password}
-        version: docker/ucp:3.1.6
-    resource:
-      aws_instance:
-        managers:
-          instance_type: t2.xlarge
-          os: Ubuntu 16.04
-          quantity: 3
-        registry:
-          instance_type: t2.xlarge
-          os: Ubuntu 16.04
-          quantity: 3
-        workers:
-          instance_type: t2.xlarge
-          os: Ubuntu 16.04
-          quantity: 3
-      aws_lb:
-        apps:
-          domain: ${subdomain}.${domain}
-          instances:
-          - workers
-          ports:
-          - 80:8080
-          - 443:8443
-        dtr:
-          domain: ${subdomain}.${domain}
-          instances:
-          - registry
-          ports:
-          - 443:443
-        ucp:
-          domain: ${subdomain}.${domain}
-          instances:
-          - managers
-          ports:
-          - 443:443
-          - 6443:6443
-      aws_route53_zone:
-        dns:
-          domain: ${domain}
-          subdomain: ${subdomain}
-
-
-
-``` -The topics on this reference page are organized alphabetically by top-level keys -to reflect the structure of the Cluster file. Top-level keys that define -a section in the configuration file, such as `cluster`, `provider`, and `resource`, -are listed with the options that support them as sub-topics. This information -maps to the indent structure of the Cluster file. - -### cluster -Specifies components to install and configure for a cluster. - -The following components are available: - -- `subscription`: (Optional) A string value representing the subscription ID. -- `license`: (Optional) A path to the cluster's license file. -- `cloudstor`: (Optional) Configuration options for Docker CloudStor -- `dtr`: (Optional) Configuration options for Docker Trusted Registry -- `engine`: (Optional) Configuration options for Docker Engine -- `ucp`: (Optional) Configuration options for Docker Universal Control Plane -- `registry`: (Optional) Configuration options for authenticating nodes with a registry to pull Docker images. - -#### cloudstor -Customizes the installation of Docker Cloudstor. - -- `version`: (Optional) The version of Cloudstor to install. Default is `1.0` -- `use_efs`: (Optional) Specifies whether an Elastic File System should be provisioned. Defaults to `false`. - -#### dtr -Customizes the installation of Docker Trusted Registry. -```yaml -cluster: - dtr: - version: "docker/dtr:2.6.5" - install_options: - - "--debug" - - "--enable-pprof" -``` - -The following optional elements can be specified: - -- `version`: (Optional) The version of DTR to install. Defaults to `docker/dtr:2.6.5`. -- `ca`: (Optional) The path to a root CA public certificate. -- `key`: (Optional) The path to a TLS private key. -- `cert`: (Optional) The path to a public key certificate. -- `install_options`: (Optional) Additional [DTR install options](https://docs.docker.com/reference/dtr/2.6/cli/install/) - -#### engine -Customizes the installation of Docker Enterprise Engine. -```yaml -cluster: - engine: - channel: "stable" - edition: "ee" - version: "19.03" -``` - -The following optional elements can be specified: -- `version`: (Optional) The version of the Docker Engine to install. Defaults to `19.03`. -- `edition`: (Optional) The family of Docker Engine to install. Defaults to `ee` for Enterprise edition. -- `channel`: (Optional) The channel on the repository to pull updated packages. Defaults to `stable`. -- `url`: (Optional) Defaults to "https://storebits.docker.com/ee". -- `storage_driver`: (Optional) The storage driver to use for the storage volume. Default -value is dependent on the operating system. - - Amazon Linux 2 is `overlay2`. - - Centos is `overlay2`. - - Oracle Linux is `overlay2`. - - RedHat is `overlay2`. - - SLES is `btrfs`. - - Ubuntu is `overlay2`. -- `storage_fstype`: (Optional) File system to use for storage volume. Default value is dependent on the operating system. - - Amazon Linux 2 is `xfs`. - - Centos is `xfs`. - - Oracle Linux is `xfs`. - - RedHat is `xfs`. - - SLES is `btrfs`. - - Ubuntu is `ext4`. -- `storage_volume`: (Optional) Docker storage volume path for `/var/lib/docker` Default value is provider dependent. - - AWS - - non-NVME is `/dev/xvdb`. - - NVME disks are one of `/dev/nvme[0-26]n1`. - - Azure is `/dev/disk/azure/scsi1/lun0`. -- `daemon`: (Optional) Provides docker daemon options. Defaults to "". -- `ca`: (dev) Defaults to "". -- `key`: (dev) Defaults to "". -- `enable_remote_tcp`: (dev) Enables direct access to docker engine. Defaults to `false`. 
- -*dev indicates that the functionality is only for development and testing. - -#### kubernetes -Enables provider-specific options for Kubernetes support. - -##### AWS Kubernetes options - -- `cloud_provider`: (Optional)Enable cloud provider support for Kubernetes. Defaults to `false`. -- `ebs_persistent_volumes`: (Optional) Enable persistent volume support with EBS volumes. Defaults to `false`. -- `efs_persistent_volumes`: (Optional) Enable persistent volume support with EFS. Defaults to `false`. -- `load_balancer`: (Optional) Enable Kubernetes pods to instantiate a load-balancer. Defaults to `false`. -- `nfs_storage`: (Optional) Install additional packages on node for NFS support. Defaults to `false`. -- `lifecycle`: (Optional) Defaults to `owned`. - -#### registry -Customizes the registry from which the installation should pull images. By default, Docker Hub and credentials to access Docker Hub are used. - -```yaml -cluster: - registry: - password: ${base64decode("TVJYeTNDQWpTSk5HTW1ZRzJQcE1kM0tVRlQ=")} - url: https://index.docker.io/v1/ - username: user -``` - -The following optional elements can be specified: -- `username`: The username for logging in to the registry on each node. Default value is the current docker user. -- `url`: The registry to use for pulling Docker images. Defaults to "https://index.docker.io/v1/". -- `password`: The password for logging in to the registry on each node. Default value is the current docker user's password base64 encoded and wrapped in a call to base64decode. - -#### ucp - -- `version`: Specifies the version of UCP to install. Defaults to `docker/ucp:3.1.6`. -- `username`: Specifies the username of the first user to create in UCP. Defaults to `admin`. -- `password`: Specifies the password of the first user to create in UCP. Defaults to `dockerdocker`. -- `ca`: Specifies a path to a root CA public certificate. -- `key`: Specifies a path to a TLS private key. -- `cert`: Specifies a path to a public key certificate. -- `install_options`: Lists additional [UCP install options](https://docs.docker.com/reference/ucp/3.1/cli/install/) - -##### Additional UCP configuration options: -Docker Cluster also accepts all UCP configuration options and creates the initial UCP config on -installation. The following list provides supported options: -- `anonymize_tracking`: Anonymizes analytic data. Specify 'true' to hide the license ID. Defaults to 'false'. -- `audit_level`: Specifies the audit logging level. Leave empty for disabling audit logs (default). -Other valid values are 'metadata' and 'request'. -- `auto_refresh`: Specify 'true' to enable attempted automatic license renewal when the license -nears expiration. If disabled, you must manually upload renewed license after expiration. Defaults to 'true'. -- `azure_ip_count`: Sets the IP count for azure allocator to allocate IPs per Azure virtual machine. -- `backend`: Specifie the name of the authorization backend to use, either 'managed' or 'ldap'. Defaults to 'managed'. -- `calico_mtu`: Specifies the MTU (maximum transmission unit) size for the Calico plugin. Defaults to '1480'. -- `cloud_provider`: Specifies the cloud provider for the kubernetes cluster. -- `cluster_label`: Specifies a label to be included with analytics/. -- `cni_installer_url`: Specifies the URL of a Kubernetes YAML file to be used for installing a CNI plugin. -Only applies during initial installation. If empty, the default CNI plugin is used. -- `controller_port`: Configures the port that the 'ucp-controller' listens to. 
Defaults to '443'. -- `custom_header_name`: Specifies the name of the custom header with 'name' = '*X-Custom-Header-Name*'. -- `custom_header_value`: Specifies the value of the custom header with 'value' = '*Custom Header Value*'. -- `default_new_user_role`: Specifies the role that new users get for their private resource sets. -Values are 'admin', 'viewonly', 'scheduler', 'restrictedcontrol', or 'fullcontrol'. Defaults to 'restrictedcontrol'. -- `default_node_orchestrator`: Specifies the type of orchestrator to use for new nodes that are -joined to the cluster. Can be 'swarm' or 'kubernetes'. Defaults to 'swarm'. -- `disable_tracking`: Specify 'true' to disable analytics of API call information. Defaults to 'false'. -- `disable_usageinfo`: Specify 'true' to disable analytics of usage information. Defaults to 'false'. -- `dns`: Specifies a CSV list of IP addresses to add as nameservers. -- `dns_opt`: Specifies a CSV list of options used by DNS resolvers. -- `dns_search`: Specifies a CSV list of domain names to search when a bare unqualified hostname is -used inside of a container. -- `enable_admin_ucp_scheduling`: Specify 'true' to allow admins to schedule on containers on manager nodes. -Defaults to 'false'. -- `external_service_lb`: Specifies an optional external load balancer for default links to services with -exposed ports in the web interface. -- `host_address`: Specifies the address for connecting to the DTR instance tied to this UCP cluster. -- `log_host`: Specifies a remote syslog server to send UCP controller logs to. If omitted, controller -logs are sent through the default docker daemon logging driver from the 'ucp-controller' container. -- `idpMetadataURL`: Specifies the Identity Provider Metadata URL. -- `image_repository`: Specifies the repository to use for UCP images. -- `install_args`: Specifies additional arguments to pass to the UCP installer. -- `ipip_mtu`: Specifies the IPIP MTU size for the calico IPIP tunnel interface. -- `kube_apiserver_port`: Configures the port to which the Kubernetes API server listens. -- `kv_snapshot_count`: Sets the key-value store snapshot count setting. Defaults to '20000'. -- `kv_timeout`: Sets the key-value store timeout setting, in milliseconds. Defaults to '5000'. -- `lifetime_minutes`: Specifies the initial session lifetime, in minutes. Defaults to `4320`, which is 72 hours. -- `local_volume_collection_mapping`: Stores data about collections for volumes in UCP's local KV store -instead of on the volume labels. This is used for enforcing access control on volumes. -- `log_level`: Specifies the logging level for UCP components. Values are syslog priority -levels (https://linux.die.net/man/5/syslog.conf): 'debug', 'info', 'notice', 'warning', 'err', 'crit', 'alert', -and 'emerg'. -- `managedPasswordDisabled`: Indicates if managed password is disabled. Defaults to false. -- `managedPasswordFallbackUser`: The fallback user when the managed password authentication is disabled. Defaults to "". -- `manager_kube_reserved_resources`: Specifies reserve resources for Docker UCP and Kubernetes components -that are running on manager nodes. -- `metrics_disk_usage_interval`: Specifies the interval for how frequently storage metrics are gathered. -This operation can impact performance when large volumes are present. -- `metrics_retention_time`: Adjusts the metrics retention time. -- `metrics_scrape_interval`: Specifies the interval for how frequently managers gather metrics from nodes in the cluster. 
-- `nodeport_range`: Specifies the port range that for Kubernetes services of type NodePort can be exposed in. -Defaults to '32768-35535'. -- `per_user_limit`: Specifies the maximum number of sessions that a user can have active simultaneously. If -the creation of a new session would put a user over this limit, the least recently used session is deleted. -A value of zero disables limiting the number of sessions that users can have. Defaults to `5`. -- `pod_cidr`: Specifies the subnet pool from which the IP for the Pod should be allocated from the CNI ipam plugin. -- `profiling_enabled`: Specify 'true' to enable specialized debugging endpoints for profiling UCP performance. -Defaults to 'false'. -- `log_protocol`: Specifies the protocol to use for remote logging. Values are 'tcp' and 'udp'. Defaults to 'tcp'. -- `renewal_threshold_minutes`: Specifies the length of time, in minutes, before the expiration of a -session. When used, a session is extended by the current configured lifetime from that point in time. A zero value disables session extension. Defaults to `1440`, which is 24 hours. -- `require_content_trust`: Specify 'true' to require images be signed by content trust. Defaults to 'false'. -- `require_signature_from`: Specifies a csv list of users or teams required to sign images. -- `rethinkdb_cache_size`: Sets the size of the cache used by UCP's RethinkDB servers. TDefaults to 1GB, -but leaving this field empty or specifying `auto` instructs RethinkDB to determine a cache size automatically. -- `rootCerts`: Defaults to empty. -- `samlEnabled`: Indicates if saml is used. -- `samlLoginText`: Specifies the customized SAML login button text. -- `service_id`: Specifies the DTR instance's OpenID Connect Client ID, as registered with the Docker -authentication provider. -- `spHost`: Specifies the Service Provider Host. -- `storage_driver`: Specifies the UCP storage driver to install. -- `support_dump_include_audit_logs`: When set to `true`, support dumps include audit logs in the logs -of the 'ucp-controller' container of each manager node. Defaults to 'false'. -- `swarm_port`: Configures the port that the 'ucp-swarm-manager' listens to. Defaults to '2376'. -- `swarm_strategy`: Configures placement strategy for container scheduling. -This doesn't affect swarm-mode services. Values are 'spread', 'binpack', and 'random'. -- `tlsSkipVerify`: Specifies TLS Skip verify for IdP Metadata. -- `unmanaged_cni`: Defaults to 'false'. -- `worker_kube_reserved_resources`: Reserves resources for Docker UCP and Kubernetes components -that are running on worker nodes. -- `custom_kube_api_server_flags`: Specifies the configuration options for the Kubernetes API server. (dev) -- `custom_kube_controller_manager_flags`: Specifies the configuration options for the Kubernetes controller manager. (dev) -- `custom_kube_scheduler_flags`: Specifies the configuration options for the Kubernetes scheduler. (dev) -- `custom_kubelet_flags`: Specifies the configuration options for Kubelets. (dev) - -*dev indicates that the functionality is only for development and testing. Arbitrary Kubernetes configuration parameters are not tested and supported under the Docker Enterprise Software Support Agreement. - -### provider -Defines where the cluster's resources are provisioned, as well as provider-specific configuration such as tags. 
- -```yaml -provider: - acme: - email: ${email} - server_url: https://acme-staging-v02.api.letsencrypt.org/directory - aws: - region: ${region} -``` - -#### acme -The Automated Certificate Management Environment (ACME) is an evolving standard for the automation of a domain-validated certificate authority. Docker Cluster uses the ACME provider to create SSL certificates that are signed by [Let's Encrypt](https://letsencrypt.org/). - -The ACME provider Configuration for the ACME provider supports arguments that closely align with the [Terraform ACME provider](https://www.terraform.io/docs/providers/acme/index.html): - -The following elements can be specified: -- `email`: (Required) The email to associate the certificates with. -- `server_url`: (Optional) The URL to the ACME endpoint's directory. Default is "https://acme-v02.api.letsencrypt.org/directory" - -#### aws -Configuration for the AWS provider supports arguments that closely align with the [Terraform AWS provider](https://www.terraform.io/docs/providers/aws/index.html). - -```yaml -aws: - region: "us-east-1" - tags: - Owner: "Infra" - Environment: "Test" -``` -The following elements can be specified: -- `region` - (Required) This is the AWS region. It can be sourced from the `AWS_DEFAULT_REGION` environment variables, or - via a shared credentials file if `profile` is specified. -- `tags` - (Optional) Additional name value pairs to assign to every resource (which - supports tagging) in the cluster. -- `access_key` - (Required) This is the AWS access key. It can be sourced from -the `AWS_ACCESS_KEY_ID` environment variable, or via - a shared credentials file if `profile` is specified. -- `secret_key` - (Required) This is the AWS secret key. It can be sourced from -the `AWS_SECRET_ACCESS_KEY` environment variable, or - via a shared credentials file if `profile` is specified. -- `profile` - (Optional) This is the AWS profile name as set in the shared credentials - file. -- `assume_role` - (Optional) An `assume_role` block (documented below). Only one - `assume_role` block can be in the configuration. -- `endpoints` - (Optional) Configuration block for customizing service endpoints. See the -[Custom Service Endpoints Guide](/docs/providers/aws/guides/custom-service-endpoints.html) -for more information about connecting to alternate AWS endpoints or AWS compatible solutions. -- `shared_credentials_file` = (Optional) This is the path to the shared - credentials file. If this is not set and a profile is specified, - `~/.aws/credentials` is used. -- `token` - (Optional) Session token for validating temporary credentials. -Typically provided after successful identity federation or Multi-Factor -Authentication (MFA) login. With MFA login, this is the session token -provided afterwards, not the 6 digit MFA code used to get temporary -credentials. It can also be sourced from the `AWS_SESSION_TOKEN` -environment variable. -- `max_retries` - (Optional) This is the maximum number of times an API - call is retried, in the case where requests are being throttled or - experiencing transient failures. The delay between the subsequent API - calls increases exponentially. -- `allowed_account_ids` - (Optional) List of allowed, white listed, AWS - account IDs to prevent you from mistakenly using an incorrect one (and - potentially end up destroying a live environment). Conflicts with - `forbidden_account_ids`. 
-- `forbidden_account_ids` - (Optional) List of forbidden (blacklisted)
-AWS account IDs to prevent you from mistakenly using one of them (and
-potentially destroying a live environment). Conflicts with
-`allowed_account_ids`.
-- `insecure` - (Optional) Explicitly allows the provider to
-perform "insecure" SSL requests. If omitted, defaults to `false`.
-- `skip_credentials_validation` - (Optional) Skips the credentials
-validation via the STS API. Useful for AWS API implementations that do
-not have STS available or implemented.
-- `skip_get_ec2_platforms` - (Optional) Skips getting the supported EC2
-platforms. Used by users that don't have `ec2:DescribeAccountAttributes`
-permissions.
-- `skip_region_validation` - (Optional) Skips validation of the provided region name.
-Useful for AWS-like implementations that use their own region names
-or to bypass the validation for regions that aren't publicly available yet.
-
-### resource
-Resources to provision for a cluster. Resources are organized as shown in the following example:
-
-```yaml
-resource:
-  type:
-    name:
-      parameters
-```
-For a given `type`, there may be one or more named resources to provision.
-
-For a given `name`, a resource may have one or more parameters.
-
-#### aws_instance
-
-```yaml
-resource:
-  aws_instance:
-    workers:
-      quantity: 3
-      instance_type: t2.xlarge
-      os: Ubuntu 16.04
-```
-- `quantity`: (Required) The number of instances to create.
-- `os`: An alias that is expanded by `docker cluster` to the AMI owner and AMI name to install.
-The following aliases are supported by `docker cluster`:
-  - `CentOS 7`
-  - `RHEL 7.1`
-  - `RHEL 7.2`
-  - `RHEL 7.3`
-  - `RHEL 7.4`
-  - `RHEL 7.5`
-  - `RHEL 7.6`
-  - `Oracle Linux 7.3`
-  - `Oracle Linux 7.4`
-  - `Oracle Linux 7.5`
-  - `SLES 12.2`
-  - `SLES 12.3`
-  - `SLES 15`
-  - `Ubuntu 14.04`
-  - `Ubuntu 16.04`
-  - `Ubuntu 18.04`
-  - `Windows Server 2016`
-  - `Windows Server 1709`
-  - `Windows Server 1803`
-  - `Windows Server 2019`
-  > Note: Make sure the OS you select is [compatible](https://success.docker.com/article/compatibility-matrix)
-  with the product you're installing. Docker Cluster validates support during installation.
-- `instance_type`: Specifies the [AWS instance type](https://aws.amazon.com/ec2/instance-types/) to provision.
-- `key_name`: By default, Docker Cluster creates an [AWS EC2 Key Pair](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) and registers it with AWS for the cluster.
-To use an existing AWS EC2 Key Pair, set this value to the name of the AWS EC2 Key Pair.
-- `ssh_private_key`: By default, Docker Cluster creates an [AWS EC2 Key Pair](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) and registers it with AWS for the cluster. To use an existing AWS EC2 Key Pair, set this value to the path of the private SSH key.
-- `username`: Specifies the username of the account with administrative privileges on the node. By default, the `os` option
-sets this to the well-known username for the AMIs (which varies by distribution):
-  - Amazon Linux 2 is `ec2-user`.
-  - CentOS is `centos`.
-  - Oracle Linux is `ec2-user`.
-  - Red Hat is `ec2-user`.
-  - SLES is `ec2-user`.
-  - Ubuntu is `ubuntu`.
-  - Windows is `Administrator`.
-- `password`: This value is only used by Windows nodes. By default, a random password is generated for Windows nodes.
-- `ami`: Specifies a custom AMI, or one that's not available as an `os` alias. Specify either the `id`, or
-the `owner` and `name` to query for the latest matching AMI.
-  - `id`: Specifies the ID of the AMI. For example, `ami-0510c89f1a2691cf2`.
-  - `owner`: Specifies the AWS account ID of the image owner. For example, `099720109477`.
-  - `name`: Specifies the name of the AMI that was provided during image creation. For example, `ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-*`.
-  - `platform`: Specify `windows` for Windows instances.
-- `tags`: (Optional) Specifies additional name-value pairs to assign to every instance.
-- `swarm_labels`: (Optional) Specifies additional key-value pairs that represent swarm labels to apply to every node.
-
-#### aws_spot_instance_request
-
-Provisions a spot instance request in AWS to dramatically reduce the cost of instances. Spot instance
-availability is not guaranteed. Therefore, it is recommended to use `aws_spot_instance_request` for
-additional worker nodes and not for mission-critical nodes like managers and registry nodes.
-
-```yaml
-resource:
-  aws_spot_instance_request:
-    workers:
-      instance_type: t2.xlarge
-      price: 0.25
-      os: Ubuntu 16.04
-      quantity: 3
-```
-
-Supports the same set of parameters as [aws_instance](index.md#aws_instance), with the addition of an optional `price` to limit the maximum bid for a spot instance.
-- `price`: (Optional) Specifies a maximum price to bid on the spot instance.
-
-#### aws_lb
-Provisions an AWS Load Balancer.
-```yaml
-resource:
-  aws_lb:
-    ucp:
-      domain: "example.com"
-      instances:
-      - managers
-      ports:
-      - 443:443
-      - 6443:6443
-```
-The following options are supported:
-
-- `instances`: (Required) Specifies a list of `aws_instance` and `aws_spot_instance_request` names to
-attach to the load balancer.
-- `ports`: (Required) Specifies a list of `listening port[/protocol]:target port[/protocol]` mappings
-to define how the load balancer should route traffic. By default, the protocol is `tcp`.
-- `domain`: Specifies the domain in which to create DNS records for this load balancer. The record uses
-the name of this resource appended with the domain. For example, if the resource is `ucp` and the domain is `example.com`,
-the `A` record is `ucp.example.com`.
-- `internal`: (Optional) Defaults to `false`.
-- `type`: (Optional) Defaults to `network`.
-- `enable_cross_zone_load_balancing`: (Optional) Defaults to `false`.
-
-#### aws_route53_zone
-Creates a subdomain in an AWS Route 53 zone. The following example creates a public zone for `testing.example.com`:
-
-```yaml
-resource:
-  aws_route53_zone:
-    dns:
-      domain: example.com
-      subdomain: testing
-```
-The following elements are required:
-- `domain`: (Required) Specifies the name of the hosted zone.
-- `subdomain`: (Required) Specifies the subdomain to create in the `domain` hosted zone.
-
-### variable
-Docker Cluster supports basic parameterization. The variable section defines a map of keys and values. A key can have a sub-key named `type`, which changes the behavior of the variable.
-
-```yaml
-variable:
-  region: "us-east-1"
-  password:
-    type: prompt
-```
-
-Variables are referenced in the cluster definition as `${variable_name}`. For example, `${region}` is substituted with `us-east-1` throughout the cluster definition.
-
-The `type` sub-key defines how the variable behaves. This is currently limited in scope to:
-- `prompt`: Requests the value from the user and does not echo characters as the value is entered.
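To make the `prompt` type concrete, here is a minimal sketch of a prompted secret being consumed elsewhere in the definition (the `ucp_password` name and the `cluster.ucp` section mirror the quick-start example; only the combination shown here is illustrative):

```yaml
variable:
  # The value is requested interactively at run time and is not echoed.
  ucp_password:
    type: prompt

cluster:
  ucp:
    username: "admin"
    # Substituted with the prompted value throughout the definition.
    password: ${ucp_password}
```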
diff --git a/cluster/overview.md b/cluster/index.md similarity index 65% rename from cluster/overview.md rename to cluster/index.md index 0bed1a6932..ec2413b796 100644 --- a/cluster/overview.md +++ b/cluster/index.md @@ -19,30 +19,34 @@ Using Docker Cluster is a three-step process: A `cluster.yml` file resembles the following example: - variable: - region: us-east-2 - ucp_password: - type: prompt +{% raw %} +```yaml +variable: + region: us-east-2 + ucp_password: + type: prompt - provider: - aws: - region: ${region} +provider: + aws: + region: ${region} - cluster: - engine: - version: "ee-stable-18.09.5" - ucp: - version: "docker/ucp:3.1.6" - username: "admin" - password: ${ucp_password} +cluster: + engine: + version: "ee-stable-18.09.5" + ucp: + version: "docker/ucp:3.1.6" + username: "admin" + password: ${ucp_password} - resource: - aws_instance: - managers: - quantity: 1 +resource: + aws_instance: + managers: + quantity: 1 +``` +{% endraw %} For more information about Cluster files, refer to the -[Cluster file reference](cluster-file/index.md). +[Cluster file reference](cluster-file.md). Docker Cluster has commands for managing the whole lifecycle of your cluster: @@ -52,9 +56,9 @@ Docker Cluster has commands for managing the whole lifecycle of your cluster: * View the status of clusters * Backup and Restore clusters -## Cluster documentation +## Cluster reference pages - [Get started with Docker Cluster on AWS](aws.md) -- [Command line reference](./reference/index.md) -- [Cluster file reference](./cluster-file/index.md) +- [Command line reference](/engine/reference/commandline/cluster/) +- [Cluster file reference](./cluster-file.md) diff --git a/cluster/reference/backup.md b/cluster/reference/backup.md deleted file mode 100644 index cd4539d8da..0000000000 --- a/cluster/reference/backup.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -description: Back up a running cluster -keywords: documentation, docs, docker, cluster, infrastructure, automation -title: docker cluster backup -notoc: true ---- - -## Usage -``` -docker cluster backup [OPTIONS] cluster -``` - -Use the following options as needed to back up a running cluster: - -- `--dry-run`: Skips resource provisioning. -- `--file string`: Specifies a cluster backup filename. Defaults to `backup.tar.gz`. -- `--log-level string`: Specifies the logging level. Valid values include: `trace`,`debug`,`info`,`warn`,`error`, and `fatal`. -Defaults to `warn`. -- `--passphrase string`: Specifies a cluster backup passphrase. - -The backup command performs a full Docker Cluster backup following the steps found in [Backup and Restore Best Practices](https://success.docker.com/article/backup-restore-best-practices). diff --git a/cluster/reference/index.md b/cluster/reference/index.md deleted file mode 100644 index 040812961c..0000000000 --- a/cluster/reference/index.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -description: Cluster CLI reference -keywords: documentation, docs, docker, cluster, infrastructure, automation -title: Cluster command-line reference -notoc: true ---- - -The following pages describe the usage information for the [docker cluster](overview) subcommands. You can also view this information by running `docker cluster [subcommand] --help` from the command line. 
-
-* [docker cluster](overview)
-* [backup](backup)
-* [create](create)
-* [inspect](inspect)
-* [logs](logs)
-* [ls](ls)
-* [restore](restore)
-* [rm](rm)
-* [update](update)
-* [version](version)
-
-## Where to go next
-
-* [CLI environment variables](envvars)
-* [docker cluster command](overview)
diff --git a/cluster/reference/inspect.md b/cluster/reference/inspect.md
deleted file mode 100644
index ba0149687e..0000000000
--- a/cluster/reference/inspect.md
+++ /dev/null
@@ -1,16 +0,0 @@
----
-description: Inspect clusters
-keywords: documentation, docs, docker, cluster, infrastructure, automation
-title: docker cluster inspect
-notoc: true
----
-
-## Usage
-```
-docker cluster inspect [OPTIONS] cluster
-```
-Use the following options as needed to display detailed information about a cluster:
-
-- `-a, --all`: Displays complete information about the cluster.
-- `--dry-run`: Skips resource provisioning.
-- `--log-level string`: Specifies the logging level. Valid values include: `trace`,`debug`,`info`,`warn`,`error`, and `fatal`. Defaults to `warn`.
diff --git a/cluster/reference/ls.md b/cluster/reference/ls.md
deleted file mode 100644
index 093944e88f..0000000000
--- a/cluster/reference/ls.md
+++ /dev/null
@@ -1,16 +0,0 @@
----
-description: List all available clusters
-keywords: documentation, docs, docker, cluster, infrastructure, automation
-title: docker cluster ls
-notoc: true
----
-
-## Usage
-```
-docker cluster ls [OPTIONS]
-```
-Use the following options as needed to list all available clusters:
-
-- `--dry-run`: Skips resource provisioning.
-- `--log-level string`: Specifies the logging level. Valid values include: `trace`,`debug`,`info`,`warn`,`error`, and `fatal`. Defaults to `warn`.
-- `-q`, `--quiet`: Displays only numeric IDs.
diff --git a/cluster/reference/overview.md b/cluster/reference/overview.md
deleted file mode 100644
index fb3d2ccaf9..0000000000
--- a/cluster/reference/overview.md
+++ /dev/null
@@ -1,49 +0,0 @@
----
-description: Overview of docker cluster CLI
-keywords: documentation, docs, docker, cluster, infrastructure, automation
-title: Overview of docker cluster CLI
----
-
-This page provides usage information for the `docker cluster` CLI plugin command options.
-
-You can also view this information by running `docker cluster --help` from the
-command line.
-
-## Usage
-```
-docker cluster [Options] [Commands]
-```
-
-Options:
-
-- `--dry-run`: Skips resource provisioning.
-- `--log-level string`: Specifies the logging level. Valid values include: `trace`,`debug`,`info`,`warn`,`error`, and `fatal`. Defaults to `warn`.
-
-Commands:
-
-- `backup`: Backs up a running cluster.
-- `begin`: Creates an example cluster declaration.
-- `create`: Creates a new Docker cluster.
-- `inspect`: Provides detailed information about a cluster.
-- `logs`: Fetches cluster logs.
-- `ls`: Lists all available clusters.
-- `restore`: Restores a cluster from a backup.
-- `rm`: Removes a cluster.
-- `update`: Updates a running cluster's desired state.
-- `version`: Displays Version, Commit, and Build type.
-
-Run 'docker cluster [Command] --help' for more information about a command.
-
-## Specify name and path of one or more cluster files
-
-Use the `-f` flag to specify the location of a cluster configuration file.
-
-## Set up environment variables
-
-You can set [environment variables](envvars) for various
-`docker cluster` options, including the `-f` and `-p` flags.
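As a quick illustration of the flags documented above, a typical session might look like the following sketch (the cluster name `mycluster` is hypothetical, and passing `-f` to `create` assumes the `-f` behavior described under "Specify name and path of one or more cluster files"):

```bash
# Walk through a cluster definition without provisioning any resources.
docker cluster create -f cluster.yml --dry-run

# List available clusters, showing only numeric IDs.
docker cluster ls -q

# Display complete information about one cluster with verbose logging.
docker cluster inspect --all --log-level debug mycluster
```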
-
-## Where to go next
-
-* [CLI environment variables](envvars)
diff --git a/cluster/reference/restore.md b/cluster/reference/restore.md
deleted file mode 100644
index e47f70bf51..0000000000
--- a/cluster/reference/restore.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-description: Restore to a running cluster
-keywords: documentation, docs, docker, cluster, infrastructure, automation
-title: docker cluster restore
-notoc: true
----
-
-## Usage
-```
-docker cluster restore [OPTIONS] cluster
-```
-Use the following options as needed to restore a cluster from a backup:
-
-- `--dry-run`: Skips resource provisioning.
-- `--file string`: Specifies a cluster backup filename. Defaults to `backup.tar.gz`.
-- `--log-level string`: Specifies the logging level. Valid values include:
-`trace`,`debug`,`info`,`warn`,`error`, and `fatal`. Defaults to `warn`.
-- `--passphrase string`: Specifies a cluster backup passphrase.
-
-The restore command performs a full Docker Cluster restore following the steps found in [Backup and Restore Best Practices](https://success.docker.com/article/backup-restore-best-practices).
diff --git a/cluster/reference/rm.md b/cluster/reference/rm.md
deleted file mode 100644
index ad95c788af..0000000000
--- a/cluster/reference/rm.md
+++ /dev/null
@@ -1,16 +0,0 @@
----
-description: Remove a cluster
-keywords: documentation, docs, docker, cluster, infrastructure, automation
-title: docker cluster rm
-notoc: true
----
-
-## Usage
-```
-docker cluster rm [OPTIONS] cluster
-```
-Use the following options as needed when removing a cluster:
-
-- `--dry-run`: Skips resource provisioning.
-- `-f`, `--force`: Forces removal of the cluster files.
-- `--log-level string`: Specifies the logging level. Valid values include: `trace`,`debug`,`info`,`warn`,`error`, and `fatal`. Defaults to `warn`.
diff --git a/cluster/reference/update.md b/cluster/reference/update.md
deleted file mode 100644
index de694b813f..0000000000
--- a/cluster/reference/update.md
+++ /dev/null
@@ -1,18 +0,0 @@
----
-description: Update a cluster
-keywords: documentation, docs, docker, cluster, infrastructure, automation
-title: docker cluster update
-notoc: true
----
-
-## Usage
-```
-docker cluster update [Options] cluster
-```
-Use the following options as needed to update a running cluster's desired state:
-
-Options:
-
-- `--dry-run`: Skips resource provisioning.
-- `-f`, `--file string`: Specifies the cluster definition file.
-- `--log-level string`: Specifies the logging level. Valid values include: `trace`,`debug`,`info`,`warn`,`error`, and `fatal`. Defaults to `warn`.
diff --git a/cluster/reference/version.md b/cluster/reference/version.md
deleted file mode 100644
index 70c3588956..0000000000
--- a/cluster/reference/version.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-description: Print Version
-keywords: documentation, docs, docker, cluster, infrastructure, automation
-title: docker cluster version
-notoc: true
----
-
-## Usage
-```
-docker cluster version
-```
-Use the following options as needed when printing the version, commit, and build type:
-
-- `--dry-run`: Skips resource provisioning.
-- `--log-level string`: Specifies the logging level. Valid values include: `trace`,`debug`,`info`,`warn`,`error`, and `fatal`. Defaults to `warn`.
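Taken together, the `backup` and `restore` pages above describe a round trip like the following sketch (the cluster name and the `BACKUP_PASSPHRASE` environment variable are illustrative, not taken from the reference pages):

```bash
# Write an encrypted backup of a running cluster to backup.tar.gz.
docker cluster backup --file backup.tar.gz --passphrase "$BACKUP_PASSPHRASE" mycluster

# Later, restore the same cluster from that archive.
docker cluster restore --file backup.tar.gz --passphrase "$BACKUP_PASSPHRASE" mycluster
```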
diff --git a/config/containers/resource_constraints.md b/config/containers/resource_constraints.md
index 36fab1b785..d9ebe1ee25 100644
--- a/config/containers/resource_constraints.md
+++ b/config/containers/resource_constraints.md
@@ -2,8 +2,8 @@ redirect_from:
 - /engine/articles/systemd/
 - /engine/admin/resource_constraints/
-title: "Limit a container's resources"
-description: "Limiting the system resources a container can use"
+title: "Specify a container's resources"
+description: "Specify the system resources a container can use"
 keywords: "docker, daemon, configuration"
 ---
@@ -258,3 +258,98 @@ $ docker run -it --cpu-rt-runtime=950000 \
 ```
 
 If the kernel or Docker daemon is not configured correctly, an error occurs.
+
+## GPU
+
+### Access an NVIDIA GPU
+
+#### Prerequisites
+
+Visit the official [NVIDIA drivers page](https://www.nvidia.com/Download/index.aspx)
+to download and install the proper drivers. Reboot your system once you have
+done so.
+
+Verify that your GPU is running and accessible.
+
+#### Install nvidia-container-runtime
+
+Follow the instructions at [nvidia.github.io/nvidia-container-runtime](https://nvidia.github.io/nvidia-container-runtime/)
+and then run this command:
+
+```bash
+$ apt-get install nvidia-container-runtime
+```
+
+Ensure the `nvidia-container-runtime-hook` is accessible from `$PATH`.
+
+```bash
+$ which nvidia-container-runtime-hook
+```
+
+Restart the Docker daemon.
+
+#### Expose GPUs for use
+
+Include the `--gpus` flag when you start a container to access GPU resources.
+Specify how many GPUs to use. For example:
+
+```bash
+$ docker run -it --rm --gpus all ubuntu nvidia-smi
+```
+
+This exposes all available GPUs and returns a result akin to the following:
+
+```bash
++-----------------------------------------------------------------------------+
+| NVIDIA-SMI 384.130                Driver Version: 384.130                   |
+|-------------------------------+----------------------+----------------------+
+| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
+| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
+|===============================+======================+======================|
+|   0  GRID K520           Off  | 00000000:00:03.0 Off |                  N/A |
+| N/A   36C    P0    39W / 125W |      0MiB /  4036MiB |      0%      Default |
++-------------------------------+----------------------+----------------------+
++-----------------------------------------------------------------------------+
+| Processes:                                                       GPU Memory |
+| GPU       PID   Type   Process name                              Usage      |
+|=============================================================================|
+| No running processes found                                                  |
++-----------------------------------------------------------------------------+
+```
+
+Use the `device` option to specify GPUs. For example:
+
+```bash
+$ docker run -it --rm --gpus device=GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a ubuntu nvidia-smi
+```
+
+Exposes that specific GPU.
+
+```bash
+$ docker run -it --rm --gpus '"device=0,2"' ubuntu nvidia-smi
+```
+
+Exposes the first and third GPUs.
+
+> **Note**: NVIDIA GPUs can only be accessed by systems running a single engine.
+
+#### Set NVIDIA capabilities
+
+You can set capabilities manually. For example, on Ubuntu you can run the
+following:
+
+```bash
+docker run --gpus 'all,capabilities=utility' --rm ubuntu nvidia-smi
+```
+
+This enables the `utility` driver capability, which adds the `nvidia-smi` tool to
+the container.
+
+Capabilities as well as other configurations can be set in images via
+environment variables.
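For instance, a minimal Dockerfile sketch using the nvidia-container-runtime environment variables might look like the following (the base image and the variable values are illustrative, not taken from this page):

```dockerfile
FROM ubuntu:18.04
# Expose all GPUs to containers started from this image.
ENV NVIDIA_VISIBLE_DEVICES all
# Request only the driver capabilities the workload needs.
ENV NVIDIA_DRIVER_CAPABILITIES compute,utility
```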
+More information on valid variables can be found at the
+[nvidia-container-runtime](https://github.com/NVIDIA/nvidia-container-runtime)
+GitHub page. These variables can be set in a Dockerfile.
+
+You can also use CUDA images, which set these variables automatically. See
+the [CUDA images](https://github.com/NVIDIA/nvidia-docker/wiki/CUDA) GitHub page
+for more information.
diff --git a/docker-hub/official_images.md b/docker-hub/official_images.md
index d924313c56..3f1bd8a11d 100644
--- a/docker-hub/official_images.md
+++ b/docker-hub/official_images.md
@@ -16,7 +16,7 @@ designed to:
   starting point for the majority of users.
 
 * Provide drop-in solutions for popular programming language runtimes, data
-  stores, and other services, similar to what a Platform-as-a-Service (PAAS)
+  stores, and other services, similar to what a Platform as a Service (PaaS)
   would offer.
 
 * Exemplify [`Dockerfile` best practices](/engine/userguide/eng-image/dockerfile_best-practices/)
diff --git a/docker-hub/publish/certify-images.md b/docker-hub/publish/certify-images.md
index ba5c3aef82..500aa76b61 100644
--- a/docker-hub/publish/certify-images.md
+++ b/docker-hub/publish/certify-images.md
@@ -1,6 +1,6 @@
 ---
 description: Run certification tests against your images
-keywords: Docker, docker, store, certified content, images
+keywords: Docker, Docker Hub, store, certified content, images
 title: Certify Docker images
 redirect_from:
 - /docker-store/certify-images/
@@ -30,7 +30,7 @@ The `inspectDockerImage` tool does the following:
 
 - Checks if a Linux Docker image is running `supervisord` to launch multiple services.
 
-  > Running `supervisord` in a container is not a best practice for images destined for Doctor Store. The recommended best practice is to split the multiple services into separate Docker images and run them in separate containers.
+  > Running `supervisord` in a container is not a best practice for images destined for Docker Hub. The recommended best practice is to split the multiple services into separate Docker images and run them in separate containers.
 
 - Attempts to start a container from the Docker image to ensure that the image is functional.
diff --git a/ee/desktop/admin/install/windows.md b/ee/desktop/admin/install/windows.md
index 1dd8dbdaa4..95225e83d9 100644
--- a/ee/desktop/admin/install/windows.md
+++ b/ee/desktop/admin/install/windows.md
@@ -89,7 +89,7 @@ You can also set the following properties:
 
 For example:
 
-    msiexec /i DockerDesktop.msi /quiet AUTOSTART=no STARTMENUSHORTCUT=no INSTALLDIR=”D:\Docker Desktop”
+    msiexec /i DockerDesktop.msi /quiet STARTMENUSHORTCUT=no INSTALLDIR="D:\Docker Desktop"
 
 Docker Desktop Enterprise includes a command line executable to install
 and uninstall version packs.
When you install DDE, the command line tool is installed at the following location: diff --git a/ee/desktop/images/docker-menu-context-switch.png b/ee/desktop/images/docker-menu-context-switch.png index 59f9a33344..dea6e01b94 100644 Binary files a/ee/desktop/images/docker-menu-context-switch.png and b/ee/desktop/images/docker-menu-context-switch.png differ diff --git a/ee/desktop/images/docker-menu-settings.png b/ee/desktop/images/docker-menu-settings.png index e446dfef27..88030451c0 100644 Binary files a/ee/desktop/images/docker-menu-settings.png and b/ee/desktop/images/docker-menu-settings.png differ diff --git a/ee/desktop/images/docker-menu-switch.png b/ee/desktop/images/docker-menu-switch.png index 56a7ffeb13..6a8914fed0 100644 Binary files a/ee/desktop/images/docker-menu-switch.png and b/ee/desktop/images/docker-menu-switch.png differ diff --git a/ee/desktop/images/kube-context.png b/ee/desktop/images/kube-context.png index 3e07073072..667fa4ac8e 100644 Binary files a/ee/desktop/images/kube-context.png and b/ee/desktop/images/kube-context.png differ diff --git a/ee/desktop/images/prefs.png b/ee/desktop/images/prefs.png index 5d7edee27a..67a5be3ce4 100644 Binary files a/ee/desktop/images/prefs.png and b/ee/desktop/images/prefs.png differ diff --git a/ee/desktop/images/settings-advanced.png b/ee/desktop/images/settings-advanced.png index 4afb4f4354..e398bcdc44 100644 Binary files a/ee/desktop/images/settings-advanced.png and b/ee/desktop/images/settings-advanced.png differ diff --git a/ee/desktop/images/settings-daemon-advanced.png b/ee/desktop/images/settings-daemon-advanced.png index 7a9774ae70..0a4f7e36db 100644 Binary files a/ee/desktop/images/settings-daemon-advanced.png and b/ee/desktop/images/settings-daemon-advanced.png differ diff --git a/ee/desktop/images/settings-daemon-basic.png b/ee/desktop/images/settings-daemon-basic.png index 8a32b9a1c7..023daa94d0 100644 Binary files a/ee/desktop/images/settings-daemon-basic.png and b/ee/desktop/images/settings-daemon-basic.png differ diff --git a/ee/desktop/images/settings-general.png b/ee/desktop/images/settings-general.png index 024149c51b..ecf93b97ad 100644 Binary files a/ee/desktop/images/settings-general.png and b/ee/desktop/images/settings-general.png differ diff --git a/ee/desktop/images/settings-kubernetes.png b/ee/desktop/images/settings-kubernetes.png index ebc25a87d2..867e9a4995 100644 Binary files a/ee/desktop/images/settings-kubernetes.png and b/ee/desktop/images/settings-kubernetes.png differ diff --git a/ee/desktop/images/settings-proxies.png b/ee/desktop/images/settings-proxies.png index 05b7649140..572ce6cc6f 100644 Binary files a/ee/desktop/images/settings-proxies.png and b/ee/desktop/images/settings-proxies.png differ diff --git a/ee/desktop/images/settings-reset.png b/ee/desktop/images/settings-reset.png index aaa7cdd2dc..06f16b2a39 100644 Binary files a/ee/desktop/images/settings-reset.png and b/ee/desktop/images/settings-reset.png differ diff --git a/ee/desktop/images/settings-shared-drives.png b/ee/desktop/images/settings-shared-drives.png index 14a8ca0413..5e978c2e07 100644 Binary files a/ee/desktop/images/settings-shared-drives.png and b/ee/desktop/images/settings-shared-drives.png differ diff --git a/ee/desktop/images/whale-icon-systray-hidden.png b/ee/desktop/images/whale-icon-systray-hidden.png index b49050feb2..bbd73218cf 100644 Binary files a/ee/desktop/images/whale-icon-systray-hidden.png and b/ee/desktop/images/whale-icon-systray-hidden.png differ diff --git a/ee/desktop/images/win-ver-select.PNG 
b/ee/desktop/images/win-ver-select.PNG index fc43d0b610..456f797499 100644 Binary files a/ee/desktop/images/win-ver-select.PNG and b/ee/desktop/images/win-ver-select.PNG differ diff --git a/ee/desktop/release-notes.md b/ee/desktop/release-notes.md index a788cb25a8..bbb68d3c80 100644 --- a/ee/desktop/release-notes.md +++ b/ee/desktop/release-notes.md @@ -2,10 +2,11 @@ title: Docker Desktop Enterprise release notes description: Release notes for Docker Desktop Enterprise keywords: Docker Desktop Enterprise, Windows, Mac, Docker Desktop, Enterprise, +toc_min: 1 +toc_max: 2 --- -This topic contains information about the main improvements and issues, starting with the -current release. The documentation is updated for each release. +This page contains information about the new features, improvements, known issues, and bug fixes in the Docker Desktop Enterprise release. Documentation is updated for each release. We suggest that you regularly visit this page to learn about updates. For information on system requirements, installation, and download, see: @@ -14,155 +15,170 @@ For information on system requirements, installation, and download, see: For Docker Enterprise Engine release notes, see [Docker Engine release notes](/engine/release-notes). -## Docker Desktop Enterprise Releases of 2019 +## Version 2.1.0.1 +2019-07-22 -### Docker Desktop Enterprise 2.0.0.6 +Docker Desktop Enterprise 2.1.0.1 contains a Kubernetes upgrade. Note that your local Kubernetes cluster will be reset after installing this release. +### Upgrades + +- [Docker 19.03.0](https://docs.docker.com/engine/release-notes/) in Version Pack Enterprise 3.0 +- [Kubernetes 1.14.3](https://github.com/kubernetes/kubernetes/releases/tag/v1.14.3) in Version Pack Enterprise 3.0 +- [Compose on Kubernetes 0.4.23](https://github.com/docker/compose-on-kubernetes/releases/tag/v0.4.23) in Version Pack Enterprise 3.0 +- [Docker Compose 1.24.1](https://github.com/docker/compose/releases/tag/1.24.1) +- [Docker 18.09.8](https://docs.docker.com/engine/release-notes/) in Version Pack Enterprise 2.1 +- [Docker 17.06.2-ee-23](https://docs.docker.com/engine/release-notes/) in Version Pack Enterprise 2.0 +- [Docker Credential Helpers 0.6.3](https://github.com/docker/docker-credential-helpers/releases/tag/v0.6.3) +- [Application Designer 0.1.4](/ee/desktop/app-designer/) + +### Known issue + +When you sign out of Windows while Docker Desktop is still running, and then sign in and start Docker Desktop, attempts to run Docker commands that rely on network connections can fail. For example, the command `docker pull alpine` returns `Error response from daemon: Get https://registry-1.docker.io/v2/: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)`. + +Note that this issue occurs only when you sign out of Windows and not when you shut down or restart Windows. + +Workaround: After signing back into Windows, when Docker Desktop has started, right-click the Docker menu from the systray and then select Quit Docker Desktop. When this is complete, start Docker Desktop normally. 
+
+## Version 2.0.0.6
 2019-06-06
 
-- Upgrades
-
-  - [Docker 19.03.0-rc2](https://docs.docker.com/engine/release-notes/) in Version Pack Enterprise 3.0
-  - Application Designer 0.1.3
-
-- Bug fixes and minor changes:
+### Upgrades
+- [Docker 19.03.0-rc2](https://docs.docker.com/engine/release-notes/) in Version Pack Enterprise 3.0
+- Application Designer 0.1.3
 
-  - Application Designer/ Docker Template CLI: The `DefaultServer` preference has been renamed to `DefaultRegistry`.
-  - Windows: Docker Desktop now allows administrators to specify authorized named pipes that can be mounted on Windows containers.
-  - Windows: Docker Desktop checks for stored credentials at startup before attempting to mount any shared drives. This prompts users to reenter the credentials if they are invalid.
-  - Mac: Docker Deskop does not send DNS queries for `docker-desktop.` every 10s. It relies on the host's DNS domain search order rather than trying to replicate it inside the VM.
-  - Mac: Docker Desktop now uses a separate thread to move its storage to a different disk. This allows the UI to remain responsive during the disk move. Fixes [docker/for-mac#3592](https://github.com/docker/for-mac/issues/3592)
+### Bug fixes and minor changes
 
-### Docker Desktop Enterprise 2.0.0.5
+- Application Designer / Docker Template CLI: The `DefaultServer` preference has been renamed to `DefaultRegistry`.
+- Windows: Docker Desktop now allows administrators to specify authorized named pipes that can be mounted on Windows containers.
+- Windows: Docker Desktop checks for stored credentials at startup before attempting to mount any shared drives. This prompts users to reenter the credentials if they are invalid.
+- Mac: Docker Desktop does not send DNS queries for `docker-desktop.` every 10s. It relies on the host's DNS domain search order rather than trying to replicate it inside the VM.
+- Mac: Docker Desktop now uses a separate thread to move its storage to a different disk. This allows the UI to remain responsive during the disk move. Fixes [docker/for-mac#3592](https://github.com/docker/for-mac/issues/3592)
+## Version 2.0.0.5
 2019-05-30
 
-- Upgrades
-
-  - [Docker 19.03.0-rc1](https://docs.docker.com/engine/release-notes/) in Enterprise 3.0 version pack
-  - Application Designer 0.1.2
-  - [Qemu 4.0.0](https://github.com/docker/binfmt) to cross-compile ARM devices
+### Upgrades
+- [Docker 19.03.0-rc1](https://docs.docker.com/engine/release-notes/) in Enterprise 3.0 version pack
+- Application Designer 0.1.2
+- [Qemu 4.0.0](https://github.com/docker/binfmt) to cross-compile ARM devices
 
-- Bug fixes and minor changes
+### Bug fixes and minor changes
 
-  - Application Designer now allows users to copy and paste application logs.
-  - Users can browse the scaffold logs when scaffolding a new application using the Application Designer.
-  - Application Designer allows users to set preferences, including the default organization and registry.
-  - Docker Desktop admins can enforce Application Designer preferences using the `admin-settings.json` file.
-  - Security improvements: Docker Desktop now checks TLS certificates for the target endpoints when using `kubectl`.
-  - Fixed an issue where Visual Studio Code IDE path was not detected properly.
-
-### Docker Desktop Enterprise 2.0.0.4
+- Application Designer now allows users to copy and paste application logs.
+- Users can browse the scaffold logs when scaffolding a new application using the Application Designer.
+- Application Designer allows users to set preferences, including the default organization and registry.
+- Docker Desktop admins can enforce Application Designer preferences using the `admin-settings.json` file.
+- Security improvements: Docker Desktop now checks TLS certificates for the target endpoints when using `kubectl`.
+- Fixed an issue where the Visual Studio Code IDE path was not detected properly.
+## Version 2.0.0.4
 2019-05-16
 
-- Upgrades
-
-  - [Docker 19.03.0-beta4](https://docs.docker.com/engine/release-notes/) in Enterprise 3.0 version pack
-  - [Docker 18.09.6](https://docs.docker.com/engine/release-notes/), [Kubernetes 1.11.10](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.11.md#v11110) in Enterprise 2.1 version pack
-  - [LinuxKit v0.7](https://github.com/linuxkit/linuxkit/releases/tag/v0.7)
+### Upgrades
+- [Docker 19.03.0-beta4](https://docs.docker.com/engine/release-notes/) in Enterprise 3.0 version pack
+- [Docker 18.09.6](https://docs.docker.com/engine/release-notes/), [Kubernetes 1.11.10](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.11.md#v11110) in Enterprise 2.1 version pack
+- [LinuxKit v0.7](https://github.com/linuxkit/linuxkit/releases/tag/v0.7)
 
-- Bug fixes and minor changes
+### Bug fixes and minor changes
 
-  - Fixed a stability issue with the DNS resolver.
-  - Fixed a race condition where Kubernetes sometimes failed to start after restarting the application.
-  - Fixed a bug that causes Docker Compose to fail when a user logs out after logging in. See [docker/compose#6517](https://github.com/docker/compose/issues/6517)
-  - Improved the reliability of `com.docker.osxfs trace` performance profiling command.
-  - Docker Desktop now supports large lists of resource DNS records on Mac. See [docker/for-mac#2160](https://github.com/docker/for-mac/issues/2160#issuecomment-431571031).
-  - Users can now run a Docker registry in a container. See [docker/for-mac#3611](https://github.com/docker/for-mac/issues/3611).
-  - For Linux containers on Windows (LCOW), one physical computer system running Windows 10 Professional or Windows 10 Enterprise version 1809 or later is required.
-  - Added a dialog box during startup when a shared drive fails to mount. This allows users to retry mounting the drive or remove it from the shared drive list.
-  - Removed the ability to log in using an email address as a username as this is not supported by the Docker command line.
-
-### Docker Desktop Enterprise 2.0.0.3
+- Fixed a stability issue with the DNS resolver.
+- Fixed a race condition where Kubernetes sometimes failed to start after restarting the application.
+- Fixed a bug that caused Docker Compose to fail when a user logs out after logging in. See [docker/compose#6517](https://github.com/docker/compose/issues/6517)
+- Improved the reliability of the `com.docker.osxfs trace` performance profiling command.
+- Docker Desktop now supports large lists of resource DNS records on Mac. See [docker/for-mac#2160](https://github.com/docker/for-mac/issues/2160#issuecomment-431571031).
+- Users can now run a Docker registry in a container. See [docker/for-mac#3611](https://github.com/docker/for-mac/issues/3611).
+- For Linux containers on Windows (LCOW), one physical computer system running Windows 10 Professional or Windows 10 Enterprise version 1809 or later is required.
+- Added a dialog box during startup when a shared drive fails to mount. This allows users to retry mounting the drive or remove it from the shared drive list.
+- Removed the ability to log in using an email address as a username as this is not supported by the Docker command line.
+## Version 2.0.0.3
 2019-04-26
 
-- Upgrades
-
-  - [Docker Engine 19.03.0-beta2](https://docs.docker.com/engine/release-notes/) for Version Pack Enterprise 3.0.
-
-### Docker Desktop Enterprise 2.0.0.2
+### Upgrades
+- [Docker Engine 19.03.0-beta2](https://docs.docker.com/engine/release-notes/) for Version Pack Enterprise 3.0.
+## Version 2.0.0.2
 2019-04-19
 
 **WARNING:** You must upgrade the previously installed Version Packs to the latest revision.
 
-- New
+### New
 
-  - Version Pack Enterprise 3.0 with [Docker Engine 19.03.0-beta1](https://docs.docker.com/engine/release-notes/) and [Kubernetes 1.14.1](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.14.md#changelog-since-v1141)
+- Version Pack Enterprise 3.0 with [Docker Engine 19.03.0-beta1](https://docs.docker.com/engine/release-notes/) and [Kubernetes 1.14.1](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.14.md#changelog-since-v1141)
 
-  - Application Designer now includes new templates for AngularJS and VueJS.
+- Application Designer now includes new templates for AngularJS and VueJS.
 
-- Upgrades
+### Upgrades
 
-  - [Docker Compose 1.24.0](https://github.com/docker/compose/releases/tag/1.24.0)
-  - [Docker Engine 18.09.5](https://docs.docker.com/engine/release-notes/), [Kubernetes 1.11.7](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.11.md#v1117) and [Compose on Kubernetes 0.4.22](https://github.com/docker/compose-on-kubernetes/releases/tag/v0.4.22) for Version Pack Enterprise 2.1
-  - [Docker Engine 17.06.2-ee-21](https://docs.docker.com/engine/release-notes/) for Version Pack Enterprise 2.0
+- [Docker Compose 1.24.0](https://github.com/docker/compose/releases/tag/1.24.0)
+- [Docker Engine 18.09.5](https://docs.docker.com/engine/release-notes/), [Kubernetes 1.11.7](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.11.md#v1117) and [Compose on Kubernetes 0.4.22](https://github.com/docker/compose-on-kubernetes/releases/tag/v0.4.22) for Version Pack Enterprise 2.1
+- [Docker Engine 17.06.2-ee-21](https://docs.docker.com/engine/release-notes/) for Version Pack Enterprise 2.0
 
-- Bug fixes and minor changes
+### Bug fixes and minor changes
 
-  - For security, only administrators can install or upgrade Version Packs using the `dockerdesktop-admin` tool.
-  - Truncate UDP DNS responses which are over 512 bytes in size
-  - Fixed airgap install of kubernetes in version pack enterprise-2.0
-  - Reset to factory default now resets to admin defaults
+- For security, only administrators can install or upgrade Version Packs using the `dockerdesktop-admin` tool.
+- Truncate UDP DNS responses that are over 512 bytes in size.
+- Fixed air-gapped install of Kubernetes in version pack enterprise-2.0.
+- Reset to factory default now resets to admin defaults.
 
-- Known issues
+### Known issues
 
-  - The Docker Template CLI plugin included in this version is an outdated version of the plugin and will fail when scaffolding templates. Note that the Application Designer is not affected by this outdated version of the CLI plugin.
-
-### Docker Desktop Enterprise 2.0.0.1
+- The Docker Template CLI plugin included in this version is an outdated version of the plugin and will fail when scaffolding templates. Note that the Application Designer is not affected by this outdated version of the CLI plugin.
+## Version 2.0.0.1
 2019-03-01
 
 **WARNING:** You must upgrade the previously installed Version Packs to the latest revision.
 
-#### Windows
+### Windows
 
-Upgrades:
+#### Upgrades
 
 - Docker 18.09.3 for Version Pack Enterprise 2.1, fixes [CVE-2019-5736](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-5736)
 - Docker 17.06.2-ee-20 for Version Pack Enterprise 2.0, fixes [CVE-2019-5736](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-5736)
 
-Bug fixes and minor changes:
+#### Bug fixes and minor changes
 
 - Fixed port 8080 that was used on localhost when starting Kubernetes.
 - Fixed Hub login through the desktop UI not syncing with login through the `docker login` command line.
 - Fixed a crash in the system tray menu when the Hub login fails or when in air-gapped mode.
 
-#### Mac
+### Mac
 
-New features:
+#### New features
 
 - Added ability to list all installed version packs with the admin CLI command `dockerdesktop-admin version-pack list`.
 - `dockerdesktop-admin app uninstall` will also remove Docker Desktop user files.
 
-Upgrades:
+#### Upgrades
 
 - Docker 18.09.3 for Version Pack Enterprise 2.1, fixes [CVE-2019-5736](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-5736)
 - Docker 17.06.2-ee-20 for Version Pack Enterprise 2.0, fixes [CVE-2019-5736](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-5736)
 
-Bug fixes and minor changes:
+#### Bug fixes and minor changes
 
 - Fixed port 8080 that was used on localhost when starting Kubernetes.
 - Improved error messaging to suggest running diagnostics / resetting to factory default only when it is appropriate.
 
-### Docker Desktop Enterprise 2.0.0.0
-
+## Version 2.0.0.0
 2019-01-31
 
-New features:
+### New features
 
-  - **Version selection**: Configurable version packs ensure the local
+- **Version selection**: Configurable version packs ensure the local
 instance of Docker Desktop Enterprise is a precise copy of the
 production environment where applications are deployed, and developers
 can switch between versions of Docker and Kubernetes with a single
 click.
 
-  - **Application Designer**: Application templates allow you to choose a
+- **Application Designer**: Application templates allow you to choose a
 technology and focus on business logic. Updates can be made with
 minimal syntax knowledge.
 
-  - **Device management**: The Docker Desktop Enterprise installer is available as standard MSI (Win) and PKG (Mac) downloads, which allows administrators to script an installation across many developer machines.
+- **Device management**: The Docker Desktop Enterprise installer is available as standard MSI (Win) and PKG (Mac) downloads, which allows administrators to script an installation across many developer machines.
 
-  - **Administrative control**: IT organizations can specify and lock configuration parameters for creation of a standardized development environment, including disabling drive sharing and limiting version pack installations. Developers run commands in the command line without worrying about configuration settings.
+- **Administrative control**: IT organizations can specify and lock configuration parameters for creation of a standardized development environment, including disabling drive sharing and limiting version pack installations. Developers run commands in the command line without worrying about configuration settings.
diff --git a/ee/desktop/user/mac-user.md b/ee/desktop/user/mac-user.md
index 4ee9578598..2b68b36483 100644
--- a/ee/desktop/user/mac-user.md
+++ b/ee/desktop/user/mac-user.md
@@ -187,7 +187,7 @@ You can configure options on the Docker daemon that determine how your
 containers run.
 
 Select **Basic** to configure the daemon with interactive settings, or select
-**Advanced** to edit the JSON directly.
+**Advanced** to edit the JSON file directly.
 
 ![Daemon](../images/prefs-daemon-basic.png)
 
@@ -196,7 +196,7 @@ Select **Basic** to configure the daemon with interactive settings, or select
 
 Docker Desktop Enterprise has experimental features enabled on Docker Engine, as described in [Docker Experimental Features](https://github.com/docker/cli/blob/master/experimental/README.md) Readme. If you don't select **Experimental Features**, Docker Desktop Enterprise uses the current generally available release of Docker Engine.
 
-> **Note:** Do not enable experimental features in production. Experimental features are not appropriate for production environments or workloads. They are meant to be sandbox experiments for new ideas.
+> {% include experimental.md %}
 
 You can see whether you are running experimental mode at the command line. If
 `Experimental` is `true`, then Docker is running in experimental mode, as shown
@@ -224,9 +224,9 @@ For more information, see:
 
 Click the **Advanced** tab to configure the daemon from the JSON file. For a
 full list of options, see the Docker Engine [dockerd command line reference](https://docs.docker.com/engine/reference/commandline/dockerd).
 
-Click **Apply & Restart** to save your settings and reboot Docker. Or, to cancel
+Click **Apply & Restart** to save your settings and reboot Docker. To cancel
 changes, click another preference tab, then choose to discard or not apply
-changes when asked.
+changes when prompted.
 
 ![Docker Daemon](../images/prefs-daemon-adv.png)
 
@@ -277,7 +277,7 @@ Click on the Docker icon from the menu bar and then **Preferences**. Click **Res
 
 ![Uninstall or reset Docker](../images/prefs-reset-mac.png)
 
-### Diagnose and Feedback
+### Troubleshoot
 
 The **Diagnose and Feedback** option allows you to troubleshoot any issues you may be experiencing with Docker Desktop Enterprise. For more information, see [Troubleshoot DDE issues on Mac](/ee/desktop/troubleshoot/mac-issues).
 
@@ -316,7 +316,7 @@ an example.
 $ sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain ca.crt
 ```
 
-Or, if you prefer to add the certificate to your own local keychain only (rather
+If you prefer to add the certificate to your own local keychain only (rather
 than for all users), run this command instead:
 
 ```
diff --git a/ee/desktop/user/windows-user.md b/ee/desktop/user/windows-user.md
index 80e103c9d7..e4c49ff8a1 100644
--- a/ee/desktop/user/windows-user.md
+++ b/ee/desktop/user/windows-user.md
@@ -251,6 +251,8 @@ The Linux VM restarts after changing the settings on the **Advanced** tab. This
 
 - **Memory** - Change the amount of memory the Docker Desktop Enterprise Linux VM uses.
 
+- **Swap** - Configure the swap file size.
+
 #### Network
 
 You can configure Docker Desktop Enterprise networking to work on a virtual private network (VPN).
@@ -314,8 +316,7 @@ The **Basic** mode lets you configure the more common daemon options with intera
 
 Docker Desktop Enterprise has the experimental version of Docker Engine enabled, described in the [Docker Experimental Features](https://github.com/docker/cli/blob/master/experimental/README.md) readme.
 If you don't select **Experimental Features**, Docker Desktop Enterprise uses the current generally available release of Docker Engine.
 
-> **Note:** Do not enable experimental features in production. Experimental features are not appropriate for production environments or
-workloads. They are meant to be sandbox experiments for new ideas.
+> {% include experimental.md %}
 
 Run `docker version` to see if you are in Experimental mode. Experimental mode
 is listed under `Server` data. If `Experimental` is `true`, then Docker is
@@ -438,7 +439,7 @@ To switch to a different version pack, simply click on the version pack you woul
 
 ![Version Selection](../images/win-ver-select.PNG)
 
-### Diagnose and Feedback
+### Troubleshoot
 
 The **Diagnose and Feedback** option allows you to troubleshoot any issues you may be experiencing with Docker Desktop Enterprise. For more information, see [Troubleshoot DDE issues on Windows](/ee/desktop/troubleshoot/windows-issues).
diff --git a/ee/dtr/admin/configure/disable-persistent-cookies.md b/ee/dtr/admin/configure/disable-persistent-cookies.md
new file mode 100644
index 0000000000..564c850559
--- /dev/null
+++ b/ee/dtr/admin/configure/disable-persistent-cookies.md
@@ -0,0 +1,17 @@
+---
+title: Disable persistent cookies
+description: Learn how to disable persistent cookies for Docker Trusted Registry.
+keywords: dtr, browser cookies, sso
+---
+If you want your Docker Trusted Registry (DTR) to use session-based authentication cookies that expire when you close your browser, toggle "Disable persistent cookies".
+
+![](/ee/dtr/images/disable-persistent-cookies-1.png){: .with-border}
+
+## Verify your DTR cookies setting
+
+You may need to disable Single Sign-On (SSO). From the DTR web UI in a Chrome browser, right-click on any page and click **Inspect**. With the Developer Tools open, select **Application > Storage > Cookies > `https://<dtr-url>`**. Verify that the cookie has "Session" as the setting for **Expires / Max-Age**.
+
+## Where to go next
+
+- [Use your own TLS certificates](use-your-own-tls-certificates)
+- [Enable authentication using client certificates](/ee/enable-client-certificate-authentication/)
diff --git a/ee/dtr/admin/configure/enable-single-sign-on.md b/ee/dtr/admin/configure/enable-single-sign-on.md
index 595a01eb4a..8e57cdc0b4 100644
--- a/ee/dtr/admin/configure/enable-single-sign-on.md
+++ b/ee/dtr/admin/configure/enable-single-sign-on.md
@@ -4,42 +4,62 @@ description: Learn how to set up single sign-on between UCP and DTR, so that you
 keywords: dtr, login, sso
 ---
 
-By default, users are shared between UCP and DTR, but you have to authenticate
-separately on the web UI of both applications.
+Users are shared between UCP and DTR by default, but the applications have separate browser-based interfaces which require authentication.
 
-You can configure DTR to have single sign-on (SSO) with UCP, so that users only
-have to authenticate once.
+To authenticate only once, you can configure DTR to use single sign-on (SSO) with UCP.
 
 > **Note**: After configuring single sign-on with DTR, users accessing DTR via
 > `docker login` should create an [access token](/ee/dtr/user/access-tokens/) and use it to authenticate.
 
-## At installation time
+## At install time
 
-When installing DTR, use the `docker/dtr install --dtr-external-url <url>`
-option to enable SSO. When accessing the DTR web UI, users are redirected to the
-UCP login page, and once they are authenticated, they're redirected to the URL
-you provided to `--dtr-external-url`.
+When [installing DTR](/reference/dtr/2.7/install/), pass `--dtr-external-url <dtr-external-url>` to enable SSO. [Specify the Fully Qualified Domain Name (FQDN)](/use-your-own-tls-certificates/) of your DTR, or a load balancer, to load-balance requests across multiple DTR replicas.
-
-Use the domain name of DTR, or the domain name of a load balancer, if you're
-using one, to load-balance requests across multiple DTR replicas.
 
-## After install
+```bash
+docker run --rm -it \
+{{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} install \
+--dtr-external-url dtr.example.com \
+--dtr-cert "$(cat cert.pem)" \
+--dtr-ca "$(cat dtr_ca.pem)" \
+--dtr-key "$(cat key.pem)" \
+--ucp-url ucp.example.com \
+--ucp-username admin \
+--ucp-ca "$(cat ucp_ca.pem)"
+```
 
-In your browser, navigate to the DTR web UI, and choose **Settings**. In the
-**General** tab, scroll to **Domain & proxies**.
+This means that when you access DTR's web user interface, you are redirected to the UCP login page for authentication. Upon successfully logging in, you are then redirected to the DTR external URL you specified during installation.
 
-Update the **Load balancer / public address** field to the url where users
-should be redirected once they are logged in.
-Use the domain name of DTR, or the domain name of a load balancer, if you're
-using one, to load-balance requests across multiple DTR replicas.
+## Post-installation
 
-Then enable **Use single sign-on**.
+### Web user interface
 
-![](../../images/enable-sso-1.png){: .with-border}
+1. Navigate to `https://<dtr-url>` and log in with your credentials.
+2. Select **System** from the left navigation pane, and scroll down to **Domain & Proxies**.
+3. Update the **Load balancer / Public Address** field with the external URL where users
+should be redirected once they are logged in. Click **Save** to apply your changes.
+4. Toggle **Single Sign-on** to automatically redirect users to UCP for logging in.
+   ![](/ee/dtr/images/single-sign-on-1.png){: .with-border}
 
-Once you save, users are redirected to UCP for logging in, and redirected back to
-DTR once they are authenticated.
+
+### Command line interface
+
+You can also enable single sign-on from the command line by reconfiguring your DTR. To do so, run the following:
+
+```bash
+docker run --rm -it \
+{{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} reconfigure \
+--dtr-external-url dtr.example.com \
+--dtr-cert "$(cat cert.pem)" \
+--dtr-ca "$(cat dtr_ca.pem)" \
+--dtr-key "$(cat key.pem)" \
+--ucp-url ucp.example.com \
+--ucp-username admin \
+--ucp-ca "$(cat ucp_ca.pem)"
+```
 
 ## Where to go next
 
-- [Use your own TLS certificates](use-your-own-tls-certificates.md)
+- [Use your own TLS certificates](use-your-own-tls-certificates)
+- [Enable authentication using client certificates](/ee/enable-authentication-via-client-certs/)
diff --git a/ee/dtr/admin/configure/license-your-installation.md b/ee/dtr/admin/configure/license-your-installation.md
index 8a148c1f0e..c96568e876 100644
--- a/ee/dtr/admin/configure/license-your-installation.md
+++ b/ee/dtr/admin/configure/license-your-installation.md
@@ -4,35 +4,33 @@ description: Learn how to license your Docker Trusted Registry installation.
 keywords: dtr, install, license
 ---
 
-By default, you don't need to license your Docker Trusted Registry. When
-installing DTR, it automatically starts using the same license file used on
-your Docker Universal Control Plane cluster.
+By default, Docker Trusted Registry (DTR) automatically uses the same license file applied to
+your Universal Control Plane (UCP). In the following scenarios, you need to
+manually apply a license to your DTR:
 
-However, there are some situations when you have to manually license your
-DTR installation:
-
-* When upgrading to a new major version,
-* When your current license expires.
+* Major version upgrade
+* License expiration
 
 ## Download your license
 
-Go to [Docker Hub](https://hub.docker.com/editions/enterprise/docker-ee-trial)
-to download a trial license.
+Visit Docker Hub's [Enterprise Trial page](https://hub.docker.com/editions/enterprise/docker-ee-trial) to start your one-month trial. After signing up, you should receive a confirmation email with a link to your subscription page. You can find your **License Key** in the **Resources** section of the Docker Enterprise Setup Instructions page.
 
-![](../../images/license-1.png){: .with-border}
+![](/ee/dtr/images/license-1.png){: .with-border}
 
+Click **License Key** to download your license.
 
 ## License your installation
 
-Once you've downloaded the license file, you can apply it to your DTR
-installation. Navigate to the **DTR web UI**, and then go to the **Settings
-page**.
+After downloading your license key, navigate to `https://<dtr-url>` and log in with your credentials.
+Select **System** from the left navigation pane, and click **Apply new license** to upload your license
+key.
 
-![](../../images/license-2.png){: .with-border}
+![](/ee/dtr/images/license-2.png){: .with-border}
 
-Click the **Apply new license** button, and upload your new license file.
+Within **System > General** under the **License** section, you should see the tier, date of expiration, and ID for your license.
 
 ## Where to go next
 
-- [Enable single sign-on](enable-single-sign-on.md)
+- [Use your own TLS certificates](use-your-own-tls-certificates)
+- [Enable single sign-on](enable-single-sign-on)
diff --git a/ee/dtr/admin/configure/use-your-own-tls-certificates.md b/ee/dtr/admin/configure/use-your-own-tls-certificates.md
index ee175edc04..e6478d5021 100644
--- a/ee/dtr/admin/configure/use-your-own-tls-certificates.md
+++ b/ee/dtr/admin/configure/use-your-own-tls-certificates.md
@@ -1,44 +1,45 @@
 ---
 title: Use your own TLS certificates
 description: Learn how to configure Docker Trusted Registry with your own TLS certificates.
-keywords: dtr, tls
+keywords: dtr, tls, certificates, security
 ---
 
-By default the DTR services are exposed using HTTPS, to ensure all
-communications between clients and DTR is encrypted. Since DTR
-replicas use self-signed certificates for this, when a client accesses
-DTR, their browsers won't trust this certificate, so the browser displays a
-warning message.
+Docker Trusted Registry (DTR) services are exposed using HTTPS by default. This ensures encrypted communications between clients and your trusted registry. If you do not pass a PEM-encoded TLS certificate during installation, DTR will generate a [self-signed certificate](https://en.wikipedia.org/wiki/Self-signed_certificate). This leads to an insecure site warning when accessing DTR through a browser. Additionally, DTR includes an [HSTS (HTTP Strict-Transport-Security) header](https://en.wikipedia.org/wiki/HTTP_Strict_Transport_Security) in all API responses which can further lead to your browser refusing to load DTR's web interface.
 
-You can configure DTR to use your own certificates, so that it is automatically
-trusted by your users' browser and client tools.
+You can configure DTR to use your own TLS certificates, so that it is automatically +trusted by your users' browser and client tools. As of v2.7, you can also [enable user authentication via client certificates](/ee/enable-authentication-via-client-certs/) provided by your organization's public key infrastructure (PKI). ## Replace the server certificates -To configure DTR to use your own certificates and keys, go to the -**DTR web UI**, navigate to the **Settings** page, and scroll down to the -**Domain** section. +You can upload your own TLS certificates and keys using the web interface, or pass them as CLI options when installing or reconfiguring your DTR instance. -![](../../images/use-your-certificates-1.png){: .with-border} +### Web interface +Navigate to `https://<dtr-url>` and log in with your credentials. Select **System** from the left navigation pane, and scroll down to **Domain & Proxies**. -Set the DTR domain name and upload the certificates and key: +![](/ee/dtr/images/use-your-certificates-1.png){: .with-border} -* Load balancer/public address, is the domain name clients will use to access DTR. -* TLS certificate, is the server certificate and any intermediate CA public -certificates. This certificate needs to be valid for the DTR public address, +Enter your DTR domain name and upload or copy and paste the certificate details: + +* ***Load balancer/public address.*** The domain name clients will use to access DTR. +* ***TLS private key.*** The server private key. +* ***TLS certificate chain.*** The server certificate and any intermediate public +certificates from your certificate authority (CA). This certificate needs to be valid for the DTR public address, and have SANs for all addresses used to reach the DTR replicas, including load balancers. -* TLS private key is the server private key. -* TLS CA is the root CA public certificate. +* ***TLS CA.*** The root CA public certificate. -Finally, click **Save** for the changes to take effect. +Click **Save** to apply your changes. -If you're using certificates issued by a globally trusted certificate authority, +If you've added certificates issued by a globally trusted CA, any web browser or client tool should now trust DTR. If you're using an internal -certificate authority, you'll need to configure your system to trust that -certificate authority. +CA, you will need to configure the client systems to trust that +CA. + +### Command line interface + +See [docker/dtr install](/reference/dtr/2.7/cli/install/) and [docker/dtr reconfigure](/reference/dtr/2.7/cli/reconfigure/) for TLS certificate options and usage. ## Where to go next - -- [Set up external storage](external-storage/index.md) +- [Enable single sign-on](enable-single-sign-on) +- [Set up external storage](external-storage) diff --git a/ee/dtr/admin/install/index.md b/ee/dtr/admin/install/index.md index bfe1c4e448..07206b19ae 100644 --- a/ee/dtr/admin/install/index.md +++ b/ee/dtr/admin/install/index.md @@ -35,32 +35,43 @@ After you configure all the options, you should see a Docker CLI command that yo to install DTR. ```bash -docker run -it --rm \ - {{ page.dtr_org }}/{{ page.dtr_repo }} install \ - --dtr-external-url dtr-example.com +$ docker run -it --rm \ + {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} install \ + --dtr-external-url <dtr-external-url> \ --ucp-node <ucp-node-name> \ --ucp-username admin \ --ucp-url <ucp-url> ``` -You can run the DTR install command on any node where `docker` is installed. To verify, run `docker version`.
-Check that your DTR version is compatible with your Engine - Enterprise and UCP versions using the [compatibility matrix](https://success.docker.com/article/compatibility-matrix). +You can run the DTR install command on any node with the Docker Engine +installed; ensure this node also has connectivity to the UCP cluster. DTR will +not be installed on the node you run the install command on. DTR will be +installed on the UCP worker node defined by the `--ucp-node` flag. -As an example, you can SSH into a UCP node and install DTR from there. Running the installation command in interactive TTY or `-it` mode means you will be prompted for any required additional information. -[Learn more about installing DTR](/reference/dtr/2.7/cli/install/). +As an example, you could SSH into a UCP node and run the DTR install command +from there. Running the installation command in interactive TTY or `-it` mode +means you will be prompted for any required additional information. [Learn +more about installing DTR](/reference/dtr/2.7/cli/install/). -To pull a specific version of DTR, run the following: +To install a specific version of DTR, replace `{{ page.dtr_version }}` with your +desired version in the [installation command](#step-3-install-dtr) above. Find +all DTR versions in the [DTR release notes](/ee/dtr/release-notes/) page. -```bash -docker pull {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} -``` -Replace `{{ page.dtr_version }}` with your desired version. Run the [installation command](#step-3-install-dtr) with the image you just pulled. +DTR is deployed with self-signed certificates by default, so UCP might not be +able to pull images from DTR. Use the `--dtr-external-url <loadbalancer>:<port>` +optional flag during installation, or during a reconfiguration, so that UCP is +automatically reconfigured to trust DTR. -DTR is deployed with self-signed certificates by default, so UCP might not be able to pull images from DTR. -Use the `--dtr-external-url <loadbalancer>:<port>` optional flag during installation, or during a reconfiguration, so that DTR registers itself with UCP. To verify, see `https://<ucp-url>/manage/settings/dtr` or navigate to **Admin Settings > Docker Trusted Registry** from the UCP web UI. Under the hood, UCP modifies `/etc/docker/certs.d` for each host and adds DTR's CA certificate. UCP can then pull images from DTR because the Docker Engine for each node in the UCP swarm has been configured to trust DTR. +To verify, see `https://<ucp-url>/manage/settings/dtr` or navigate to **Admin +Settings > Docker Trusted Registry** from the UCP web UI. Under the hood, UCP +modifies `/etc/docker/certs.d` for each host and adds DTR's CA certificate. UCP +can then pull images from DTR because the Docker Engine for each node in the +UCP swarm has been configured to trust DTR. -Additionally, with DTR 2.7, you can [enable browser authentication via client certificates](/ee/enable-authentication-via-client-certificates/) -at install time. This bypasses the DTR login page and hides the logout button, thereby skipping the need for entering your username and password. +Additionally, with DTR 2.7, you can [enable browser authentication via client +certificates](/ee/enable-authentication-via-client-certificates/) at install +time. This bypasses the DTR login page and hides the logout button, thereby +skipping the need for entering your username and password. ## Step 4.
Check that DTR is running diff --git a/ee/dtr/admin/install/install-offline.md b/ee/dtr/admin/install/install-offline.md index 5c22de8cb6..5e840ed5c8 100644 --- a/ee/dtr/admin/install/install-offline.md +++ b/ee/dtr/admin/install/install-offline.md @@ -15,7 +15,7 @@ all the images. Then you copy that package to the host where you’ll install DT ## Versions available -{% include components/ddc_url_list_2.html product="dtr" version="2.6" %} +{% include components/ddc_url_list_2.html product="dtr" version="2.7" %} ## Download the offline package diff --git a/ee/dtr/admin/install/system-requirements.md b/ee/dtr/admin/install/system-requirements.md index e4f88f17fa..35b275f61e 100644 --- a/ee/dtr/admin/install/system-requirements.md +++ b/ee/dtr/admin/install/system-requirements.md @@ -30,6 +30,8 @@ Note that Windows container images are typically larger than Linux ones and for this reason, you should consider provisioning more local storage for Windows nodes and for DTR setups that will store Windows container images. +When the image scanning feature is used, we recommend that you have at least 32 GB of RAM. As developers and teams push images into DTR, the repository grows over time, so you should inspect RAM, CPU, and disk usage on DTR nodes and increase resources when resource saturation is observed on a regular basis. + ## Ports used When installing DTR on a node, make sure the following ports are open on that @@ -42,6 +44,23 @@ node: These ports are configurable when installing DTR. +## UCP Configuration + +When installing DTR on a UCP cluster, Administrators need to be able to deploy +containers on "UCP manager nodes or nodes running DTR". This setting can be +adjusted in the [UCP Settings +menu](/ee/ucp/admin/configure/restrict-services-to-worker-nodes/). Once the +installation has completed and all additional DTR replicas have been deployed, +this UCP setting can be unchecked. + +The DTR installation will fail with the following error message if +Administrators are unable to deploy on "UCP manager nodes or nodes running +DTR". + +``` +Error response from daemon: {"message":"could not find any nodes on which the container could be created"} +``` + ## Compatibility and maintenance lifecycle Docker Enterprise Edition is a software subscription that includes three products: diff --git a/ee/dtr/admin/upgrade.md b/ee/dtr/admin/upgrade.md index f053ff15d9..228f067291 100644 --- a/ee/dtr/admin/upgrade.md +++ b/ee/dtr/admin/upgrade.md @@ -4,7 +4,7 @@ description: Learn how to upgrade your Docker Trusted Registry keywords: dtr, upgrade, install --- -{% assign previous_version="2.5" %} +{% assign previous_version="2.6" %} DTR uses [semantic versioning](http://semver.org/) and Docker aims to achieve specific guarantees while upgrading between versions. While downgrades are not supported, Docker supports upgrades according to the following rules: @@ -40,7 +40,7 @@ to avoid any business impacts. > Upgrade Best Practices > -> There are [important changes to the upgrade process](/ee/upgrade) that, if not correctly followed, can have impact on the availability of applications running on the Swarm during upgrades. These constraints impact any upgrades coming from any version before `18.09` to version `18.09` or greater. See [Cluster Upgrade Best Practices](/ee/upgrade.md#cluster-upgrade-best-practices) for more details. Additionally, to ensure high availability during the DTR upgrade, you can also drain the DTR replicas and move their workloads to updated workers.
To do this, you can join new workers as DTR replicas to your existing cluster and then remove the old replicas. See [docker/dtr join](/reference/dtr/2.6/cli/join) and [docker/dtr remove](/reference/dtr/2.6/cli/remove) for command options and details. +> There are [important changes to the upgrade process](/ee/upgrade) that, if not correctly followed, can have impact on the availability of applications running on the Swarm during upgrades. These constraints impact any upgrades coming from any version before `18.09` to version `18.09` or greater. See [Cluster Upgrade Best Practices](/ee/upgrade.md#cluster-upgrade-best-practices) for more details. Additionally, to ensure high availability during the DTR upgrade, you can also drain the DTR replicas and move their workloads to updated workers. To do this, you can join new workers as DTR replicas to your existing cluster and then remove the old replicas. See [docker/dtr join](/reference/dtr/2.7/cli/join/) and [docker/dtr remove](/reference/dtr/2.7/cli/remove/) for command options and details. ## Minor upgrade @@ -73,13 +73,13 @@ nodes if upgrading offline), run the upgrade command: ```bash docker run -it --rm \ - {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} upgrade \ - --ucp-insecure-tls + {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} upgrade ``` By default the upgrade command runs in interactive mode and prompts you for any necessary information. You can also check the -[reference documentation](/reference/dtr/2.6/cli/index.md) for other existing flags. +[upgrade reference page](/reference/dtr/2.7/cli/upgrade/) for other existing flags. +If you are performing the upgrade on an existing replica, pass the `--existing-replica-id` flag. The upgrade command will start replacing every container in your DTR cluster, one replica at a time. It will also perform certain data migrations. If anything diff --git a/ee/dtr/images/disable-persistent-cookies-1.png b/ee/dtr/images/disable-persistent-cookies-1.png new file mode 100644 index 0000000000..8dc34f78e0 Binary files /dev/null and b/ee/dtr/images/disable-persistent-cookies-1.png differ diff --git a/ee/dtr/images/license-1.png b/ee/dtr/images/license-1.png index 3b0b0c07bb..d555ea9811 100644 Binary files a/ee/dtr/images/license-1.png and b/ee/dtr/images/license-1.png differ diff --git a/ee/dtr/images/license-2.png b/ee/dtr/images/license-2.png index bbff97fe76..f869c4a793 100644 Binary files a/ee/dtr/images/license-2.png and b/ee/dtr/images/license-2.png differ diff --git a/ee/dtr/images/scheduling-options.png b/ee/dtr/images/scheduling-options.png deleted file mode 100644 index 5f84a0ae53..0000000000 Binary files a/ee/dtr/images/scheduling-options.png and /dev/null differ diff --git a/ee/dtr/images/single-sign-on-1.png b/ee/dtr/images/single-sign-on-1.png new file mode 100644 index 0000000000..c41d8970ec Binary files /dev/null and b/ee/dtr/images/single-sign-on-1.png differ diff --git a/ee/dtr/images/use-your-certificates-1.png b/ee/dtr/images/use-your-certificates-1.png index bf841a31f5..6f8b185cd1 100644 Binary files a/ee/dtr/images/use-your-certificates-1.png and b/ee/dtr/images/use-your-certificates-1.png differ diff --git a/ee/dtr/release-notes.md b/ee/dtr/release-notes.md index 58b6e7e606..a7db573b13 100644 --- a/ee/dtr/release-notes.md +++ b/ee/dtr/release-notes.md @@ -20,10 +20,11 @@ to upgrade your installation to the latest release. 
* [Version 2.5](#version-25) * [Version 2.4](#version-24) -# Version 2.7 +## Version 2.7.0 +(2019-7-22) -## 2.7.0-beta4 -(2019-5-16) +### Security +Refer to [DTR image vulnerabilities](https://success.docker.com/article/dtr-image-vulnerabilities) for details regarding actions to be taken and any status updates, issues, and recommendations. ### New Features @@ -38,7 +39,6 @@ to upgrade your installation to the latest release. * It is now possible to distribute [docker apps](https://github.com/docker/app) via DTR. This includes application pushes, pulls, and general management features like promotions, mirroring, and pruning. - * **Registry CLI** * The Docker CLI now includes a `docker registry` management command which lets you interact with Docker Hub and trusted registries. @@ -55,18 +55,25 @@ to upgrade your installation to the latest release. * Users can now edit mirroring policies. (docker/dhe-deploy #10157) * `docker run -it --rm docker/dtr:2.7.0-beta4` now includes a global option, `--version`, which prints the DTR version and associated commit hash. (docker/dhe-deploy #10144) -* Users can now set up push and pull mirroring policies via the API using an authentication token instead of their credentials. (docker/dhe-deploy#10002) +* Users can now set up push and pull mirroring policies through the API using an authentication token instead of their credentials. (docker/dhe-deploy#10002) * DTR is now on Golang `1.12.4`. (docker/dhe-deploy#10274) * For new mirroring policies, the **Mirror direction** now defaults to the Pull tab instead of Push. (docker/dhe-deploy#10004) - ### Bug Fixes * Fixed an issue where a webhook notification was triggered twice on non-scanning image promotion events on a repository with scan on push enabled. (docker/dhe-deploy#9909) - ### Known issues +* Application mirroring to and from Docker Hub does not work, as experimental applications are not yet fully supported on Docker Hub. +* The time that an application is pushed is incorrect. +* The Application Configuration in the UI says it is an invocation image. +* When changing your password, if an incorrect password is entered, the UI does not give the appropriate error message and the save button stays in a loading state. + * Workaround: Refresh the page. +* After a promotion policy is created, it cannot be edited through the UI. + * Workaround: Delete the promotion policy and recreate it, or use the API to view and + edit the promotion policy. +* Non-admin users cannot create promotion policies through the UI. ### Deprecations @@ -74,7 +81,6 @@ to upgrade your installation to the latest release. * The `--no-image-check` flag has been removed from the `upgrade` command as image check is no longer a part of the upgrade process. - # Version 2.6 ## 2.6.8 diff --git a/ee/enable-client-certificate-authentication.md b/ee/enable-client-certificate-authentication.md new file mode 100644 index 0000000000..8c1b02bfc3 --- /dev/null +++ b/ee/enable-client-certificate-authentication.md @@ -0,0 +1,249 @@ +--- +title: Enable authentication using TLS client certificates +description: Learn how to enable user authentication via client certificates from your own public key infrastructure (PKI). +keywords: PKI, Client Certificates, Passwordless Authentication, Docker Enterprise +--- + +## Overview + +In many organizations, authenticating to systems with a username and password combination is either restricted or outright prohibited.
With Docker Enterprise 3.0, UCP's [CLI client certificate-based authentication](/ee/ucp/user-access/cli/) has been extended to the web user interface (web UI). DTR has also been enhanced to work with UCP's internally generated client bundles for client certificate-based authentication. If you have an external public key infrastructure (PKI) system, you can manage user authentication using a pool of X.509 client certificates in lieu of usernames and passwords. + +## Benefits + +The following table outlines existing and added capabilities when using client certificates — both internal to UCP and issued by an external certificate authority (CA) — for authentication. +
+| Operation | Benefit |
+| --------- | ------- |
+| [UCP browser authentication](#ucp--dtr-browser-authentication) | Previously, UCP client bundles enabled communication between a local Docker client and UCP without the need of a username and password. Importing your client certificates into the browser extends this capability to the UCP web UI. |
+| [DTR browser authentication](#ucp--dtr-browser-authentication) | You can bypass the login page for the DTR web UI when you use TLS client certificates as a DTR authentication method. |
+| [Image pulls and pushes to DTR](#image-pulls-and-pushes-to-dtr) | You can update Docker Engine with a client certificate for image pulls and pushes to DTR without the need for `docker login`. |
+| [Image signing](#image-signing) | You can use client certificates to sign images that you push to DTR. Depending on which client you configure to talk to DTR, the certificate files need to be located in certain directories. Alternatively, you can enable system-wide trust of your custom root certificates. |
+| [DTR API access](#dtr-api-access) | You can use TLS client certificates in lieu of your user credentials to access the DTR API. |
+| [Notary CLI operations with DTR](#notary-cli-operations-with-dtr) | You can set your DTR as the remote trust server location and pass the certificate flags directly to the Notary CLI to access your DTR repositories. |
+ +## Limitations + +- The security of client certificates issued by your organization's PKI is outside of UCP’s control. UCP administrators are responsible for instructing their users on how to authenticate via client certificates. +- Username and password authentication cannot be disabled. +- If client certificates have been configured, they will be used for +all `docker push` and `docker pull` operations for _all users_ of the same +machine. +- Docker Enterprise 3.0 does not check certificate revocation lists (CRLs) or Online Certificate Status Protocol (OCSP) for revoked certificates. + +## UCP / DTR browser authentication + +The following instructions apply to UCP and DTR administrators. For non-admin users, contact your administrator for details on your PKI's client certificate configuration. + +To bypass the browser login pages and hide the logout buttons for both UCP and DTR, follow the steps below. + +1.
Add your organization's root CA certificates [via the UCP web UI](/ee/ucp/admin/configure/use-your-own-tls-certificates/#configure-ucp-to-use-your-own-tls-certificates-and-keys) or [via the CLI installation command](https://success.docker.com/article/how-do-i-provide-an-externally-generated-security-certificate-during-the-ucp-command-line-installation). + + For testing purposes, you can download an [admin client bundle](/ee/ucp/user-access/cli/#download-client-certificates) from UCP and [convert the client certificates to `pkcs12`](#client-certificate-to-pkcs12-file-conversion). + +1. Download UCP's `ca.pem` from `https://<ucp-url>/ca` either in the browser or via `curl`. When using `curl`, redirect the response output to a file. + `curl -sk https://<ucp-url>/ca -o ca.pem` + +1. Enable client certificate authentication for DTR. If previously installed, reconfigure DTR with your UCP hostname's root CA certificate. This will be your organization's root certificate(s) appended to UCP's internal root CA certificates. + + ``` + docker run --rm -it docker/dtr:2.7.0 reconfigure --debug --ucp-url <ucp-url> \ + --ucp-username <admin-username> --ucp-password <admin-password> \ + --enable-client-cert-auth --client-cert-auth-ca "$(cat ca.pem)" + ``` + + See the [DTR installation](/reference/dtr/2.7/cli/install/) and [DTR reconfiguration](/reference/dtr/2.7/cli/reconfigure/) CLI reference pages for an explanation of the different options. + +1. Import the PKCS12 file into [the browser](#pkcs12-file-browser-import) or [Keychain Access](https://www.digicert.com/ssl-support/p12-import-export-mac-mavericks-server.htm#import_certificate) if you're running macOS. + +### Client certificate to PKCS12 file conversion + +From the command line, switch to the directory of your client bundle and run the following command to convert the client bundle public and private key pair to a `.p12` file. + +```bash + openssl pkcs12 -export -out cert.p12 -inkey key.pem -in cert.pem +``` + +Create a simple password; you will be prompted for it when you import the certificate into the browser or Mac's Keychain Access. + +### PKCS12 file browser import + +Instructions on how to import a certificate into a web browser vary according to your platform, OS, preferred browser, and browser version. As a general rule, refer to one of the following how-to articles: +- ***Firefox***: +https://www.sslsupportdesk.com/how-to-import-a-certificate-into-firefox/ +- ***Chrome***: +https://www.comodo.com/support/products/authentication_certs/setup/win_chrome.php +- ***Internet Explorer***: +https://www.comodo.com/support/products/authentication_certs/setup/ie7.php + +## Image pulls and pushes to DTR + +For pulling and pushing images to your DTR (with client certificate authentication method enabled) without performing a `docker login`, do the following: + +1. Create a directory for your DTR public address or FQDN (Fully Qualified Domain Name) within your operating system's TLS certificate directory. + +1. As a [superuser](https://en.wikipedia.org/wiki/Superuser), copy the private key (`client.key`) and certificate (`client.cert`) to the machine you are using for pulling and pushing to DTR without doing a `docker login`. Note that the filenames must match. + +1. Obtain the CA certificate from your DTR server, `ca.crt` from `https://<dtr-external-url>/ca`, and copy `ca.crt` to your operating system's TLS certificate directory so that your machine's Docker Engine will trust DTR. For Linux, this is `/etc/docker/certs.d/<dtr-external-url>/`. On Docker for Mac, this is `<home_directory>/.docker/certs.d/<dtr-external-url>/`.
+ + This is a convenient alternative to, for Ubuntu as an example, adding the DTR server certificate to `/etc/ca-certs` and running `update-ca-certificates`. + ```bash + curl -k https://<dtr-external-url>/ca -o ca.crt + ``` + + On Ubuntu: + ```bash + cp ca.crt /etc/ca-certs + ``` + +1. Restart the Docker daemon for the changes to take effect. See [Configure your host](/ee/dtr/user/access-dtr/#configure-your-host) for different ways to restart the Docker daemon. + +### Add your DTR server CA certificate to system level + +You have the option to add your DTR server CA certificate to your system's trusted root certificate pool. This is the macOS Keychain or `/etc/ca-certificates/` on Ubuntu. Note that you will have to remove the certificate if your DTR public address changes. + +### Reference guides + +- [Docker Engine](https://docs.docker.com/engine/security/certificates/) +- Docker Desktop + - [Enterprise for Mac](/ee/desktop/user/mac-user/#add-tls-certificates) + - [Enterprise for Windows](/ee/desktop/user/windows-user/#adding-tls-certificates) + - [Community for Mac](/docker-for-mac/#add-tls-certificates) + - [Community for Windows](/docker-for-windows/faqs/#certificates) + +Note: The above configuration means that Docker Engine will use the same client certificate for all pulls and pushes to DTR for ***all users*** of the same machine. + +## Image signing + +DTR provides the Notary service for using Docker Content Trust (DCT) out of the box.
+| Implementation | Component Pairing | Settings |
+| -------------- | ----------------- | -------- |
+| Sign with `docker trust sign` | • Docker Engine - Enterprise 18.03 or higher<br>• Docker Engine - Community 17.12 or higher | Copy `ca.crt` from `https://<dtr-external-url>/ca` to:<br>• Linux: `/etc/docker/certs.d/<dtr-external-url>/`<br>• Mac: `<home_directory>/.docker/certs.d/<dtr-external-url>/` |
+| Enforce signature or hash verification on the Docker client | • Docker Engine - Enterprise 17.06 or higher<br>• Docker Engine - Community 17.06 or higher | `export DOCKER_CONTENT_TRUST=1` to enable content trust on the Docker client. Copy `ca.crt` from `https://<dtr-external-url>/ca` to `/<home_directory>/.docker/tls/<dtr-external-url>/` on Linux and macOS. `docker push` will sign your images. |
+| Sign images that UCP can trust | • Docker Engine - Enterprise 17.06 or higher<br>• Docker UCP 2.2 or higher | Configure UCP to run only signed images. See **Sign an image** for detailed steps. |
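+
+As an illustration, a minimal signing flow with the Docker client might look like the following sketch. The registry address `dtr.example.com` and the repository `dev/app` are hypothetical placeholders; substitute your own DTR external URL and repository.
+
+```bash
+# Enable Docker Content Trust so that pushes from this shell are signed.
+export DOCKER_CONTENT_TRUST=1
+
+# Tag a local image for a repository on the trusted registry.
+docker tag app:latest dtr.example.com/dev/app:1.0.0
+
+# With content trust enabled, the push is signed. On first use you are
+# prompted to create root and repository signing keys.
+docker push dtr.example.com/dev/app:1.0.0
+
+# Alternatively, sign an existing tag explicitly.
+docker trust sign dtr.example.com/dev/app:1.0.0
+```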
+ +## DTR API access + +With `curl`, you can interact with the DTR +API by passing a public certificate and private key pair instead of +your DTR username and password/authentication token. + +```bash +curl --cert cert.pem --key key.pem -X GET \ +"https://<dtr-external-url>/api/v0/repositories?pageSize=10&count=false" \ +-H "accept:application/json" +``` + +In the above example, `cert.pem` contains the public certificate and `key.pem` +contains the private key. For non-admin users, you can generate a client bundle from UCP or contact your administrator for your public and private key pair. + +For Mac-specific quirks, see [curl on certain macOS versions](#curl-on-certain-macos-versions). + +## Notary CLI operations with DTR + +For establishing mutual trust between the Notary client and your trusted registry (DTR) using the Notary CLI, place your TLS client certificates in `<home_directory>/.docker/tls/<dtr-external-url>/` as `client.cert` and `client.key`. Note that the filenames must match. Pass the FQDN or publicly accessible IP address of your registry along with the TLS client certificate options to the Notary client. To get started, see [Use the Notary client for advanced users](/notary/advanced_usage/). + +> ### Self-signed DTR server certificate +> +> Also place `ca.crt` in `<home_directory>/.docker/tls/<dtr-external-url>/` when you're using a self-signed server certificate for DTR. + +## Troubleshooting tips + +### DTR authentication via client certificates + +Hit your DTR's `basic_info` endpoint via `curl`: + +```bash +curl --cert cert.pem --key key.pem -X GET "https://<dtr-external-url>/basic_info" +``` + +If successfully configured, you should see `TLSClientCertificate` listed as the `AuthnMethod` in the JSON response. + +#### Example Response + +```json +{ +"CurrentVersion": "2.7.0", +"User": { +"name": "admin", +"id": "30f53dd2-763b-430d-bafb-dfa361279b9c", +"fullName": "", +"isOrg": false, +"isAdmin": true, +"isActive": true, +"isImported": false +}, +"IsAdmin": true, +"AuthnMethod": "TLSClientCertificate" +} +``` + +### DTR as an insecure registry + +Avoid adding DTR to Docker Engine's list of insecure registries as a workaround. This has the side effect of disabling the use of TLS certificates. + +### DTR server certificate errors + +#### Example Error + +```bash +Error response from daemon: Get https://35.165.223.150/v2/: x509: certificate is valid for 172.17.0.1, not 35.165.223.150 +``` + +- On the web UI, make sure to add the IP address or the FQDN associated with your custom TLS certificate under **System > General > Domain & Proxies**. + +- From the command line interface, [reconfigure DTR](/reference/dtr/2.7/cli/reconfigure/) with the `--dtr-external-url` option and the associated PEM files for your certificate. + +### Intermediate certificates + +For a chain of trust that includes intermediate certificates, you may optionally add those certificates when installing or reconfiguring DTR with `--enable-client-cert-auth` and `--client-cert-auth-ca`. You can do so by combining all of the certificates into a single PEM file.
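+
+For example, a minimal sketch assuming hypothetical file names `root_ca.pem` and `intermediate_ca.pem` for your CA chain:
+
+```bash
+# Combine the intermediate and root CA certificates into one PEM file.
+cat intermediate_ca.pem root_ca.pem > combined_ca.pem
+
+# Reconfigure DTR so client certificates are verified against the full chain.
+docker run --rm -it docker/dtr:2.7.0 reconfigure \
+  --ucp-url <ucp-url> \
+  --ucp-username <admin-username> --ucp-password <admin-password> \
+  --enable-client-cert-auth \
+  --client-cert-auth-ca "$(cat combined_ca.pem)"
+```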
+ +### curl on certain macOS versions + +Some versions of macOS include a build of `curl` that only accepts `.p12` files and requires a `./` prefix in front of the file name if running `curl` from the same directory as the `.p12` file: + +``` +curl --cert ./client.p12 -X GET \ +"https://<dtr-external-url>/api/v0/repositories?pageSize=10&count=false" \ +-H "accept:application/json" +``` diff --git a/ee/images/docker-ee-overview-4.png b/ee/images/docker-ee-overview-4.png new file mode 100644 index 0000000000..82ad7e0983 Binary files /dev/null and b/ee/images/docker-ee-overview-4.png differ diff --git a/ee/images/docker-ee-overview-4.svg b/ee/images/docker-ee-overview-4.svg deleted file mode 100644 index 17ad56e60c..0000000000 --- a/ee/images/docker-ee-overview-4.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - diff --git a/ee/index.md b/ee/index.md index 7a14af77a8..f2e20cefe7 100644 --- a/ee/index.md +++ b/ee/index.md @@ -7,12 +7,12 @@ redirect_from: - /manuals/ --- -Docker Enterprise 2.1 is a Containers-as-a-Service (CaaS) platform that enables a secure software supply chain and deploys diverse applications for high availability across disparate infrastructure, both on-premises and in the cloud. +Docker Enterprise is a Containers as a Service (CaaS) platform that enables a secure software supply +chain and deploys diverse applications for high availability across disparate infrastructure, both on-premises and in the cloud. -Docker Enterprise is a secure, scalable, and supported container -platform for building and orchestrating applications across multi-tenant Linux, and Windows Server 2016. +Docker Enterprise is a secure, scalable, and supported container platform for building and +orchestrating applications across multi-tenant Linux, Windows Server 2016, and Windows Server 2019. Docker Enterprise enables deploying your workloads for high availability (HA) onto the orchestrator of your choice. Docker Enterprise automates many of the tasks that @@ -20,7 +20,7 @@ orchestration requires, like provisioning pods, containers, and cluster resources. Self-healing components ensure that Docker Enterprise clusters remain highly available. -Role-based access control applies to Kubernetes and Swarm orchestrators, and +Role-based access control (RBAC) applies to Kubernetes and Swarm orchestrators, and communication within the cluster is secured with TLS. [Docker Content Trust](/engine/security/trust/content_trust/) is enforced for images on all of the orchestrators. @@ -34,7 +34,7 @@ cluster and applications through a single interface. ## Docker Enterprise features -Docker Enterprise 2.1 provides multi-architecture orchestration for Kubernetes and +Docker Enterprise provides multi-architecture orchestration for Kubernetes and Swarm workloads. Docker Enterprise enables a secure software supply chain, with image promotion, mirroring between registries, and signing/scanning enforcement for Kubernetes images. @@ -75,7 +75,7 @@ to schedule Kubernetes or Swarm workloads. ### Orchestration platform features -![](images/docker-ee-overview-4.svg){: .with-border} +![](images/docker-ee-overview-4.png){: .with-border} - Docker Enterprise manager nodes are both Swarm managers and Kubernetes masters, to enable high availability @@ -112,10 +112,8 @@ You can also deploy and monitor your applications and services. ## Built-in security and access control -Docker Enterprise has its own built-in authentication mechanism with role-based access -control (RBAC), so that you can control who can access and make changes to your -swarm and applications. Also, Docker Enterprise authentication integrates with LDAP -services.
+Docker Enterprise has its own built-in authentication mechanism with RBAC, so that you can control who can access and make changes to your +swarm and applications. Also, Docker Enterprise authentication integrates with LDAP services. [Learn about role-based access control](access-control/index.md). ![](images/docker-ee-overview-2.png){: .with-border} diff --git a/ee/release-notes.md b/ee/release-notes.md index 32edbceaf6..55c98ff4fc 100644 --- a/ee/release-notes.md +++ b/ee/release-notes.md @@ -1,30 +1,35 @@ --- -title: Docker Enterprise Platform release notes -description: Learn about the new features, bug fixes, and breaking changes for Docker Enterprise Platform. +title: Docker Enterprise release notes +description: Learn about the new features, bug fixes, and breaking changes for Docker Enterprise. keywords: engine enterprise, ucp, dtr, desktop enterprise, whats new, release notes --- +This page provides information about Docker Enterprise 3.0. For +detailed information about each enterprise component, refer to the individual component release notes +pages listed in the following **Docker Enterprise components install and upgrade** section. + ## What’s New? | Feature | Component | Component version | |---------|-----------|-------------------| -| [Group Managed Service Accounts (gMSA)](#) | UCP | 3.2.0 | -| [Open Security Controls Assessment Language (OSCAL)](#) | UCP | 3.2.0 | -| [Container storage interface (CSI)](#) | UCP | 3.2.0 | -| [Internet Small Computer System Interface (iSCSI)](#) | UCP | 3.2.0 | -| [System for Cross-domain Identity Management (SCIM)](#) | UCP | 3.2.0 | -| [Registry CLI](#) | DTR | 2.7.0 | -| [App Distribution](#) | DTR | 2.7.0 | -| [Client certificate-based Authentication](#) | DTR | 2.7.0 | -| [Application Designer](/ee/desktop/app-designer/) | Docker Desktop Enterprise | 0.1.4 | +| [Group Managed Service Accounts (gMSA)](/engine/swarm/services.md#gmsa-for-swarm) | UCP | 3.2.0 | +| [Open Security Controls Assessment Language (OSCAL)](/compliance/oscal/) | UCP | 3.2.0 | +| [Container storage interface (CSI)](/ee/ucp/kubernetes/storage/use-csi/) | UCP | 3.2.0 | +| [Internet Small Computer System Interface (iSCSI)](/ee/ucp/kubernetes/storage/use-iscsi/) | UCP | 3.2.0 | +| [System for Cross-domain Identity Management (SCIM)](/ee/ucp/admin/configure/integrate-scim/) | UCP | 3.2.0 | +| [Pod Security Policies](/ee/ucp/kubernetes/pod-security-policies/) | UCP | 3.2.0 | +| [Docker Registry CLI (Experimental)](/engine/reference/commandline/registry/) | DTR | 2.7.0 | +| [App Distribution](/ee/dtr/user/manage-applications/) | DTR | 2.7.0 | +| [Client certificate-based Authentication](/ee/enable-client-certificate-authentication/) | DTR and UCP | 2.7.0 (DTR) and 3.2.0 (UCP) | +| [Application Designer](/ee/desktop/app-designer/) | Docker Desktop Enterprise | 0.1.4 | | [Docker App (Experimental)](/app/working-with-app/) | CLI | 0.8.0 | | [Docker Assemble (Experimental)](/assemble/install/) | CLI | 0.36.0 | | [Docker Buildx (Experimental)](/buildx/working-with-buildx/) | CLI | 0.2.2 | -| [Docker Cluster](/cluster/overview/) | CLI | 1.0.0 | +| [Docker Cluster](/cluster/) | CLI | 1.0.0 | | [Docker Template CLI (Experimental)](/app-template/working-with-template/) | CLI | 0.1.4 | -## Product install and upgrade +## Docker Enterprise components install and upgrade | Component Release Notes | Version | Install | Upgrade | |---------|-----------|-------------------|--------------| @@ -34,49 +39,7 @@ keywords: engine enterprise, ucp, dtr, desktop enterprise, whats new, release notes | [Docker Desktop Enterprise](/ee/desktop/release-notes/) | 2.1.0 | Install Docker Desktop Enterprise [Mac](/ee/desktop/admin/install/mac/), [Windows](/ee/desktop/admin/install/windows/) | Upgrade Docker Desktop Enterprise [Mac](/ee/desktop/admin/install/mac/), [Windows](/ee/desktop/admin/install/windows/) | Refer to the [Compatibility Matrix](https://success.docker.com/article/compatibility-matrix) and the [Maintenance Lifecycle](https://success.docker.com/article/maintenance-lifecycle) for compatibility and software maintenance details. - -## Known Issues - -This is not an exhaustive list. For complete known issues information, refer to the individual component release notes page. -
-| Issue Description | Issue Number | Component | Affected Versions | Fixed? | Version Fix - Pull Request |
-| ----------------- | ------------ | --------- | ----------------- | ------ | -------------------------- |
-| docker registry info authentication error (for example purposes) | ENG-DTR #912 | DTR | 2.7.0-beta2 | Yes | 2.7.0 |
-| Error when installing UCP with `"selinux-enabled": true` | ??? | UCP | UCP with Enterprise Engine 18.09 or 19.03 | No | N/A |
diff --git a/ee/supported-platforms.md b/ee/supported-platforms.md index 48fdffb23e..426064f085 100644 --- a/ee/supported-platforms.md +++ b/ee/supported-platforms.md @@ -1,6 +1,6 @@ --- title: About Docker Enterprise -description: Information about Docker Enterprise 2.1 +description: Docker Enterprise product information keywords: Docker Enterprise, enterprise, enterprise edition, ee, docker ee, docker enterprise edition, lts, commercial, cs engine, commercially supported redirect_from: - /enterprise/supported-platforms/ @@ -13,12 +13,13 @@ green-check: '![yes](/install/images/green-check.svg){: style="height: 14px; mar install-prefix-ee: '/install/linux/docker-ee' --- -Docker Enterprise is designed for enterprise development as well as IT teams who -build, ship, and run business-critical applications -in production and at scale. Docker Enterprise is integrated, certified, -and supported to provide enterprises with the most secure container platform -in the industry. For more info about Docker Enterprise, including purchasing -options, see [Docker Enterprise](https://www.docker.com/enterprise-edition/). +Docker Enterprise is designed for enterprise development as well as IT teams who build, share, and run business-critical +applications at scale in production. Docker Enterprise is an integrated container platform that includes +Docker Desktop Enterprise, a secure image registry, advanced management control plane, and Docker Engine - Enterprise. +Docker Engine - Enterprise is a certified and supported container runtime that is also available as a standalone +solution to provide enterprises with the most secure container engine in the industry. For more information +about Docker Enterprise and Docker Engine - Enterprise, including purchasing options, +see [Docker Enterprise](https://www.docker.com/enterprise-edition/). > Compatibility Matrix > @@ -26,20 +27,19 @@ options, see [Docker Enterprise](https://www.docker.com/enterprise-edition/). > for the latest list of supported platforms. {: .important} -## Docker Enterprise tiers +## Docker Enterprise products {% include docker_ee.md %} > Note > -> Starting with Docker Enterprise 2.1, Docker Enterprise --- Basic, Docker Enterprise --- Standard, -> and Docker Enterprise --- Advanced are all now called Docker Enterprise. +> Starting with Docker Enterprise 2.1, Docker Enterprise - Basic is now Docker Engine - Enterprise, +> and both Docker Enterprise - Standard and Docker Enterprise - Advanced are now called Docker Enterprise. ### Docker Enterprise -With Docker Enterprise, you can deploy Docker Engine --- Enterprise -to manage your container workloads in a flexible way. You can manage workloads -on Windows, Linux, on site, or on the cloud. +With Docker Enterprise, you can manage container workloads on Windows, Linux, on site, or on the cloud +in a flexible way. Docker Enterprise has private image management, integrated image signing policies, and cluster management with support for Kubernetes and Swarm orchestrators. It allows you to implement @@ -47,70 +47,60 @@ node-based RBAC policies, image promotion policies, image mirroring, and scan your images for vulnerabilities. It also has support with defined SLAs and extended maintenance cycles for patches for up to 24 months. -### New Licensing for Docker Enterprise +### New licensing for Docker Enterprise -In version 18.09, the Docker Enterprise --- Engine is aware of the license -applied on the system. The license summary is available in the `docker info` -output on standalone or manager nodes. 
+Starting in version 18.09, Docker Enterprise is aware of the license applied on +the system. The license summary is available in the `docker info` output on +standalone or manager nodes. -For EE platform customers, when you license UCP, this same license is applied to -the underlying engines in the cluster. Docker recommends platform customers use -UCP to manage their license. +For Docker Enterprise customers, when you license Universal Control Plane +(UCP), this same license is applied to the underlying engines in the cluster. +Docker recommends that Enterprise customers use UCP to manage their license. -Standalone EE engines can be licensed using `docker engine activate`. +Standalone Docker Engine - Enterprise users can license engines using `docker engine activate`. -Offline activation of standalone EE engines can be performed by downloading the -license and using the command `docker engine activate --license filename.lic`. +Offline activation of standalone enterprise engines can be performed by downloading the license and using the command `docker engine activate --license filename.lic`. -Additionally, Docker is now distributing the CLI as a separate installation -package. This gives Enterprise users the ability to install as many CLI -packages as needed without using the Engine node licenses for client-only -systems. +Additionally, Docker is now distributing the CLI as a separate installation package. This gives Docker Enterprise users the ability to install as many CLI packages as needed without using the Engine node licenses for client-only systems. [Learn more about Docker Enterprise](/ee/index.md). > When using Docker Enterprise -> -> IBM Power is not supported as managers or workers. > Microsoft Windows Server is not supported as a manager. Microsoft Windows > Server 1803 is not supported as a worker. ### Docker Certified Infrastructure -Docker Certified Infrastructure is Docker’s prescriptive approach to deploying -Docker Enterprise on a range of infrastructure choices. Each Docker -Certified Infrastructure includes a reference architecture, automation templates, -and third-party ecosystem solution briefs. - -| Platform | Docker Enterprise Edition | -|:----------------------------------------------------------------------------------------|:-------------------------:| -| [VMware](https://success.docker.com/article/certified-infrastructures-vmware-vsphere) | {{ page.green-check }} | -| [Amazon Web Services](https://success.docker.com/article/certified-infrastructures-aws) | {{ page.green-check }} | -| [Microsoft Azure](https://success.docker.com/article/certified-infrastructures-azure) | {{ page.green-check }} | -| IBM Cloud | Coming soon | +Docker Certified Infrastructure is Docker’s prescriptive approach to deploying Docker Enterprise +on a variety of infrastructures. Each Docker Certified Infrastructure option includes a reference architecture, +a CLI plugin for automated deployment and configuration, and third-party ecosystem solution briefs. + +| Platform | Docker Enterprise support | +|:------------------------------------------|:-------------------------:| +| [Amazon Web Services](../cluster/aws.md) | {{ page.green-check }} | +| VMware | Coming soon | +| Microsoft Azure | Coming soon | ## Docker Enterprise release cycles Each Docker Enterprise release is supported and maintained for 24 months, and receives security and critical bug fixes during this period. -The Docker API version is independent of the Docker platform version.
We -maintain careful API backward compatibility and deprecate APIs and features -slowly and conservatively. We remove features after deprecating them for a -period of three stable releases. Docker 1.13 introduced improved -interoperability between clients and servers using different API versions, -including dynamic feature negotiation. +The Docker API version is independent of the Docker version. We maintain +careful API backward compatibility and deprecate APIs and features slowly and +conservatively. We remove features after deprecating them for a period of +three stable releases. Docker 1.13 introduced improved interoperability +between clients and servers using different API versions, including dynamic +feature negotiation. ## Upgrades and support - -If you're a Docker DDC or CS Engine customer, you don't need to upgrade to -Docker Enterprise to continue to get support. We will continue to support -customers with valid subscriptions whether the subscription covers Docker -Enterprise or Commercially Supported Docker. You can choose to stay with your -current deployed version, or you can upgrade to the latest Docker Enterprise -version. For more info, see [Scope of Coverage and Maintenance Lifecycle](https://success.docker.com/Policies/Scope_of_Support). +Docker supports Docker Enterprise minor releases for 24 months. Upgrades to the +latest minor release of Docker Enterprise are not required, however we +recommend staying on the latest maintenance release of the supported minor +release you are on. Please see [Maintenance +Lifecycle](https://success.docker.com/article/maintenance-lifecycle) for more +details on EOL of minor and major versions of Docker Enterprise. ## Where to go next diff --git a/ee/telemetry.md b/ee/telemetry.md index 1e22397d8b..b891625d5d 100644 --- a/ee/telemetry.md +++ b/ee/telemetry.md @@ -8,11 +8,11 @@ redirect_from: Docker Engine - Enterprise version 17.06 and later includes a telemetry plugin. The plugin is enabled by default on Ubuntu starting with Docker Engine - Enterprise 17.06.0 -and on the rest of the EE-supported Linux distributions starting with version +and on the rest of the Docker Engine - Enterprise supported Linux distributions starting with version 17.06.2-ee-5. The telemetry plugin is not part of Docker Engine - Enterprise for Windows Server. The telemetry plugin sends system information to Docker Inc. Docker uses this -information to improve Docker EE. For details about the telemetry plugin and +information to improve Docker Engine - Enterprise. For details about the telemetry plugin and the types of data it collects, see the [`telemetry` plugin documentation](https://hub.docker.com/community/images/docker/telemetry). @@ -27,7 +27,7 @@ plugin, either using the Docker CLI or using Universal Control Plane. > UCP and CLI > -> If you're using Docker EE Standard or Advanced with Universal Control Plane +> If you're using Docker Engine - Enterprise with Universal Control Plane > (UCP), use UCP to enable and disable metrics. Use the CLI only if you don't > have UCP. UCP re-enables the telemetry plugin for hosts where it was > disabled with the CLI. @@ -35,7 +35,7 @@ plugin, either using the Docker CLI or using Universal Control Plane. ### Use Universal Control Plane -If you use Universal Control Plane with Docker EE, do not use the Docker CLI to +If you use Universal Control Plane with Docker Engine - Enterprise, do not use the Docker CLI to disable the telemetry plugin. 
Instead, you can manage the information sent to Docker by going to **Admin Settings** and choosing **Usage**. diff --git a/ee/ucp/admin/configure/admission-controllers.md b/ee/ucp/admin/configure/admission-controllers.md new file mode 100644 index 0000000000..3d4ab0a076 --- /dev/null +++ b/ee/ucp/admin/configure/admission-controllers.md @@ -0,0 +1,55 @@ +--- +title: Admission controllers +description: Learn about how admission controllers are used in Docker. +keywords: cluster, psp, security +--- + + +This is the current list of admission controllers used by Docker: + +### Default +- [NamespaceLifecycle](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#namespacelifecycle) +- [LimitRanger](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#limitranger) +- [ServiceAccount](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#serviceaccount) +- [PersistentVolumeLabel](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#persistentvolumelabel) +- [DefaultStorageClass](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#defaultstorageclass) +- [DefaultTolerationSeconds](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#defaulttolerationseconds) +- [NodeRestriction](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#noderestriction) +- [ResourceQuota](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#resourcequota) +- [PodNodeSelector](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#podnodeselector) +- [PodSecurityPolicy](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#podsecuritypolicy) + +### Custom +- **UCPAuthorization** +This custom admission controller does several things: + - Annotates Docker Compose-on-Kubernetes `Stack` resources with the identity +of the user performing the request so that the Docker Compose-on-Kubernetes +resource controller can manage `Stacks` with correct user authorization. + - Detects when `ServiceAccount` resources are deleted so that they can be +correctly removed from UCP's Node scheduling authorization backend. + - Simplifies creation of `RoleBindings` and `ClusterRoleBindings` resources by +automatically converting user, organization, and team Subject names into +their corresponding unique identifiers. + - Prevents users from deleting the built-in `cluster-admin` `ClusterRole` or +`ClusterRoleBinding` resources. + - Prevents under-privileged users from creating or updating `PersistentVolume` +resources with host paths. + - Works in conjunction with the built-in `PodSecurityPolicies` admission +controller to prevent under-privileged users from creating `Pods` with +privileged options. +- **CheckImageSigning** +Enforces UCP's Docker Content Trust policy which, if enabled, requires that all +pods use container images which have been digitally signed by trusted and +authorized users who are members of one or more teams in UCP. +- **UCPNodeSelector** +Adds a `com.docker.ucp.orchestrator.kubernetes:*` toleration to pods in the +kube-system namespace and removes `com.docker.ucp.orchestrator.kubernetes` +tolerations from pods in other namespaces. This ensures that user workloads do +not run on swarm-only nodes, which UCP taints with +`com.docker.ucp.orchestrator.kubernetes:NoExecute`.
It also adds a node +affinity to prevent pods from running on manager nodes depending on UCP's +settings. + +**Note:** You cannot enable or disable your own admission controllers. For more information, see [Supportability of custom Kubernetes flags in Universal Control Plane](https://success.docker.com/article/supportability-of-custom-kubernetes-flags-in-universal-control-plane). + +For more information about pod security policies in Docker, see [Pod security policies](/ee/ucp/kubernetes/pod-security-policies.md). diff --git a/ee/ucp/admin/configure/use-your-own-tls-certificates.md b/ee/ucp/admin/configure/use-your-own-tls-certificates.md index 7c4e5d58cf..13e32cd636 100644 --- a/ee/ucp/admin/configure/use-your-own-tls-certificates.md +++ b/ee/ucp/admin/configure/use-your-own-tls-certificates.md @@ -39,21 +39,27 @@ navigate to the **Admin Settings** page. Upload your certificates and keys: -* A `ca.pem` file with the root CA public certificate. +* A `ca.pem` file with the root CA (Certificate Authority) public certificate. * A `cert.pem` file with the TLS certificate for your domain and any intermediate public certificates, in this order. * A `key.pem` file with TLS private key. Make sure it is not encrypted with a password. Encrypted keys should have `ENCRYPTED` in the first line. +After replacing the TLS certificates, your users will not be able to authenticate +with their old client certificate bundles. Ask your users to access the UCP +web UI and [download new client certificate bundles](../../user-access/cli.md). + +As of UCP v3.2, the **Certificates** page includes a new text field, +***Client CA***, that allows you to paste or upload one or more custom root CA certificates which the UCP Controller will use to +verify the authenticity of client certificates issued by your corporate or +trusted third-party CAs. Note that your custom root certificates will be appended to UCP's internal root CA certificates. + Finally, click **Save** for the changes to take effect. -After replacing the TLS certificates, your users won't be able to authenticate -with their old client certificate bundles. Ask your users to go to the UCP -web UI and [get new client certificate bundles](../../user-access/cli.md). If you deployed Docker Trusted Registry, you'll also need to reconfigure it to trust the new UCP TLS certificates. -[Learn how to configure DTR](/reference/dtr/2.6/cli/reconfigure.md). +[Learn how to configure DTR](/reference/dtr/2.7/cli/reconfigure.md). ## Where to go next diff --git a/ee/ucp/admin/install/install-on-aws.md b/ee/ucp/admin/install/cloudproviders/install-on-aws.md similarity index 85% rename from ee/ucp/admin/install/install-on-aws.md rename to ee/ucp/admin/install/cloudproviders/install-on-aws.md index b0b243da52..9b1e8af002 100644 --- a/ee/ucp/admin/install/install-on-aws.md +++ b/ee/ucp/admin/install/cloudproviders/install-on-aws.md @@ -4,6 +4,14 @@ description: Learn how to install Docker Universal Control Plane in an Amazon We keywords: Universal Control Plane, UCP, install, Docker EE, AWS, Kubernetes --- +Docker Universal Control Plane (UCP) can be installed on top of AWS without any +customization by following the UCP [install documentation](./install/); this +document is therefore **optional**. However, if you are deploying Kubernetes +workloads with UCP and want to leverage the [AWS Kubernetes cloud +provider](https://github.com/kubernetes/cloud-provider-aws), which provides +dynamic volume and load balancer provisioning, you should follow this guide.
+This guide is not required if you are only deploying swarm workloads. + The requirements for installing UCP on AWS are included in the following sections: ## Instances diff --git a/ee/ucp/admin/install/install-on-azure.md b/ee/ucp/admin/install/cloudproviders/install-on-azure.md similarity index 99% rename from ee/ucp/admin/install/install-on-azure.md rename to ee/ucp/admin/install/cloudproviders/install-on-azure.md index 3bd53ee017..0801661a12 100644 --- a/ee/ucp/admin/install/install-on-azure.md +++ b/ee/ucp/admin/install/cloudproviders/install-on-azure.md @@ -2,6 +2,8 @@ title: Install UCP on Azure description: Learn how to install Docker Universal Control Plane in a Microsoft Azure environment. keywords: Universal Control Plane, UCP, install, Docker EE, Azure, Kubernetes +redirect_from: +- /ee/ucp/admin/install/install-on-azure/ --- Docker Universal Control Plane (UCP) closely integrates with Microsoft Azure for its Kubernetes Networking diff --git a/ee/ucp/admin/install/index.md b/ee/ucp/admin/install/index.md index b8f290e34e..a215c86ee8 100644 --- a/ee/ucp/admin/install/index.md +++ b/ee/ucp/admin/install/index.md @@ -14,6 +14,14 @@ of the [requirements UCP needs to run](system-requirements.md). Also, you need to ensure that all nodes, physical and virtual, are running the same version of Docker Enterprise. +> Cloud Providers +> +> If you are installing on a public cloud platform, there is cloud-specific UCP +> installation documentation. For [Microsoft +> Azure](./cloudproviders/install-on-azure/) this is **mandatory**; for +> [AWS](./cloudproviders/install-on-aws/) it is optional. +{: .important} + ## Step 2: Install Docker Enterprise on all nodes UCP is a containerized application that requires the commercially supported @@ -83,11 +91,6 @@ To install UCP: >[Install an unmanaged CNI plugin](/ee/ucp/kubernetes/install-cni-plugin/). {: .important} -3. Turn off scheduling on UCP managers and DTR nodes since it is on by default. Workloads cannot be run on manager nodes. -Make sure all options shown in the following screen shot are unchecked: - - ![Scheduling options](../../images/scheduling-options.png){: .with-border} - ## Step 5: License your installation Now that UCP is installed, you need to license it. To use UCP you are required to have a Docker Enterprise standard or advanced subscription, or you can test the platform with a free trial license. diff --git a/ee/ucp/admin/install/install-offline.md b/ee/ucp/admin/install/install-offline.md index 40a0fbc209..a8975f3904 100644 --- a/ee/ucp/admin/install/install-offline.md +++ b/ee/ucp/admin/install/install-offline.md @@ -27,7 +27,7 @@ installation will fail. Use a computer with internet access to download the UCP package from the following links. -{% include components/ddc_url_list_2.html product="ucp" version="3.1" %} +{% include components/ddc_url_list_2.html product="ucp" version="3.2" %} ## Download the offline package diff --git a/ee/ucp/admin/install/upgrade-offline.md b/ee/ucp/admin/install/upgrade-offline.md index 660a3e1085..7bf5953a1e 100644 --- a/ee/ucp/admin/install/upgrade-offline.md +++ b/ee/ucp/admin/install/upgrade-offline.md @@ -17,7 +17,7 @@ copy this package to the host where you upgrade UCP. Use a computer with internet access to download the UCP package from the following links.
-{% include components/ddc_url_list_2.html product="ucp" version="3.1" %}
+{% include components/ddc_url_list_2.html product="ucp" version="3.2" %}
 ## Download the offline package
diff --git a/ee/ucp/images/cli-based-access-1.png b/ee/ucp/images/cli-based-access-1.png
index 57452b631a..d9ca551a6e 100644
Binary files a/ee/ucp/images/cli-based-access-1.png and b/ee/ucp/images/cli-based-access-1.png differ
diff --git a/ee/ucp/images/cli-based-access-2.png b/ee/ucp/images/cli-based-access-2.png
index c4067603d9..b9d0d4ba51 100644
Binary files a/ee/ucp/images/cli-based-access-2.png and b/ee/ucp/images/cli-based-access-2.png differ
diff --git a/ee/ucp/images/cli-based-access-3.png b/ee/ucp/images/cli-based-access-3.png
index 5d274e7207..74ad674050 100644
Binary files a/ee/ucp/images/cli-based-access-3.png and b/ee/ucp/images/cli-based-access-3.png differ
diff --git a/ee/ucp/images/scheduling-options.png b/ee/ucp/images/scheduling-options.png
deleted file mode 100644
index 5f84a0ae53..0000000000
Binary files a/ee/ucp/images/scheduling-options.png and /dev/null differ
diff --git a/ee/ucp/images/use-externally-signed-certs-2.png b/ee/ucp/images/use-externally-signed-certs-2.png
index b08d65659b..a4dda2cb34 100644
Binary files a/ee/ucp/images/use-externally-signed-certs-2.png and b/ee/ucp/images/use-externally-signed-certs-2.png differ
diff --git a/ee/ucp/images/web-based-access-1.png b/ee/ucp/images/web-based-access-1.png
index fb7304147d..c6a21b9a5c 100644
Binary files a/ee/ucp/images/web-based-access-1.png and b/ee/ucp/images/web-based-access-1.png differ
diff --git a/ee/ucp/images/web-based-access-2.png b/ee/ucp/images/web-based-access-2.png
index 00437d1c22..abcffe76f6 100644
Binary files a/ee/ucp/images/web-based-access-2.png and b/ee/ucp/images/web-based-access-2.png differ
diff --git a/ee/ucp/kubernetes/cluster-ingress/canary.md b/ee/ucp/kubernetes/cluster-ingress/canary.md
new file mode 100644
index 0000000000..045da3a748
--- /dev/null
+++ b/ee/ucp/kubernetes/cluster-ingress/canary.md
@@ -0,0 +1,114 @@
+---
+title: Deploy a Sample Application with a Canary release (Experimental)
+description: Stage a canary release using weight-based load balancing between multiple backend applications.
+keywords: ucp, cluster, ingress, kubernetes
+---
+
+{% include experimental-feature.md %}
+
+# Deploy a Sample Application with a Canary release
+
+This example stages a canary release using weight-based load balancing between
+multiple backend applications.
+
+> **Note**: This guide assumes the [Deploy Sample Application](./ingress/)
+> tutorial was followed, with the artifacts still running on the cluster. If
+> they are not, please go back and follow that guide first.
+
+The following traffic split is used for this tutorial:
+- 80% of client traffic is sent to the production v1 service.
+- 20% of client traffic is sent to the staging v2 service.
+- All test traffic using the header `stage=dev` is sent to the v3 service.
+
+A new Kubernetes manifest file with updated ingress rules can be found
+[here](./yaml/ingress-weighted.yaml).
+
+ 1) Source a [UCP Client Bundle](/ee/ucp/user-access/cli/) attached to a
+    cluster with Cluster Ingress installed.
+
+ 2) Download the sample Kubernetes manifest file
+
+ ```
+ $ wget https://github.com/docker/docker.github.io/tree/master/ee/ucp/kubernetes/cluster-ingress/yaml/ingress-weighted.yaml
+ ```
+
+ 3) Deploy the Kubernetes manifest file
+
+ ```bash
+ $ kubectl apply -f ingress-weighted.yaml
+
+ $ kubectl describe vs
+ Hosts:
+   demo.example.com
+ Http:
+   Match:
+     Headers:
+       Stage:
+         Exact: dev
+   Route:
+     Destination:
+       Host: demo-service
+       Port:
+         Number: 8080
+       Subset: v3
+   Route:
+     Destination:
+       Host: demo-service
+       Port:
+         Number: 8080
+       Subset: v1
+     Weight: 80
+     Destination:
+       Host: demo-service
+       Port:
+         Number: 8080
+       Subset: v2
+     Weight: 20
+ ```
+
+This virtual service performs the following actions:
+
+ - Receives all traffic with `host=demo.example.com`.
+ - If an exact match for HTTP header `stage=dev` is found, traffic is routed
+   to v3.
+ - All other traffic is routed to v1 and v2 in an 80:20 ratio.
+
+Now we can send traffic to the application to see the load balancing in
+action.
+
+```bash
+# Public IP Address of a Worker or Manager VM in the Cluster
+$ IPADDR=51.141.127.241
+
+# Node Port
+$ PORT=$(kubectl get service demo-service --output jsonpath='{.spec.ports[?(@.name=="http")].nodePort}')
+
+$ for i in {1..5}; do curl -H "Host: demo.example.com" http://$IPADDR:$PORT/ping; done
+{"instance":"demo-v1-7797b7c7c8-5vts2","version":"v1","metadata":"production","request_id":"d0671d32-48e7-41f7-a358-ddd7b47bba5f"}
+{"instance":"demo-v2-6c5b4c6f76-c6zhm","version":"v2","metadata":"staging","request_id":"ba6dcfd6-f62a-4c68-9dd2-b242179959e0"}
+{"instance":"demo-v1-7797b7c7c8-kw6gp","version":"v1","metadata":"production","request_id":"d87601c0-7935-4cfc-842c-37910e6cd573"}
+{"instance":"demo-v1-7797b7c7c8-5vts2","version":"v1","metadata":"production","request_id":"4c71ffab-8657-4d99-87b3-7a6933258990"}
+{"instance":"demo-v1-7797b7c7c8-gfwzj","version":"v1","metadata":"production","request_id":"c404471c-cc85-497e-9e5e-7bb666f4f309"}
+```
+
+The split between v1 and v2 corresponds to the specified criteria. Within the
+v1 service, requests are load-balanced across the three backend replicas. No
+requests reach v3.
+
+To send traffic to the third service, add the HTTP header `stage=dev`.
+
+```bash
+for i in {1..5}; do curl -H "Host: demo.example.com" -H "Stage: dev" http://$IPADDR:$PORT/ping; done
+{"instance":"demo-v3-d88dddb74-9k7qg","version":"v3","metadata":"dev","request_id":"52d7afe7-befb-4e17-a49c-ee63b96d0daf"}
+{"instance":"demo-v3-d88dddb74-9k7qg","version":"v3","metadata":"dev","request_id":"b2e664d2-5224-44b1-98d9-90b090578423"}
+{"instance":"demo-v3-d88dddb74-9k7qg","version":"v3","metadata":"dev","request_id":"5446c78e-8a77-4f7e-bf6a-63184db5350f"}
+{"instance":"demo-v3-d88dddb74-9k7qg","version":"v3","metadata":"dev","request_id":"657553c5-bc73-4a13-b320-f78f7e6c7457"}
+{"instance":"demo-v3-d88dddb74-9k7qg","version":"v3","metadata":"dev","request_id":"bae52f09-0510-42d9-aec0-ca6bbbaae168"}
+```
+
+In this case, 100% of the traffic with the stage=dev header is sent to the v3
+service.
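+
+Promoting the canary is just a matter of re-weighting the routes in the same
+VirtualService and re-applying it. A minimal sketch, assuming the
+`demo-service` subsets from this tutorial; the 50/50 split is an arbitrary
+example, and the header-based v3 rule is omitted for brevity:
+
+```bash
+# Sketch only: shift the v1/v2 split to 50/50 by re-applying the
+# VirtualService with new weights. The subsets come from the DestinationRule
+# deployed earlier in this tutorial.
+kubectl apply -f - <<EOF
+apiVersion: networking.istio.io/v1alpha3
+kind: VirtualService
+metadata:
+  name: demo-vs
+spec:
+  hosts:
+  - "demo.example.com"
+  gateways:
+  - cluster-gateway
+  http:
+  - route:
+    - destination:
+        host: demo-service
+        subset: v1
+        port:
+          number: 8080
+      weight: 50
+    - destination:
+        host: demo-service
+        subset: v2
+        port:
+          number: 8080
+      weight: 50
+EOF
+```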
+
+## Where to go next
+
+- [Deploy the Sample Application with Sticky Sessions](./sticky/)
\ No newline at end of file
diff --git a/ee/ucp/kubernetes/cluster-ingress/index.md b/ee/ucp/kubernetes/cluster-ingress/index.md
new file mode 100644
index 0000000000..5f40fcbce4
--- /dev/null
+++ b/ee/ucp/kubernetes/cluster-ingress/index.md
@@ -0,0 +1,36 @@
+---
+title: Kubernetes Cluster Ingress (Experimental)
+description: Learn about Ingress host and path routing for Kubernetes applications.
+keywords: ucp, cluster, ingress, kubernetes
+redirect_from:
+  - /ee/ucp/kubernetes/layer-7-routing/
+---
+
+{% include experimental-feature.md %}
+
+## Cluster Ingress capabilities
+
+Cluster Ingress provides layer 7 services to traffic entering a Docker Enterprise cluster for a variety of use cases, helping provide application resilience, security, and observability. Ingress provides dynamic control of L7 routing in a highly available architecture that is also highly performant.
+
+UCP's Ingress for Kubernetes is based on the [Istio](https://istio.io/) control-plane and is a simplified deployment focused on providing just ingress services with minimal complexity. This includes features such as:
+
+- L7 host and path routing
+- Complex path matching and redirection rules
+- Weight-based load balancing
+- TLS termination
+- Persistent L7 sessions
+- Hot config reloads
+- Redundant and highly available design
+
+For a detailed look at Istio Ingress architecture, refer to the [Istio Ingress docs](https://istio.io/docs/tasks/traffic-management/ingress/).
+
+To get started with UCP Ingress, the following help topics are provided:
+
+- [Install Cluster Ingress onto a UCP Cluster](./install/)
+- [Deploy a Sample Application with Ingress Rules](./ingress)
+- [Deploy a Sample Application with a Canary release](./canary/)
+- [Deploy a Sample Application with Sticky Sessions](./sticky/)
+
+## Where to go next
+
+- [Install Cluster Ingress onto a UCP Cluster](./install/)
\ No newline at end of file
diff --git a/ee/ucp/kubernetes/cluster-ingress/ingress.md b/ee/ucp/kubernetes/cluster-ingress/ingress.md
new file mode 100644
index 0000000000..44c2012851
--- /dev/null
+++ b/ee/ucp/kubernetes/cluster-ingress/ingress.md
@@ -0,0 +1,168 @@
+---
+title: Deploy a Sample Application with Ingress (Experimental)
+description: Learn how to deploy Ingress rules for Kubernetes applications.
+keywords: ucp, cluster, ingress, kubernetes
+---
+
+{% include experimental-feature.md %}
+
+# Deploy a Sample Application with Ingress
+
+Cluster Ingress is capable of routing based on many HTTP attributes, but most
+commonly the HTTP host and path. The following example shows the basics of
+deploying Ingress rules for a Kubernetes application. An example application is
+deployed from this [deployment manifest](./yaml/demo-app.yaml) and L7 Ingress
+rules are applied.
+
+## Deploy a Sample Application
+
+In this example, three different versions of the docker-demo application are
+deployed. The docker-demo application can display the container hostname,
+environment variables, or labels in its HTTP responses, which makes it a good
+sample application for an Ingress controller. (A quick way to try its response
+format locally is sketched after the following list.)
+
+The three versions of the sample application are:
+
+- v1: a production version with 3 replicas running.
+- v2: a staging version with a single replica running.
+- v3: a development version, also with a single replica.
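+
+A minimal local sketch, assuming Docker is available on your workstation; the
+environment values mirror the v1 deployment manifest:
+
+```bash
+# Sketch only: run the demo image locally and inspect the /ping response that
+# the rest of this tutorial keys off. VERSION and METADATA mirror the v1
+# deployment; the container listens on port 8080 as in the manifest.
+docker run -d --name demo -p 8080:8080 \
+  -e VERSION=v1 -e METADATA=production ehazlett/docker-demo
+
+curl http://localhost:8080/ping
+# Expected shape: {"instance":"...","version":"v1","metadata":"production"}
+
+docker rm -f demo
+```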
+
+An example Kubernetes manifest file containing all three deployments can be found [here](./yaml/demo-app.yaml).
+
+ 1) Source a [UCP Client Bundle](/ee/ucp/user-access/cli/) attached to a
+    cluster with Cluster Ingress installed.
+
+ 2) Download the sample Kubernetes manifest file
+
+ ```
+ $ wget https://github.com/docker/docker.github.io/tree/master/ee/ucp/kubernetes/cluster-ingress/yaml/demo-app.yaml
+ ```
+
+ 3) Deploy the sample Kubernetes manifest file
+
+ ```bash
+ $ kubectl apply -f demo-app.yaml
+ ```
+
+ 4) Verify the sample applications are running
+
+ ```bash
+ $ kubectl get pods -n default
+ NAME                       READY     STATUS    RESTARTS   AGE
+ demo-v1-7797b7c7c8-5vts2   1/1       Running   0          3h
+ demo-v1-7797b7c7c8-gfwzj   1/1       Running   0          3h
+ demo-v1-7797b7c7c8-kw6gp   1/1       Running   0          3h
+ demo-v2-6c5b4c6f76-c6zhm   1/1       Running   0          3h
+ demo-v3-d88dddb74-9k7qg    1/1       Running   0          3h
+
+ $ kubectl get services -o wide
+ NAME           TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)          AGE   SELECTOR
+ demo-service   NodePort    10.96.97.215   8080:33383/TCP   3h   app=demo
+ kubernetes     ClusterIP   10.96.0.1      443/TCP          1d
+ ```
+
+This first part of the tutorial deployed the pods and a Kubernetes service.
+Using Kubernetes NodePorts, these pods can be accessed outside of the Cluster
+Ingress. This illustrates the standard L4 load balancing that a Kubernetes
+service applies.
+
+```bash
+# Public IP Address of a Worker or Manager VM in the Cluster
+$ IPADDR=51.141.127.241
+
+# Node Port
+$ PORT=$(kubectl get service demo-service --output jsonpath='{.spec.ports[?(@.name=="http")].nodePort}')
+
+# Send traffic directly to the NodePort to bypass L7 Ingress
+$ for i in {1..5}; do curl http://$IPADDR:$PORT/ping; done
+{"instance":"demo-v3-d88dddb74-9k7qg","version":"v3","metadata":"dev"}
+{"instance":"demo-v3-d88dddb74-9k7qg","version":"v3","metadata":"dev"}
+{"instance":"demo-v2-6c5b4c6f76-c6zhm","version":"v2","metadata":"staging"}
+{"instance":"demo-v1-7797b7c7c8-gfwzj","version":"v1","metadata":"production"}
+{"instance":"demo-v1-7797b7c7c8-gfwzj","version":"v1","metadata":"production"}
+```
+
+The L4 load balancing distributes requests across all replicas of a service,
+so the traffic split simply reflects how many replicas each version has. More
+complex scenarios need load balancing logic that is decoupled from the number
+of backend instances, which is what the Ingress rules below provide.
+
+## Apply Ingress rules to Sample Application
+
+To leverage the Cluster Ingress for the sample application, three custom
+resource types need to be deployed:
+
+ - Gateway
+ - VirtualService
+ - DestinationRule
+
+For the sample application, an example manifest file with all three objects defined is [here](./yaml/ingress-simple.yaml).
+
+ 1) Source a [UCP Client Bundle](/ee/ucp/user-access/cli/) attached to a
+    cluster with Cluster Ingress installed.
+
+ 2) Download the sample Kubernetes manifest file
+
+ ```
+ $ wget https://github.com/docker/docker.github.io/tree/master/ee/ucp/kubernetes/cluster-ingress/yaml/ingress-simple.yaml
+ ```
+
+ 3) Deploy the sample Kubernetes manifest file
+
+ ```bash
+ $ kubectl apply -f ingress-simple.yaml
+
+ $ kubectl describe virtualservice demo-vs
+ ...
+ Spec:
+   Gateways:
+     cluster-gateway
+   Hosts:
+     demo.example.com
+   Http:
+     Match:
+     Route:
+       Destination:
+         Host: demo-service
+         Port:
+           Number: 8080
+         Subset: v1
+ ```
+
+This configuration matches all traffic with `demo.example.com` and sends it to
+the backend version=v1 deployment, regardless of the number of replicas in
+the backend.
+
+Curl the service again using the port of the Ingress gateway.
Because DNS is
+not set up, use the `--header` flag from curl to manually set the host header.
+
+```bash
+# Find the Cluster Ingress Node Port
+$ PORT=$(kubectl get service -n istio-system istio-ingressgateway --output jsonpath='{.spec.ports[?(@.name=="http2")].nodePort}')
+
+# Public IP Address of a Worker or Manager VM in the Cluster
+$ IPADDR=51.141.127.241
+
+$ for i in {1..5}; do curl --header "Host: demo.example.com" http://$IPADDR:$PORT/ping; done
+{"instance":"demo-v1-7797b7c7c8-5vts2","version":"v1","metadata":"production","request_id":"2558fdd1-0cbd-4ba9-b104-0d4d0b1cef85"}
+{"instance":"demo-v1-7797b7c7c8-kw6gp","version":"v1","metadata":"production","request_id":"59f865f5-15fb-4f49-900e-40ab0c44c9e4"}
+{"instance":"demo-v1-7797b7c7c8-5vts2","version":"v1","metadata":"production","request_id":"fe233ca3-838b-4670-b6a0-3a02cdb91624"}
+{"instance":"demo-v1-7797b7c7c8-5vts2","version":"v1","metadata":"production","request_id":"842b8d03-8f8a-4b4b-b7f4-543f080c3097"}
+{"instance":"demo-v1-7797b7c7c8-kw6gp","version":"v1","metadata":"production","request_id":"197cbb1d-5381-4e40-bc6f-cccec22eccbc"}
+```
+
+To have SNI (Server Name Indication) work with TLS services, use curl's
+`--resolve` flag, which takes a `host:port:address` mapping.
+
+```bash
+$ curl --resolve demo.example.com:$PORT:$IPADDR http://demo.example.com:$PORT/ping
+```
+
+In this instance, the three backend v1 replicas are load balanced and no
+requests are sent to the other versions.
+
+## Where to go next
+
+- [Deploy a Sample Application with a Canary release](./canary/)
+- [Deploy a Sample Application with Sticky Sessions](./sticky/)
\ No newline at end of file
diff --git a/ee/ucp/kubernetes/cluster-ingress/install.md b/ee/ucp/kubernetes/cluster-ingress/install.md
new file mode 100644
index 0000000000..9a406e947c
--- /dev/null
+++ b/ee/ucp/kubernetes/cluster-ingress/install.md
@@ -0,0 +1,132 @@
+---
+title: Install Cluster Ingress (Experimental)
+description: Learn how to deploy ingress rules using Kubernetes manifests.
+keywords: ucp, cluster, ingress, kubernetes
+---
+
+{% include experimental-feature.md %}
+
+# Install Cluster Ingress
+
+Cluster Ingress for Kubernetes is currently deployed manually outside of UCP.
+Future plans for UCP include managing the full lifecycle of the Ingress
+components themselves. This guide describes how to manually deploy Ingress using
+Kubernetes deployment manifests.
+
+## Offline Installation
+
+If you are installing Cluster Ingress on a UCP cluster that does not have access
+to the Docker Hub, you will need to pre-pull the Ingress container images. If
+your cluster has access to the Docker Hub, you can move on to [deploying cluster
+ingress](#deploy-cluster-ingress).
+
+Without access to the Docker Hub, you will need to download the container images
+on a workstation with access to the internet. Container images are distributed
+as a `.tar.gz` archive and can be downloaded at the following
+[URL](https://s3.amazonaws.com/docker-istio/istio-ingress-1.1.2.tgz).
+
+Once the container images have been downloaded, they need to be copied onto
+the hosts in your UCP cluster and side-loaded in Docker.
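+
+A minimal sketch of distributing the archive, assuming SSH access to each
+node; the node names are hypothetical:
+
+```bash
+# Sketch only: copy the image archive to every node over SSH before loading it.
+# Substitute your own node inventory for these hypothetical hostnames.
+for node in ucp-manager-1 ucp-worker-1 ucp-worker-2; do
+  scp istio-ingress-1.1.2.tgz "$node":/tmp/
+done
+```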
+Images can be side-loaded with:
+
+```bash
+$ docker load -i istio-ingress-1.1.2.tgz
+```
+
+The images should now be present on your nodes:
+
+```bash
+$ docker images
+REPOSITORY              TAG     IMAGE ID       CREATED      SIZE
+docker/node-agent-k8s   1.1.2   4ddd06d05d5d   6 days ago   243MB
+docker/proxy_init       1.1.2   ff9628f32621   6 days ago   145MB
+docker/proxyv2          1.1.2   bebabbe114a4   6 days ago   360MB
+docker/pilot            1.1.2   58b6e18f3545   6 days ago   299MB
+```
+
+## Deploy Cluster Ingress
+
+This step deploys the Ingress controller components `istio-pilot` and
+`istio-ingressgateway`. Together, these components act as the control-plane and
+data-plane for ingress traffic. These components are a simplified deployment of
+Istio cluster Ingress functionality. Several custom Kubernetes resources
+(CRDs) are also created to support the Ingress functionality.
+
+> **Note**: This does not deploy the service mesh capabilities of Istio, as its
+> function in UCP is for Ingress only.
+
+As Cluster Ingress is not built into UCP in this release, a Cluster Admin will
+need to manually download and apply the following Kubernetes manifest
+[file](https://s3.amazonaws.com/docker-istio/istio-ingress-1.1.2.yaml).
+
+1) Download the Kubernetes manifest YAML
+
+```bash
+$ wget https://s3.amazonaws.com/docker-istio/istio-ingress-1.1.2.yaml
+```
+2) Source a [UCP Client Bundle](/ee/ucp/user-access/cli/)
+
+3) Deploy the Kubernetes manifest file
+
+```bash
+$ kubectl apply -f istio-ingress-1.1.2.yaml
+```
+
+4) Check that the installation completed successfully. It may take a minute
+   or two for all pods to become ready.
+
+```bash
+$ kubectl get pods -n istio-system -o wide
+NAME                                    READY   STATUS    RESTARTS   AGE   IP           NODE         NOMINATED NODE   READINESS GATES
+istio-ingressgateway-747bc6b4cb-fkt6k   2/2     Running   0          44s   172.0.1.23   manager-02
+istio-ingressgateway-747bc6b4cb-gr8f7   2/2     Running   0          61s   172.0.1.25   manager-02
+istio-pilot-7b74c7568b-ntbjd            1/1     Running   0          61s   172.0.1.22   manager-02
+istio-pilot-7b74c7568b-p5skc            1/1     Running   0          44s   172.0.1.24   manager-02
+
+$ kubectl get services -n istio-system -o wide
+NAME                   TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                                                                                       AGE   SELECTOR
+istio-ingressgateway   NodePort    10.96.32.197    80:33000/TCP,443:33001/TCP,31400:33002/TCP,15030:34420/TCP,15443:34368/TCP,15020:34300/TCP   86s   app=istio-ingressgateway,istio=ingressgateway,release=istio
+istio-pilot            ClusterIP   10.96.199.152   15010/TCP,15011/TCP,8080/TCP,15014/TCP                                                        85s   istio=pilot
+```
+
+5) Test the Ingress Deployment
+
+To test that the Envoy proxy is working correctly in the Istio Gateway pods,
+there is a status port configured on internal port 15020. From the output
+above, we can see that port 15020 is exposed as a Kubernetes NodePort; in this
+example the NodePort is 34300, but this could be different in each
+environment.
+
+To check the Envoy proxy's status, there is a health endpoint at
+`/healthz/ready`.
+
+```bash
+# Node Port
+$ PORT=$(kubectl get service -n istio-system istio-ingressgateway --output jsonpath='{.spec.ports[?(@.name=="status-port")].nodePort}')
+
+# Public IP Address of a Worker or Manager VM in the Cluster
+$ IPADDR=51.141.127.241
+
+# Use curl to check the status port is available
+$ curl -vvv http://$IPADDR:$PORT/healthz/ready
+* Trying 51.141.127.241...
+* TCP_NODELAY set
+* Connected to 51.141.127.241 (51.141.127.241) port 34300 (#0)
+> GET /healthz/ready HTTP/1.1
+> Host: 51.141.127.241:34300
+> User-Agent: curl/7.58.0
+> Accept: */*
+>
+< HTTP/1.1 200 OK
+< Date: Wed, 19 Jun 2019 13:31:53 GMT
+< Content-Length: 0
+<
+* Connection #0 to host 51.141.127.241 left intact
+```
+
+If the output is `HTTP/1.1 200 OK`, Envoy is running correctly and ready to
+serve applications.
+
+## Where to go next
+
+- [Deploy a Sample Application](./ingress/)
\ No newline at end of file
diff --git a/ee/ucp/kubernetes/cluster-ingress/sticky.md b/ee/ucp/kubernetes/cluster-ingress/sticky.md
new file mode 100644
index 0000000000..1e84545a24
--- /dev/null
+++ b/ee/ucp/kubernetes/cluster-ingress/sticky.md
@@ -0,0 +1,102 @@
+---
+title: Deploy a Sample Application with Sticky Sessions (Experimental)
+description: Learn how to use cookies with Ingress host and path routing.
+keywords: ucp, cluster, ingress, kubernetes
+---
+
+{% include experimental-feature.md %}
+
+# Deploy a Sample Application with Sticky Sessions
+
+With persistent sessions, the Ingress controller can use a predetermined header
+or dynamically generate an HTTP cookie for a client session to use, so that a
+client's requests are always sent to the same backend.
+
+> **Note**: This guide assumes the [Deploy Sample Application](./ingress/)
+> tutorial was followed, with the artifacts still running on the cluster. If
+> they are not, please go back and follow that guide first.
+
+This is specified within the Istio object `DestinationRule` via a
+`TrafficPolicy` for a given host. In the following example configuration,
+consistentHash is chosen as the load balancing method and a cookie named
+“session” is used to determine the consistent hash. If incoming requests do not
+have the “session” cookie set, the Ingress proxy sets it for use in future
+requests.
+
+A Kubernetes manifest file with an updated DestinationRule can be found [here](./yaml/ingress-sticky.yaml).
+
+ 1) Source a [UCP Client Bundle](/ee/ucp/user-access/cli/) attached to a
+    cluster with Cluster Ingress installed.
+
+ 2) Download the sample Kubernetes manifest file
+
+ ```
+ $ wget https://github.com/docker/docker.github.io/tree/master/ee/ucp/kubernetes/cluster-ingress/yaml/ingress-sticky.yaml
+ ```
+
+ 3) Deploy the Kubernetes manifest file with the new DestinationRule, which
+    has the consistentHash loadBalancer policy set.
+
+ ```bash
+ $ kubectl apply -f ingress-sticky.yaml
+ ```
+
+ 4) Curl the service to view how requests are load balanced without using
+    cookies. In this example, requests are bounced between the different v1
+    replicas.
+
+ ```bash
+ # Public IP Address of a Worker or Manager VM in the Cluster
+ $ IPADDR=51.141.127.241
+
+ # Node Port
+ $ PORT=$(kubectl get service demo-service --output jsonpath='{.spec.ports[?(@.name=="http")].nodePort}')
+
+ $ for i in {1..5}; do curl -H "Host: demo.example.com" http://$IPADDR:$PORT/ping; done
+ {"instance":"demo-v1-7797b7c7c8-gfwzj","version":"v1","metadata":"production","request_id":"b40a0294-2629-413b-b876-76b59d72189b"}
+ {"instance":"demo-v1-7797b7c7c8-kw6gp","version":"v1","metadata":"production","request_id":"721fe4ba-a785-484a-bba0-627ee6e47188"}
+ {"instance":"demo-v1-7797b7c7c8-gfwzj","version":"v1","metadata":"production","request_id":"77ed801b-81aa-4c02-8cc9-7e3bd3244807"}
+ {"instance":"demo-v1-7797b7c7c8-gfwzj","version":"v1","metadata":"production","request_id":"36d8aaed-fcdf-4489-a85e-76ea96949d6c"}
+ {"instance":"demo-v1-7797b7c7c8-kw6gp","version":"v1","metadata":"production","request_id":"4693b6ad-286b-4470-9eea-c8656f6801ae"}
+ ```
+
+ 5) Curl again and inspect the headers returned from the proxy.
+
+ ```bash
+ $ curl -i -H "Host: demo.example.com" http://$IPADDR:$PORT/ping
+ HTTP/1.1 200 OK
+ set-cookie: session=1555389679134464956; Path=/; Expires=Wed, 17 Apr 2019 04:41:19 GMT; Max-Age=86400
+ date: Tue, 16 Apr 2019 04:41:18 GMT
+ content-length: 131
+ content-type: text/plain; charset=utf-8
+ x-envoy-upstream-service-time: 0
+ set-cookie: session="d7227d32eeb0524b"; Max-Age=60; HttpOnly
+ server: istio-envoy
+
+ {"instance":"demo-v1-7797b7c7c8-kw6gp","version":"v1","metadata":"production","request_id":"011d5fdf-2285-4ce7-8644-c2df6481c584"}
+ ```
+
+ The Ingress proxy sets a 60-second TTL cookie named “session” on this HTTP
+ response. A browser or other client application can use that value in future
+ requests.
+
+ 6) Curl the service again using the flags that save cookies persistently
+    across requests (`-c` writes the cookie jar and `-b` sends it). The header
+    information shows the session is being set and persisted across requests,
+    and that for a given session cookie, the responses come from the same
+    backend.
+
+ ```bash
+ $ for i in {1..5}; do curl -c cookie.txt -b cookie.txt -H "Host: demo.example.com" http://$IPADDR:$PORT/ping; done
+ {"instance":"demo-v1-7797b7c7c8-kw6gp","version":"v1","metadata":"production","request_id":"72b35296-d6bd-462a-9e62-0bd0249923d7"}
+ {"instance":"demo-v1-7797b7c7c8-kw6gp","version":"v1","metadata":"production","request_id":"c8872f6c-f77c-4411-aed2-d7aa6d1d92e9"}
+ {"instance":"demo-v1-7797b7c7c8-kw6gp","version":"v1","metadata":"production","request_id":"0e7b8725-c550-4923-acea-db94df1eb0e4"}
+ {"instance":"demo-v1-7797b7c7c8-kw6gp","version":"v1","metadata":"production","request_id":"9996fe77-8260-4225-89df-0eaf7581e961"}
+ {"instance":"demo-v1-7797b7c7c8-kw6gp","version":"v1","metadata":"production","request_id":"d35c380e-31d6-44ce-a5d0-f9f6179715ab"}
+ ```
+
+ When the HTTP client uses the cookie set by the Ingress proxy, all requests
+ are sent to the same backend, demo-v1-7797b7c7c8-kw6gp.
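+
+The consistent hash does not have to come from a proxy-generated cookie; the
+`DestinationRule` can also hash on an existing request header. A minimal
+sketch, assuming clients already send an `x-user` header (the header name is
+an arbitrary example, not part of the tutorial manifests):
+
+```bash
+# Sketch only: derive session affinity from a client-supplied header instead
+# of an injected cookie. "x-user" is an arbitrary example header name.
+kubectl apply -f - <<EOF
+apiVersion: networking.istio.io/v1alpha3
+kind: DestinationRule
+metadata:
+  name: demo-destinationrule
+spec:
+  host: demo-service
+  subsets:
+  - name: v1
+    labels:
+      version: v1
+    trafficPolicy:
+      loadBalancer:
+        consistentHash:
+          httpHeaderName: x-user
+EOF
+```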
+
+## Where to go next
+
+- [Cluster Ingress Overview](./)
diff --git a/ee/ucp/kubernetes/cluster-ingress/yaml/demo-app.yaml b/ee/ucp/kubernetes/cluster-ingress/yaml/demo-app.yaml
new file mode 100644
index 0000000000..9238325420
--- /dev/null
+++ b/ee/ucp/kubernetes/cluster-ingress/yaml/demo-app.yaml
@@ -0,0 +1,106 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: demo-service
+  labels:
+    app: demo
+spec:
+  type: NodePort
+  ports:
+  - port: 8080
+    name: http
+  selector:
+    app: demo
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: demo-v1
+  labels:
+    app: demo
+    version: v1
+spec:
+  replicas: 3
+  template:
+    metadata:
+      labels:
+        app: demo
+        version: v1
+    spec:
+      containers:
+      - name: webserver
+        image: ehazlett/docker-demo
+        resources:
+          requests:
+            cpu: "100m"
+        imagePullPolicy: IfNotPresent #Always
+        ports:
+        - containerPort: 8080
+        env:
+        - name: VERSION
+          value: "v1"
+        - name: METADATA
+          value: "production"
+
+
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: demo-v2
+  labels:
+    app: demo
+    version: v2
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: demo
+        version: v2
+    spec:
+      containers:
+      - name: webserver
+        image: ehazlett/docker-demo
+        resources:
+          requests:
+            cpu: "100m"
+        imagePullPolicy: IfNotPresent #Always
+        ports:
+        - containerPort: 8080
+        env:
+        - name: VERSION
+          value: "v2"
+        - name: METADATA
+          value: "staging"
+
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: demo-v3
+  labels:
+    app: demo
+    version: v3
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: demo
+        version: v3
+    spec:
+      containers:
+      - name: webserver
+        image: ehazlett/docker-demo
+        resources:
+          requests:
+            cpu: "100m"
+        imagePullPolicy: IfNotPresent #Always
+        ports:
+        - containerPort: 8080
+        env:
+        - name: VERSION
+          value: "v3"
+        - name: METADATA
+          value: "dev"
diff --git a/ee/ucp/kubernetes/cluster-ingress/yaml/ingress-simple.yaml b/ee/ucp/kubernetes/cluster-ingress/yaml/ingress-simple.yaml
new file mode 100644
index 0000000000..bb7c1d0f91
--- /dev/null
+++ b/ee/ucp/kubernetes/cluster-ingress/yaml/ingress-simple.yaml
@@ -0,0 +1,47 @@
+apiVersion: networking.istio.io/v1alpha3
+kind: Gateway
+metadata:
+  name: cluster-gateway
+spec:
+  selector:
+    istio: ingressgateway # use istio default controller
+  servers:
+  - port:
+      number: 80
+      name: http
+      protocol: HTTP
+    hosts:
+    - "*"
+
+---
+
+apiVersion: networking.istio.io/v1alpha3
+kind: VirtualService
+metadata:
+  name: demo-vs
+spec:
+  hosts:
+  - "demo.example.com"
+  gateways:
+  - cluster-gateway
+  http:
+  - match:
+    route:
+    - destination:
+        host: demo-service
+        subset: v1
+        port:
+          number: 8080
+
+---
+
+apiVersion: networking.istio.io/v1alpha3
+kind: DestinationRule
+metadata:
+  name: demo-destinationrule
+spec:
+  host: demo-service
+  subsets:
+  - name: v1
+    labels:
+      version: v1
diff --git a/ee/ucp/kubernetes/cluster-ingress/yaml/ingress-sticky.yaml b/ee/ucp/kubernetes/cluster-ingress/yaml/ingress-sticky.yaml
new file mode 100644
index 0000000000..df6ff04dd7
--- /dev/null
+++ b/ee/ucp/kubernetes/cluster-ingress/yaml/ingress-sticky.yaml
@@ -0,0 +1,77 @@
+apiVersion: networking.istio.io/v1alpha3
+kind: Gateway
+metadata:
+  name: cluster-gateway
+spec:
+  selector:
+    istio: ingressgateway # use istio default controller
+  servers:
+  - port:
+      number: 80
+      name: http
+      protocol: HTTP
+    hosts:
+    - "*"
+
+---
+
+apiVersion: networking.istio.io/v1alpha3
+kind: VirtualService
+metadata:
+  name: demo-vs
+spec:
+  hosts:
+  - "demo.example.com"
+  gateways:
+  -
cluster-gateway + http: + - match: + - headers: + stage: + exact: dev + route: + - destination: + host: demo-service + subset: v3 + port: + number: 8080 + - match: + route: + - destination: + host: demo-service + subset: v1 + port: + number: 8080 + weight: 100 + - destination: + host: demo-service + subset: v2 + port: + number: 8080 + weight: 0 + +--- + +apiVersion: networking.istio.io/v1alpha3 +kind: DestinationRule +metadata: + name: demo-destinationrule +spec: + host: demo-service + + subsets: + - name: v1 + labels: + version: v1 + trafficPolicy: + loadBalancer: + consistentHash: + httpCookie: + name: session + ttl: 60s + - name: v2 + labels: + version: v2 + - name: v3 + labels: + version: v3 diff --git a/ee/ucp/kubernetes/cluster-ingress/yaml/ingress-weighted.yaml b/ee/ucp/kubernetes/cluster-ingress/yaml/ingress-weighted.yaml new file mode 100644 index 0000000000..92ee332874 --- /dev/null +++ b/ee/ucp/kubernetes/cluster-ingress/yaml/ingress-weighted.yaml @@ -0,0 +1,72 @@ +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: cluster-gateway +spec: + selector: + istio: ingressgateway # use istio default controller + servers: + - port: + number: 80 + name: http + protocol: HTTP + hosts: + - "*" + +--- + +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: demo-vs +spec: + hosts: + - "demo.example.com" + gateways: + - cluster-gateway + http: + - match: + - headers: + stage: + exact: dev + route: + - destination: + host: demo-service + subset: v3 + port: + number: 8080 + - match: + route: + - destination: + host: demo-service + subset: v1 + port: + number: 8080 + weight: 80 + - destination: + host: demo-service + subset: v2 + port: + number: 8080 + weight: 20 + + + +--- + +apiVersion: networking.istio.io/v1alpha3 +kind: DestinationRule +metadata: + name: demo-destinationrule +spec: + host: demo-service + subsets: + - name: v1 + labels: + version: v1 + - name: v2 + labels: + version: v2 + - name: v3 + labels: + version: v3 diff --git a/ee/ucp/kubernetes/layer-7-routing.md b/ee/ucp/kubernetes/layer-7-routing.md deleted file mode 100644 index 25808e6f5f..0000000000 --- a/ee/ucp/kubernetes/layer-7-routing.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: Layer 7 routing -description: Learn how to route traffic to your Kubernetes workloads in Docker Enterprise Edition. -keywords: UCP, Kubernetes, ingress, routing -redirect_from: - - /ee/ucp/kubernetes/deploy-ingress-controller/ ---- - -When you deploy a Kubernetes application, you may want to make it accessible -to users using hostnames instead of IP addresses. - -Kubernetes provides **ingress controllers** for this. This functionality is -specific to Kubernetes. If you're trying to route traffic to Swarm-based -applications, check [layer 7 routing with Swarm](../interlock/index.md). - -Use an ingress controller when you want to: - -* Give your Kubernetes app an externally-reachable URL. -* Load-balance traffic to your app. - -A popular ingress controller within the Kubernetes Community is the [NGINX controller](https://github.com/kubernetes/ingress-nginx), and can be used in Docker Enterprise Edition, but it is not directly supported by Docker, Inc. - -Learn about [ingress in Kubernetes](https://v1-11.docs.kubernetes.io/docs/concepts/services-networking/ingress/). - -For an example of a YAML NGINX kube ingress deployment, refer to . 
diff --git a/ee/ucp/kubernetes/pod-security-policies.md b/ee/ucp/kubernetes/pod-security-policies.md
index 887bf424a4..af23fd0829 100644
--- a/ee/ucp/kubernetes/pod-security-policies.md
+++ b/ee/ucp/kubernetes/pod-security-policies.md
@@ -2,25 +2,26 @@
 title: Use Pod Security Policies in UCP
 description: Learn how to use Pod Security Policies to lock down Kubernetes as part of Universal Control Plane.
 keywords: UCP, Kubernetes, psps, pod security policies
-redirect_from:
 ---
-Pod Security Policies (PSPs) are cluster-level resources which are enabled by default in Docker Universal Control Plane (UCP) 3.2. See [Pod Security Policy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) for an explanation of this Kubernetes concept.
+Pod Security Policies (PSPs) are cluster-level resources which are enabled by
+default in Docker Universal Control Plane (UCP) 3.2. See [Pod Security
+Policy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) for an
+explanation of this Kubernetes concept.
-There are two default PSPs in UCP: a `privileged` policy
-and an `unprivileged` policy. Administrators of the cluster can enforce additional
-policies and apply them to users and teams for further control of what runs in the
-Kubernetes cluster. This guide describes the two default policies, and
-provides two example use cases for custom policies.
+There are two default PSPs in UCP: a `privileged` policy and an `unprivileged`
+policy. Administrators of the cluster can enforce additional policies and apply
+them to users and teams for further control of what runs in the Kubernetes
+cluster. This guide describes the two default policies, and provides two
+example use cases for custom policies.
 ## Kubernetes Role Based Access Control (RBAC)
-To interact with PSPs, a user will need to be granted access to
-the `PodSecurityPolicy` object in Kubernetes RBAC. If the user is a `UCP Admin`,
-or has been granted the `ClusterRole`: `cluster-admin` for all namespaces, then
-the user can already manipulate PSPs. Additionally, a normal
-user can interact with policies if a UCP admin creates the following
-`ClusterRole` and `ClusterRoleBinding`:
+To interact with PSPs, a user will need to be granted access to the
+`PodSecurityPolicy` object in Kubernetes RBAC. If the user is a `UCP Admin`,
+then the user can already manipulate PSPs. A normal user can interact with
+policies if a UCP admin creates the following `ClusterRole` and
+`ClusterRoleBinding`:
 ```
 $ cat <
+> Note: In most use cases, a Pod is not actually scheduled by a user. When
+> creating Kubernetes objects such as Deployments or DaemonSets, the pods are
+> scheduled by a service account or a controller.
+
+If you have disabled the `privileged` PSP policy, and created a `RoleBinding`
+to map a user to a new PSP policy, Kubernetes objects like Deployments and
+DaemonSets will not be able to deploy pods. This is because Kubernetes objects,
+like Deployments, use a `Service Account` to schedule pods, instead of the user
+that created the Deployment.
+
+```bash
+user $ kubectl get deployments
+NAME    READY   UP-TO-DATE   AVAILABLE   AGE
+nginx   0/1     0            0           88s
+
+user $ kubectl get replicasets
+NAME              DESIRED   CURRENT   READY   AGE
+nginx-cdcdd9f5c   1         0         0       92s
+
+user $ kubectl describe replicasets nginx-cdcdd9f5c
+...
+  Warning  FailedCreate  48s (x15 over 2m10s)  replicaset-controller  Error creating: pods "nginx-cdcdd9f5c-" is forbidden: unable to validate against any pod security policy: []
+```
+
+For this deployment to be able to schedule pods, the service account defined
+within the deployment specification needs to be associated with a PSP policy.
+If a service account is not defined within a deployment spec, the default
+service account in a namespace is used.
+
+This is the case in the deployment output above: no service account is
+defined, so a `RoleBinding` is needed to grant the default service account in
+the default namespace the use of a PSP policy.
+
+An example `RoleBinding` to associate the `unprivileged` PSP policy in UCP with
+the default service account in the default namespace is:
+
+```bash
+admin $ cat <
+ - Docker recommends a maximum of two layers when creating collections within UCP under the shared cluster + collection designated as /Swarm/. For example, if a production collection called /Swarm/production is created + under the shared cluster collection /Swarm/, only one level of nesting should be created, for + example, /Swarm/production/app/. See Nested collections for more details. +- UCP `stop` and `restart` + - Additional upgrade functionality has been included which eliminates the need for these commands. +- `ucp-agent-pause` + - `ucp-agent-pause` is no longer supported. To pause UCP reconciliation on a specific node, for example, when repairing unhealthy `etcd` or `rethinkdb` replicas, you can use swarm node labels as shown in the following example: + ``` + docker node update --label-add com.docker.ucpagent-pause=true + ``` +- Windows 2016 is formally deprecated from Docker Enterprise 3.0. Only non-overlay networks are supported +on Windows 2016 in Docker Enterprise 3.0. EOL of Windows Server 2016 support will occur in Docker +Enterprise 3.1. Upgrade to Windows Server 2019 for continued support on Docker Enterprise. +- Support for updating the UCP config with `docker service update ucp-manager-agent --config-add ...` +is deprecated and will be removed in a future release. To update the UCP config, use the `/api/ucp/config-toml` +endpoint described in https://docs.docker.com/ee/ucp/admin/configure/ucp-configuration-file/. +- Generating a backup from a UCP manager that has lost quorum is no longer supported. We recommend that you +regularly schedule backups on your cluster so that you have always have a recent backup. +Refer to [UCP backup information](/ee/admin/backup/back-up-ucp/) for detailed UCP back up information. + +If your cluster has lost quorum and you cannot recover it on your own, please contact Docker Support. + +- Browser support +In order to optimize user experience and security, support for Internet Explorer (IE) version 11 is not provided for Windows 7 with UCP version 3.2. Docker recommends updating to a newer browser version if you plan to use UCP 3.2, or remaining on UCP 3.1.x or older until EOL of IE11 in January 2020. + +- Kubernetes + + - Integrated Kubernetes Ingress + - You can now dynamiclly deploy L7 routes for applications, scale out multi-tenant ingress for shared clusters, + and give applications TLS termination, path-based routing, and high-performance L7 load-balancing in a + centralized and controlled manner. + + - Updated Kubernetes to version 1.14. + + - Enhancements: + - PodShareProcessNamespace + + - The PodShareProcessNamespace feature, available by default, configures PID namespace sharing within a pod. See [Share Process Namespace between Containers in a Pod](https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/) for more information. + - https://github.com/kubernetes/kubernetes/pull/66507 + - Volume Dynamic Provisioning + - Combined `VolumeScheduling` and `DynamicProvisioningScheduling`. + - Added allowedTopologies description in kubectl. + - ACTION REQUIRED: The DynamicProvisioningScheduling alpha feature gate has been removed. The VolumeScheduling beta feature gate is still required for this feature. 
- https://github.com/kubernetes/kubernetes/pull/67432 + - TokenRequest and TokenRequestProjection + - Enable these features by starting the API server with the following flags: + * --service-account-issuer + * --service-account-signing-key-file + * --service-account-api-audiences + - https://github.com/kubernetes/kubernetes/pull/67349 + - Removed `--cadvisor-port flag` from kubelet + + - ACTION REQUIRED: The cAdvisor web UI that the kubelet started using `--cadvisor-port` was removed + in 1.12. If cAdvisor is needed, run it via a DaemonSet. + - https://github.com/kubernetes/kubernetes/pull/65707 + - Support for Out-of-tree CSI Volume Plugins (stable) with API + + - Allows volume plugins to be developed out-of-tree. + - Not require building volume plugins (or their dependencies) into Kubernetes binaries. + - Not requiring direct machine access to deploy new volume plugins (drivers). + - https://github.com/kubernetes/enhancements/issues/178 + - Server-side Apply leveraged by the UCP GUI for the yaml create page + + - Moved "apply" and declarative object management from kubectl to the apiserver. Added "field ownership". + - https://github.com/kubernetes/enhancements/issues/555 + - The PodPriority admission plugin + + - For `kube-apiserver`, the `Priority` admission plugin is now enabled by default when using `--enable-admission-plugins`. If using `--admission-control` to fully specify the set of admission plugins, the `Priority` admission plugin should be added if using the `PodPriority` feature, which is enabled by default in 1.11. + - The priority admission plugin: + - Allows pod creation to include an explicit priority field if it matches the computed + priority (allows export/import cases to continue to work on the same cluster, between + clusters that match priorityClass values, and between clusters where priority is unused + and all pods get priority:0) + - Preserves existing priority if a pod update does not include a priority value and the old + pod did (allows POST, PUT, PUT, PUT workflows to continue to work, with the admission-set value + on create being preserved by the admission plugin on update) + - https://github.com/kubernetes/kubernetes/pull/65739 + - Volume Topology + + - Made the scheduler aware of a Pod's volume's topology constraints, such as zone or node. + - https://github.com/kubernetes/enhancements/issues/490 + - Docs pr here: kubernetes/website#10736 + - Admin RBAC role and edit RBAC roles + - The admin RBAC role is aggregated from edit and view. The edit RBAC role is aggregated from a + separate edit and view. + - https://github.com/kubernetes/kubernetes/pull/66684 + - API + - `autoscaling/v2beta2` and `custom_metrics/v1beta2` implement metric selectors for Object and Pods + metrics, as well as allow AverageValue targets on Objects, similar to External metrics. + - https://github.com/kubernetes/kubernetes/pull/64097 + - Version updates + - Client-go libraries bump + - ACTION REQUIRED: the API server and client-go libraries support additional non-alpha-numeric + characters in UserInfo "extra" data keys. Both support extra data containing "/" characters or + other characters disallowed in HTTP headers. + - Old clients sending keys that were %-escaped by the user have their values unescaped by new API servers. + New clients sending keys containing illegal characters (or "%") to old API servers do not have their values unescaped. + - https://github.com/kubernetes/kubernetes/pull/65799 + - audit.k8s.io API group bump + - The audit.k8s.io API group has been bumped to v1. 
+ - Deprecated element metav1.ObjectMeta and Timestamp are removed from audit Events in v1 version. + - Default value of option --audit-webhook-version and --audit-log-version are changed from `audit.k8s.io/v1beta1` to `audit.k8s.io/v1`. + - https://github.com/kubernetes/kubernetes/pull/65891 + ### Known issues +- kubelets or Calico-node pods are Down + + The symptom of this issue is that kubelets or Calico-node pods are down with one of the following error messages: + - Kubelet is unhealthy + - Calico-node pod is unhealthy + + This is a rare issue, but there is a race condition in UCP today where Docker iptables rules get permanently deleted. This happens when Calico tries to update the iptables state using delete commands passed to iptables-restore while Docker simultaneously updates its iptables state and Calico ends up deleting the wrong rules. + + Rules that are affected: + ``` + /sbin/iptables --wait -I FORWARD -o docker_gwbridge -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + + /sbin/iptables --wait -I FORWARD -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + + /sbin/iptables --wait -I POSTROUTING -s 172.17.0.0/24 ! -o docker0 -j MASQUERADE + ``` + + The fix for this issue should be available as a minor version release in Calico and incorporated into UCP in a subsequent patch release. + + Until then as a workaround we recommend: + - re-adding the above rules manually or via cron or + - restarting Docker + - Running the engine with `"selinux-enabled": true` and installing UCP returns the following error: ``` time="2019-05-22T00:27:54Z" level=fatal msg="the following required ports are blocked on your host: 179, 443, 2376, 6443, 6444, 10250, 12376, 12378 - 12386. Check your firewall settings" @@ -55,165 +238,66 @@ upgrade your installation to the latest release. name: persistent-volume-binder namespace: kube-system ``` - > **Note**: This issue also applies to UCP 3.0.x and 3.1.x. -# Version 3.2.0-beta -(2019-5-16) +- Using iSCSI on a SLES 15 Kubernetes cluster results in failures + - Using Kubernetes iSCSI on SLES 15 hosts results in failures. Kubelet logs might have errors similar to the following, when there's an attempt to attach the iSCSI based persistent volume: + ``` + {kubelet ip-172-31-13-214.us-west-2.compute.internal} FailedMount: MountVolume.WaitForAttach failed for volume "iscsi-4mpvj" : exit status 127" + ``` + - Reason: The failure is because the containerized kubelet in UCP does not contain the library dependency (libopeniscsiusr) for iscsiadm version 2.0.876 on SLES15. + - Workaround: use a swarm service to deploy this change across the cluster as follows: + 1. Install UCP and have nodes configured as swarm workers. + 2. Perform iSCSI initiator related configuration on the nodes. + - Install packages: + ``` + zypper -n install open-iscsi + ``` + - Modprobe the relevant kernel modules + ``` + modprobe iscsi_tcp + ``` + - Start the iscsi daemon + ``` + service start iscsid + ``` -## New features + 3. Create a global docker service that updates the dynamic library configuration path of the ucp-kubelet with relevant host paths. 
For this, use the UCP client bundle to point to the UCP cluster and run the following swarm commands: + ``` + docker service create --mode=global --restart-condition none --mount type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock mavenugo/swarm-exec:17.03.0-ce docker exec ucp-kubelet "/bin/bash" "-c" "echo /rootfs/usr/lib64 >> /etc/ld.so.conf.d/libc.conf && ldconfig" + 4b1qxigqht0vf5y4rtplhygj8 + overall progress: 0 out of 3 tasks + overall progress: 0 out of 3 tasks + overall progress: 0 out of 3 tasks + ugb24g32knzv: running + overall progress: 0 out of 3 tasks + overall progress: 0 out of 3 tasks + overall progress: 0 out of 3 tasks + overall progress: 0 out of 3 tasks -### Group Managed Service Accounts (gMSA) -On Windows, you can create or update a service using --credential-spec with the config:// format. -This passes the gMSA credentials file directly to nodes before a container starts. + + Operation continuing in background. + Use `docker service ps 4b1qxigqht0vf5y4rtplhygj8` to check progress. -### Open Security Controls Assessment Language (OSCAL) -OSCAL API endpoints have been added in Engine and UCP. These endpoints are enabled by default. + $ docker service ps 4b1qxigqht0vf5y4rtplhygj8 + ID NAME IMAGE NODE DESIRED STATE CURRENT STATE + ERROR PORTS + bkgqsbsffsvp hopeful_margulis.ckh79t5dot7pdv2jsl3gs9ifa mavenugo/swarm-exec:17.03.0-ce user-testkit-4DA6F6-sles-1 Shutdown Complete 7 minutes ago + nwnur7r1mq77 hopeful_margulis.2gzhtgazyt3hyjmffq8f2vro4 mavenugo/swarm-exec:17.03.0-ce user-testkit-4DA6F6-sles-0 Shutdown Complete 7 minutes ago + uxd7uxde21gx hopeful_margulis.ugb24g32knzvvjq9d82jbuba1 mavenugo/swarm-exec:17.03.0-ce user + -testkit-4DA6F6-sles-2 Shutdown Complete 7 minutes ago + ``` -### Container storage interface (CSI) -Version 1.0 of the CSI specification is now supported for container orchestrators to manage storage plugins. -Note: As of May 2019, none of the -(available CSI drivers)[https://kubernetes-csi.github.io/docs/drivers.html] are production quality and are considered pre-GA. + 4. Switch cluster to run kubernetes workloads. Your cluster is now set to run iSCSI workloads. + +### Components -### Internet Small Computer System Interface (iSCSI) -Using iSCSI, a storage admin can now provision a UCP cluster with persistent storage from which UCP end -users can request storage resources without needing underlying infrastructure knowledge. - -### System for Cross-domain Identity Management (SCIM) -SCIM implementation allows proactive synchronization with UCP and eliminates manual intervention for changing -user status and group membership. - -### Support for Pod Security Policies (PSPs) within Kubernetes -Pod Security Policies are enabled by default in UCP 3.2 allowing platform -operators to enforce security controls on what can run on top of Kubernetes. For -more information see -[Using Pod Security](/ee/ucp/kubernetes/pod-security-policies/) - -## Enhancements - -### Backup/restore - -- Backups no longer halt UCP containers. -- Backup contents can now be redirected to a file instead of stdout/err. -- You can now view information for all backups performed, including the date, status, and contents filenames. -Error log information can be accessed for troubleshooting. - -### Upgrade - -- Improved progress information for install and upgrade. -- You can now manually control worker node upgrades. -- User workloads no longer experience downtime during an upgrade. - -### Buildkit - -- You can now use a UCP client bundle with buildkit. 
- -## Deprecations -The following features are deprecated in UCP 3.2: - -- Collections - - The ability to create a nested collection of more than 2 layers deep within the root /Swarm/collection is - now deprecated and will not be included in future versions of the product. However, current nested collections - with more than 2 layers are still retained. - - Docker recommends a maximum of two layers when creating collections within UCP under the shared cluster - collection designated as /Swarm/. For example, if a production collection called /Swarm/production is created - under the shared cluster collection /Swarm/, only one level of nesting should be created, for - example, /Swarm/production/app/. See Nested collections for more details. -- UCP `stop` and `restart` - - Additional upgrade functionality has been included which eliminates the need for these commands. -- `ucp-agent-pause` - - `ucp-agent-pause` is no longer supported. To pause UCP reconciliation on a specific node, for example, when repairing unhealthy `etcd` or `rethinkdb` replicas, you can use swarm node labels as shown in the following example: - ``` - docker node update --label-add com.docker.ucpagent-pause=true - ``` - -## Browser support -In order to optimize user experience and security, support for Internet Explorer (IE) version 11 is not provided for Windows 7 with UCP version 3.2. Docker recommends updating to a newer browser version if you plan to use UCP 3.2, or remaining on UCP 3.1.x or older until EOL of IE11 in January 2020. - -## Kubernetes - -- Integrated Kubernetes Ingress - - You can now dynamiclly deploy L7 routes for applications, scale out multi-tenant ingress for shared clusters, - and give applications TLS termination, path-based routing, and high-performance L7 load-balancing in a - centralized and controlled manner. - -- Updated Kubernetes to version 1.14. - - - Enhancements: - - PodShareProcessNamespace - - - The PodShareProcessNamespace feature, available by default, configures PID namespace sharing within a pod. - See [Share Process Namespace between Containers in a Pod](https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/) for more information. - - https://github.com/kubernetes/kubernetes/pull/66507 - - Volume Dynamic Provisioning - - - Combined `VolumeScheduling` and `DynamicProvisioningScheduling`. - - Added allowedTopologies description in kubectl. - - ACTION REQUIRED: The DynamicProvisioningScheduling alpha feature gate has been removed. - The VolumeScheduling beta feature gate is still required for this feature) -https://github.com/kubernetes/kubernetes/pull/67432 - - TokenRequest and TokenRequestProjection - - - Enable these features by starting the API server with the following flags: - * --service-account-issuer - * --service-account-signing-key-file - * --service-account-api-audiences - - https://github.com/kubernetes/kubernetes/pull/67349 - - Removed `--cadvisor-port flag` from kubelet - - - ACTION REQUIRED: The cAdvisor web UI that the kubelet started using `--cadvisor-port` was removed - in 1.12. If cAdvisor is needed, run it via a DaemonSet. - - https://github.com/kubernetes/kubernetes/pull/65707 - - Support for Out-of-tree CSI Volume Plugins (stable) with API - - - Allows volume plugins to be developed out-of-tree. - - Not require building volume plugins (or their dependencies) into Kubernetes binaries. - - Not requiring direct machine access to deploy new volume plugins (drivers). 
- - https://github.com/kubernetes/enhancements/issues/178 - - Server-side Apply leveraged by the UCP GUI for the yaml create page - - - Moved "apply" and declarative object management from kubectl to the apiserver. Added "field ownership". - - https://github.com/kubernetes/enhancements/issues/555 - - The PodPriority admission plugin - - - For `kube-apiserver`, the `Priority` admission plugin is now enabled by default when using `--enable-admission-plugins`. If using `--admission-control` to fully specify the set of admission plugins, the `Priority` admission plugin should be added if using the `PodPriority` feature, which is enabled by default in 1.11. - - The priority admission plugin: - - Allows pod creation to include an explicit priority field if it matches the computed - priority (allows export/import cases to continue to work on the same cluster, between - clusters that match priorityClass values, and between clusters where priority is unused - and all pods get priority:0) - - Preserves existing priority if a pod update does not include a priority value and the old - pod did (allows POST, PUT, PUT, PUT workflows to continue to work, with the admission-set value - on create being preserved by the admission plugin on update) - - https://github.com/kubernetes/kubernetes/pull/65739 - - Volume Topology - - - Made the scheduler aware of a Pod's volume's topology constraints, such as zone or node. - - https://github.com/kubernetes/enhancements/issues/490, Docs pr here: kubernetes/website#10736 - - Admin RBAC role and edit RBAC roles - - The admin RBAC role is aggregated from edit and view. The edit RBAC role is aggregated from a - separate edit and view. - - https://github.com/kubernetes/kubernetes/pull/66684 - - API - - `autoscaling/v2beta2` and `custom_metrics/v1beta2` implement metric selectors for Object and Pods - metrics, as well as allow AverageValue targets on Objects, similar to External metrics. - - https://github.com/kubernetes/kubernetes/pull/64097 - - Version updates - - Client-go libraries bump - - ACTION REQUIRED: the API server and client-go libraries support additional non-alpha-numeric - characters in UserInfo "extra" data keys. Both support extra data containing "/" characters or - other characters disallowed in HTTP headers. - - Old clients sending keys that were %-escaped by the user have their values unescaped by new API servers. - New clients sending keys containing illegal characters (or "%") to old API servers do not have their values unescaped. - - https://github.com/kubernetes/kubernetes/pull/65799 - - audit.k8s.io API group bump - - The audit.k8s.io API group has been bumped to v1. - - Deprecated element metav1.ObjectMeta and Timestamp are removed from audit Events in v1 version. - - Default value of option --audit-webhook-version and --audit-log-version are changed from `audit.k8s.io/v1beta1` - to `audit.k8s.io/v1`. - - https://github.com/kubernetes/kubernetes/pull/65891 - - Known issues - - Backwards-incompatible changes in the Kube API that might affect user workloads will require warnings/documentation in the UCP release notes for Amberjack (list of deprecated features and APIs TBD). - - Does anything need to be noted for Kube 1.12 (deprecations, etc. that is not covered for 1.13?) 
+| Component | Version | +| ----------- | ----------- | +| UCP | 3.2.0 | +| Kubernetes | 1.14.3 | +| Calico | 3.5.7 | +| Interlock | 2.4.0 | +| Interlock NGINX proxy | 1.14.2 | # Version 3.1 @@ -380,7 +464,7 @@ https://github.com/kubernetes/kubernetes/pull/67432 | Calico | 3.5.3 | | Interlock (nginx) | 1.14.0 | -## 3.1.5 +## 3.1.5 2019-03-28 ### Kubernetes @@ -425,7 +509,7 @@ https://github.com/kubernetes/kubernetes/pull/67432 | Calico | 3.5.2 | | Interlock (nginx) | 1.14.0 | -## 3.1.4 +## 3.1.4 2019-02-28 @@ -494,7 +578,7 @@ https://github.com/kubernetes/kubernetes/pull/67432 * Fixed an issue with ucp-proxy health check. (docker/orca#15814, docker/orca#15813, docker/orca#16021, docker/orca#15811) * Fixed an issue with manual creation of a **ClusterRoleBinding** or **RoleBinding** for `User` or `Group` subjects requiring the ID of the user, organization, or team. (docker/orca#14935) * Fixed an issue in which Kube Rolebindings only worked on UCP User ID and not UCP username. (docker/orca#14935) - + ### Known issue * By default, Kubelet begins deleting images, starting with the oldest unused images, after exceeding 85% disk space utilization. This causes an issue in an air-gapped environment. (docker/orca#16082) @@ -544,7 +628,7 @@ now configurable within the UCP web interface. (#15466) | Calico | 3.2.3 | | Interlock (nginx) | 1.14.0 | -## 3.1.1 +## 3.1.1 (2018-12-04) @@ -559,7 +643,7 @@ now configurable within the UCP web interface. (#15466) | Calico | 3.2.3 | | Interlock (nginx) | 1.13.12 | -## 3.1.0 +## 3.1.0 2018-11-08 @@ -698,7 +782,6 @@ The following features are deprecated in UCP 3.1. | Interlock (nginx) | 1.13.12 | ## 3.0.11 - 2019-05-06 ### Bug fixes @@ -745,7 +828,7 @@ The following features are deprecated in UCP 3.1. Storage Class, as this allowed non-admins to by pass security controls and mount host directories. (docker/orca#15936) * Added support for the limit arg in `docker ps`. (#15812) - + ### Known issue * By default, Kubelet begins deleting images, starting with the oldest unused images, after exceeding 85% disk space utilization. This causes an issue in an air-gapped environment. @@ -764,21 +847,21 @@ The following features are deprecated in UCP 3.1. ### Bug fixes * Core - * Significantly reduced database load in environments with a lot of concurrent + * Significantly reduced database load in environments with a lot of concurrent and repeated API requests by the same user. (docker/escalation#911) * Added the ability to set custom HTTP response headers to be returned by the UCP Controller API Server. (docker/orca#10733) * UCP backend will now complain when a service is created/updated if the - `com.docker.lb.network` label is not correctly specified. (docker/orca#15015) + `com.docker.lb.network` label is not correctly specified. (docker/orca#15015) * LDAP group member attribute is now case insensitive. (docker/escalation#917) * Fixed an issue that caused a system hang after UCP backup and the attempted shutdown of the Docker daemon to perform a swarm backup. /dev/shm is now unmounted when starting the kubelet container. (docker/orca#15672, docker/escalation#841) - + * Interlock * Interlock headers can now be hidden. (docker/escalation#833) * Respect `com.docker.lb.network` labels and only attach the specified networks to the Interlock proxy. (docker/interlock#169) - * Add support for 'VIP' backend mode, in which the Interlock proxy connects to the - backend service's Virtual IP instead of load-balancing directly to each task IP. 
+ * Add support for 'VIP' backend mode, in which the Interlock proxy connects to the + backend service's Virtual IP instead of load-balancing directly to each task IP. (docker/interlock#206, escalation/920) ### Components @@ -790,7 +873,7 @@ The following features are deprecated in UCP 3.1. | Calico | 3.0.8 | | Interlock (nginx) | 1.13.12 | -## 3.0.7 +## 3.0.7 2018-12-04 @@ -805,7 +888,7 @@ The following features are deprecated in UCP 3.1. | Calico | 3.0.8 | | Interlock (nginx) | 1.13.12 | -## 3.0.6 +## 3.0.6 2018-10-25 @@ -820,10 +903,10 @@ The following features are deprecated in UCP 3.1. * Fixed an issue that caused the Interlock proxy service to keep restarting. (docker/escalation#814) * Fixed an issue that caused Kubernetes DNS to not work. (#14064, #11981) * Fixed an issue that caused "Missing swarm placement constraints" warning banner to appear unnecessarily. (docker/orca#14539) - + * Security * Fixed `libcurl` vulnerability in RethinkDB image. (docker/orca#15169) - + * UI * Fixed an issue that prevented "Per User Limit" on Admin Settings from working. (docker/escalation#639) @@ -836,7 +919,7 @@ The following features are deprecated in UCP 3.1. | Calico | 3.0.8 | | Interlock (nginx) | 1.13.12 | -## 3.0.5 +## 3.0.5 2018-08-30 @@ -853,7 +936,7 @@ The following features are deprecated in UCP 3.1. Alternately, you can just `docker pull docker/ucp-agent:3.0.5` on every manager node. This issue is fixed in 3.0.5. Any upgrade from 3.0.5 or above should work without manually pulling the images. - + ### Components @@ -864,7 +947,7 @@ The following features are deprecated in UCP 3.1. | Calico | 3.0.8 | | Interlock (nginx) | 1.13.12 | -## 3.0.4 +## 3.0.4 2018-08-09 @@ -886,7 +969,7 @@ The following features are deprecated in UCP 3.1. | Calico | 3.0.8 | | Interlock (nginx) | 1.13.12 | -## 3.0.3 +## 3.0.3 2018-07-26 @@ -918,7 +1001,7 @@ The following features are deprecated in UCP 3.1. | Calico | 3.0.8 | | Interlock (nginx) | 1.13.12 | -## 3.0.2 +## 3.0.2 2018-06-21 @@ -1003,7 +1086,7 @@ Azure Disk when installing UCP with the `--cloud-provider` option. depending on how quickly `calico-node` gets upgraded on those nodes. * `ucp-interlock-proxy` may fail to start when two or more services are configured with two or more backend hosts. [You can use this workaround](https://success.docker.com/article/how-do-i-ensure-the-ucp-routing-mesh-ucp-interlock-proxy-continues-running-in-the-event-of-a-failed-update). - + ### Components | Component | Version | @@ -1013,7 +1096,7 @@ Azure Disk when installing UCP with the `--cloud-provider` option. | Calico | 3.0.1 | | Interlock (nginx) | 1.13.8 | -## Version 3.0.0 +## Version 3.0.0 2018-04-17 @@ -1223,7 +1306,6 @@ instead of the correct image for the worker architecture. `/`. ## Version 2.2.18 - 2019-05-06 ### Bug fixes @@ -1277,7 +1359,7 @@ instead of the correct image for the worker architecture. ### Bug fixes * Added support for the `limit` argument in `docker ps`. (#15812) - + ### Known issues * Docker currently has limitations related to overlay networking and services using VIP-based endpoints. These limitations apply to use of the HTTP Routing Mesh (HRM). HRM users should familiarize themselves with these limitations. In particular, HRM may encounter virtual IP exhaustion (as evidenced by `failed to allocate network IP for task` Docker log messages). If this happens, and if the HRM service is restarted or rescheduled for any reason, HRM may fail to resume operation automatically. See the Docker EE 17.06-ee5 release notes for details. 
@@ -1301,11 +1383,11 @@ instead of the correct image for the worker architecture. ### Bug fixes * Core - * Significantly reduced database load in environments with a lot of concurrent and repeated API requests by the same user. - * Added the ability to set custom HTTP response headers to be returned by the UCP Controller API Server. + * Significantly reduced database load in environments with a lot of concurrent and repeated API requests by the same user. + * Added the ability to set custom HTTP response headers to be returned by the UCP Controller API Server. * Web interface * Fixed stack creation for non admin user when UCP uses a custom controller port. - + ### Known issues * Docker currently has limitations related to overlay networking and services using VIP-based endpoints. These limitations apply to use of the HTTP Routing Mesh (HRM). HRM users should familiarize themselves with these limitations. In particular, HRM may encounter virtual IP exhaustion (as evidenced by `failed to allocate network IP for task` Docker log messages). If this happens, and if the HRM service is restarted or rescheduled for any reason, HRM may fail to resume operation automatically. See the Docker EE 17.06-ee5 release notes for details. @@ -1323,7 +1405,7 @@ instead of the correct image for the worker architecture. * You can't create a bridge network from the web interface. As a workaround use `/`. -## Version 2.2.14 +## Version 2.2.14 2018-10-25 @@ -1335,7 +1417,7 @@ instead of the correct image for the worker architecture. * Web Interface * Fixed an issue that prevented "Per User Limit" on Admin Settings from working. (docker/escalation#639) - + ### Known issues * Docker currently has limitations related to overlay networking and services using VIP-based endpoints. These limitations apply to use of the HTTP Routing Mesh (HRM). HRM users should familiarize themselves with these limitations. In particular, HRM may encounter virtual IP exhaustion (as evidenced by `failed to allocate network IP for task` Docker log messages). If this happens, and if the HRM service is restarted or rescheduled for any reason, HRM may fail to resume operation automatically. See the Docker EE 17.06-ee5 release notes for details. @@ -1353,7 +1435,7 @@ instead of the correct image for the worker architecture. * You can't create a bridge network from the web interface. As a workaround use `/`. -## Version 2.2.13 +## Version 2.2.13 2018-08-30 @@ -1362,7 +1444,7 @@ instead of the correct image for the worker architecture. * Security * Fixed a critical security issue to prevent UCP from accepting certificates from the system pool when adding client CAs to the server that requires mutual authentication. - + ### Known issues * Docker currently has limitations related to overlay networking and services using VIP-based endpoints. These limitations apply to use of the HTTP Routing Mesh (HRM). HRM users should familiarize themselves with these limitations. In particular, HRM may encounter virtual IP exhaustion (as evidenced by `failed to allocate network IP for task` Docker log messages). If this happens, and if the HRM service is restarted or rescheduled for any reason, HRM may fail to resume operation automatically. See the Docker EE 17.06-ee5 release notes for details. @@ -1380,7 +1462,7 @@ instead of the correct image for the worker architecture. * You can't create a bridge network from the web interface. As a workaround use `/`. 
-## Version 2.2.12 +## Version 2.2.12 2018-08-09 @@ -1391,7 +1473,7 @@ instead of the correct image for the worker architecture. were stored in cleartext on UCP hosts. Please refer to the following KB article https://success.docker.com/article/upgrading-to-ucp-2-2-12-ucp-3-0-4/ for proper implementation of this fix. - + ### Known issues * Docker currently has limitations related to overlay networking and services using VIP-based endpoints. These limitations apply to use of the HTTP Routing Mesh (HRM). HRM users should familiarize themselves with these limitations. In particular, HRM may encounter virtual IP exhaustion (as evidenced by `failed to allocate network IP for task` Docker log messages). If this happens, and if the HRM service is restarted or rescheduled for any reason, HRM may fail to resume operation automatically. See the Docker EE 17.06-ee5 release notes for details. @@ -1409,7 +1491,7 @@ instead of the correct image for the worker architecture. * You can't create a bridge network from the web interface. As a workaround use `/`. -## Version 2.2.11 +## Version 2.2.11 2018-07-26 @@ -1432,7 +1514,7 @@ instead of the correct image for the worker architecture. * UI * Fixed an issue that causes the web interface to not parse volume options correctly. * Fixed an issue that prevents the user from deploying stacks through the web interface. - + ### Known issues * Docker currently has limitations related to overlay networking and services using VIP-based endpoints. These limitations apply to use of the HTTP Routing Mesh (HRM). HRM users should familiarize themselves with these limitations. In particular, HRM may encounter virtual IP exhaustion (as evidenced by `failed to allocate network IP for task` Docker log messages). If this happens, and if the HRM service is restarted or rescheduled for any reason, HRM may fail to resume operation automatically. See the Docker EE 17.06-ee5 release notes for details. @@ -1450,7 +1532,7 @@ instead of the correct image for the worker architecture. * You can't create a bridge network from the web interface. As a workaround use `/`. -## Version 2.2.10 +## Version 2.2.10 2018-05-17 @@ -1503,7 +1585,7 @@ instead of the correct image for the worker architecture. * You can't create a bridge network from the web interface. As a workaround use `/`. -## Version 2.2.9 +## Version 2.2.9 2018-04-17 @@ -1517,7 +1599,7 @@ instead of the correct image for the worker architecture. * Core * Fixed an issue that causes container fail to start with `container ID not found` during high concurrent API calls to create and start containers. - + ### Known issues * RethinkDB can only run with up to 127 CPU cores. @@ -1539,7 +1621,7 @@ is always used, regardless of which one is actually the best match. * You can't create a bridge network from the web interface. As a workaround use `/`. -## Version 2.2.7 +## Version 2.2.7 2018-03-26 @@ -1548,7 +1630,7 @@ is always used, regardless of which one is actually the best match. * Fixed an issue where the minimum TLS version setting is not correctly handled, leading to non-default values causing `ucp-controller` and `ucp-agent` to keep restarting. - + ### Known issues * RethinkDB can only run with up to 127 CPU cores. @@ -1570,7 +1652,7 @@ is always used, regardless of which one is actually the best match. * You can't create a bridge network from the web interface. As a workaround use `/`. -## Version 2.2.6 +## Version 2.2.6 2018-03-19 @@ -1640,7 +1722,7 @@ is always used, regardless of which one is actually the best match. `/`. 
-## Version 2.2.5 +## Version 2.2.5 2018-01-16 @@ -1676,7 +1758,7 @@ for volumes. * You can't create a bridge network from the web interface. As a workaround use `/`. -## Version 2.2.4 +## Version 2.2.4 2017-11-02 @@ -1721,7 +1803,7 @@ for volumes. * You can't create a bridge network from the web interface. As a workaround use `/`. -## Version 2.2.3 +## Version 2.2.3 2017-09-13 @@ -1774,7 +1856,7 @@ for volumes. * You can't create a bridge network from the web interface. As a workaround use `/`. -## version 2.2.2 +## version 2.2.2 2017-08-30 @@ -1836,7 +1918,7 @@ for volumes. * If upgrading from an existing deployment, ensure that HRM is using a non-encrypted network prior to attaching Windows services. -## Version 2.2.0 +## Version 2.2.0 2017-08-16 diff --git a/ee/ucp/ucp-architecture.md b/ee/ucp/ucp-architecture.md index 0cd2b314a4..ead953a75b 100644 --- a/ee/ucp/ucp-architecture.md +++ b/ee/ucp/ucp-architecture.md @@ -52,7 +52,7 @@ If the node is a: ## UCP internal components -The core component of UCP is a globally-scheduled service called `ucp-agent`. +The core component of UCP is a globally scheduled service called `ucp-agent`. When you install UCP on a node, or join a node to a swarm that's being managed by UCP, the `ucp-agent` service starts running on that node. @@ -66,11 +66,6 @@ on a node depend on whether the node is a manager or a worker. > on Windows, the `ucp-agent` component is named `ucp-agent-win`. > [Learn about architecture-specific images](admin/install/architecture-specific-images.md). -Internally, UCP uses the following components: - -* Calico v3.5.3 -* Kubernetes v1.11.9 - ### UCP components in manager nodes Manager nodes run all UCP services, including the web UI and data stores that @@ -79,7 +74,7 @@ persist the state of UCP. These are the UCP services running on manager nodes: | UCP component | Description | |:--------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | k8s_calico-kube-controllers | A cluster-scoped Kubernetes controller used to coordinate Calico networking. Runs on one manager node only. | -| k8s_calico-node | The Calico node agent, which coordinates networking fabric according to the cluster-wide Calico configuration. Part of the `calico-node` daemonset. Runs on all nodes. Configure the CNI plugin by using the `--cni-installer-url` flag. If this flag isn't set, UCP uses Calico as the default CNI plugin. | +| k8s_calico-node | The Calico node agent, which coordinates networking fabric according to the cluster-wide Calico configuration. Part of the `calico-node` daemonset. Runs on all nodes. Configure the container network interface (CNI) plugin by using the `--cni-installer-url` flag. If this flag isn't set, UCP uses Calico as the default CNI plugin. | | k8s_install-cni_calico-node | A container that's responsible for installing the Calico CNI plugin binaries and configuration on each host. Part of the `calico-node` daemonset. Runs on all nodes. | | k8s_POD_calico-node | Pause container for the `calico-node` pod. | | k8s_POD_calico-kube-controllers | Pause container for the `calico-kube-controllers` pod. 
| @@ -120,7 +115,7 @@ services running on worker nodes: |:----------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | k8s_calico-node | The Calico node agent, which coordinates networking fabric according to the cluster-wide Calico configuration. Part of the `calico-node` daemonset. Runs on all nodes. | | k8s_install-cni_calico-node | A container that's responsible for installing the Calico CNI plugin binaries and configuration on each host. Part of the `calico-node` daemonset. Runs on all nodes. | -| k8s_POD_calico-node | "Pause" container for the Calico-node pod. By default, this container is hidden, but you can see it by running `docker ps -a`. | +| k8s_POD_calico-node | Pause container for the Calico-node pod. By default, this container is hidden, but you can see it by running `docker ps -a`. | | ucp-agent | Monitors the node and ensures the right UCP services are running | | ucp-interlock-extension | Helper service that reconfigures the ucp-interlock-proxy service based on the swarm workloads that are running. | | ucp-interlock-proxy | A service that provides load balancing and proxying for swarm workloads. Only runs when you enable Layer 7 routing. | diff --git a/ee/ucp/user-access/index.md b/ee/ucp/user-access/index.md index f20b9534d6..c5e7dbfc5d 100644 --- a/ee/ucp/user-access/index.md +++ b/ee/ucp/user-access/index.md @@ -20,7 +20,7 @@ From the browser, administrators can: * Manage the permissions of users, teams, and organizations, * See all images, networks, volumes, and containers. * Grant permissions to users for scheduling tasks on specific nodes - (with the Docker EE Advanced license). + (with the Docker Enterprise license). ![](../images/web-based-access-2.png){: .with-border} diff --git a/ee/upgrade.md b/ee/upgrade.md index 28f0120f75..4a68e13505 100644 --- a/ee/upgrade.md +++ b/ee/upgrade.md @@ -1,21 +1,22 @@ --- -title: Upgrade Docker EE -description: Learn how to upgrade your Docker Enterprise Edition, to start using the latest features and security patches. +title: Upgrade Docker Enterprise +description: Learn how to upgrade your Docker Enterprise to start using the latest features and security patches. keywords: enterprise, upgrade redirect_from: - /enterprise/upgrade/ --- -## Docker Engine - Enterprise 18.09 Upgrades +To upgrade Docker Enterprise, you must individually upgrade each of the +following components: -In Docker Engine - Enterprise 18.09, significant architectural improvements were made to the network -architecture in Swarm to increase the performance and scale of the built-in load balancing functionality. +1. Docker Engine - Enterprise. +2. [Universal Control Plane (UCP)](/ee/ucp/admin/install/upgrade/). +3. [Docker Trusted Registry (DTR)](/ee/dtr/admin/upgrade/). -> **Note**: These changes introduce new constraints to the Docker Engine - Enterprise upgrade process that, -> if not correctly followed, can have impact on the availability of applications running on the Swarm. These -> constraints impact any upgrades coming from any version before 18.09 to version 18.09 or greater. +Because some components become temporarily unavailable during an upgrade, schedule upgrades to occur outside of +peak business hours to minimize impact to your business. 
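+
+Before you begin, it can help to record the versions you are currently running. As a minimal
+sketch (run from a manager node; the `--format` template prints only the Engine version):
+
+```bash
+# Engine version currently running on this node
+docker version --format '{{.Server.Version}}'
+
+# Confirm that every node is Ready and Active before upgrading it
+docker node ls
+```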
-## Cluster Upgrade Best Practices
+## Cluster upgrade best practices

 Docker Engine - Enterprise upgrades in Swarm clusters should follow these guidelines in order to avoid IP address
space exhaustion and associated application downtime.
@@ -25,18 +26,6 @@ space exhaustion and associated application downtime.
* Once manager nodes are upgraded worker nodes should be upgraded next and then the Swarm cluster upgrade is complete.
* If running UCP, the UCP upgrade should follow once all of the Swarm engines have been upgraded.
-
-To upgrade Docker Engine - Enterprise you need to individually upgrade each of the
-following components:
-
-1. Docker Engine - Enterprise.
-2. [Universal Control Plane (UCP)](/ee/ucp/admin/install/upgrade/).
-3. [Docker Trusted Registry (DTR)](/ee/dtr/admin/upgrade/).
-
-While upgrading, some of these components become temporarily unavailable.
-So you should schedule your upgrades to take place outside business peak hours
-to make sure there's no impact to your business.
-
## Create a backup

Before upgrading Docker Engine - Enterprise, you should make sure you [create a backup](backup.md).

@@ -67,7 +56,7 @@ Before you upgrade, make sure:
> the UCP controller.
{: .important}

-## IP Address Consumption in 18.09+
+## IP address consumption in 18.09+

In Swarm overlay networks, each task connected to a network consumes an IP address on that network. Swarm networks have
a finite amount of IPs based on the `--subnet` configured when the network is created. If no subnet is specified then Swarm

@@ -191,7 +180,7 @@ i64lee19ia6s \_ ex_service.11 nginx:latest tk1706-ubuntu-1

7. Confirm the adjusted service deployed successfully.

-## Manager Upgrades When Moving to Docker Engine - Enterprise 18.09 and later
+### Manager upgrades when moving to Docker Engine - Enterprise 18.09 and later

The following is a constraint introduced by architectural changes to the Swarm overlay networking when
upgrading to Docker Engine - Enterprise 18.09 or later. It only applies to this one-time upgrade and to workloads

@@ -234,7 +223,7 @@ listed below:
* [Oracle Linux](/install/linux/docker-ee/oracle.md#upgrade-docker-ee)
* [SLES](/install/linux/docker-ee/suse.md#upgrade-docker-ee)

-### Post-Upgrade Steps
+### Post-upgrade steps for Docker Engine - Enterprise

After all manager and worker nodes have been upgraded, the Swarm cluster can be used again to schedule new workloads.
If workloads were previously scheduled off of the managers, they can be rescheduled again.
@@ -242,26 +231,14 @@ If any worker nodes were drained, they can be undrained again by setting `--avai

## Upgrade UCP

-Once you've upgraded the Docker Engine - Enterprise running on all the nodes, upgrade UCP.
-You can do this from the UCP web UI.
-
-![UCP update notification banner](images/upgrade-1.png){: .with-border}
-
-Click on the banner, and choose the version you want to upgrade to.
-
-![UCP upgrade page - version selection](images/upgrade-2.png){: .with-border}
-
-Once you click **Upgrade UCP**, the upgrade starts. If you want you can upgrade
-UCP from the CLI instead. [Learn more](/ee/ucp/admin/install/upgrade.md).
+Once you've upgraded the Docker Engine - Enterprise running on all the nodes,
+[upgrade UCP](/ee/ucp/admin/install/upgrade.md).

## Upgrade DTR

-Log in into the DTR web UI to check if there's a new version available.
+After you upgrade Docker Engine - Enterprise and UCP, [upgrade DTR](/ee/dtr/admin/upgrade.md).
-![DTR settings page](images/upgrade-3.png){: .with-border} - -Then follow these [instructions to upgrade DTR](/ee/dtr/admin/upgrade.md). -When this is finished, your Docker EE has been upgraded. +After the DTR upgrade is finished, the Docker Enterprise upgrade is complete. ## Where to go next diff --git a/engine/reference/commandline/cluster.md b/engine/reference/commandline/cluster.md new file mode 100644 index 0000000000..17606a1ccb --- /dev/null +++ b/engine/reference/commandline/cluster.md @@ -0,0 +1,14 @@ +--- +datafolder: cluster +datafile: docker_cluster +title: docker cluster +redirect_from: /cluster/reference/ +--- + +{% include cli.md datafolder=page.datafolder datafile=page.datafile %} diff --git a/engine/reference/commandline/cluster_backup.md b/engine/reference/commandline/cluster_backup.md new file mode 100644 index 0000000000..8aaeac77dc --- /dev/null +++ b/engine/reference/commandline/cluster_backup.md @@ -0,0 +1,13 @@ +--- +datafolder: cluster +datafile: docker_cluster_backup +title: docker cluster backup +--- + +{% include cli.md datafolder=page.datafolder datafile=page.datafile %} diff --git a/engine/reference/commandline/cluster_create.md b/engine/reference/commandline/cluster_create.md new file mode 100644 index 0000000000..d037d92893 --- /dev/null +++ b/engine/reference/commandline/cluster_create.md @@ -0,0 +1,13 @@ +--- +datafolder: cluster +datafile: docker_cluster_create +title: docker cluster create +--- + +{% include cli.md datafolder=page.datafolder datafile=page.datafile %} diff --git a/engine/reference/commandline/cluster_inspect.md b/engine/reference/commandline/cluster_inspect.md new file mode 100644 index 0000000000..c9beb76b5a --- /dev/null +++ b/engine/reference/commandline/cluster_inspect.md @@ -0,0 +1,13 @@ +--- +datafolder: cluster +datafile: docker_cluster_inspect +title: docker cluster inspect +--- + +{% include cli.md datafolder=page.datafolder datafile=page.datafile %} diff --git a/engine/reference/commandline/cluster_ls.md b/engine/reference/commandline/cluster_ls.md new file mode 100644 index 0000000000..e2ae6d5a93 --- /dev/null +++ b/engine/reference/commandline/cluster_ls.md @@ -0,0 +1,13 @@ +--- +datafolder: cluster +datafile: docker_cluster_ls +title: docker cluster ls +--- + +{% include cli.md datafolder=page.datafolder datafile=page.datafile %} diff --git a/engine/reference/commandline/cluster_restore.md b/engine/reference/commandline/cluster_restore.md new file mode 100644 index 0000000000..168dce11ac --- /dev/null +++ b/engine/reference/commandline/cluster_restore.md @@ -0,0 +1,13 @@ +--- +datafolder: cluster +datafile: docker_cluster_restore +title: docker cluster restore +--- + +{% include cli.md datafolder=page.datafolder datafile=page.datafile %} diff --git a/engine/reference/commandline/cluster_rm.md b/engine/reference/commandline/cluster_rm.md new file mode 100644 index 0000000000..ea67e11e0e --- /dev/null +++ b/engine/reference/commandline/cluster_rm.md @@ -0,0 +1,13 @@ +--- +datafolder: cluster +datafile: docker_cluster_rm +title: docker cluster rm +--- + +{% include cli.md datafolder=page.datafolder datafile=page.datafile %} diff --git a/engine/reference/commandline/cluster_update.md b/engine/reference/commandline/cluster_update.md new file mode 100644 index 0000000000..a2d0a34782 --- /dev/null +++ b/engine/reference/commandline/cluster_update.md @@ -0,0 +1,13 @@ +--- +datafolder: cluster +datafile: docker_cluster_update +title: docker cluster update +--- + +{% include cli.md datafolder=page.datafolder datafile=page.datafile %} 
diff --git a/engine/reference/commandline/cluster_version.md b/engine/reference/commandline/cluster_version.md
new file mode 100644
index 0000000000..c7ab98597d
--- /dev/null
+++ b/engine/reference/commandline/cluster_version.md
@@ -0,0 +1,13 @@
+---
+datafolder: cluster
+datafile: docker_cluster_version
+title: docker cluster version
+---
+
+{% include cli.md datafolder=page.datafolder datafile=page.datafile %}
diff --git a/engine/release-notes.md b/engine/release-notes.md
index 31a41cfb56..12b40983c4 100644
--- a/engine/release-notes.md
+++ b/engine/release-notes.md
@@ -10,11 +10,12 @@ redirect_from:
 ---

 This document describes the latest changes, additions, known issues, and fixes
-for Docker Engine - Enterprise and Docker Engine - Community.
+for Docker Engine Enterprise (Docker EE).

-Docker EE is a superset of all the features in Docker Docker Engine - Community. It incorporates defect fixes
-that you can use in environments where new features cannot be adopted as quickly for
-consistency and compatibility reasons.
+Docker EE builds upon the corresponding Docker CE that it
+references. Docker EE includes enterprise features as well as back-ported fixes (security-related
+and priority defects) from the open source community. It also incorporates defect fixes for environments
+in which new features cannot be adopted as quickly for consistency and compatibility reasons.

 > **Note**:
 > New in 18.09 is an aligned release model for Docker Engine - Community and Docker
@@ -29,6 +30,202 @@ consistency and compatibility reasons.
 > `sudo apt install docker-ce docker-ce-cli containerd.io`. See the install instructions
 > for the corresponding linux distro for details.

+## 19.03.0
+2019-07-22
+
+### Builder
+
+* Fixed `COPY --from` to preserve ownership. [moby/moby#38599](https://github.com/moby/moby/pull/38599)
+* builder-next:
+
+  - Added inline cache support `--cache-from`. [docker/engine#215](https://github.com/docker/engine/pull/215)
+  - Outputs configuration allowed. [moby/moby#38898](https://github.com/moby/moby/pull/38898)
+  - Fixed gcr workaround token cache. [docker/engine#212](https://github.com/docker/engine/pull/212)
+  - `stopprogress` called on download error. [docker/engine#215](https://github.com/docker/engine/pull/215)
+  - Buildkit now uses systemd's `resolv.conf`. [docker/engine#260](https://github.com/docker/engine/pull/260).
+  - Setting buildkit outputs now allowed. [docker/cli#1766](https://github.com/docker/cli/pull/1766)
+  - Look for Dockerfile specific dockerignore file (for example, Dockerfile.dockerignore) for
+    ignored paths. [docker/engine#215](https://github.com/docker/engine/pull/215)
+  - Automatically detect if process execution is possible for x86, arm, and arm64 binaries.
+    [docker/engine#215](https://github.com/docker/engine/pull/215)
+  - Updated buildkit to 1f89ec1. [docker/engine#260](https://github.com/docker/engine/pull/260)
+  - Use Dockerfile frontend version `docker/dockerfile:1.1` by default.
+    [docker/engine#215](https://github.com/docker/engine/pull/215)
+  - No longer rely on an external image for COPY/ADD operations.
+    [docker/engine#215](https://github.com/docker/engine/pull/215)
+
+### Client
+
+* Added `--pids-limit` flag to `docker update`. [docker/cli#1765](https://github.com/docker/cli/pull/1765)
+* Added sysctl support for services. [docker/cli#1754](https://github.com/docker/cli/pull/1754)
+* Added support for `template_driver` in compose files. [docker/cli#1746](https://github.com/docker/cli/pull/1746)
+* Added `--device` support for Windows.
[docker/cli#1606](https://github.com/docker/cli/pull/1606)
+* Added support for Data Path Port configuration. [docker/cli#1509](https://github.com/docker/cli/pull/1509)
+* Added fast context switch: commands. [docker/cli#1501](https://github.com/docker/cli/pull/1501)
+* Support added for `--mount type=bind,bind-nonrecursive,...` [docker/cli#1430](https://github.com/docker/cli/pull/1430)
+* Added maximum replicas per node. [docker/cli#1612](https://github.com/docker/cli/pull/1612)
+* Added option to pull images quietly. [docker/cli#882](https://github.com/docker/cli/pull/882)
+* Added a separate `--domainname` flag. [docker/cli#1130](https://github.com/docker/cli/pull/1130)
+* Added support for secret drivers in `docker stack deploy`. [docker/cli#1783](https://github.com/docker/cli/pull/1783)
+* Added ability to use swarm `Configs` as `CredentialSpecs` on services.
+[docker/cli#1781](https://github.com/docker/cli/pull/1781)
+* Added `--security-opt systempaths=unconfined` support. [docker/cli#1808](https://github.com/docker/cli/pull/1808)
+* Added basic framework for writing and running CLI plugins. [docker/cli#1564](https://github.com/docker/cli/pull/1564)
+  [docker/cli#1898](https://github.com/docker/cli/pull/1898)
+* Bumped Docker App to v0.8.0. [docker/docker-ce-packaging#341](https://github.com/docker/docker-ce-packaging/pull/341)
+* Added support for Docker buildx. [docker/docker-ce-packaging#336](https://github.com/docker/docker-ce-packaging/pull/336)
+* Added support for Docker Assemble v0.36.0.
+* Added support for Docker Cluster v1.0.0-rc2.
+* Added support for Docker Template v0.1.4.
+* Added support for Docker Registry v0.1.0-rc1.
+* Bumped google.golang.org/grpc to v1.20.1. [docker/cli#1884](https://github.com/docker/cli/pull/1884)
+* CLI changed to pass driver-specific options to `docker run`. [docker/cli#1767](https://github.com/docker/cli/pull/1767)
+* Bumped Golang to 1.12.5. [docker/cli#1875](https://github.com/docker/cli/pull/1875)
+* `docker system info` output now segregates information relevant to the client and daemon.
+[docker/cli#1638](https://github.com/docker/cli/pull/1638)
+* (Experimental) When targeting Kubernetes, added support for `x-pull-secret: some-pull-secret` in
+compose-files service configs. [docker/cli#1617](https://github.com/docker/cli/pull/1617)
+* (Experimental) When targeting Kubernetes, added support for `x-pull-policy: `
+in compose-files service configs. [docker/cli#1617](https://github.com/docker/cli/pull/1617)
+* cp, save, export: Now preventing overwriting irregular files. [docker/cli#1515](https://github.com/docker/cli/pull/1515)
+* npipe volume type on stack file now allowed. [docker/cli#1195](https://github.com/docker/cli/pull/1195)
+* Fixed tty initial size error. [docker/cli#1529](https://github.com/docker/cli/pull/1529)
+* Fixed a problem with labels copying values from environment variables.
+[docker/cli#1671](https://github.com/docker/cli/pull/1671)
+
+### API
+
+* Updated API version to v1.40. [moby/moby#38089](https://github.com/moby/moby/pull/38089)
+* Added warnings to `/info` endpoint, and moved detection to the daemon.
+[moby/moby#37502](https://github.com/moby/moby/pull/37502)
+* Added HEAD support for `/_ping` endpoint. [moby/moby#38570](https://github.com/moby/moby/pull/38570)
+* Added `Cache-Control` headers to disable caching `/_ping` endpoint.
+[moby/moby#38569](https://github.com/moby/moby/pull/38569)
+* Added `containerd`, `runc`, and `docker-init` versions to `/version`.
+[moby/moby#37974](https://github.com/moby/moby/pull/37974)
+* Added undocumented `/grpc` endpoint and registered BuildKit's controller.
+[moby/moby#38990](https://github.com/moby/moby/pull/38990)
+
+### Experimental
+* Enabled checkpoint/restore of containers with TTY. [moby/moby#38405](https://github.com/moby/moby/pull/38405)
+* LCOW: Added support for memory and CPU limits. [moby/moby#37296](https://github.com/moby/moby/pull/37296)
+* Windows: Added ContainerD runtime. [moby/moby#38541](https://github.com/moby/moby/pull/38541)
+* Windows: LCOW now requires Windows RS5+. [moby/moby#39108](https://github.com/moby/moby/pull/39108)
+
+### Security
+
+* mount: added BindOptions.NonRecursive (API v1.40). [moby/moby#38003](https://github.com/moby/moby/pull/38003)
+* seccomp: whitelisted `io_pgetevents()`. [moby/moby#38895](https://github.com/moby/moby/pull/38895)
+* seccomp: `ptrace(2)` for 4.8+ kernels now allowed. [moby/moby#38137](https://github.com/moby/moby/pull/38137)
+
+### Runtime
+
+* Running `dockerd` as a non-root user (Rootless mode) is now allowed.
+[moby/moby#38050](https://github.com/moby/moby/pull/38050)
+* Rootless: optional support provided for `lxc-user-nic` SUID binary.
+[docker/engine#208](https://github.com/docker/engine/pull/208)
+* Added DeviceRequests to HostConfig to support NVIDIA GPUs. [moby/moby#38828](https://github.com/moby/moby/pull/38828)
+* Added `--device` support for Windows. [moby/moby#37638](https://github.com/moby/moby/pull/37638)
+* Added `memory.kernelTCP` support for linux. [moby/moby#37043](https://github.com/moby/moby/pull/37043)
+* Windows credential specs can now be passed directly to the engine.
+[moby/moby#38777](https://github.com/moby/moby/pull/38777)
+* Added pids-limit support in docker update. [moby/moby#32519](https://github.com/moby/moby/pull/32519)
+* Added support for exact list of capabilities. [moby/moby#38380](https://github.com/moby/moby/pull/38380)
+* daemon: Now use 'private' ipc mode by default. [moby/moby#35621](https://github.com/moby/moby/pull/35621)
+* daemon: switched to semaphore-gated WaitGroup for startup tasks. [moby/moby#38301](https://github.com/moby/moby/pull/38301)
+* Now use `idtools.LookupGroup` instead of parsing `/etc/group` file for docker.sock ownership to
+fix: `api.go doesn't respect nsswitch.conf`. [moby/moby#38126](https://github.com/moby/moby/pull/38126)
+* cli: fixed images filter when using multi reference filter. [moby/moby#38171](https://github.com/moby/moby/pull/38171)
+* Bumped Golang to 1.12.5. [docker/engine#209](https://github.com/docker/engine/pull/209)
+* Bumped `containerd` to 1.2.6. [moby/moby#39016](https://github.com/moby/moby/pull/39016)
+* Bumped `runc` to 1.0.0-rc8, opencontainers/selinux v1.2.2. [docker/engine#210](https://github.com/docker/engine/pull/210)
+* Bumped `google.golang.org/grpc` to v1.20.1. [docker/engine#215](https://github.com/docker/engine/pull/215)
+* Optimized performance in aufs and the layer store for massively parallel container creation/removal.
+[moby/moby#39135](https://github.com/moby/moby/pull/39135) [moby/moby#39209](https://github.com/moby/moby/pull/39209)
+* Root is now passed to chroot for chroot Tar/Untar (CVE-2018-15664).
+[moby/moby#39292](https://github.com/moby/moby/pull/39292)
+* Fixed `docker --init` with /dev bind mount. [moby/moby#37665](https://github.com/moby/moby/pull/37665)
+* The right device number is now fetched when greater than 255 and using the `--device-read-bps` option.
+[moby/moby#39212](https://github.com/moby/moby/pull/39212)
+* Fixed `Path does not exist` error when path definitely exists. [moby/moby#39251](https://github.com/moby/moby/pull/39251)
+
+### Networking
+
+* Moved IPVLAN driver out of experimental.
+[moby/moby#38983](https://github.com/moby/moby/pull/38983)
+* Added support for 'dangling' filter. [moby/moby#31551](https://github.com/moby/moby/pull/31551)
+[docker/libnetwork#2230](https://github.com/docker/libnetwork/pull/2230)
+* Load balancer sandbox is now deleted when a service is updated with `--network-rm`.
+[docker/engine#213](https://github.com/docker/engine/pull/213)
+* Windows: Now forcing a nil IP specified in `PortBindings` to IPv4zero (0.0.0.0).
+[docker/libnetwork#2376](https://github.com/docker/libnetwork/pull/2376)
+
+### Swarm
+
+* Added support for maximum replicas per node. [moby/moby#37940](https://github.com/moby/moby/pull/37940)
+* Added support for GMSA CredentialSpecs from Swarmkit configs. [moby/moby#38632](https://github.com/moby/moby/pull/38632)
+* Added support for sysctl options in services. [moby/moby#37701](https://github.com/moby/moby/pull/37701)
+* Added support for filtering on node labels. [moby/moby#37650](https://github.com/moby/moby/pull/37650)
+* Windows: Support added for named pipe mounts in docker service create + stack yml.
+[moby/moby#37400](https://github.com/moby/moby/pull/37400)
+* VXLAN UDP Port configuration now supported. [moby/moby#38102](https://github.com/moby/moby/pull/38102)
+* Now using Service Placement Constraints in Enforcer. [docker/swarmkit#2857](https://github.com/docker/swarmkit/pull/2857)
+* Increased max recv gRPC message size for nodes and secrets.
+[docker/engine#256](https://github.com/docker/engine/pull/256)
+
+### Logging
+
+* Enabled gcplogs driver on Windows. [moby/moby#37717](https://github.com/moby/moby/pull/37717)
+* Added zero padding for RFC5424 syslog format. [moby/moby#38335](https://github.com/moby/moby/pull/38335)
+* Added `IMAGE_NAME` attribute to `journald` log events. [moby/moby#38032](https://github.com/moby/moby/pull/38032)
+
+### Deprecation
+
+* Deprecated image manifest v2 schema1 in favor of v2 schema2. A future version of Docker will remove
+support for v2 schema1 altogether. [moby/moby#39365](https://github.com/moby/moby/pull/39365)
+* Removed v1.10 migrator. [moby/moby#38265](https://github.com/moby/moby/pull/38265)
+* Now skipping deprecated storage-drivers in auto-selection. [moby/moby#38019](https://github.com/moby/moby/pull/38019)
+* Deprecated `aufs` storage driver and added warning. [moby/moby#38090](https://github.com/moby/moby/pull/38090)
+* Removed support for 17.09.
+* SLES12 is deprecated from Docker Enterprise 3.0, and EOL of SLES12 as an operating system will occur
+in Docker Enterprise 3.1. Upgrade to SLES15 for continued support on Docker Enterprise.
+* Windows 2016 is formally deprecated from Docker Enterprise 3.0. Only non-overlay networks are supported
+on Windows 2016 in Docker Enterprise 3.0. EOL of Windows Server 2016 support will occur in Docker
+Enterprise 3.1. Upgrade to Windows Server 2019 for continued support on Docker Enterprise.
+
+For more information on deprecated flags and APIs, refer to
+https://docs.docker.com/engine/deprecated/ for target removal dates.
+
+### Known issues
+
+* In some circumstances, in large clusters, the output of `docker info` might, as part of the Swarm section,
+include the error `code = ResourceExhausted desc = grpc: received message larger than
+max (5351376 vs. 4194304)`.
This does not indicate any failure or misconfiguration by the user,
+and requires no response.
+* Orchestrator port conflict can occur when redeploying all services as new. Due to many swarm manager
+requests in a short amount of time, some services are not able to receive traffic and cause a `404`
+error after being deployed.
+  - Workaround: restart all tasks via `docker service update --force`.
+
+* Traffic cannot egress the host because of missing iptables rules in the FORWARD chain.
+The missing rules are:
+  ```
+  /sbin/iptables --wait -C FORWARD -o docker_gwbridge -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
+  /sbin/iptables --wait -C FORWARD -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
+  ```
+  - Workaround: Add these rules back using a script and cron definitions. The script
+    must contain '-C' commands to check for the presence of a rule and '-A' commands to add
+    rules back. Run the script on a cron at regular intervals, for example, every minute.
+  - Affected versions: 17.06.2-ee-16, 18.09.1, 19.03.0
+* [CVE-2018-15664](https://nvd.nist.gov/vuln/detail/CVE-2018-15664) symlink-exchange attack with directory traversal. Workaround until proper fix is available in upcoming patch release: `docker pause` container before doing file operations. [moby/moby#39252](https://github.com/moby/moby/pull/39252)
+* `docker cp` regression due to CVE mitigation. An error is produced when the source of `docker cp` is set to `/`.
+* Docker Engine - Enterprise fails to install on RHEL on Azure. This affects any RHEL version that uses an Extended Update Support (EUS) image. At the time of this writing, known versions affected are RHEL 7.4, 7.5, and 7.6.
+
+  - Workaround options:
+    - Use an older image and don't get updates. Examples of EUS images are here: https://docs.microsoft.com/en-us/azure/virtual-machines/linux/rhel-images#rhel-images-with-eus.
+    - Import your own RHEL images into Azure and do not rely on the Extended Update Support (EUS) RHEL images.
+    - Use a RHEL image that does not contain a minor version in the SKU. These are not attached to EUS repositories. Some examples of those are the first three images (SKUs: 7-RAW, 7-LVM, 7-RAW-CI) listed here: https://docs.microsoft.com/en-us/azure/virtual-machines/linux/rhel-images#list-of-rhel-images-available.
+
 ## 18.09.08
 2019-07-17

@@ -71,7 +268,6 @@ consistency and compatibility reasons.
 * There are [important changes](/ee/upgrade) to the upgrade process that, if not correctly followed, can have an impact on the availability of applications running on the Swarm during upgrades. These constraints impact any upgrades coming from any version before 18.09 to version 18.09 or later.

 ## 18.09.6
-
 2019-05-06

 ### Builder
@@ -373,6 +569,26 @@ Ubuntu 14.04 "Trusty Tahr" [docker-ce-packaging#255](https://github.com/docker/d

 ## Older Docker Engine EE Release notes

+## 18.03.1-ee-9
+
+2019-06-25
+
+### Client
+
+* Fixed annotation on `docker config create --template-driver`. [docker/cli#1769](https://github.com/docker/cli/pull/1769)
+* Fixed annotation on `docker secret create --template-driver`. [docker/cli#1785](https://github.com/docker/cli/pull/1785)
+
+### Runtime
+
+* Optimized performance in aufs and the layer store for massively parallel container creation/removal.
+[moby/moby#39107](https://github.com/moby/moby/pull/39107)
+* Windows: fixed support for `docker service create --limit-cpu`.
+[moby/moby#39190](https://github.com/moby/moby/pull/39190)
+* Now using original process spec for execs.
[moby/moby#38871](https://github.com/moby/moby/pull/38871)
+* Fixed [CVE-2018-15664](https://nvd.nist.gov/vuln/detail/CVE-2018-15664) symlink-exchange attack
+with directory traversal. [moby/moby#39357](https://github.com/moby/moby/pull/39357)

 ## 18.03.1-ee-10

 2019-07-17
@@ -395,6 +611,7 @@ Ubuntu 14.04 "Trusty Tahr" [docker-ce-packaging#255](https://github.com/docker/d
 * Windows: Fixed support for `docker service create --limit-cpu`. [moby/moby#39190](https://github.com/moby/moby/pull/39190)
 * Fixed a bug where the original process spec was not used for exec processes.[moby/moby#38871](https://github.com/moby/moby/pull/38871)
 * Fixed [CVE-2018-15664](https://nvd.nist.gov/vuln/detail/CVE-2018-15664) symlink-exchange attack with directory traversal. [moby/moby#39357](https://github.com/moby/moby/pull/39357)

 ## 18.03.1-ee-8

@@ -452,48 +669,40 @@ Ubuntu 14.04 "Trusty Tahr" [docker-ce-packaging#255](https://github.com/docker/d
 * Mask proxy credentials from URL when displayed in system info (docker/escalation#879)

 ## 18.03.1-ee-4
-
-2018-10-25
+2018-10-25

 > **Note**: If you're deploying UCP or DTR, use Docker EE Engine 18.09 or higher. 18.03 is an engine only release.

- #### Client
+### Client
+* Fixed help message flags on docker stack commands and child commands. [docker/cli#1251](https://github.com/docker/cli/pull/1251)
+* Fixed typo breaking zsh docker update autocomplete. [docker/cli#1232](https://github.com/docker/cli/pull/1232)

- * Fixed help message flags on docker stack commands and child commands. [docker/cli#1251](https://github.com/docker/cli/pull/1251)
- * Fixed typo breaking zsh docker update autocomplete. [docker/cli#1232](https://github.com/docker/cli/pull/1232)

+### Networking
+* Added optimizations to reduce the messages in the NetworkDB queue. [docker/libnetwork#2225](https://github.com/docker/libnetwork/pull/2225)
+* Fixed a very rare condition where managers are not correctly triggering the reconnection logic. [docker/libnetwork#2226](https://github.com/docker/libnetwork/pull/2226)
+* Changed loglevel from error to warning for missing disable_ipv6 file. [docker/libnetwork#2224](https://github.com/docker/libnetwork/pull/2224)

- ### Networking

+### Runtime
+* Fixed denial of service with large numbers in cpuset-cpus and cpuset-mems. [moby/moby#37967](https://github.com/moby/moby/pull/37967)
+* Added stability improvements for devicemapper shutdown. [moby/moby#36307](https://github.com/moby/moby/pull/36307) [moby/moby#36438](https://github.com/moby/moby/pull/36438)

- * Added optimizations to reduce the messages in the NetworkDB queue. [docker/libnetwork#2225](https://github.com/docker/libnetwork/pull/2225)
- * Fixed a very rare condition where managers are not correctly triggering the reconnection logic. [docker/libnetwork#2226](https://github.com/docker/libnetwork/pull/2226)
- * Changed loglevel from error to warning for missing disable_ipv6 file. [docker/libnetwork#2224](https://github.com/docker/libnetwork/pull/2224)
-
- #### Runtime
-
- * Fixed denial of service with large numbers in cpuset-cpus and cpuset-mems. [moby/moby#37967](https://github.com/moby/moby/pull/37967)
- * Added stability improvements for devicemapper shutdown. [moby/moby#36307](https://github.com/moby/moby/pull/36307) [moby/moby#36438](https://github.com/moby/moby/pull/36438)
-
- #### Swarm Mode
-
- * Fixed the logic used for skipping over running tasks.
[docker/swarmkit#2724](https://github.com/docker/swarmkit/pull/2724)
- * Addressed unassigned task leak when a service is removed. [docker/swarmkit#2709](https://github.com/docker/swarmkit/pull/2709)
+### Swarm Mode
+* Fixed the logic used for skipping over running tasks. [docker/swarmkit#2724](https://github.com/docker/swarmkit/pull/2724)
+* Addressed unassigned task leak when a service is removed. [docker/swarmkit#2709](https://github.com/docker/swarmkit/pull/2709)

 ## 18.03.1-ee-3

 2018-08-30

 #### Builder
-
 * Fix: no error if build args are missing during docker build. [docker/engine#25](https://github.com/docker/engine/pull/25)
 * Ensure RUN instruction to run without healthcheck. [moby/moby#37413](https://github.com/moby/moby/pull/37413)

 #### Client
-
 * Fix manifest list to always use correct size. [docker/cli#1156](https://github.com/docker/cli/pull/1156)
 * Various shell completion script updates. [docker/cli#1159](https://github.com/docker/cli/pull/1159) [docker/cli#1227](https://github.com/docker/cli/pull/1227)
 * Improve version output alignment. [docker/cli#1204](https://github.com/docker/cli/pull/1204)

 #### Runtime
-
 * Disable CRI plugin listening on port 10010 by default. [docker/engine#29](https://github.com/docker/engine/pull/29)
 * Update containerd to v1.1.2. [docker/engine#33](https://github.com/docker/engine/pull/33)
 * Windows: Pass back system errors on container exit. [moby/moby#35967](https://github.com/moby/moby/pull/35967)
@@ -501,7 +710,6 @@ Ubuntu 14.04 "Trusty Tahr" [docker-ce-packaging#255](https://github.com/docker/d
 * Register OCI media types. [docker/engine#4](https://github.com/docker/engine/pull/4)

 #### Swarm Mode
-
 * Clean up tasks in dirty list for which the service has been deleted. [docker/swarmkit#2694](https://github.com/docker/swarmkit/pull/2694)
 * Propagate the provided external CA certificate to the external CA object in swarm. [docker/cli#1178](https://github.com/docker/cli/pull/1178)
@@ -541,6 +749,23 @@ Ubuntu 14.04 "Trusty Tahr" [docker-ce-packaging#255](https://github.com/docker/d
 + Support for `--chown` with `COPY` and `ADD` in `Dockerfile`.
 + Added functionality for the `docker logs` command to include the output of multiple logging drivers.

+## 17.06.2-ee-22
+2019-06-25
+
+### Networking
+
+* Fixed changing host target port. Fixes a bug where if a service has the same number of host-mode published ports with PublishedPort 0, changes to the spec would not be reflected in the service object. [docker/swarmkit#2376](https://github.com/docker/swarmkit/pull/2376)
+
+### Runtime
+
+* Optimized performance in aufs and the layer store for massively parallel container creation/removal.
+[moby/moby#39107](https://github.com/moby/moby/pull/39107)
+* Fixed [CVE-2018-15664](https://nvd.nist.gov/vuln/detail/CVE-2018-15664) symlink-exchange attack with
+directory traversal. [moby/moby#39357](https://github.com/moby/moby/pull/39357)
+* Windows: provided support for `docker service create --limit-cpu`.
+[moby/moby#39190](https://github.com/moby/moby/pull/39190)

 ## 17.06.2-ee-23

 2019-07-17

@@ -604,6 +829,7 @@ not reachable until one of these 2 conditions happens:
 As a workaround, send at least a packet out from each container like (ping, GARP, etc).
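
For example, a one-shot ping can be sent from inside each affected container (the container name
and target address below are placeholders; any outbound packet satisfies the workaround):

```bash
docker exec my-container ping -c 1 10.0.0.1
```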
 ## 17.06.2-ee-21

 2019-04-11
diff --git a/engine/security/trust/content_trust.md b/engine/security/trust/content_trust.md
index bedd81b38a..924e13ba8a 100644
--- a/engine/security/trust/content_trust.md
+++ b/engine/security/trust/content_trust.md
@@ -7,21 +7,21 @@ title: Content trust in Docker
 When transferring data among networked systems, *trust* is a central concern. In
particular, when communicating over an untrusted medium such as the internet, it
is critical to ensure the integrity and the publisher of all the data a system
-operates on. You use the Docker Engine to push and pull images (data) to a
-public or private registry. Content trust gives you the ability to verify both
-the integrity and the publisher of all the data received from a registry over
+operates on. You use the Docker Engine to push and pull images (data) to a
+public or private registry. Content trust gives you the ability to verify both
+the integrity and the publisher of all the data received from a registry over
 any channel.

 ## About Docker Content Trust (DCT)

-Docker Content Trust (DCT) provides the ability to use digital signatures for
-data sent to and received from remote Docker registries. These signatures allow
-client-side or runtime verification of the integrity and publisher of specific
-image tags.
+Docker Content Trust (DCT) provides the ability to use digital signatures for
+data sent to and received from remote Docker registries. These signatures allow
+client-side or runtime verification of the integrity and publisher of specific
+image tags.

-Through DCT, image publishers can sign their images and image consumers can
-ensure that the images they use are signed. Publishers could be individuals
-or organizations manually signing their content or automated software supply
+Through DCT, image publishers can sign their images and image consumers can
+ensure that the images they use are signed. Publishers could be individuals
+or organizations manually signing their content or automated software supply
 chains signing content as part of their release process.

 ### Image tags and DCT
@@ -37,7 +37,7 @@ A particular image `REPOSITORY` can have multiple tags. For example, `latest` an
 and tag combination many times changing the image with each build.

 DCT is associated with the `TAG` portion of an image. Each image repository has
-a set of keys that image publishers use to sign an image tag. Image publishers
+a set of keys that image publishers use to sign an image tag. Image publishers
 have discretion on which tags they sign.

 An image repository can contain an image with one tag that is signed and another
@@ -57,16 +57,16 @@ push replaces the last unsigned tag `latest` but does not affect the signed `lat
 The ability to choose which tags they can sign, allows publishers to iterate over
the unsigned version of an image before officially signing it.

-Image consumers can enable DCT to ensure that images they use were signed. If a
-consumer enables DCT, they can only pull, run, or build with trusted images.
-Enabling DCT is a bit like applying a "filter" to your registry. Consumers "see"
-only signed image tags and the less desirable, unsigned image tags are
+Image consumers can enable DCT to ensure that images they use were signed. If a
+consumer enables DCT, they can only pull, run, or build with trusted images.
+Enabling DCT is a bit like applying a "filter" to your registry. Consumers "see"
+only signed image tags and the less desirable, unsigned image tags are
 "invisible" to them.
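+
+For example, a consumer can enable DCT for a single shell session before pulling. A minimal
+sketch using the example repository that appears later on this page (`unsigned` is a
+hypothetical tag):
+
+```
+$ export DOCKER_CONTENT_TRUST=1
+$ docker pull dtr.example.com/admin/demo:1        # signed tag, pull proceeds
+$ docker pull dtr.example.com/admin/demo:unsigned # unsigned tag, pull is refused
+```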
![Trust view](images/trust_view.png) -To the consumer who has not enabled DCT, nothing about how they work with Docker -images changes. Every image is visible regardless of whether it is signed or +To the consumer who has not enabled DCT, nothing about how they work with Docker +images changes. Every image is visible regardless of whether it is signed or not. ### Docker Content Trust Keys @@ -99,78 +99,78 @@ read how to [manage keys for DCT](trust_key_mng.md). ## Signing Images with Docker Content Trust -> Note this applies to Docker Community Engine 17.12 and newer, and Docker +> Note this applies to Docker Community Engine 17.12 and newer, and Docker > Enterprise Engine 18.03 and newer. -Within the Docker CLI we can sign and push a container image with the +Within the Docker CLI we can sign and push a container image with the `$ docker trust` command syntax. This is built on top of the Notary feature -set, more information on Notary can be found [here](/notary/getting_started/). +set, more information on Notary can be found [here](/notary/getting_started/). A prerequisite for signing an image is a Docker Registry with a Notary server attached (Such as the Docker Hub or Docker Trusted Registry). Instructions for standing up a self-hosted environment can be found [here](/engine/security/trust/deploying_notary/). To sign a Docker Image you will need a delegation key pair. These keys -can be generated locally using `$ docker trust key generate`, generated -by a certificate authority, or if you are using Docker Enterprise's -Universal Control Plane (UCP), a user's Client Bundle provides adequate keys for a -delegation. Find more information on Delegation Keys +can be generated locally using `$ docker trust key generate`, generated +by a certificate authority, or if you are using Docker Enterprise's +Universal Control Plane (UCP), a user's Client Bundle provides adequate keys for a +delegation. Find more information on Delegation Keys [here](trust_delegation/#creating-delegation-keys). -First we will add the delegation private key to the local Docker trust -repository. (By default this is stored in `~/.docker/trust/`). If you are -generating delegation keys with `$ docker trust key generate`, the private key -is automatically added to the local trust store. If you are importing a separate -key, such as one from a UCP Client Bundle you will need to use the +First we will add the delegation private key to the local Docker trust +repository. (By default this is stored in `~/.docker/trust/`). If you are +generating delegation keys with `$ docker trust key generate`, the private key +is automatically added to the local trust store. If you are importing a separate +key, such as one from a UCP Client Bundle you will need to use the `$ docker trust key load` command. ``` $ docker trust key generate jeff Generating key for jeff... -Enter passphrase for new jeff key with ID 9deed25: -Repeat passphrase for new jeff key with ID 9deed25: +Enter passphrase for new jeff key with ID 9deed25: +Repeat passphrase for new jeff key with ID 9deed25: Successfully generated and loaded private key. Corresponding public key available: /home/ubuntu/Documents/mytrustdir/jeff.pub ``` -Or if you have an existing key: +Or if you have an existing key: ``` $ docker trust key load key.pem --name jeff Loading key from "key.pem"... 
-Next we will need to add the delegation public key to the Notary server;
-this is specific to a particular image repository in Notary known as a Global
+Next, we will need to add the delegation public key to the Notary server;
+this is specific to a particular image repository in Notary known as a Globally
Unique Name (GUN). If this is the first time you are adding a delegation to that
-repository, this command will also initiate the repository, using a local Notary
+repository, this command will also initialize the repository, using a local Notary
canonical root key. To understand more about initializing a repository, and the
-role of delegations, head to
+role of delegations, head to
[delegations for content trust](trust_delegation/#managing-delegations-in-a-notary-server).

```
$ docker trust signer add --key cert.pem jeff dtr.example.com/admin/demo
Adding signer "jeff" to dtr.example.com/admin/demo...
-Enter passphrase for new repository key with ID 10b5e94:
+Enter passphrase for new repository key with ID 10b5e94:
```

-Finally, we will use the delegation private key to sign a particular tag and
+Finally, we will use the delegation private key to sign a particular tag and
push it up to the registry.

```
$ docker trust sign dtr.example.com/admin/demo:1
Signing and pushing trust data for local image dtr.example.com/admin/demo:1, may overwrite remote trust data
The push refers to repository [dtr.example.com/admin/demo]
-7bff100f35cb: Pushed
+7bff100f35cb: Pushed
1: digest: sha256:3d2e482b82608d153a374df3357c0291589a61cc194ec4a9ca2381073a17f58e size: 528
Signing and pushing trust metadata
-Enter passphrase for signer key with ID 8ae710e:
+Enter passphrase for signer key with ID 8ae710e:
Successfully signed dtr.example.com/admin/demo:1
```

-Alternatively, once the keys have been imported, an image can be pushed with the
+Alternatively, once the keys have been imported, an image can be pushed with the
`$ docker push` command, by exporting the DCT environment variable.

```
@@ -178,14 +178,14 @@ $ export DOCKER_CONTENT_TRUST=1

$ docker push dtr.example.com/admin/demo:1
The push refers to repository [dtr.example.com/admin/demo:1]
-7bff100f35cb: Pushed
+7bff100f35cb: Pushed
1: digest: sha256:3d2e482b82608d153a374df3357c0291589a61cc194ec4a9ca2381073a17f58e size: 528
Signing and pushing trust metadata
-Enter passphrase for signer key with ID 8ae710e:
+Enter passphrase for signer key with ID 8ae710e:
Successfully signed dtr.example.com/admin/demo:1
```

-Remote trust data for a tag or a repository can be viewed by the
+Remote trust data for a tag or a repository can be viewed with the
`$ docker trust inspect` command:

```
@@ -211,54 +211,60 @@ Remote Trust data for a tag can be removed by the `$ docker trust revoke` comman

```
$ docker trust revoke dtr.example.com/admin/demo:1
-Enter passphrase for signer key with ID 8ae710e:
+Enter passphrase for signer key with ID 8ae710e:
Successfully deleted signature for dtr.example.com/admin/demo:1
```
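A delegation added with `$ docker trust signer add` can likewise be detached later. A minimal sketch using the same hypothetical signer and repository as above, with approximate output:

```
$ docker trust signer remove jeff dtr.example.com/admin/demo
Removing signer "jeff" from dtr.example.com/admin/demo...
Successfully removed jeff from dtr.example.com/admin/demo
```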
## Runtime Enforcement with Docker Content Trust

-> Note this only applies to Docker Enterprise Engine 18.09 or newer. This
+> implementation is also separate from the `only run signed images` feature of
> [Universal Control Plane](/ee/ucp/admin/configure/run-only-the-images-you-trust/)

Docker Content Trust within the Docker Enterprise Engine prevents a user from
-using a container image from an unknown source, it will also prevent a user from
+using a container image from an unknown source; it will also prevent a user from
building a container image from a base layer from an unknown source. Trusted sources
could include Official Docker Images, found on the [Docker
Hub](https://hub.docker.com/search?image_filter=official&type=image), or User
trusted sources, with repositories and tags signed with the commands [above](#signing-images-with-docker-content-trust).

Engine Signature Verification prevents the following:

-* `$ docker container run` of an unsigned image.
-* `$ docker pull` of an unsigned image.
+* `$ docker container run` of an unsigned or altered image.
+* `$ docker pull` of an unsigned or altered image.
* `$ docker build` where the `FROM` image is not signed or is not scratch.

-DCT does not verify that a running container’s filesystem has not been altered
-from what was in the image. For example, it does not prevent a container from
-writing to the filesystem, once the container is running, nor does it prevent
-the container’s filesystem from being altered on disk. DCT will also not prevent
+> **Note**: The implicit pulls and runs performed by worker
+> nodes for a [Swarm service](/engine/swarm/services.md) on `$ docker service create` and
+> `$ docker service update` are also verified. Tag resolution of services
+> requires that all nodes in the Swarm, including managers, have content trust
+> enabled and similarly configured.
+
+DCT does not verify that a running container’s filesystem has not been altered
+from what was in the image. For example, it does not prevent a container from
+writing to the filesystem once the container is running, nor does it prevent
+the container’s filesystem from being altered on disk. DCT will also not prevent
unsigned images from being imported, loaded, or created.

-### Enabling DCT within the Docker Enterprise Engine
+### Enabling DCT within the Docker Enterprise Engine

DCT is controlled by the Docker Engine's configuration file. By default this is
-found at `/etc/docker/daemon.json`. More details on this file can be found
+found at `/etc/docker/daemon.json`. More details on this file can be found
[here](/engine/reference/commandline/dockerd/#daemon-configuration-file).

The `content-trust` flag is based around a `mode` variable instructing
-the engine whether to enforce signed images, and a `trust-pinning` variable
-instructing the engine which sources to trust.
+the engine whether to enforce signed images, and a `trust-pinning` variable
+instructing the engine which sources to trust.

-`Mode` can take three variables:
+`mode` can take one of three values:

-* `Disabled` - Verification is not active and the remainder of the content-trust
+* `Disabled` - Verification is not active and the remainder of the content-trust
related metadata will be ignored. This is the default value if `mode` is not
specified.
-* `Permissive` - Verification will be performed, but only failures will be
-logged and remain unenforced. This configuration is intended for testing of
-changes related to content-trust. The results of the signature verification
-is displayed in the Docker Engine's daemon logs.
-* `Enforced` - Content trust will be enforced and an image that cannot be
+* `Permissive` - Verification will be performed, but only failures will be
+logged and remain unenforced. This configuration is intended for testing of
+changes related to content-trust. The results of the signature verification
+are displayed in the Docker Engine's daemon logs.
+* `Enforced` - Content trust will be enforced and an image that cannot be
verified successfully will not be pulled or run.

```
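Since the hunk below elides the configuration example that follows in the file, here is a minimal sketch of a `daemon.json` that only logs verification failures, using the `content-trust` fields described above:

```
{
  "content-trust": {
    "mode": "permissive"
  }
}
```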
@@ -272,9 +278,9 @@ verified successfully will not be pulled or run.

### Official Docker images

All official Docker library images found on the Docker Hub (docker.io/library/*)
-are signed by the same Notary root key. This root key's ID has been embedded
-inside of the Docker Enterprise Engine. Therefore, to enforce that, only official
-Docker images can be used. Specify:
+are signed by the same Notary root key. This root key's ID has been embedded
+inside the Docker Enterprise Engine. Therefore, to enforce that only official
+Docker images can be used, specify:

```
{
@@ -289,23 +295,23 @@ Docker images can be used. Specify:
```

### User-Signed images

-There are two options for trust pinning user-signed images:
+There are two options for trust pinning user-signed images:

-* Notary Canonical Root Key ID (DCT Root Key) is an ID that describes *just* the
-root key used to sign a repository (or rather its respective keys). This is the
-root key on the host that originally signed the repository (i.e. your workstation).
-This can be retrieved from the workstation that signed the repository through
-`$ grep -r "root" ~/.docker/trust/private/` (Assuming your trust data is
-at `~/.docker/trust/*`). It is expected that this canonical ID has initiated
+* Notary Canonical Root Key ID (DCT Root Key) is an ID that describes *just* the
+root key used to sign a repository (or rather its respective keys). This is the
+root key on the host that originally signed the repository (i.e., your workstation).
+This can be retrieved from the workstation that signed the repository through
+`$ grep -r "root" ~/.docker/trust/private/` (assuming your trust data is
+at `~/.docker/trust/*`). It is expected that this canonical ID has initialized
multiple image repositories (`mydtr/user1/image1` and `mydtr/user1/image2`).

```
# Retrieving Root ID
$ grep -r "root" ~/.docker/trust/private
/home/ubuntu/.docker/trust/private/0b6101527b2ac766702e4b40aa2391805b70e5031c04714c748f914e89014403.key:role: root
-
+
# Using a Canonical ID that has signed 2 repos (mydtr/user1/repo1 and mydtr/user1/repo2). Note you can use a Wildcard.
-
+
{
"content-trust": {
"trust-pinning": {
@@ -320,21 +326,21 @@ $ grep -r "root" ~/.docker/trust/private
}
}
```
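Because most of that example is elided by the hunk above, here is a sketch of its overall shape; the field names follow this page, while the wildcard GUN and key ID are illustrative:

```
{
  "content-trust": {
    "trust-pinning": {
      "root-keys": {
        "mydtr/user1/*": [
          "0b6101527b2ac766702e4b40aa2391805b70e5031c04714c748f914e89014403"
        ]
      }
    },
    "mode": "enforced"
  }
}
```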
-* Notary Root key ID (DCT Certificate ID) is an ID that describes the same, but
-the ID is unique per repository. For example, `mydtr/user1/image1` and `mydtr/usr1/image2`
-will have unique certificate IDs. A certificate ID can be retrieved through a
+`$ docker trust inspect` command and is labelled as a root-key (referring back
+to the Notary key name). This is designed for when different users are signing
+their own repositories, for example, when there is no central signing server. As a cert-id
+is more granular, it would take priority over a root ID if a conflict occurs.
+

```
# Retrieving Cert ID
$ docker trust inspect mydtr/user1/repo1 | jq -r '.[].AdministrativeKeys[] | select(.Name=="Root") | .Keys[].ID'
-9430d6e31e3b3e240957a1b62bbc2d436aafa33726d0fcb50addbf7e2dfa2168
+9430d6e31e3b3e240957a1b62bbc2d436aafa33726d0fcb50addbf7e2dfa2168

# Using Cert Ids, by specifying 2 repositories by their DCT root ID. Example for using this may be different DTRs or maybe because the repository was initiated on different hosts, therefore having different canonical IDs.
-
+
{
"content-trust": {
"trust-pinning": {
@@ -355,8 +361,8 @@ $ docker trust inspect mydtr/user1/repo1 | jq -r '.[].AdministrativeKeys[] | sel

### Using DCT in an offline environment

If your engine is unable to communicate with the registry, we can enable DCT to
-trust cached signature data. This is done through the
-`allow-expired-cached-trust-data` variable.
+trust cached signature data. This is done through the
+`allow-expired-cached-trust-data` variable.

```
{
@@ -375,22 +381,22 @@ trust cached signature data. This is done through the
}
},
"mode": "enforced",
- "allow-expired-cached-trust-data": true
+ "allow-expired-cached-trust-data": true
}
}
```

## Client Enforcement with Docker Content Trust

-> Note this is supported on Docker Community and Enterprise Engines newer than
+> Note this is supported on Docker Community and Enterprise Engines newer than
> 17.03.

-Currently, content trust is disabled by default in the Docker Client. To enable
-it, set the `DOCKER_CONTENT_TRUST` environment variable to `1`. This prevents
+Currently, content trust is disabled by default in the Docker Client. To enable
+it, set the `DOCKER_CONTENT_TRUST` environment variable to `1`. This prevents
users from working with tagged images unless they contain a signature.
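For example (a sketch, not part of this diff, with a hypothetical repository), trust can be switched on for a shell session and bypassed for a single command with the `--disable-content-trust` flag:

```
$ export DOCKER_CONTENT_TRUST=1

# This pull now requires the tag to be signed:
$ docker pull dtr.example.com/user/image:1

# This one explicitly opts out of verification:
$ docker pull --disable-content-trust dtr.example.com/user/image:1
```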
-When DCT is enabled in the Docker client, `docker` CLI commands that operate on
-tagged images must either have content signatures or explicit content hashes.
+When DCT is enabled in the Docker client, `docker` CLI commands that operate on
+tagged images must either have content signatures or explicit content hashes.
The commands that operate with DCT are:

* `push`
@@ -409,8 +415,8 @@ Error: remote trust data does not exist for dtr.example.com/user/image: dtr.exam

$ docker pull dtr.example.com/user/image@sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a
sha256:ee7491c9c31db1ffb7673d91e9fac5d6354a89d0e97408567e09df069a1687c1: Pulling from user/image
-ff3a5c916c92: Pull complete
-a59a168caba3: Pull complete
+ff3a5c916c92: Pull complete
+a59a168caba3: Pull complete
Digest: sha256:ee7491c9c31db1ffb7673d91e9fac5d6354a89d0e97408567e09df069a1687c1
Status: Downloaded newer image for dtr.example.com/user/image@sha256:ee7491c9c31db1ffb7673d91e9fac5d6354a89d0e97408567e09df069a1687c1
```

diff --git a/engine/swarm/services.md b/engine/swarm/services.md
index ec7c30e04f..335f405914 100644
--- a/engine/swarm/services.md
+++ b/engine/swarm/services.md
@@ -74,6 +74,27 @@ $ docker service create --name helloworld alpine:3.6 ping docker.com

For more details about image tag resolution, see
[Specify the image version the service should use](#specify-the-image-version-the-service-should-use).

+### gMSA for Swarm
+
+Swarm now allows using a Docker Config as a gMSA credential spec - a requirement for Active Directory-authenticated applications. This reduces the burden of distributing credential specs to the nodes they're used on.
+
+The following example assumes that a gMSA and its credential spec (called `credspec.json`) already exist, and that the nodes being deployed to are correctly configured for the gMSA.
+
+To use a Config as a credential spec, first create the Docker Config containing the credential spec:
+
+```bash
+docker config create credspec credspec.json
+```
+
+Now you should have a Docker Config named `credspec`, and you can create a service using this credential spec. To do so, use the `--credential-spec` flag with the config name, like this:
+
+```bash
+docker service create --credential-spec="config://credspec" <your_image>
+```
+
+Your service will use the gMSA credential spec when it starts, but unlike a typical Docker Config (used by passing the `--config` flag), the credential spec will not be mounted into the container.
+
### Create a service using an image on a private registry

If your image is available on a private registry which requires login, use the
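To make the gMSA flow above concrete end to end, here is an illustrative sketch (not part of the diff; the service name and image are placeholders):

```bash
# Confirm the credential spec was stored as a config
docker config inspect credspec

# Create the service; substitute a real Windows image for the placeholder
docker service create --name my-ad-app \
  --credential-spec="config://credspec" \
  <your_windows_image>
```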
diff --git a/index.md b/index.md
index b89d30a301..d985080c98 100644
--- a/index.md
+++ b/index.md
@@ -13,7 +13,7 @@ notags: true

## Get started with Docker

-Try our new multi-part walkthrough that covers writing your first app,
+Try our multi-part walkthrough that covers writing your first app,
data storage, networking, and swarms, and ends with your app
running on production servers in the cloud. Total reading time is less than an hour.

@@ -34,20 +34,20 @@ Docker Enterprise without installing anything.

-## Docker Editions
+## Docker products

### Docker Engine - Community

-Get started with Docker and experimenting with container-based apps. Docker CE
+Get started with Docker and experiment with container-based apps. Docker Engine - Community
is available on many platforms, from desktop to cloud to server. Build and share
containers and automate the development pipeline from a single environment. Choose
the Edge channel to get access to the latest features, or the Stable channel for more
predictability.

-[Learn more about Docker CE](/install/){: class="button outline-btn"}
+[Learn more about Docker Engine - Community](/install/){: class="button outline-btn"}
@@ -62,7 +62,7 @@ the industry to modernize all applications. Docker Enterprise comes with enterpr orchestrating the container runtime, and Docker Trusted Registry (DTR) for storing and securing images in an enterprise grade registry. -[Learn more about Docker Enterprise supported platforms](/ee/supported-platforms/){: class="button outline-btn"} +[Learn more about Docker Enterprise products](/ee/supported-platforms/){: class="button outline-btn"}
diff --git a/release-notes/index.md b/release-notes/index.md index 1d9192fef7..4a6e6362c9 100644 --- a/release-notes/index.md +++ b/release-notes/index.md @@ -1,18 +1,18 @@ --- description: Release notes for Docker keywords: docker, documentation, about, technology, understanding, release -title: Docker Release Notes +title: Docker release notes --- -Find out what's new in Docker products! -- [Docker Enterprise Platform](/ee/release-notes/) - - [Docker Engine - Enterprise and Engine - Community](/engine/release-notes) - - [Docker Trusted Registry](/ee/dtr/release-notes/) - - [Docker Universal Control Plane](/ee/ucp/release-notes/) - - [Docker Desktop Enterprise](/ee/desktop/release-notes/) -- [Docker Desktop for Mac](/docker-for-mac/release-notes.md) ([Edge Releases](/docker-for-mac/edge-release-notes.md)) -- [Docker Desktop for Windows](/docker-for-windows/release-notes.md) ([Edge Releases](/docker-for-windows/edge-release-notes.md)) -- [Docker for Azure](/docker-for-azure/release-notes.md) or [Docker for AWS](/docker-for-aws/release-notes.md) +Find out what's new in Docker! Release notes also contain detailed information about known issues and fixes for each component. + +- [Docker Engine](/engine/release-notes) +- [Docker Desktop Enterprise](/ee/desktop/release-notes/) +- [Docker Desktop for Mac](/docker-for-mac/release-notes.md) +- [Docker Desktop for Mac Edge Releases](/docker-for-mac/edge-release-notes.md) +- [Docker Desktop for Windows](/docker-for-windows/release-notes.md) +- [Docker Desktop for Windows Edge Releases](/docker-for-windows/edge-release-notes.md) +- [Docker for AWS](/docker-for-aws/release-notes.md) - [Docker Compose](docker-compose.md) - [Docker Machine](docker-machine.md) - [Docker Swarm (standalone)](docker-swarm.md)