From c695a430f0c3da79af972484ae5f408921ace2a4 Mon Sep 17 00:00:00 2001 From: Hacktivista Date: Wed, 7 Nov 2018 11:15:49 -0300 Subject: [PATCH 01/18] Update configs.md configs key is not (and will not be) supported in docker compose, check https://github.com/docker/compose/issues/5110 for more info --- engine/swarm/configs.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/engine/swarm/configs.md b/engine/swarm/configs.md index f78c1c4616..5127c47de1 100644 --- a/engine/swarm/configs.md +++ b/engine/swarm/configs.md @@ -122,8 +122,8 @@ Docker configs. ### Defining and using configs in compose files -Both the `docker compose` and `docker stack` commands support defining configs -in a compose file. See +`docker stack` command supports defining configs in a compose file, though +`docker compose` will ignore the `configs` key as is not supported. See [the Compose file reference](/compose/compose-file/#configs) for details. ### Simple example: Get started with configs From 478113a972c7b656edf396243e4e023850fc0109 Mon Sep 17 00:00:00 2001 From: Alastair Smith Date: Mon, 1 Apr 2019 13:11:12 -0500 Subject: [PATCH 02/18] add slack webhook to Jenkinsfile --- Jenkinsfile | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 8e420440ae..a826f05f52 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -1,5 +1,6 @@ def dtrVpnAddress = "vpn.corp-us-east-1.aws.dckr.io" def ucpBundle = [file(credentialsId: "ucp-bundle", variable: 'UCP')] +def slackString = [string(credentialsId: 'slack-docs-webhook', variable: 'slack')] def reg = [credentialsId: 'csebuildbot', url: 'https://index.docker.io/v1/'] pipeline { @@ -50,7 +51,6 @@ pipeline { } withDockerRegistry(reg) { sh """ - cd ucp-bundle-success_bot export DOCKER_TLS_VERIFY=1 export COMPOSE_TLS_VERSION=TLSv1_2 export DOCKER_CERT_PATH=${WORKSPACE}/ucp-bundle-success_bot @@ -70,15 +70,18 @@ pipeline { withCredentials(ucpBundle) { sh 'unzip -o $UCP' } - withDockerRegistry(reg) { - sh """ - cd ucp-bundle-success_bot - export DOCKER_TLS_VERIFY=1 - export COMPOSE_TLS_VERSION=TLSv1_2 - export DOCKER_CERT_PATH=${WORKSPACE}/ucp-bundle-success_bot - export DOCKER_HOST=tcp://ucp.corp-us-east-1.aws.dckr.io:443 - docker service update --detach=false --force --image docs/docker.github.io:prod-${env.BUILD_NUMBER} docs-docker-com_docs --with-registry-auth - """ + withCredentials(slackString) { + withDockerRegistry(reg) { + sh """ + cd ucp-bundle-success_bot + export DOCKER_TLS_VERIFY=1 + export COMPOSE_TLS_VERSION=TLSv1_2 + export DOCKER_CERT_PATH=${WORKSPACE}/ucp-bundle-success_bot + export DOCKER_HOST=tcp://ucp.corp-us-east-1.aws.dckr.io:443 + docker service update --detach=false --force --image docs/docker.github.io:prod-${env.BUILD_NUMBER} docs-docker-com_docs --with-registry-auth + curl -X POST -H 'Content-type: application/json' --data '{"text":"Successfully published docs. 
https://docs.docker.com/"}' $slack + """ + } } } } From a2e2846b6c6d93982ed96980d6fd2cf3a6870297 Mon Sep 17 00:00:00 2001 From: David Ye Date: Mon, 1 Apr 2019 23:56:55 -0700 Subject: [PATCH 03/18] Fix typo --- develop/develop-images/dockerfile_best-practices.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/develop/develop-images/dockerfile_best-practices.md b/develop/develop-images/dockerfile_best-practices.md index c020cdcaa0..6beacdc26b 100644 --- a/develop/develop-images/dockerfile_best-practices.md +++ b/develop/develop-images/dockerfile_best-practices.md @@ -202,7 +202,7 @@ cd example # create an example file touch somefile.txt -# build and image using the current directory as context, and a Dockerfile passed through stdin +# build an image using the current directory as context, and a Dockerfile passed through stdin docker build -t myimage:latest -f- . < Date: Tue, 2 Apr 2019 09:08:02 -0300 Subject: [PATCH 04/18] Update configs.md --- engine/swarm/configs.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/engine/swarm/configs.md b/engine/swarm/configs.md index 5127c47de1..9be45cc8bc 100644 --- a/engine/swarm/configs.md +++ b/engine/swarm/configs.md @@ -122,8 +122,8 @@ Docker configs. ### Defining and using configs in compose files -`docker stack` command supports defining configs in a compose file, though -`docker compose` will ignore the `configs` key as is not supported. See +The `docker stack` command supports defining configs in a compose file. +However, the 'configs' key is not supported for `docker compose`. See [the Compose file reference](/compose/compose-file/#configs) for details. ### Simple example: Get started with configs From 41dfff114babdcc6776bf808195ab067c6cd6775 Mon Sep 17 00:00:00 2001 From: Maria Bermudez Date: Tue, 2 Apr 2019 09:09:51 -0700 Subject: [PATCH 05/18] Final edit --- engine/swarm/configs.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/engine/swarm/configs.md b/engine/swarm/configs.md index 9be45cc8bc..d2aec6673d 100644 --- a/engine/swarm/configs.md +++ b/engine/swarm/configs.md @@ -122,8 +122,8 @@ Docker configs. ### Defining and using configs in compose files -The `docker stack` command supports defining configs in a compose file. -However, the 'configs' key is not supported for `docker compose`. See +The `docker stack` command supports defining configs in a Compose file. +However, the `configs` key is not supported for `docker compose`. See [the Compose file reference](/compose/compose-file/#configs) for details. 
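As a minimal sketch of the difference (the file, service, and stack names below are illustrative, not taken from the patches above), a Compose file that declares a top-level `configs` key deploys cleanly with `docker stack deploy`, while `docker compose` does not act on that key:

```bash
# Assumes swarm mode is active and Compose file format 3.3 or higher.
echo 'server { listen 80; }' > default.conf

cat > docker-compose.yml <<'EOF'
version: "3.3"
services:
  web:
    image: nginx:alpine
    configs:
      - source: app_config
        target: /etc/nginx/conf.d/default.conf
configs:
  app_config:
    file: ./default.conf
EOF

# `docker stack deploy` creates the swarm config and mounts it into the service:
docker stack deploy --compose-file docker-compose.yml my_stack

# `docker compose` would skip the `configs` key, since it is not supported there.
docker stack rm my_stack   # clean up the example stack
```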
### Simple example: Get started with configs From b39d2ddd1427afe7d192b18496c5b1724c64f5ea Mon Sep 17 00:00:00 2001 From: Alastair Smith Date: Tue, 2 Apr 2019 12:00:23 -0500 Subject: [PATCH 06/18] add protection to Jenkinsfile --- Jenkinsfile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Jenkinsfile b/Jenkinsfile index 34916fbce7..744eb91abd 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -13,6 +13,9 @@ pipeline { agent { label 'ubuntu-1604-aufs-stable' } + when { + expression { env.GIT_URL == 'https://github.com/docker/docker.github.io.git' } + } stages { stage( 'build and push stage image' ) { when { From 900feffb4ce8df014e30eef01423166b9ad04f87 Mon Sep 17 00:00:00 2001 From: Alastair Smith Date: Tue, 2 Apr 2019 12:23:41 -0500 Subject: [PATCH 07/18] fix git url protection in jenkinsfile --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 744eb91abd..e1102ed1ec 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -14,7 +14,7 @@ pipeline { label 'ubuntu-1604-aufs-stable' } when { - expression { env.GIT_URL == 'https://github.com/docker/docker.github.io.git' } + expression { env.GIT_URL == 'https://github.com/Docker/docker.github.io.git' } } stages { stage( 'build and push stage image' ) { From 1662ab4867a2ac42bfdbdda81a50737de5d0fbe5 Mon Sep 17 00:00:00 2001 From: Yoju LEE Date: Tue, 2 Apr 2019 20:42:20 +0200 Subject: [PATCH 08/18] typo fix friendlyname -> friendlyhello --- get-started/part2.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/get-started/part2.md b/get-started/part2.md index 162290edde..89968c1e96 100644 --- a/get-started/part2.md +++ b/get-started/part2.md @@ -418,7 +418,7 @@ ones if you'd like to explore a bit before moving on. ```shell docker build -t friendlyhello . 
# Create image using this directory's Dockerfile -docker run -p 4000:80 friendlyhello # Run "friendlyname" mapping port 4000 to 80 +docker run -p 4000:80 friendlyhello # Run "friendlyhello" mapping port 4000 to 80 docker run -d -p 4000:80 friendlyhello # Same thing, but in detached mode docker container ls # List all running containers docker container ls -a # List all containers, even those not running From bf746b45298e811862dcfd6ae251e6389c74049f Mon Sep 17 00:00:00 2001 From: Maria Bermudez Date: Tue, 2 Apr 2019 17:40:54 -0700 Subject: [PATCH 09/18] Storage backend data migration updates Fix incorrect API command, add backup updates Update incorrect commands --- _data/toc.yaml | 2 + .../admin/configure/external-storage/index.md | 7 +- .../admin/configure/external-storage/nfs.md | 9 ++- ee/dtr/admin/configure/external-storage/s3.md | 13 +++- .../storage-backend-migration.md | 78 +++++++++++++++++++ .../disaster-recovery/create-a-backup.md | 70 ++++++++++++++--- .../disaster-recovery/restore-from-backup.md | 9 ++- reference/dtr/2.6/cli/reconfigure.md | 3 +- reference/dtr/2.6/cli/restore.md | 4 +- 9 files changed, 170 insertions(+), 25 deletions(-) create mode 100644 ee/dtr/admin/configure/external-storage/storage-backend-migration.md diff --git a/_data/toc.yaml b/_data/toc.yaml index 1986831ae5..32f58c48db 100644 --- a/_data/toc.yaml +++ b/_data/toc.yaml @@ -2170,6 +2170,8 @@ manuals: section: - path: /ee/dtr/admin/configure/external-storage/ title: Overview + - path: /ee/dtr/admin/configure/external-storage/storage-backend-migration/ + title: Switch storage backends - path: /ee/dtr/admin/configure/external-storage/s3/ title: S3 - path: /ee/dtr/admin/configure/external-storage/nfs/ diff --git a/ee/dtr/admin/configure/external-storage/index.md b/ee/dtr/admin/configure/external-storage/index.md index 3faa798c65..8d5b0086e5 100644 --- a/ee/dtr/admin/configure/external-storage/index.md +++ b/ee/dtr/admin/configure/external-storage/index.md @@ -82,16 +82,11 @@ all replicas can share the same storage backend. DTR supports Amazon S3 or other storage systems that are S3-compatible like Minio. [Learn how to configure DTR with Amazon S3](s3.md). -## Switching storage backends -Starting in DTR 2.6, switching storage backends initializes a new metadata store and erases your existing tags. This helps facilitate online garbage collection, which has been introduced in 2.5 as an experimental feature. Make sure to [perform a backup](/ee/dtr/admin/disaster-recovery/create-a-backup/#back-up-dtr-data) before you change your storage backend when running DTR 2.5 (with online garbage collection) and 2.6.0-2.6.3. If you encounter an issue with lost tags, refer to the following resources: - * For changes to reconfigure and restore options in DTR 2.6, see [docker/dtr reconfigure](/reference/dtr/2.6/cli/reconfigure/) and [docker/dtr restore](/reference/dtr/2.6/cli/restore). - * For Docker's recommended recovery strategies, see [DTR 2.6 lost tags after reconfiguring storage](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage). - * For NFS-specific changes, see [Use NFS](nfs.md). - * For S3-specific changes, see [Learn how to configure DTR with Amazon S3](s3.md). 
 ## Where to go next
 
+- [Switch storage backends](storage-backend-migration.md)
 - [Use NFS](nfs.md)
 - [Use S3](s3.md)
 - CLI reference pages
diff --git a/ee/dtr/admin/configure/external-storage/nfs.md b/ee/dtr/admin/configure/external-storage/nfs.md
index baefea0e58..8b90bc43ed 100644
--- a/ee/dtr/admin/configure/external-storage/nfs.md
+++ b/ee/dtr/admin/configure/external-storage/nfs.md
@@ -53,12 +53,18 @@ To support **NFS v4**, more NFS options have been added to the CLI. See [New Fea
 > See [Reconfigure Using a Local NFS Volume](
https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#reconfigureusingalocalnfsvolume) for Docker's recommended recovery strategy.
 {: .warning}
 
+#### DTR 2.6.4
+
+In DTR 2.6.4, a new flag, `--storage-migrated`, [has been added to `docker/dtr reconfigure`](/reference/dtr/2.6/cli/reconfigure/) which lets you indicate the migration status of your storage data during a reconfigure. [Upgrade to 2.6.4](/reference/dtr/2.6/cli/upgrade/) and follow [Best practice for data migration in 2.6.4](/ee/dtr/admin/configure/external-storage/storage-backend-migration/#best-practice-for-data-migration) when switching storage backends.
+
 ```bash
 docker run --rm -it \
   docker/dtr:{{ page.dtr_version}} reconfigure \
   --ucp-url <ucp-url> \
   --ucp-username <ucp-username> \
-  --dtr-storage-volume
+  --nfs-storage-url <nfs-storage-url> \
+  --async-nfs \
+  --storage-migrated
 ```
 
 To reconfigure DTR to stop using NFS storage, leave the `--nfs-storage-url` option
 empty:
 
 ```bash
 docker run -it --rm {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version}} reconfigure --nfs-storage-url ""
 ```
 
 ## Where to go next
 
+- [Switch storage backends](storage-backend-migration.md)
 - [Create a backup](/ee/dtr/admin/disaster-recovery/create-a-backup/)
 - [Restore from a backup](/ee/dtr/admin/disaster-recovery/restore-from-backup/)
 - [Configure where images are stored](index.md)
diff --git a/ee/dtr/admin/configure/external-storage/s3.md b/ee/dtr/admin/configure/external-storage/s3.md
index a264af1b88..bb3292dcd1 100644
--- a/ee/dtr/admin/configure/external-storage/s3.md
+++ b/ee/dtr/admin/configure/external-storage/s3.md
@@ -133,15 +133,26 @@ DTR supports the following S3 regions:
 
 ## Update your S3 settings on the web interface
 
-There is currently an issue with [changing your S3 settings on the web interface](/ee/dtr/release-notes#version-26) which leads to erased metadata. Make sure to [back up your DTR metadata](/ee/dtr/admin/disaster-recovery/create-a-backup/#back-up-dtr-metadata) before you proceed.
+When running 2.5.x (with experimental garbage collection) or 2.6.0-2.6.3, there is an issue with [changing your S3 settings on the web interface](/ee/dtr/release-notes#version-26) which leads to erased metadata. Make sure to [back up your DTR metadata](/ee/dtr/admin/disaster-recovery/create-a-backup/#back-up-dtr-metadata) before you proceed.
 
 ## Restore DTR with S3
 
 To [restore DTR using your previously configured S3 settings](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#restoretocloudstorage), use `docker/dtr restore` with `--dtr-use-default-storage` to keep your metadata.
 
+#### DTR 2.6.4
+
+In DTR 2.6.4, a new flag, `--storage-migrated`, [has been added to `docker/dtr reconfigure`](/reference/dtr/2.6/cli/reconfigure/) which lets you indicate the migration status of your storage data during a reconfigure. [Upgrade to 2.6.4](/reference/dtr/2.6/cli/upgrade/) and follow [Best practice for data migration in 2.6.4](/ee/dtr/admin/configure/external-storage/storage-backend-migration/#best-practice-for-data-migration) when switching storage backends.
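For example, a reconfigure that tells DTR the storage data has already been moved to the new backend might look like the following sketch (the angle-bracket placeholders are illustrative):

```bash
docker run --rm -it \
  docker/dtr:{{ page.dtr_version }} reconfigure \
  --ucp-url <ucp-url> \
  --ucp-username <ucp-username> \
  --storage-migrated
```

Only run this after every blob has been copied from the old backend to the new one; otherwise the preserved tag metadata can reference blobs that no longer resolve.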
+
 ## Where to go next
 
+- [Switch storage backends](storage-backend-migration.md)
+- [Create a backup](/ee/dtr/admin/disaster-recovery/create-a-backup/)
+- [Restore from a backup](/ee/dtr/admin/disaster-recovery/restore-from-backup/)
 - [Configure where images are stored](index.md)
+- CLI reference pages
+  - [docker/dtr install](/reference/dtr/2.6/cli/install/)
+  - [docker/dtr reconfigure](/reference/dtr/2.6/cli/reconfigure/)
+  - [docker/dtr restore](/reference/dtr/2.6/cli/restore/)
diff --git a/ee/dtr/admin/configure/external-storage/storage-backend-migration.md b/ee/dtr/admin/configure/external-storage/storage-backend-migration.md
new file mode 100644
index 0000000000..a83cdafc7a
--- /dev/null
+++ b/ee/dtr/admin/configure/external-storage/storage-backend-migration.md
@@ -0,0 +1,78 @@
+---
+title: Switch storage backends
+description: Storage backend migration for Docker Trusted Registry
+keywords: dtr, storage drivers, local volume, NFS, Azure, S3,
+---
+
+Starting in DTR 2.6, switching storage backends initializes a new metadata store and erases your existing tags. This helps facilitate online garbage collection, which has been introduced in 2.5 as an experimental feature. In earlier versions, DTR would subsequently start a `tagmigration` job to rebuild tag metadata from the file layout in the image layer store. This job has been discontinued, as your storage backend could get out of sync with your DTR metadata, like your manifests and existing repositories. As best practice, DTR storage backends and metadata should always be moved, backed up, and restored together.
+
+## DTR 2.6.4 and above
+
+In DTR 2.6.4, a new flag, `--storage-migrated`, [has been added to `docker/dtr reconfigure`](/reference/dtr/2.6/cli/reconfigure/) which lets you indicate the migration status of your storage data during a reconfigure.
+
+### Best practice for data migration
+
+Docker recommends the following steps for your storage backend and metadata migration:
+
+1. Disable garbage collection by selecting "Never" under **System > Garbage Collection**, so blobs referenced in the backup that you create continue to exist. See [Garbage collection](/ee/dtr/admin/configure/garbage-collection/) for more details.
+
+    ![](/ee/dtr/images/garbage-collection-0.png){: .img-fluid .with-border}
+
+2. [Back up your existing metadata](/ee/dtr/admin/disaster-recovery/create-a-backup/#back-up-dtr-metadata). See [docker/dtr backup](/reference/dtr/2.6/cli/backup/) for CLI command description and options.
+
+3. Migrate the contents of your current storage backend to the new one you are switching to. For example, upload your NFS contents to an S3 bucket if you're switching from NFS to S3.
+
+4. [Restore DTR from your backup](/ee/dtr/admin/disaster-recovery/restore-from-backup/) and specify your new storage backend. See [docker/dtr destroy](/reference/dtr/2.6/cli/destroy/) and [docker/dtr restore](/reference/dtr/2.6/cli/restore/) for CLI command descriptions and options.
+
+5. Run garbage collection in "blob" mode. You can either SSH into a DTR node, or [use a UCP client bundle](/ee/ucp/user-access/cli/) to run the following command:
+
+    ```bash
+    docker exec -it dtr-jobrunner-<replica-id> sh
+    ```
+    > See [Find your replica ID](/ee/dtr/admin/disaster-recovery/create-a-backup/#find-your-replica-id) for tips on determining your replica ID.
+
+    Within the running container, type `/bin/job_executor` to start the `job_executor` binary.
+
+    ```bash
+    /bin/job_executor
+
+    / # /bin/job_executor onlinegc blobs
+    ```
+    > Note that the first line results in a display of the binary name, its usage, and available commands including `onlinegc`. To learn more about a command, run `<command>` --help.
+
+    Running garbage collection in blob mode destroys any new blobs which are not referenced in your previously created backup.
+
+### Alternative options for data migration
+
+- If you have a long maintenance window, you can skip some steps from above and do the following:
+
+  1. Put DTR in "read-only" mode. To do so, send the following API request:
+
+    ```bash
+    curl -u <username>:$TOKEN -X POST "https://<dtr-url>/api/v0/meta/settings" -H "accept: application/json" -H "content-type: application/json" -d "{ \"readOnlyRegistry\": true }"
+    ```
+    On success, you should get a `202 Accepted` response.
+
+  2. Migrate the contents of your current storage backend to the new one you are switching to. For example, upload your NFS contents to an S3 bucket if you're switching from NFS to S3.
+
+  3. [Reconfigure DTR](/reference/dtr/2.6/cli/reconfigure) while specifying the `--storage-migrated` flag to preserve your existing tags.
+
+- If you are not worried about inconsistencies in data, skip steps 1 and 2 and perform a reconfigure.
+
+## DTR 2.6.0-2.6.3 and DTR 2.5 (with experimental garbage collection)
+
+Make sure to [perform a backup](/ee/dtr/admin/disaster-recovery/create-a-backup/#back-up-dtr-data) before you change your storage backend when running DTR 2.5 (with online garbage collection) and 2.6.0-2.6.3. If you encounter an issue with lost tags, refer to the following resources:
+ * For changes to reconfigure and restore options in DTR 2.6, see [docker/dtr reconfigure](/reference/dtr/2.6/cli/reconfigure/) and [docker/dtr restore](/reference/dtr/2.6/cli/restore).
+ * For Docker's recommended recovery strategies, see [DTR 2.6 lost tags after reconfiguring storage](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage).
+ * For NFS-specific changes, see [Use NFS](nfs.md).
+ * For S3-specific changes, see [Learn how to configure DTR with Amazon S3](s3.md).
+
+Upgrade to [DTR 2.6.4](#dtr-264-and-above) and follow [best practice for data migration](#best-practice-for-data-migration) to avoid the wiped tags issue.
+
+## Where to go next
+
+- [Use NFS](nfs.md)
+- [Use S3](s3.md)
+- CLI reference pages
+  - [docker/dtr install](/reference/dtr/2.6/cli/install/)
+  - [docker/dtr reconfigure](/reference/dtr/2.6/cli/reconfigure/)
diff --git a/ee/dtr/admin/disaster-recovery/create-a-backup.md b/ee/dtr/admin/disaster-recovery/create-a-backup.md
index 1796ec0477..f13fb2e336 100644
--- a/ee/dtr/admin/disaster-recovery/create-a-backup.md
+++ b/ee/dtr/admin/disaster-recovery/create-a-backup.md
@@ -2,6 +2,7 @@
 title: Create a backup
 description: Learn how to create a backup of Docker Trusted Registry, for disaster recovery.
 keywords: dtr, disaster recovery
+toc_max_header: 5
 ---
 
 {% assign metadata_backup_file = "dtr-metadata-backup.tar" %}
@@ -43,7 +44,7 @@ command backs up the following data:
 
 ## Back up DTR data
 
-To create a backup of DTR you need to:
+To create a backup of DTR, you need to:
 
 1. Back up image content
 2. Back up DTR metadata
@@ -53,13 +54,46 @@ restore. If you have not previously performed a backup, the web interface displa
 
 ![](/ee/dtr/images/backup-warning.png)
 
+#### Find your replica ID
+
+Since you need your DTR replica ID during a backup, the following covers a few ways for you to determine your replica ID:
+
+##### UCP web interface
+
+You can find the list of replicas by navigating to **Shared Resources > Stacks** or **Swarm > Volumes** (when using [swarm mode](/engine/swarm/)) on the UCP web interface.
+
+##### UCP client bundle
+
+From a terminal [using a UCP client bundle](/ee/ucp/user-access/cli/), run:
+
+{% raw %}
+```bash
+docker ps --format "{{.Names}}" | grep dtr
+
+# The list of DTR containers with <node>/<component>-<replica_id>, e.g.
+# node-1/dtr-api-a1640e1c15b6
+```
+{% endraw %}
+
+
+##### SSH access
+
+Another way to determine the replica ID is to SSH into a DTR node and run the following:
+
+{% raw %}
+```bash
+REPLICA_ID=$(docker inspect -f '{{.Name}}' $(docker ps -q -f name=dtr-rethink) | cut -f 3 -d '-') && echo $REPLICA_ID
+```
+{% endraw %}
+
 ### Back up image content
 
 Since you can configure the storage backend that DTR uses to store images,
-the way you backup images depends on the storage backend you're using.
+the way you back up images depends on the storage backend you're using.
 
 If you've configured DTR to store images on the local file system or NFS mount,
-you can backup the images by using ssh to log into a node where DTR is running,
+you can back up the images by using SSH to log in to a DTR node,
 and creating a tar archive of the [dtr-registry volume](../../architecture.md):
 
 {% raw %}
@@ -76,10 +110,16 @@ recommended for that system.
 
 ### Back up DTR metadata
 
 To create a DTR backup, load your UCP client bundle, and run the following
-command, replacing the placeholders for the real values:
+command, replacing the placeholders with real values:
 
-```none
-read -sp 'ucp password: ' UCP_PASSWORD; \
+```bash
+read -sp 'ucp password: ' UCP_PASSWORD;
+```
+
+This prompts you for the UCP password. Next, run the following to back up your DTR metadata and save the result into a tar archive. You can learn more about the supported flags in
+the [reference documentation](/reference/dtr/2.6/cli/backup.md).
+
+```bash
 docker run --log-driver none -i --rm \
   --env UCP_PASSWORD=$UCP_PASSWORD \
   {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} backup \
@@ -95,14 +135,9 @@ Where:
 
 * `<ucp-username>` is the username of a UCP administrator.
 * `<replica-id>` is the id of the DTR replica to backup.
 
-This prompts you for the UCP password, backups up the DTR metadata and saves the
-result into a tar archive. You can learn more about the supported flags in
-the [reference documentation](/reference/dtr/2.5/cli/backup.md).
 
 By default the backup command doesn't stop the DTR replica being backed up.
-This allows performing backups without affecting your users. Since the replica
-is not stopped, it's possible that happen while the backup is taking place, won't
-be persisted.
+This means you can take frequent backups without affecting your users.
 
 You can use the `--offline-backup` option to stop the DTR replica while taking the backup. If you do this, remove the replica from the load balancing pool.
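For instance, an offline backup only adds that flag to the metadata backup command shown earlier (a sketch reusing the same placeholders):

```bash
docker run --log-driver none -i --rm \
  --env UCP_PASSWORD=$UCP_PASSWORD \
  {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} backup \
  --ucp-url <ucp-url> \
  --ucp-username <ucp-username> \
  --existing-replica-id <replica-id> \
  --offline-backup > {{ metadata_backup_file }}
```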
@@ -117,6 +152,7 @@ gpg --symmetric {{ metadata_backup_file }} This prompts you for a password to encrypt the backup, copies the backup file and encrypts it. + ### Test your backups To validate that the backup was correctly performed, you can print the contents @@ -151,3 +187,13 @@ gpg -d {{ metadata_backup_file }} | tar -t You can also create a backup of a UCP cluster and restore it into a new cluster. Then restore DTR on that new cluster to confirm that everything is working as expected. + +## Where to go next +- [Configure your storage backend](/ee/dtr/admin/configure/external-storage/index.md) +- [Switch your storage backend](/ee/dtr/admin/configure/external-storage/storage-backend-migration.md) +- [Use NFS](/ee/dtr/admin/configure/external-storage/nfs.md) +- [Use S3](/ee/dtr/admin/configure/external-storage/s3.md) +- CLI reference pages + - [docker/dtr install](/reference/dtr/2.6/cli/install/) + - [docker/dtr reconfigure](/reference/dtr/2.6/cli/reconfigure/) + - [docker/dtr restore](/reference/dtr/2.6/cli/restore/) diff --git a/ee/dtr/admin/disaster-recovery/restore-from-backup.md b/ee/dtr/admin/disaster-recovery/restore-from-backup.md index 31eeb08dac..2986726e80 100644 --- a/ee/dtr/admin/disaster-recovery/restore-from-backup.md +++ b/ee/dtr/admin/disaster-recovery/restore-from-backup.md @@ -59,8 +59,13 @@ the configuration created during a backup. Load your UCP client bundle, and run the following command, replacing the placeholders for the real values: -```none -read -sp 'ucp password: ' UCP_PASSWORD; \ +```bash +read -sp 'ucp password: ' UCP_PASSWORD; +``` + +This prompts you for the UCP password. Next, run the following to restore DTR from your backup. You can learn more about the supported flags in [docker/dtr restore](/reference/dtr/2.6/cli/restore). + +```bash docker run -i --rm \ --env UCP_PASSWORD=$UCP_PASSWORD \ {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} restore \ diff --git a/reference/dtr/2.6/cli/reconfigure.md b/reference/dtr/2.6/cli/reconfigure.md index d67cb9952d..8658d6408f 100644 --- a/reference/dtr/2.6/cli/reconfigure.md +++ b/reference/dtr/2.6/cli/reconfigure.md @@ -40,13 +40,14 @@ time, configure your DTR for high availability. | `--log-host` | $LOG_HOST | The syslog system to send logs to. The endpoint to send logs to. Use this flag if you set `--log-protocol` to `tcp` or `udp`. | | `--log-level` | $LOG_LEVEL | Log level for all container logs when logging to syslog. Default: INFO. The supported log levels are `debug`, `info`, `warn`, `error`, or `fatal`. | | `--log-protocol` | $LOG_PROTOCOL | The protocol for sending logs. Default is internal. By default, DTR internal components log information using the logger specified in the Docker daemon in the node where the DTR replica is deployed. Use this option to send DTR logs to an external syslog system. The supported values are `tcp`, `udp`, and `internal`. Internal is the default option, stopping DTR from sending logs to an external system. Use this flag with `--log-host`. | -| `--nfs-storage-url` | $NFS_STORAGE_URL | When running DTR 2.5 (with experimental online garbage collection) and 2.6.0-2.6.3, there is an issue with [reconfiguring and restoring DTR with `--nfs-storage-url`](/ee/dtr/release-notes#version-26) which leads to erased tags. Make sure to [back up your DTR metadata](/ee/dtr/admin/disaster-recovery/create-a-backup/#back-up-dtr-metadata) before you proceed. 
To work around the issue, manually create a storage volume on each DTR node and reconfigure DTR with `--dtr-storage-volume` and your newly-created volume instead. See [Reconfigure Using a Local NFS Volume](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#reconfigureusingalocalnfsvolume) for more details. To reconfigure DTR to stop using NFS, leave this option empty: `--nfs-storage-url ""`. See [USE NFS](/ee/dtr/admin/configure/external-storage/nfs/) for more details. | +| `--nfs-storage-url` | $NFS_STORAGE_URL | When running DTR 2.5 (with experimental online garbage collection) and 2.6.0-2.6.3, there is an issue with [reconfiguring and restoring DTR with `--nfs-storage-url`](/ee/dtr/release-notes#version-26) which leads to erased tags. Make sure to [back up your DTR metadata](/ee/dtr/admin/disaster-recovery/create-a-backup/#back-up-dtr-metadata) before you proceed. To work around the issue, manually create a storage volume on each DTR node and reconfigure DTR with `--dtr-storage-volume` and your newly-created volume instead. See [Reconfigure Using a Local NFS Volume](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#reconfigureusingalocalnfsvolume) for more details. To reconfigure DTR to stop using NFS, leave this option empty: `--nfs-storage-url ""`. See [USE NFS](/ee/dtr/admin/configure/external-storage/nfs/) for more details. [Upgrade to 2.6.4](/reference/dtr/2.6/cli/upgrade/) and follow [Best practice for data migration in 2.6.4](/ee/dtr/admin/configure/external-storage/storage-backend-migration/#best-practice-for-data-migration) when switching storage backends. | | `--async-nfs` | $ASYNC_NFS | Use async NFS volume options on the replica specified in the `--existing-replica-id` option. The NFS configuration must be set with `--nfs-storage-url` explicitly to use this option. Using `--async-nfs` will bring down any containers on the replica that use the NFS volume, delete the NFS volume, bring it back up with the appropriate configuration, and restart any containers that were brought down. | | `--nfs-options` | $NFS_OPTIONS | Pass in NFS volume options verbatim for the replica specified in the `--existing-replica-id` option. The NFS configuration must be set with `--nfs-storage-url` explicitly to use this option. Specifying `--nfs-options` will pass in character-for-character the options specified in the argument when creating or recreating the NFS volume. For instance, to use NFS v4 with async, pass in "rw,nfsvers=4,async" as the argument. | | `--no-proxy` | $DTR_NO_PROXY | List of domains the proxy should not be used for. When using `--http-proxy` you can use this flag to specify a list of domains that you don't want to route through the proxy. Format `acme.com[, acme.org]`. | | `--replica-http-port` | $REPLICA_HTTP_PORT | The public HTTP port for the DTR replica. Default is `80`. This allows you to customize the HTTP port where users can reach DTR. Once users access the HTTP port, they are redirected to use an HTTPS connection, using the port specified with --replica-https-port. This port can also be used for unencrypted health checks. | | `--replica-https-port` | $REPLICA_HTTPS_PORT | The public HTTPS port for the DTR replica. Default is `443`. This allows you to customize the HTTPS port where users can reach DTR. Each replica can use a different port. | | `--replica-rethinkdb-cache-mb` | $RETHINKDB_CACHE_MB | The maximum amount of space in MB for RethinkDB in-memory cache used by the given replica. Default is auto. 
Auto is `(available_memory - 1024) / 2`. This config allows changing the RethinkDB cache usage per replica. You need to run it once per replica to change each one. | +| `--storage-migrated` | $STORAGE_MIGRATED | A flag added in 2.6.4 which lets you indicate the migration status of your storage data. Specify this flag if you are migrating to a new storage backend and have already moved all contents from your old backend to your new one. If not specified, DTR will assume the new backend is empty during a backend storage switch, and consequently destroy your existing tags and related image metadata. | | `--ucp-ca` | $UCP_CA | Use a PEM-encoded TLS CA certificate for UCP. Download the UCP TLS CA certificate from `https:///ca`, and use `--ucp-ca "$(cat ca.pem)"`. | | `--ucp-insecure-tls` | $UCP_INSECURE_TLS | Disable TLS verification for UCP. The installation uses TLS but always trusts the TLS certificate used by UCP, which can lead to MITM (man-in-the-middle) attacks. For production deployments, use `--ucp-ca "$(cat ca.pem)"` instead. | | `--ucp-password` | $UCP_PASSWORD | The UCP administrator password. | diff --git a/reference/dtr/2.6/cli/restore.md b/reference/dtr/2.6/cli/restore.md index cc3ca76259..640a34f69c 100644 --- a/reference/dtr/2.6/cli/restore.md +++ b/reference/dtr/2.6/cli/restore.md @@ -50,8 +50,8 @@ DTR replicas for high availability. | `--dtr-external-url` | $DTR_EXTERNAL_URL | URL of the host or load balancer clients use to reach DTR. When you use this flag, users are redirected to UCP for logging in. Once authenticated they are redirected to the URL you specify in this flag. If you don't use this flag, DTR is deployed without single sign-on with UCP. Users and teams are shared but users log in separately into the two applications. You can enable and disable single sign-on within your DTR system settings. Format `https://host[:port]`, where port is the value you used with `--replica-https-port`. | | `--dtr-key` | $DTR_KEY | Use a PEM-encoded TLS private key for DTR. By default DTR generates a self-signed TLS certificate during deployment. You can use your own TLS private key with `--dtr-key "$(cat ca.pem)"`. | | `--dtr-storage-volume` | $DTR_STORAGE_VOLUME | Mandatory flag to allow for DTR to fall back to your configured storage setting at the time of backup. If you have previously configured DTR to use a full path or volume name for storage, specify this flag to use the same setting on restore. See [docker/dtr install](install.md) and [docker/dtr reconfigure](reconfigure.md) for usage details. | -| `--dtr-use-default-storage` | $DTR_DEFAULT_STORAGE | Mandatory flag to allow for DTR to fall back to your configured storage backend at the time of backup. If cloud storage was configured, then the default storage on restore is cloud storage. Otherwise, local storage is used. With DTR 2.5 (with experimental online garbage collection) and 2.6.0-2.6.3, this flag must be specified in order to keep your DTR metadata. If you encounter an issue with lost tags, see [Restore to Cloud Storage](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#restoretocloudstorage) for Docker's recommended recovery strategy. | -| `--nfs-storage-url` | $NFS_STORAGE_URL | Mandatory flag to allow for DTR to fall back to your configured storage setting at the time of backup. 
When running DTR 2.5 (with experimental online garbage collection) and 2.6.0-2.6.3, there is an issue with [reconfiguring and restoring DTR with `--nfs-storage-url`](/ee/dtr/release-notes#version-26) which leads to erased tags. Make sure to [back up your DTR metadata](/ee/dtr/admin/disaster-recovery/create-a-backup/#back-up-dtr-metadata) before you proceed. If NFS was previously configured, you have to manually create a storage volume on each DTR node and specify `--dtr-storage-volume` with the newly-created volume instead. See [Restore to a Local NFS Volume](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#restoretoalocalnfsvolume) for more details. For additional NFS configuration options to support **NFS v4**, see [docker/dtr install](install.md) and [docker/dtr reconfigure](reconfigure.md). | +| `--dtr-use-default-storage` | $DTR_DEFAULT_STORAGE | Mandatory flag to allow for DTR to fall back to your configured storage backend at the time of backup. If cloud storage was configured, then the default storage on restore is cloud storage. Otherwise, local storage is used. With DTR 2.5 (with experimental online garbage collection) and 2.6.0-2.6.3, this flag must be specified in order to keep your DTR metadata. If you encounter an issue with lost tags, see [Restore to Cloud Storage](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#restoretocloudstorage) for Docker's recommended recovery strategy. [Upgrade to 2.6.4](/reference/dtr/2.6/cli/upgrade/) and follow [Best practice for data migration in 2.6.4](/ee/dtr/admin/configure/external-storage/storage-backend-migration/#best-practice-for-data-migration) when switching storage backends. | +| `--nfs-storage-url` | $NFS_STORAGE_URL | Mandatory flag to allow for DTR to fall back to your configured storage setting at the time of backup. When running DTR 2.5 (with experimental online garbage collection) and 2.6.0-2.6.3, there is an issue with [reconfiguring and restoring DTR with `--nfs-storage-url`](/ee/dtr/release-notes#version-26) which leads to erased tags. Make sure to [back up your DTR metadata](/ee/dtr/admin/disaster-recovery/create-a-backup/#back-up-dtr-metadata) before you proceed. If NFS was previously configured, you have to manually create a storage volume on each DTR node and specify `--dtr-storage-volume` with the newly-created volume instead. See [Restore to a Local NFS Volume](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#restoretoalocalnfsvolume) for more details. For additional NFS configuration options to support **NFS v4**, see [docker/dtr install](install.md) and [docker/dtr reconfigure](reconfigure.md). [Upgrade to 2.6.4](/reference/dtr/2.6/cli/upgrade/) and follow [Best practice for data migration in 2.6.4](/ee/dtr/admin/configure/external-storage/storage-backend-migration/#best-practice-for-data-migration) when switching storage backends. | | `--enable-pprof` | $DTR_PPROF | Enables pprof profiling of the server. Use `--enable-pprof=false` to disable it. Once DTR is deployed with this flag, you can access the `pprof` endpoint for the api server at `/debug/pprof`, and the registry endpoint at `/registry_debug_pprof/debug/pprof`. | | `--help-extended` | $DTR_EXTENDED_HELP | Display extended help text for a given command. | | `--http-proxy` | $DTR_HTTP_PROXY | The HTTP proxy used for outgoing requests. 
| From 55f26aaeb32e60980800b7faecfc21708f3ca1fe Mon Sep 17 00:00:00 2001 From: Carl Dunkelberger <39133599+cdunkelb@users.noreply.github.com> Date: Wed, 3 Apr 2019 12:42:29 -0400 Subject: [PATCH 10/18] --unmanaged-cni is not a valid option for upgrade --- reference/ucp/3.1/cli/upgrade.md | 1 - 1 file changed, 1 deletion(-) diff --git a/reference/ucp/3.1/cli/upgrade.md b/reference/ucp/3.1/cli/upgrade.md index 8c849dce8a..de7d72ae24 100644 --- a/reference/ucp/3.1/cli/upgrade.md +++ b/reference/ucp/3.1/cli/upgrade.md @@ -45,4 +45,3 @@ healthy and that all nodes have been upgraded successfully. | `--pod-cidr` | Kubernetes cluster IP pool for the pods to allocated IP. The default IP pool is `192.168.0.0/16`. | | `--nodeport-range` | Allowed port range for Kubernetes services of type `NodePort`. The default port range is `32768-35535`. | | `--cloud-provider` | The cloud provider for the cluster | -| `--unmanaged-cni` | Flag to indicate if CNI provider is Calico and managed by UCP. Calico is the default CNI provider. The default value is `true` when using the default Calico CNI. | From 3a4d4f4cbb9e8b1c2e7c04b3251dcdb74dcf1322 Mon Sep 17 00:00:00 2001 From: paigehargrave Date: Wed, 3 Apr 2019 12:47:29 -0400 Subject: [PATCH 11/18] Update to UCP known issues --- ee/ucp/release-notes.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ee/ucp/release-notes.md b/ee/ucp/release-notes.md index 7161890fd2..46d0660f9e 100644 --- a/ee/ucp/release-notes.md +++ b/ee/ucp/release-notes.md @@ -42,6 +42,9 @@ upgrade your installation to the latest release. * Fixed an issue with continuous interlock reconciliation if `ucp-interlock` service image does not match expected version. (ENGORC-2081) ### Known Issues +* Upgrading from UCP 3.1.4 to 3.1.5 causes missing Swarm placement constraints banner for some services (ENGORC-2191)https://docker.atlassian.net/browse/ENGORC-2191. This can cause Swarm services to run unexpectedly on Kubernetes nodes. See https://www.docker.com/ddc-41 for more information. + + - Workaround: Delete any `ucp-*-s390x` services; they're designed for running on IBM Z nodes. * There are important changes to the upgrade process that, if not correctly followed, can impact the availability of applications running on the Swarm during uprades. These constraints impact any upgrades coming from any Docker Engine version before 18.09 to version 18.09 or greater. For more information about about upgrading Docker Enterprise to version 2.1, see [Upgrade Docker](../upgrade) * To deploy Pods with containers using Restricted Parameters, the user must be an admin and a service account must explicitly have a **ClusterRoleBinding** with `cluster-admin` as the **ClusterRole**. Restricted Parameters on Containers include: * Host Bind Mounts From 8dc337686dd74ecfe77343f1b223d1402f615532 Mon Sep 17 00:00:00 2001 From: paigehargrave Date: Wed, 3 Apr 2019 12:52:28 -0400 Subject: [PATCH 12/18] Update UCP release notes --- ee/ucp/release-notes.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ee/ucp/release-notes.md b/ee/ucp/release-notes.md index 46d0660f9e..92f462a0ad 100644 --- a/ee/ucp/release-notes.md +++ b/ee/ucp/release-notes.md @@ -42,9 +42,9 @@ upgrade your installation to the latest release. * Fixed an issue with continuous interlock reconciliation if `ucp-interlock` service image does not match expected version. 
(ENGORC-2081) ### Known Issues -* Upgrading from UCP 3.1.4 to 3.1.5 causes missing Swarm placement constraints banner for some services (ENGORC-2191)https://docker.atlassian.net/browse/ENGORC-2191. This can cause Swarm services to run unexpectedly on Kubernetes nodes. See https://www.docker.com/ddc-41 for more information. +* Upgrading from UCP 3.1.4 to 3.1.5 causes missing Swarm placement constraints banner for some Swarm services (ENGORC-2191)https://docker.atlassian.net/browse/ENGORC-2191. This can cause Swarm services to run unexpectedly on Kubernetes nodes. See https://www.docker.com/ddc-41 for more information. - - Workaround: Delete any `ucp-*-s390x` services; they're designed for running on IBM Z nodes. + - Workaround: Delete any `ucp-*-s390x` Swarm services for example, `ucp-auth-api-s390x`. * There are important changes to the upgrade process that, if not correctly followed, can impact the availability of applications running on the Swarm during uprades. These constraints impact any upgrades coming from any Docker Engine version before 18.09 to version 18.09 or greater. For more information about about upgrading Docker Enterprise to version 2.1, see [Upgrade Docker](../upgrade) * To deploy Pods with containers using Restricted Parameters, the user must be an admin and a service account must explicitly have a **ClusterRoleBinding** with `cluster-admin` as the **ClusterRole**. Restricted Parameters on Containers include: * Host Bind Mounts From e88746c6a8d0fccfd8eb41f4b8c58115c3ee51eb Mon Sep 17 00:00:00 2001 From: Maria Bermudez Date: Wed, 3 Apr 2019 09:56:56 -0700 Subject: [PATCH 13/18] Update release-notes.md --- ee/ucp/release-notes.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ee/ucp/release-notes.md b/ee/ucp/release-notes.md index 92f462a0ad..f10e995f0b 100644 --- a/ee/ucp/release-notes.md +++ b/ee/ucp/release-notes.md @@ -42,9 +42,9 @@ upgrade your installation to the latest release. * Fixed an issue with continuous interlock reconciliation if `ucp-interlock` service image does not match expected version. (ENGORC-2081) ### Known Issues -* Upgrading from UCP 3.1.4 to 3.1.5 causes missing Swarm placement constraints banner for some Swarm services (ENGORC-2191)https://docker.atlassian.net/browse/ENGORC-2191. This can cause Swarm services to run unexpectedly on Kubernetes nodes. See https://www.docker.com/ddc-41 for more information. - - Workaround: Delete any `ucp-*-s390x` Swarm services for example, `ucp-auth-api-s390x`. +* Upgrading from UCP 3.1.4 to 3.1.5 causes missing Swarm placement constraints banner for some Swarm services (ENGORC-2191)https://docker.atlassian.net/browse/ENGORC-2191. This can cause Swarm services to run unexpectedly on Kubernetes nodes. See https://www.docker.com/ddc-41 for more information. + - Workaround: Delete any `ucp-*-s390x` Swarm services. For example, `ucp-auth-api-s390x`. * There are important changes to the upgrade process that, if not correctly followed, can impact the availability of applications running on the Swarm during uprades. These constraints impact any upgrades coming from any Docker Engine version before 18.09 to version 18.09 or greater. For more information about about upgrading Docker Enterprise to version 2.1, see [Upgrade Docker](../upgrade) * To deploy Pods with containers using Restricted Parameters, the user must be an admin and a service account must explicitly have a **ClusterRoleBinding** with `cluster-admin` as the **ClusterRole**. 
Restricted Parameters on Containers include: * Host Bind Mounts From 64cbf59a3e5c40019f1202ee15dec79b2c87a831 Mon Sep 17 00:00:00 2001 From: Ally Smith Date: Wed, 3 Apr 2019 16:01:55 -0500 Subject: [PATCH 14/18] make jenkinsfile serve private and public docs After a couple of Jenkins-based mix-ups it became obvious we needed a Jenkinsfile that would serve both public and private projects, that we could move between repos without worry. This Jenkinsfile knows which images to build and push and which swarm services to update because of the use of git_url and branch conditions. --- Jenkinsfile | 126 +++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 101 insertions(+), 25 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index e1102ed1ec..3b54bd3e61 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -1,6 +1,3 @@ -def dtrVpnAddress = "vpn.corp-us-east-1.aws.dckr.io" -def ucpBundle = [file(credentialsId: "ucp-bundle", variable: 'UCP')] -def slackString = [string(credentialsId: 'slack-docs-webhook', variable: 'slack')] def reg = [credentialsId: 'csebuildbot', url: 'https://index.docker.io/v1/'] pipeline { @@ -13,6 +10,12 @@ pipeline { agent { label 'ubuntu-1604-aufs-stable' } + environment { + DTR_VPN_ADDRESS = credentials('dtr-vpn-address') + DOCKER_HOST_STRING = credentials('docker-host') + UCP_BUNDLE = credentials('ucp-bundle') + SLACK = credentials('slack-docs-webhook') + } when { expression { env.GIT_URL == 'https://github.com/Docker/docker.github.io.git' } } @@ -43,48 +46,121 @@ pipeline { } } } - stage( 'update docs-stage' ) { + stage( 'update docs stage' ) { when { branch 'master' } steps { - withVpn(dtrVpnAddress) { - withCredentials(ucpBundle) { - sh 'unzip -o $UCP' - } + withVpn("$DTR_VPN_ADDRESS") { + sh "unzip -o $UCP_BUNDLE" withDockerRegistry(reg) { sh """ export DOCKER_TLS_VERIFY=1 export COMPOSE_TLS_VERSION=TLSv1_2 export DOCKER_CERT_PATH=${WORKSPACE}/ucp-bundle-success_bot - export DOCKER_HOST=tcp://ucp.corp-us-east-1.aws.dckr.io:443 + export DOCKER_HOST=$DOCKER_HOST_STRING docker service update --detach=false --force --image docs/docker.github.io:stage-${env.BUILD_NUMBER} docs-stage-docker-com_docs --with-registry-auth """ } } } } - stage( 'update docs-prod' ) { + stage( 'update docs prod' ) { when { branch 'published' } steps { - withVpn(dtrVpnAddress) { - withCredentials(ucpBundle) { - sh 'unzip -o $UCP' + withVpn("$DTR_VPN_ADDRESS") { + sh "unzip -o $UCP_BUNDLE" + withDockerRegistry(reg) { + sh """ + cd ucp-bundle-success_bot + export DOCKER_TLS_VERIFY=1 + export COMPOSE_TLS_VERSION=TLSv1_2 + export DOCKER_CERT_PATH=${WORKSPACE}/ucp-bundle-success_bot + export DOCKER_HOST=$DOCKER_HOST_STRING + docker service update --detach=false --force --image docs/docker.github.io:prod-${env.BUILD_NUMBER} docs-docker-com_docs --with-registry-auth + curl -X POST -H 'Content-type: application/json' --data '{"text":"Successfully published docs. https://docs.docker.com/"}' $SLACK + """ } - withCredentials(slackString) { - withDockerRegistry(reg) { - sh """ - cd ucp-bundle-success_bot - export DOCKER_TLS_VERIFY=1 - export COMPOSE_TLS_VERSION=TLSv1_2 - export DOCKER_CERT_PATH=${WORKSPACE}/ucp-bundle-success_bot - export DOCKER_HOST=tcp://ucp.corp-us-east-1.aws.dckr.io:443 - docker service update --detach=false --force --image docs/docker.github.io:prod-${env.BUILD_NUMBER} docs-docker-com_docs --with-registry-auth - curl -X POST -H 'Content-type: application/json' --data '{"text":"Successfully published docs. 
https://docs.docker.com/"}' $slack - """ - } + } + } + } + } + } + stage( 'docs-private' ) { + agent { + label 'ubuntu-1604-aufs-stable' + } + environment { + DTR_VPN_ADDRESS = credentials('dtr-vpn-address') + DOCKER_HOST_STRING = credentials('docker-host') + UCP_BUNDLE = credentials('ucp-bundle') + } + when { + expression { env.GIT_URL == "https://github.com/docker/docs-private.git" } + } + stages { + stage( 'build and push new beta stage image' ) { + when { + branch 'amberjack' + } + steps { + withDockerRegistry(reg) { + sh """ + docker image build --tag docs/docs-private:beta-stage-${env.BUILD_NUMBER} . && \ + docker image push docs/docs-private:beta-stage-${env.BUILD_NUMBER} + """ + } + } + } + stage( 'build and push new beta image' ) { + when { + branch 'published' + } + steps { + withDockerRegistry(reg) { + sh """ + docker image build --tag docs/docs-private:beta-${env.BUILD_NUMBER} . && \ + docker image push docs/docs-private:beta-${env.BUILD_NUMBER} + """ + } + } + } + stage( 'update beta stage service' ) { + when { + branch 'amberjack' + } + steps { + withVpn("$DTR_VPN_ADDRESS") { + sh "unzip -o $UCP_BUNDLE" + withDockerRegistry(reg) { + sh """ + export DOCKER_TLS_VERIFY=1 + export COMPOSE_TLS_VERSION=TLSv1_2 + export DOCKER_CERT_PATH=${WORKSPACE}/ucp-bundle-success_bot + export DOCKER_HOST=$DOCKER_HOST_STRING + docker service update --detach=false --force --image docs/docs-private:beta-stage-${env.BUILD_NUMBER} docs-beta-stage-docker-com_docs --with-registry-auth + """ + } + } + } + } + stage( 'update beta service' ) { + when { + branch 'published' + } + steps { + withVpn("$DTR_VPN_ADDRESS") { + sh "unzip -o $UCP_BUNDLE" + withDockerRegistry(reg) { + sh """ + export DOCKER_TLS_VERIFY=1 + export COMPOSE_TLS_VERSION=TLSv1_2 + export DOCKER_CERT_PATH=${WORKSPACE}/ucp-bundle-success_bot + export DOCKER_HOST=$DOCKER_HOST_STRING + docker service update --detach=false --force --image docs/docs-private:beta-${env.BUILD_NUMBER} docs-beta-docker-com_docs --with-registry-auth + """ } } } From 9b77e6af4639dd3f4213cfa991b0300a0eaadabf Mon Sep 17 00:00:00 2001 From: Maria Bermudez Date: Wed, 3 Apr 2019 23:41:39 -0700 Subject: [PATCH 15/18] Add HSTS warning for specifying --dtr-external-url --- ee/dtr/admin/install/index.md | 5 ++++- reference/dtr/2.6/cli/install.md | 2 +- reference/dtr/2.6/cli/reconfigure.md | 2 +- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/ee/dtr/admin/install/index.md b/ee/dtr/admin/install/index.md index 67c77ad5e7..779a009003 100644 --- a/ee/dtr/admin/install/index.md +++ b/ee/dtr/admin/install/index.md @@ -54,7 +54,10 @@ information that is necessary. By default DTR is deployed with self-signed certificates, so your UCP deployment might not be able to pull images from DTR. Use the `--dtr-external-url :` optional flag while deploying -DTR, so that UCP is automatically reconfigured to trust DTR. +DTR, so that UCP is automatically reconfigured to trust DTR. Since [HSTS (HTTP Strict-Transport-Security) +header](https://en.wikipedia.org/wiki/HTTP_Strict_Transport_Security) is included in all API responses, +make sure to specify the FQDN (Fully Qualified Domain Name) of your DTR, or your browser may refuse +to load the web interface. ## Step 4. 
Check that DTR is running diff --git a/reference/dtr/2.6/cli/install.md b/reference/dtr/2.6/cli/install.md index 325a09f5b4..4473bd74b7 100644 --- a/reference/dtr/2.6/cli/install.md +++ b/reference/dtr/2.6/cli/install.md @@ -38,7 +38,7 @@ $ docker run -it --rm docker/dtr:{{ site.dtr_version }}.0 install \ | `--debug` | $DEBUG | Enable debug mode for additional logs. | | `--dtr-ca` | $DTR_CA | Use a PEM-encoded TLS CA certificate for DTR. By default DTR generates a self-signed TLS certificate during deployment. You can use your own root CA public certificate with `--dtr-ca "$(cat ca.pem)"`. | | `--dtr-cert` | $DTR_CERT | Use a PEM-encoded TLS certificate for DTR. By default DTR generates a self-signed TLS certificate during deployment. You can use your own public key certificate with `--dtr-cert "$(cat cert.pem)"`. If the certificate has been signed by an intermediate certificate authority, append its public key certificate at the end of the file to establish a chain of trust. | -| `--dtr-external-url` | $DTR_EXTERNAL_URL | URL of the host or load balancer clients use to reach DTR. When you use this flag, users are redirected to UCP for logging in. Once authenticated they are redirected to the URL you specify in this flag. If you don't use this flag, DTR is deployed without single sign-on with UCP. Users and teams are shared but users log in separately into the two applications. You can enable and disable single sign-on within your DTR system settings. Format `https://host[:port]`, where port is the value you used with `--replica-https-port`. | +| `--dtr-external-url` | $DTR_EXTERNAL_URL | URL of the host or load balancer clients use to reach DTR. When you use this flag, users are redirected to UCP for logging in. Once authenticated they are redirected to the URL you specify in this flag. If you don't use this flag, DTR is deployed without single sign-on with UCP. Users and teams are shared but users log in separately into the two applications. You can enable and disable single sign-on within your DTR system settings. Format `https://host[:port]`, where port is the value you used with `--replica-https-port`. Since [HSTS (HTTP Strict-Transport-Security) header](https://en.wikipedia.org/wiki/HTTP_Strict_Transport_Security) is included in all API responses, make sure to specify the FQDN (Fully Qualified Domain Name) of your DTR, or your browser may refuse to load the web interface. | | `--dtr-key` | $DTR_KEY | Use a PEM-encoded TLS private key for DTR. By default DTR generates a self-signed TLS certificate during deployment. You can use your own TLS private key with `--dtr-key "$(cat key.pem)"`. | | `--dtr-storage-volume` | $DTR_STORAGE_VOLUME | Customize the volume to store Docker images. By default DTR creates a volume to store the Docker images in the local filesystem of the node where DTR is running, without high-availability. Use this flag to specify a full path or volume name for DTR to store images. For high-availability, make sure all DTR replicas can read and write data on this volume. If you're using NFS, use `--nfs-storage-url` instead. | | `--enable-pprof` | $DTR_PPROF | Enables pprof profiling of the server. Use `--enable-pprof=false` to disable it. Once DTR is deployed with this flag, you can access the `pprof` endpoint for the api server at `/debug/pprof`, and the registry endpoint at `/registry_debug_pprof/debug/pprof`. 
| diff --git a/reference/dtr/2.6/cli/reconfigure.md b/reference/dtr/2.6/cli/reconfigure.md index d67cb9952d..99fe774f73 100644 --- a/reference/dtr/2.6/cli/reconfigure.md +++ b/reference/dtr/2.6/cli/reconfigure.md @@ -29,7 +29,7 @@ time, configure your DTR for high availability. | `--debug` | $DEBUG | Enable debug mode for additional logs of this bootstrap container (the log level of downstream DTR containers can be set with `--log-level`). | | `--dtr-ca` | $DTR_CA | Use a PEM-encoded TLS CA certificate for DTR. By default DTR generates a self-signed TLS certificate during deployment. You can use your own root CA public certificate with `--dtr-ca "$(cat ca.pem)"`. | | `--dtr-cert` | $DTR_CERT | Use a PEM-encoded TLS certificate for DTR. By default DTR generates a self-signed TLS certificate during deployment. You can use your own public key certificate with `--dtr-cert "$(cat cert.pem)"`. If the certificate has been signed by an intermediate certificate authority, append its public key certificate at the end of the file to establish a chain of trust. | -| `--dtr-external-url` | $DTR_EXTERNAL_URL | URL of the host or load balancer clients use to reach DTR. When you use this flag, users are redirected to UCP for logging in. Once authenticated they are redirected to the url you specify in this flag. If you don't use this flag, DTR is deployed without single sign-on with UCP. Users and teams are shared but users login separately into the two applications. You can enable and disable single sign-on in the DTR settings. Format `https://host[:port]`, where port is the value you used with `--replica-https-port`. | +| `--dtr-external-url` | $DTR_EXTERNAL_URL | URL of the host or load balancer clients use to reach DTR. When you use this flag, users are redirected to UCP for logging in. Once authenticated they are redirected to the url you specify in this flag. If you don't use this flag, DTR is deployed without single sign-on with UCP. Users and teams are shared but users login separately into the two applications. You can enable and disable single sign-on in the DTR settings. Format `https://host[:port]`, where port is the value you used with `--replica-https-port`. Since [HSTS (HTTP Strict-Transport-Security) header](https://en.wikipedia.org/wiki/HTTP_Strict_Transport_Security) is included in all API responses, make sure to specify the FQDN (Fully Qualified Domain Name) of your DTR, or your browser may refuse to load the web interface. | | `--dtr-key` | $DTR_KEY | Use a PEM-encoded TLS private key for DTR. By default DTR generates a self-signed TLS certificate during deployment. You can use your own TLS private key with `--dtr-key "$(cat key.pem)"`. | | `--dtr-storage-volume` | $DTR_STORAGE_VOLUME | Customize the volume to store Docker images. By default DTR creates a volume to store the Docker images in the local filesystem of the node where DTR is running, without high-availability. Use this flag to specify a full path or volume name for DTR to store images. For high-availability, make sure all DTR replicas can read and write data on this volume. If you're using NFS, use `--nfs-storage-url` instead. | | `--enable-pprof` | $DTR_PPROF | Enables pprof profiling of the server. Use `--enable-pprof=false` to disable it. Once DTR is deployed with this flag, you can access the pprof endpoint for the api server at `/debug/pprof`, and the registry endpoint at `/registry_debug_pprof/debug/pprof`. 
From 8ece7f64ebc5724ebbd8dbb80f88f8ef2056fc86 Mon Sep 17 00:00:00 2001
From: Darwin Traver
Date: Thu, 4 Apr 2019 16:08:43 -0400
Subject: [PATCH 16/18] Typo on logging driver name

---
 config/containers/logging/local.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/config/containers/logging/local.md b/config/containers/logging/local.md
index f8d7cee52e..708e4a3ee8 100644
--- a/config/containers/logging/local.md
+++ b/config/containers/logging/local.md
@@ -46,7 +46,7 @@ $ docker run \
 
 ### Options
 
-The `json-file` logging driver supports the following logging options:
+The `local` logging driver supports the following logging options:
 
 | Option | Description | Example value |
 |:------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------|

From 603b14f1f0d8a8cd5cea7474946709947df26787 Mon Sep 17 00:00:00 2001
From: Maria Bermudez
Date: Thu, 4 Apr 2019 16:18:40 -0700
Subject: [PATCH 17/18] Addressed engineering feedback

---
 .../admin/configure/external-storage/nfs.md   |  2 +-
 ee/dtr/admin/configure/external-storage/s3.md |  7 +--
 .../storage-backend-migration.md              | 44 +++++++------------
 3 files changed, 19 insertions(+), 34 deletions(-)

diff --git a/ee/dtr/admin/configure/external-storage/nfs.md b/ee/dtr/admin/configure/external-storage/nfs.md
index 8b90bc43ed..8fa0656688 100644
--- a/ee/dtr/admin/configure/external-storage/nfs.md
+++ b/ee/dtr/admin/configure/external-storage/nfs.md
@@ -55,7 +55,7 @@ To support **NFS v4**, more NFS options have been added to the CLI. See [New Fea
 
 #### DTR 2.6.4
 
-In DTR 2.6.4, a new flag, `--storage-migrated`, [has been added to `docker/dtr reconfigure`](/reference/dtr/2.6/cli/reconfigure/) which lets you indicate the migration status of your storage data during a reconfigure. [Upgrade to 2.6.4](/reference/dtr/2.6/cli/upgrade/) and follow [Best practice for data migration in 2.6.4](/ee/dtr/admin/configure/external-storage/storage-backend-migration/#best-practice-for-data-migration) when switching storage backends.
+In DTR 2.6.4, a new flag, `--storage-migrated`, [has been added to `docker/dtr reconfigure`](/reference/dtr/2.6/cli/reconfigure/) which lets you indicate the migration status of your storage data during a reconfigure. [Upgrade to 2.6.4](/reference/dtr/2.6/cli/upgrade/) and follow [Best practice for data migration in 2.6.4](/ee/dtr/admin/configure/external-storage/storage-backend-migration/#best-practice-for-data-migration) when switching storage backends. The following shows you how to reconfigure DTR using an NFSv4 volume as a storage backend:
+
+```bash
+docker run --rm -it \
diff --git a/ee/dtr/admin/configure/external-storage/s3.md b/ee/dtr/admin/configure/external-storage/s3.md
index bb3292dcd1..a47ebea055 100644
--- a/ee/dtr/admin/configure/external-storage/s3.md
+++ b/ee/dtr/admin/configure/external-storage/s3.md
@@ -133,19 +133,14 @@ DTR supports the following S3 regions:
 
 ## Update your S3 settings on the web interface
 
-When running 2.5.x (with experimental garbage collection) or 2.6.0-2.6.3, there is an issue with [changing your S3 settings on the web interface](/ee/dtr/release-notes#version-26) which leads to erased metadata. Make sure to [back up your DTR metadata](/ee/dtr/admin/disaster-recovery/create-a-backup/#back-up-dtr-metadata) before you proceed.
+When running 2.5.x (with experimental garbage collection) or 2.6.0-2.6.4, there is an issue with [changing your S3 settings on the web interface](/ee/dtr/release-notes#version-26), which leads to erased metadata. Make sure to [back up your DTR metadata](/ee/dtr/admin/disaster-recovery/create-a-backup/#back-up-dtr-metadata) before you proceed.
 
 ## Restore DTR with S3
 
 To [restore DTR using your previously configured S3 settings](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#restoretocloudstorage), use `docker/dtr restore` with `--dtr-use-default-storage` to keep your metadata.
 
-#### DTR 2.6.4
-
-In DTR 2.6.4, a new flag, `--storage-migrated`, [has been added to `docker/dtr reconfigure`](/reference/dtr/2.6/cli/reconfigure/) which lets you indicate the migration status of your storage data during a reconfigure. [Upgrade to 2.6.4](/reference/dtr/2.6/cli/upgrade/) and follow [Best practice for data migration in 2.6.4](/ee/dtr/admin/configure/external-storage/storage-backend-migration/#best-practice-for-data-migration) when switching storage backends.
-
 ## Where to go next
 
-- [Switch storage backends](storage-backend-migration.md)
 - [Create a backup](/ee/dtr/admin/disaster-recovery/create-a-backup/)
 - [Restore from a backup](/ee/dtr/admin/disaster-recovery/restore-from-backup/)
 - [Configure where images are stored](index.md)
diff --git a/ee/dtr/admin/configure/external-storage/storage-backend-migration.md b/ee/dtr/admin/configure/external-storage/storage-backend-migration.md
index a83cdafc7a..6d883cc109 100644
--- a/ee/dtr/admin/configure/external-storage/storage-backend-migration.md
+++ b/ee/dtr/admin/configure/external-storage/storage-backend-migration.md
@@ -4,62 +4,52 @@ description: Storage backend migration for Docker Trusted Registry
 keywords: dtr, storage drivers, local volume, NFS, Azure, S3,
 ---
 
-Starting in DTR 2.6, switching storage backends initializes a new metadata store and erases your existing tags. This helps facilitate online garbage collection, which has been introduced in 2.5 as an experimental feature. In earlier versions, DTR would subsequently start a `tagmigration` job to rebuild tag metadata from the file layout in the image layer store. This job has been discontinued, as your storage backend could get out of sync with your DTR metadata, like your manifests and existing repositories. As best practice, DTR storage backends and metadata should always be moved, backed up, and restored together.
+Starting in DTR 2.6, switching storage backends initializes a new metadata store and erases your existing tags. This helps facilitate online garbage collection, which was introduced in 2.5 as an experimental feature. In earlier versions, DTR would subsequently start a `tagmigration` job to rebuild tag metadata from the file layout in the image layer store. This job has been discontinued for DTR 2.5.x (with garbage collection) and DTR 2.6, as your storage backend could get out of sync with your DTR metadata, like your manifests and existing repositories. As a best practice, DTR storage backends and metadata should always be moved, backed up, and restored together.
 
 ## DTR 2.6.4 and above
 
-In DTR 2.6.4, a new flag, `--storage-migrated`, [has been added to `docker/dtr reconfigure`](/reference/dtr/2.6/cli/reconfigure/) which lets you indicate the migration status of your storage data during a reconfigure.
+In DTR 2.6.4, a new flag, `--storage-migrated`, [has been added to `docker/dtr reconfigure`](/reference/dtr/2.6/cli/reconfigure/) which lets you indicate the migration status of your storage data during a reconfigure. If you are not worried about losing your existing tags, you can skip the recommended steps below and [perform a reconfigure](/reference/dtr/2.6/cli/reconfigure/).
 
 ### Best practice for data migration
 
 Docker recommends the following steps for your storage backend and metadata migration:
 
-1. Disable garbage collection by selecting "Never" under **System > Garbage Collection**, so blobs referenced in the backup that you create continue to exist. See [Garbage collection](/ee/dtr/admin/configure/garbage-collection/) for more details.
+1. Disable garbage collection by selecting "Never" under **System > Garbage Collection**, so blobs referenced in the backup that you create continue to exist. See [Garbage collection](/ee/dtr/admin/configure/garbage-collection/) for more details. Make sure to keep it disabled while you're performing the metadata backup and migrating your storage data.
 
 ![](/ee/dtr/images/garbage-collection-0.png){: .img-fluid .with-border}
 
 2. [Back up your existing metadata](/ee/dtr/admin/disaster-recovery/create-a-backup/#back-up-dtr-metadata). See [docker/dtr backup](/reference/dtr/2.6/cli/backup/) for CLI command description and options.
 
-3. Migrate the contents of your current storage backend to the new one you are switching to. For example, upload your NFS contents to an S3 bucket if you're switching from NFS to S3.
+3. Migrate the contents of your current storage backend to the new one you are switching to. For example, upload your current storage data to your new NFS server.
 
 4. [Restore DTR from your backup](/ee/dtr/admin/disaster-recovery/restore-from-backup/) and specify your new storage backend. See [docker/dtr destroy](/reference/dtr/2.6/cli/destroy/) and [docker/dtr restore](/reference/dtr/2.6/cli/backup/) for CLI command descriptions and options.
 
-5. Run garbage collection in "blob" mode. You can either SSH into a DTR node, or [use a UCP client bundle](/ee/ucp/user-access/cli/) to run the following command:
+5. With DTR restored from your backup and your storage data migrated to your new backend, garbage collect any dangling blobs using the following API request:
 
-    ```bash
-    docker exec -it dtr-jobrunner-<replica-id> sh
-    ```
-    > See [Find your replica ID](/ee/dtr/admin/disaster-recovery/create-a-backup/#find-your-replica-id) for tips on determining your replica ID.
-
-    Within the running container, type `/bin/job_executor` to start the `job_executor` binary.
+    ```bash
+    curl -u <username>:$TOKEN -X POST "https://<dtr-url>/api/v0/jobs" -H "accept: application/json" -H "content-type: application/json" -d "{ \"action\": \"onlinegc_blobs\" }"
+    ```
+    On success, you should get a `202 Accepted` response with a job `id` and other related details.
+
+This ensures any blobs that are not referenced in your previously created backup are destroyed.
 
-    ```bash
-    /bin/job_executor
-
-    / # onlinegc blobs
-    ```
-    > Note that the first line results in a display of the binary name, its usage, and available commands including `onlinegc`. To learn more about a command, run `<command>` --help.
-
-    Running garbage collection in blob mode destroys any new blobs which are not referenced in your previously created backup.
-
-### Alternative options for data migration
+### Alternative option for data migration
 
 - If you have a long maintenance window, you can skip some steps from above and do the following:
-
Put DTR in "read-only" mode. To do so, send the following API request: + 1. Put DTR in "read-only" mode using the following API request: ```bash curl -u :$TOKEN -X POST "https:///api/v0/meta/settings" -H "accept: application/json" -H "content-type: application/json" -d "{ \"readOnlyRegistry\": true }" ``` On success, you should get a `202 Accepted` response. - 2. Migrate the contents of your current storage backend to the new one you are switching to. For example, upload your NFS contents to an S3 bucket if you're switching from NFS to S3. + 2. Migrate the contents of your current storage backend to the new one you are switching to. For example, upload your current storage data to your new NFS server. - 3. [Reconfigure DTR](/reference/dtr/2.6/cli/reconfigure) while specifying the `--storage-migrated` flag to preserve your existing tags. + 3. [Reconfigure DTR](/reference/dtr/2.6/cli/reconfigure) while specifying the `--storage-migrated` flag to preserve your existing tags. -- If you are not worried about inconsistencies in data, skip steps 1 and 2 and perform a reconfigure. -## DTR 2.6.0-2.6.3 and DTR 2.5 (with experimental garbage collection) +## DTR 2.6.0-2.6.4 and DTR 2.5 (with experimental garbage collection) Make sure to [perform a backup](/ee/dtr/admin/disaster-recovery/create-a-backup/#back-up-dtr-data) before you change your storage backend when running DTR 2.5 (with online garbage collection) and 2.6.0-2.6.3. If you encounter an issue with lost tags, refer to the following resources: * For changes to reconfigure and restore options in DTR 2.6, see [docker/dtr reconfigure](/reference/dtr/2.6/cli/reconfigure/) and [docker/dtr restore](/reference/dtr/2.6/cli/restore). @@ -67,7 +57,7 @@ Make sure to [perform a backup](/ee/dtr/admin/disaster-recovery/create-a-backup/ * For NFS-specific changes, see [Use NFS](nfs.md). * For S3-specific changes, see [Learn how to configure DTR with Amazon S3](s3.md). -Upgrade to [DTR 2.6.4](#dtr-264-and-above) and follow [best practice for data migration](#best-practice-for-data-migration) to avoid the wiped tags issue. +Upgrade to [DTR 2.6.4](#dtr-264-and-above) and follow [best practice for data migration](#best-practice-for-data-migration) to avoid the wiped tags issue when moving from one NFS serverto another. 
 
 ## Where to go next
 
From a7793edc746fc3374f1b4a637bf4d528dd2bbcef Mon Sep 17 00:00:00 2001
From: paigehargrave
Date: Thu, 4 Apr 2019 19:45:46 -0400
Subject: [PATCH 18/18] Netlify redirects interlock (#8595)

* Added netlify redirect
---
 ee/ucp/interlock/architecture.md                | 2 ++
 ee/ucp/interlock/config/custom-template.md      | 2 ++
 ee/ucp/interlock/config/haproxy-config.md       | 2 ++
 ee/ucp/interlock/config/host-mode-networking.md | 1 +
 ee/ucp/interlock/config/index.md                | 1 +
 ee/ucp/interlock/config/nginx-config.md         | 2 ++
 ee/ucp/interlock/config/service-labels.md       | 2 ++
 ee/ucp/interlock/config/tuning.md               | 2 ++
 ee/ucp/interlock/config/updates.md              | 2 ++
 ee/ucp/interlock/deploy/index.md                | 1 +
 ee/ucp/interlock/deploy/offline-install.md      | 2 ++
 ee/ucp/interlock/deploy/production.md           | 2 ++
 ee/ucp/interlock/index.md                       | 3 +++
 ee/ucp/interlock/usage/canary.md                | 2 ++
 ee/ucp/interlock/usage/context.md               | 2 ++
 ee/ucp/interlock/usage/index.md                 | 1 +
 ee/ucp/interlock/usage/interlock-vip-mode.md    | 2 ++
 ee/ucp/interlock/usage/redirects.md             | 2 ++
 ee/ucp/interlock/usage/service-clusters.md      | 2 ++
 ee/ucp/interlock/usage/sessions.md              | 2 ++
 ee/ucp/interlock/usage/ssl.md                   | 1 +
 ee/ucp/interlock/usage/websockets.md            | 2 ++
 22 files changed, 40 insertions(+)

diff --git a/ee/ucp/interlock/architecture.md b/ee/ucp/interlock/architecture.md
index 3b455c4af3..78ce9352c0 100644
--- a/ee/ucp/interlock/architecture.md
+++ b/ee/ucp/interlock/architecture.md
@@ -3,6 +3,8 @@ title: Interlock architecture
 description: Learn more about the architecture of the layer 7 routing solution
   for Docker swarm services.
 keywords: routing, UCP, interlock, load balancing
+redirect_from:
+  - https://interlock-dev-docs.netlify.com/intro/architecture/
 ---
 
 This document covers the following considerations:
diff --git a/ee/ucp/interlock/config/custom-template.md b/ee/ucp/interlock/config/custom-template.md
index cc8e63cd8a..b488145442 100644
--- a/ee/ucp/interlock/config/custom-template.md
+++ b/ee/ucp/interlock/config/custom-template.md
@@ -2,6 +2,8 @@ title: Custom templates
 description: Learn how to use a custom extension template
 keywords: routing, proxy, interlock, load balancing
+redirect_from:
+  - https://interlock-dev-docs.netlify.com/ops/custom_template/
 ---
 
 Use a custom extension if a needed option is not available in the extension configuration.
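A quick way to spot-check any one of these mappings once the redirects are live is to probe the old Netlify URL with `curl`; the docs.docker.com target below is inferred from the file being edited here, not confirmed by this patch:

```bash
# Follow the old Netlify URL and print the final status code and URL after
# any redirects. The expected target is an assumption based on the page path.
curl -sL -o /dev/null -w '%{http_code} %{url_effective}\n' \
  https://interlock-dev-docs.netlify.com/intro/architecture/
# Expected once live: 200 https://docs.docker.com/ee/ucp/interlock/architecture/
```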
diff --git a/ee/ucp/interlock/config/haproxy-config.md b/ee/ucp/interlock/config/haproxy-config.md
index 6108e8ca75..4a62dacbaa 100644
--- a/ee/ucp/interlock/config/haproxy-config.md
+++ b/ee/ucp/interlock/config/haproxy-config.md
@@ -2,6 +2,8 @@ title: Configure HAProxy
 description: Learn how to configure an HAProxy extension
 keywords: routing, proxy, interlock, load balancing
+redirect_from:
+  - https://interlock-dev-docs.netlify.com/config/extensions/haproxy/
 ---
 
 The following HAProxy configuration options are available:
diff --git a/ee/ucp/interlock/config/host-mode-networking.md b/ee/ucp/interlock/config/host-mode-networking.md
index 152fb7b97a..d8d286472d 100644
--- a/ee/ucp/interlock/config/host-mode-networking.md
+++ b/ee/ucp/interlock/config/host-mode-networking.md
@@ -6,6 +6,7 @@ keywords: routing, proxy, interlock, load balancing
 redirect_from:
   - /ee/ucp/interlock/usage/host-mode-networking/
   - /ee/ucp/interlock/deploy/host-mode-networking/
+  - https://interlock-dev-docs.netlify.com/usage/host_mode/
 ---
 
 By default, layer 7 routing components communicate with one another using
diff --git a/ee/ucp/interlock/config/index.md b/ee/ucp/interlock/config/index.md
index e38ba7bc4b..cfb7cb8804 100644
--- a/ee/ucp/interlock/config/index.md
+++ b/ee/ucp/interlock/config/index.md
@@ -5,6 +5,7 @@ keywords: routing, proxy, interlock, load balancing
 redirect_from:
   - /ee/ucp/interlock/deploy/configure/
   - /ee/ucp/interlock/usage/default-service/
+  - https://interlock-dev-docs.netlify.com/config/interlock/
 ---
 
 To further customize the layer 7 routing solution, you must update the
diff --git a/ee/ucp/interlock/config/nginx-config.md b/ee/ucp/interlock/config/nginx-config.md
index 580fd470e4..ecdccc9024 100644
--- a/ee/ucp/interlock/config/nginx-config.md
+++ b/ee/ucp/interlock/config/nginx-config.md
@@ -2,6 +2,8 @@ title: Configure Nginx
 description: Learn how to configure an nginx extension
 keywords: routing, proxy, interlock, load balancing
+redirect_from:
+  - https://interlock-dev-docs.netlify.com/config/extensions/nginx/
 ---
 
 By default, nginx is used as a proxy, so the following configuration options are
diff --git a/ee/ucp/interlock/config/service-labels.md b/ee/ucp/interlock/config/service-labels.md
index 2ee2d3170b..1ec898358b 100644
--- a/ee/ucp/interlock/config/service-labels.md
+++ b/ee/ucp/interlock/config/service-labels.md
@@ -2,6 +2,8 @@ title: Use application service labels
 description: Learn how applications use service labels for publishing
 keywords: routing, proxy, interlock, load balancing
+redirect_from:
+  - https://interlock-dev-docs.netlify.com/config/service_labels/
 ---
 
 Service labels define hostnames that are routed to the
diff --git a/ee/ucp/interlock/config/tuning.md b/ee/ucp/interlock/config/tuning.md
index 21c74ea66e..8e0ea6648d 100644
--- a/ee/ucp/interlock/config/tuning.md
+++ b/ee/ucp/interlock/config/tuning.md
@@ -2,6 +2,8 @@ title: Tune the proxy service
 description: Learn how to tune the proxy service for environment optimization
 keywords: routing, proxy, interlock
+redirect_from:
+  - https://interlock-dev-docs.netlify.com/ops/tuning/
 ---
 
 ## Constrain the proxy service to multiple dedicated worker nodes
diff --git a/ee/ucp/interlock/config/updates.md b/ee/ucp/interlock/config/updates.md
index e984e28fbe..8f03783549 100644
--- a/ee/ucp/interlock/config/updates.md
+++ b/ee/ucp/interlock/config/updates.md
@@ -2,6 +2,8 @@ title: Update Interlock services
 description: Learn how to update the UCP layer 7 routing solution services
 keywords: routing, proxy
+redirect_from:
+  - https://interlock-dev-docs.netlify.com/ops/updates/
 ---
 
 There are two parts to the update process:
diff --git a/ee/ucp/interlock/deploy/index.md b/ee/ucp/interlock/deploy/index.md
index f282b28c64..068ad42ec9 100644
--- a/ee/ucp/interlock/deploy/index.md
+++ b/ee/ucp/interlock/deploy/index.md
@@ -4,6 +4,7 @@ description: Learn the deployment steps for the UCP layer 7 routing solution
 keywords: routing, proxy, interlock
 redirect_from:
   - /ee/ucp/interlock/deploy/configuration-reference/
+  - https://interlock-dev-docs.netlify.com/install/
 ---
 
 This topic covers deploying a layer 7 routing solution into a Docker Swarm to route traffic to Swarm services. Layer 7 routing is also referred to as an HTTP routing mesh.
diff --git a/ee/ucp/interlock/deploy/offline-install.md b/ee/ucp/interlock/deploy/offline-install.md
index 4b27f8c4c5..727b46049a 100644
--- a/ee/ucp/interlock/deploy/offline-install.md
+++ b/ee/ucp/interlock/deploy/offline-install.md
@@ -2,6 +2,8 @@ title: Offline installation considerations
 description: Learn how to to install Interlock on a Docker cluster without internet access.
 keywords: routing, proxy, interlock
+redirect_from:
+  - https://interlock-dev-docs.netlify.com/install/offline/
 ---
 
 To install Interlock on a Docker cluster without internet access, the Docker images must be loaded. This topic describes how to export the images from a local Docker
diff --git a/ee/ucp/interlock/deploy/production.md b/ee/ucp/interlock/deploy/production.md
index 0a353b4e8c..ca98d9809b 100644
--- a/ee/ucp/interlock/deploy/production.md
+++ b/ee/ucp/interlock/deploy/production.md
@@ -3,6 +3,8 @@ title: Configure layer 7 routing for production
 description: Learn how to configure the layer 7 routing solution for a production
   environment.
 keywords: routing, proxy, interlock
+redirect_from:
+  - https://interlock-dev-docs.netlify.com/install/production/
 ---
 
 This section includes documentation on configuring Interlock
diff --git a/ee/ucp/interlock/index.md b/ee/ucp/interlock/index.md
index 6aed55d0a5..7e91868220 100644
--- a/ee/ucp/interlock/index.md
+++ b/ee/ucp/interlock/index.md
@@ -2,6 +2,9 @@ title: Layer 7 routing overview
 description: Learn how to route layer 7 traffic to your Swarm services
 keywords: routing, UCP, interlock, load balancing
+redirect_from:
+  - https://interlock-dev-docs.netlify.com/
+  - https://interlock-dev-docs.netlify.com/intro/about/
 ---
 
 Application-layer (Layer 7) routing is the application routing and load balancing (ingress routing) system included with Docker Enterprise for Swarm orchestration. Interlock architecture takes advantage of the underlying Swarm components to provide scalable Layer 7 routing and Layer 4 VIP mode functionality.
diff --git a/ee/ucp/interlock/usage/canary.md b/ee/ucp/interlock/usage/canary.md
index 2041ec7f16..60f6bc4f0a 100644
--- a/ee/ucp/interlock/usage/canary.md
+++ b/ee/ucp/interlock/usage/canary.md
@@ -2,6 +2,8 @@ title: Publish Canary application instances
 description: Learn how to do canary deployments for your Docker swarm services
 keywords: routing, proxy
+redirect_from:
+  - https://interlock-dev-docs.netlify.com/usage/canary/
 ---
 
 The following example publishes a service as a canary instance.
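The canary page's own example sits outside this hunk. Purely as an illustrative sketch (the image names, the network, and the assumption that traffic splits in proportion to replica counts are placeholders, not the page's actual example), a canary publish using Interlock service labels might look like this:

```bash
# Overlay network shared by both versions; "demo" is a placeholder name.
docker network create -d overlay demo

# Stable version: four replicas. Assuming the proxy balances evenly across
# all tasks that match the host label, these take roughly 80% of requests.
docker service create --name demo-v1 \
  --network demo --replicas 4 \
  --label com.docker.lb.hosts=demo.local \
  --label com.docker.lb.port=8080 \
  myorg/demo:1.0

# Canary version: one replica sharing the same host label takes the rest.
docker service create --name demo-v2 \
  --network demo --replicas 1 \
  --label com.docker.lb.hosts=demo.local \
  --label com.docker.lb.port=8080 \
  myorg/demo:2.0
```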
diff --git a/ee/ucp/interlock/usage/context.md b/ee/ucp/interlock/usage/context.md
index 6390931108..2b63220795 100644
--- a/ee/ucp/interlock/usage/context.md
+++ b/ee/ucp/interlock/usage/context.md
@@ -3,6 +3,8 @@ title: Use context and path-based routing
 description: Learn how to route traffic to your Docker swarm services based
   on a url path.
 keywords: routing, proxy
+redirect_from:
+  - https://interlock-dev-docs.netlify.com/usage/context_root/
 ---
 
 The following example publishes a service using context or path based routing.
diff --git a/ee/ucp/interlock/usage/index.md b/ee/ucp/interlock/usage/index.md
index 0a488ccf26..61eb2889b5 100644
--- a/ee/ucp/interlock/usage/index.md
+++ b/ee/ucp/interlock/usage/index.md
@@ -5,6 +5,7 @@ keywords: routing, proxy
 redirect_from:
   - /ee/ucp/interlock/deploy/configuration-reference/
   - /ee/ucp/interlock/deploy/configure/
+  - https://interlock-dev-docs.netlify.com/usage/hello/
 ---
 
 After Interlock is deployed, you can launch and publish services and applications.
diff --git a/ee/ucp/interlock/usage/interlock-vip-mode.md b/ee/ucp/interlock/usage/interlock-vip-mode.md
index 1291dcf655..b1c3be7edf 100644
--- a/ee/ucp/interlock/usage/interlock-vip-mode.md
+++ b/ee/ucp/interlock/usage/interlock-vip-mode.md
@@ -2,6 +2,8 @@ title: Specify a routing mode
 description: Learn about task and VIP backend routing modes for Layer 7 routing
 keywords: routing, proxy, interlock
+redirect_from:
+  - https://interlock-dev-docs.netlify.com/usage/default_backend/
 ---
 
 You can publish services using "vip" and "task" backend routing modes.
diff --git a/ee/ucp/interlock/usage/redirects.md b/ee/ucp/interlock/usage/redirects.md
index 82e3e72764..555f61355b 100644
--- a/ee/ucp/interlock/usage/redirects.md
+++ b/ee/ucp/interlock/usage/redirects.md
@@ -3,6 +3,8 @@ title: Implement application redirects
 description: Learn how to implement redirects using swarm services and the
   layer 7 routing solution for UCP.
 keywords: routing, proxy, redirects, interlock
+redirect_from:
+  - https://interlock-dev-docs.netlify.com/usage/redirects/
 ---
 
 The following example publishes a service and configures a redirect from `old.local` to `new.local`.
diff --git a/ee/ucp/interlock/usage/service-clusters.md b/ee/ucp/interlock/usage/service-clusters.md
index 181ad9bcfb..bb8f272a5b 100644
--- a/ee/ucp/interlock/usage/service-clusters.md
+++ b/ee/ucp/interlock/usage/service-clusters.md
@@ -2,6 +2,8 @@ title: Implement service clusters
 description: Learn how to route traffic to different proxies using a service cluster.
 keywords: ucp, interlock, load balancing, routing
+redirect_from:
+  - https://interlock-dev-docs.netlify.com/usage/service_clusters/
 ---
 
 ## Configure Proxy Services
diff --git a/ee/ucp/interlock/usage/sessions.md b/ee/ucp/interlock/usage/sessions.md
index e312826b54..f092c95b6b 100644
--- a/ee/ucp/interlock/usage/sessions.md
+++ b/ee/ucp/interlock/usage/sessions.md
@@ -3,6 +3,8 @@ title: Implement persistent (sticky) sessions
 description: Learn how to configure your swarm services with persistent sessions
   using UCP.
 keywords: routing, proxy, cookies, IP hash
+redirect_from:
+  - https://interlock-dev-docs.netlify.com/usage/sessions/
 ---
 
 You can publish a service and configure the proxy for persistent (sticky) sessions using:
diff --git a/ee/ucp/interlock/usage/ssl.md b/ee/ucp/interlock/usage/ssl.md
index 154636f2fe..d29d41cb12 100644
--- a/ee/ucp/interlock/usage/ssl.md
+++ b/ee/ucp/interlock/usage/ssl.md
@@ -4,6 +4,7 @@ description: Learn how to configure your swarm services with SSL.
 keywords: routing, proxy, tls, ssl
 redirect_from:
   - /ee/ucp/interlock/usage/ssl/
+  - https://interlock-dev-docs.netlify.com/usage/ssl/
 ---
 
 This topic covers Swarm services implementation with:
diff --git a/ee/ucp/interlock/usage/websockets.md b/ee/ucp/interlock/usage/websockets.md
index 69eb9febb7..637cd979a5 100644
--- a/ee/ucp/interlock/usage/websockets.md
+++ b/ee/ucp/interlock/usage/websockets.md
@@ -2,6 +2,8 @@ title: Use websockets
 description: Learn how to use websockets in your swarm services.
 keywords: routing, proxy, websockets
+redirect_from:
+  - https://interlock-dev-docs.netlify.com/usage/websockets/
 ---
 
 First, create an overlay network to isolate and secure service traffic:
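The hunk ends on that sentence; the command it introduces is presumably an overlay network creation along these lines (the network name is a placeholder, not taken from the page):

```bash
# Create an overlay network that the websocket service and the proxy share.
docker network create -d overlay demo
```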