Merge branch 'master' of github.com:docker/docker.github.io

Maria Bermudez 2019-04-22 18:14:59 -06:00
commit 964ce19005
35 changed files with 338 additions and 178 deletions

View File

@@ -1345,8 +1345,6 @@ manuals:
         path: /ee/ucp/interlock/usage/canary/
       - title: Using context or path-based routing
         path: /ee/ucp/interlock/usage/context/
-      - title: Publishing a default host service
-        path: /ee/ucp/interlock/usage/default-backend/
       - title: Specifying a routing mode
         path: /ee/ucp/interlock/usage/interlock-vip-mode/
       - title: Using routing labels

View File

@@ -22,11 +22,11 @@ PollInterval = "3s"
   [Extensions]
     [Extensions.default]
-      Image = "docker/ucp-interlock-extension:3.0.1"
+      Image = "{{ page.ucp_org }}/ucp-interlock-extension:{{ page.ucp_version }}"
       ServiceName = "ucp-interlock-extension"
       Args = []
       Constraints = ["node.labels.com.docker.ucp.orchestrator.swarm==true", "node.platform.os==linux"]
-      ProxyImage = "docker/ucp-interlock-proxy:3.0.1"
+      ProxyImage = "{{ page.ucp_org }}/ucp-interlock-proxy:{{ page.ucp_version }}"
       ProxyServiceName = "ucp-interlock-proxy"
       ProxyConfigPath = "/etc/nginx/nginx.conf"
       ProxyReplicas = 2

View File

@@ -49,10 +49,10 @@ PollInterval = "3s"
   [Extensions]
     [Extensions.us-east]
-      Image = "interlockpreview/interlock-extension-nginx:2.0.0-preview"
+      Image = "{{ page.ucp_org }}/ucp-interlock-extension:{{ page.ucp_version }}"
       Args = ["-D"]
       ServiceName = "interlock-ext-us-east"
-      ProxyImage = "nginx:alpine"
+      ProxyImage = "{{ page.ucp_org }}/ucp-interlock-proxy:{{ page.ucp_version }}"
       ProxyArgs = []
       ProxyServiceName = "interlock-proxy-us-east"
       ProxyConfigPath = "/etc/nginx/nginx.conf"

@@ -74,10 +74,10 @@ PollInterval = "3s"
       proxy_region = "us-east"
     [Extensions.us-west]
-      Image = "interlockpreview/interlock-extension-nginx:2.0.0-preview"
+      Image = "{{ page.ucp_org }}/ucp-interlock-extension:{{ page.ucp_version }}"
       Args = ["-D"]
       ServiceName = "interlock-ext-us-west"
-      ProxyImage = "nginx:alpine"
+      ProxyImage = "{{ page.ucp_org }}/ucp-interlock-proxy:{{ page.ucp_version }}"
       ProxyArgs = []
       ProxyServiceName = "interlock-proxy-us-west"
       ProxyConfigPath = "/etc/nginx/nginx.conf"
@@ -119,7 +119,7 @@ $> docker service create \
     --network interlock \
     --constraint node.role==manager \
     --config src=service.interlock.conf,target=/config.toml \
-    interlockpreview/interlock:2.0.0-preview -D run -c /config.toml
+    {{ page.ucp_org }}/ucp-interlock:{{ page.ucp_version }} -D run -c /config.toml
 sjpgq7h621exno6svdnsvpv9z
 ```

View File

@@ -2,6 +2,8 @@
 title: Set up Security Scanning in DTR
 description: Enable and configure Docker Security Scanning for Docker Trusted Registry.
 keywords: registry, scanning, security scan, vulnerability, CVE
+redirect_from:
+  - /datacenter/dtr/2.2/guides/admin/configure/set-up-vulnerability-scans/
 ---

 This page explains how to set up and enable Docker Security Scanning on an

View File

@@ -2,7 +2,7 @@
 title: Create a backup
 description: Learn how to create a backup of Docker Trusted Registry, for disaster recovery.
 keywords: dtr, disaster recovery
-toc_max_header: 5
+toc_max_header: 3
 ---

 {% assign metadata_backup_file = "dtr-metadata-backup.tar" %}
@@ -94,15 +94,35 @@ the way you back up images depends on the storage backend you're using.

 If you've configured DTR to store images on the local file system or NFS mount,
 you can back up the images by using SSH to log into a DTR node,
-and creating a tar archive of the [dtr-registry volume](../../architecture.md):
+and creating a `tar` archive of the [dtr-registry volume](../../architecture.md):
+
+#### Example backup commands
+
+##### Local images

 {% raw %}
 ```none
-sudo tar -cf {{ image_backup_file }} \
-  -C /var/lib/docker/volumes/ dtr-registry-<replica-id>
+sudo tar -cf dtr-image-backup-$(date +%Y%m%d-%H_%M_%S).tar \
+  /var/lib/docker/volumes/dtr-registry-$(docker ps --filter name=dtr-rethinkdb \
+  --format "{{ .Names }}" | sed 's/dtr-rethinkdb-//')
 ```
 {% endraw %}
+
+##### NFS-mounted images
+
+{% raw %}
+```none
+sudo tar -cf dtr-image-backup-$(date +%Y%m%d-%H_%M_%S).tar \
+  /var/lib/docker/volumes/dtr-registry-nfs-$(docker ps --filter name=dtr-rethinkdb \
+  --format "{{ .Names }}" | sed 's/dtr-rethinkdb-//')
+```
+{% endraw %}
+
+###### Expected output
+
+```bash
+tar: Removing leading `/' from member names
+```

 If you're using a different storage backend, follow the best practices
 recommended for that system.
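
A quick integrity check right after writing the archive can catch a corrupt backup before it is needed for a restore. A minimal sketch, assuming the `dtr-image-backup-*.tar` naming used above:

```bash
# List the first archive entries to confirm the tar is readable and
# actually contains the dtr-registry volume contents.
tar -tf dtr-image-backup-*.tar | head -10
```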
@@ -110,37 +130,50 @@ recommended for that system.

 ### Back up DTR metadata

 To create a DTR backup, load your UCP client bundle, and run the following
-command, replacing the placeholders with real values:
+chained commands:

-```bash
-read -sp 'ucp password: ' UCP_PASSWORD;
-```
-
-This prompts you for the UCP password. Next, run the following to back up your DTR metadata and save the result into a tar archive. You can learn more about the supported flags in
-the [reference documentation](/reference/dtr/2.6/cli/backup.md).
-
-```bash
+{% raw %}
+```none
+DTR_VERSION=$(docker container inspect $(docker container ps -f name=dtr-registry -q) | \
+  grep -m1 -Po '(?<=DTR_VERSION=)\d.\d.\d'); \
+REPLICA_ID=$(docker ps --filter name=dtr-rethinkdb --format "{{ .Names }}" | head -1 | \
+  sed 's|.*/||' | sed 's/dtr-rethinkdb-//'); \
+read -p 'ucp-url (The UCP URL including domain and port): ' UCP_URL; \
+read -p 'ucp-username (The UCP administrator username): ' UCP_ADMIN; \
+read -sp 'ucp password: ' UCP_PASSWORD; \
 docker run --log-driver none -i --rm \
   --env UCP_PASSWORD=$UCP_PASSWORD \
-  {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} backup \
-  --ucp-url <ucp-url> \
-  --ucp-insecure-tls \
-  --ucp-username <ucp-username> \
-  --existing-replica-id <replica-id> > {{ metadata_backup_file }}
+  docker/dtr:$DTR_VERSION backup \
+  --ucp-username $UCP_ADMIN \
+  --ucp-url $UCP_URL \
+  --ucp-ca "$(curl https://${UCP_URL}/ca)" \
+  --existing-replica-id $REPLICA_ID > dtr-metadata-${DTR_VERSION}-backup-$(date +%Y%m%d-%H_%M_%S).tar
 ```
+{% endraw %}

-Where:
+#### UCP field prompts

-* `<ucp-url>` is the url you use to access UCP.
+* `<ucp-url>` is the URL you use to access UCP.
 * `<ucp-username>` is the username of a UCP administrator.
-* `<replica-id>` is the id of the DTR replica to backup.
+* `<replica-id>` is the DTR replica ID to back up.
+
+The above chained commands run through the following tasks:
+1. Sets your DTR version and replica ID. To back up
+a specific replica, set the replica ID manually by modifying the
+`--existing-replica-id` flag in the backup command.
+2. Prompts you for your UCP URL (domain and port) and admin username.
+3. Prompts you for your UCP password without saving it to your disk or printing it on the terminal.
+4. Retrieves the CA certificate for your specified UCP URL. To skip TLS verification, replace the `--ucp-ca`
+flag with `--ucp-insecure-tls`. Docker does not recommend this flag for production environments.
+5. Includes DTR version and timestamp to your `tar` backup file.
+
+You can learn more about the supported flags in
+the [reference documentation](/reference/dtr/2.6/cli/backup.md).

-By default the backup command doesn't stop the DTR replica being backed up.
-This means you can take frequent backups without affecting your users.
+By default, the backup command does not pause the DTR replica being backed up to
+prevent interruptions of user access to DTR. Since the replica
+is not stopped, changes that happen during the backup may not be saved.

-You can use the `--offline-backup` option to stop the DTR replica while taking
-the backup. If you do this, remove the replica from the load balancing pool.
+Use the `--offline-backup` flag to stop the DTR replica during the backup procedure. If you set this flag,
+remove the replica from the load balancing pool to avoid user interruption.

 Also, the backup contains sensitive information
 like private keys, so you can encrypt the backup by running:
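
As a minimal sketch of that encryption step, assuming GnuPG (`gpg`) is available on the node and the default metadata backup file name:

```bash
# Symmetric encryption prompts for a passphrase; store it securely,
# because the backup cannot be decrypted without it.
gpg --symmetric dtr-metadata-backup.tar
```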

View File

@@ -5,7 +5,7 @@ keywords: registry, promotion, mirror
 ---

 Docker Trusted Registry allows you to create mirroring policies for a repository.
-When an image gets pushed to a repository and meets a certain criteria,
+When an image gets pushed to a repository and meets the mirroring criteria,
 DTR automatically pushes it to a repository in a remote Docker Trusted or Hub registry.

 This not only allows you to mirror images but also allows you to create

View File

@@ -4,63 +4,54 @@ description: Learn how to install Docker Universal Control Plane in a Microsoft
 keywords: Universal Control Plane, UCP, install, Docker EE, Azure, Kubernetes
 ---

-Docker UCP closely integrates into Microsoft Azure for its Kubernetes Networking
-and Persistent Storage feature set. UCP deploys the Calico CNI provider. In Azure
+Docker Universal Control Plane (UCP) closely integrates with Microsoft Azure for its Kubernetes Networking
+and Persistent Storage feature set. UCP deploys the Calico CNI provider. In Azure,
 the Calico CNI leverages the Azure networking infrastructure for data path
 networking and the Azure IPAM for IP address management. There are
-infrastructure prerequisites that are required prior to UCP installation for the
+infrastructure prerequisites required prior to UCP installation for the
 Calico / Azure integration.

 ## Docker UCP Networking

-Docker UCP configures the Azure IPAM module for Kubernetes to allocate
-IP addresses to Kubernetes pods. The Azure IPAM module requires each Azure
-virtual machine that's part of the Kubernetes cluster to be configured with a pool of
-IP addresses.
+Docker UCP configures the Azure IPAM module for Kubernetes to allocate IP
+addresses for Kubernetes pods. The Azure IPAM module requires each Azure virtual
+machine which is part of the Kubernetes cluster to be configured with a pool of IP
+addresses.

-There are two options for provisoning IPs for the Kubernetes cluster on Azure
+There are two options for provisioning IPs for the Kubernetes cluster on Azure:

-- Docker UCP provides an automated mechanism to configure and maintain IP pools
-for standalone Azure virtual machines. This service runs within the calico-node daemonset
-and by default will provision 128 IP address for each node. This value can be
-configured through the `azure_ip_count` in the UCP
-[configuration file](../configure/ucp-configuration-file) before or after the
-UCP installation. Note that if this value is reduced post-installation, existing
-virtual machines will not be reconciled, and you will have to manually edit the IP count
-in Azure.
-- Manually provision additional IP address for each Azure virtual machine. This could be done
-as part of an Azure Virtual Machine Scale Set through an ARM template. You can find an example [here](#set-up-ip-configurations-on-an-azure-virtual-machine-scale-set).
-
-Note that the `azure_ip_count` value in the UCP
-[configuration file](../configure/ucp-configuration-file) will need to be set
-to 0, otherwise UCP's IP Allocator service will provision the IP Address on top of
-those you have already provisioned.
+- _An automated mechanism provided by UCP which allows for IP pool configuration and maintenance
+for standalone Azure virtual machines._ This service runs within the
+`calico-node` daemonset and provisions 128 IP addresses for each
+node by default. For information on customizing this value, see [Adjusting the IP count value](#adjusting-the-ip-count-value).
+- _Manual provision of additional IP addresses for each Azure virtual machine._ This
+could be done through the Azure Portal, the Azure CLI `$ az network nic ip-config create`,
+or an ARM template. You can find an example of an ARM template
+[here](#manually-provision-ip-address-pools-as-part-of-an-azure-virtual-machine-scale-set).

 ## Azure Prerequisites

-You must meet these infrastructure prerequisites in order
-to successfully deploy Docker UCP on Azure
+You must meet the following infrastructure prerequisites in order
+to successfully deploy Docker UCP on Azure:

-- All UCP Nodes (Managers and Workers) need to be deployed into the same
-Azure Resource Group. The Azure Networking (Vnets, Subnets, Security Groups)
-components could be deployed in a second Azure Resource Group.
+- All UCP Nodes (Managers and Workers) need to be deployed into the same Azure
+Resource Group. The Azure Networking components (Virtual Network, Subnets,
+Security Groups) could be deployed in a second Azure Resource Group.
-- The Azure Vnet and Subnet must be appropriately sized for your
-environment, and addresses from this pool are consumed by Kubernetes Pods. For more information, see
-[Considerations for IPAM
-Configuration](#considerations-for-ipam-configuration).
+- The Azure Virtual Network and Subnet must be appropriately sized for your
+environment, as addresses from this pool will be consumed by Kubernetes Pods.
+For more information, see [Considerations for IPAM
+Configuration](#considerations-for-ipam-configuration).
-- All UCP Nodes (Managers and Workers) need to be attached to the same
-Azure Subnet.
+- All UCP worker and manager nodes need to be attached to the same Azure
+Subnet.
-- All UCP (Managers and Workers) need to be tagged in Azure with the
-`Orchestrator` tag. Note the value for this tag is the Kubernetes version number
-in the format `Orchestrator=Kubernetes:x.y.z`. This value may change in each
-UCP release. To find the relevant version please see the UCP
-[Release Notes](../../release-notes). For example for UCP 3.1.0 the tag
-would be `Orchestrator=Kubernetes:1.11.2`.
 - The Azure Virtual Machine Object Name needs to match the Azure Virtual Machine
-Computer Name and the Node Operating System's Hostname. Note this applies to the
-FQDN of the host including domain names.
+Computer Name and the Node Operating System's Hostname which is the FQDN of
+the host, including domain names. Note that this requires all characters to be in lowercase.
 - An Azure Service Principal with `Contributor` access to the Azure Resource
-Group hosting the UCP Nodes. Note, if using a separate networking Resource
-Group the same Service Principal will need `Network Contributor` access to this
-Resource Group.
+Group hosting the UCP Nodes. This Service Principal will be used by Kubernetes
+to communicate with the Azure API. The Service Principal ID and Secret Key are
+needed as part of the UCP prerequisites. If you are using a separate Resource
+Group for the networking components, the same Service Principal will need
+`Network Contributor` access to this Resource Group.

 UCP requires the following information for the installation:
@@ -68,17 +59,18 @@ UCP requires the following information for the installation:
 objects are being deployed.
 - `tenantId` - The Azure Active Directory Tenant ID in which the UCP
 objects are being deployed.
-- `aadClientId` - The Azure Service Principal ID
-- `aadClientSecret` - The Azure Service Principal Secret Key
+- `aadClientId` - The Azure Service Principal ID.
+- `aadClientSecret` - The Azure Service Principal Secret Key.

 ### Azure Configuration File

-For Docker UCP to integrate into Microsoft Azure, you need to place an Azure
-configuration file within each UCP node in your cluster, at
-`/etc/kubernetes/azure.json`. The `azure.json` file needs 0644 permissions.
+For Docker UCP to integrate with Microsoft Azure, each UCP node in your cluster
+needs an Azure configuration file, `azure.json`. Place the file within
+`/etc/kubernetes`. Since the config file is owned by `root`, set its permissions
+to `0644` to ensure the container user has read access.

-See the template below. Note entries that do not contain `****` should not be
-changed.
+The following is an example template for `azure.json`. Replace `***` with real values, and leave the other
+parameters as is.

 ```
 {
@@ -105,45 +97,44 @@ changed.
 }
 ```

-There are some optional values for Azure deployments:
+There are some optional parameters for Azure deployments:

-- `"primaryAvailabilitySetName": "****",` - The Worker Nodes availability set.
-- `"vnetResourceGroup": "****",` - If your Azure Network objects live in a
+- `primaryAvailabilitySetName` - The Worker Nodes availability set.
+- `vnetResourceGroup` - The Virtual Network Resource group, if your Azure Network objects live in a
 separate resource group.
-- `"routeTableName": "****",` - If you have defined multiple Route tables within
+- `routeTableName` - If you have defined multiple Route tables within
 an Azure subnet.

-More details on this configuration file can be found
-[here](https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/azure/azure.go).
+See [Kubernetes' azure.go](https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/azure/azure.go) for more details on this configuration file.
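
Purely as an illustrative sketch of writing this file from a shell, with every value a placeholder rather than taken from this page (the field names follow the Kubernetes Azure cloud provider configuration linked above):

```bash
# Hypothetical placeholder values; replace with your own tenant,
# subscription, network, and Service Principal details.
sudo tee /etc/kubernetes/azure.json > /dev/null <<'EOF'
{
    "cloud": "AzurePublicCloud",
    "tenantId": "00000000-0000-0000-0000-000000000000",
    "subscriptionId": "00000000-0000-0000-0000-000000000000",
    "aadClientId": "00000000-0000-0000-0000-000000000000",
    "aadClientSecret": "secret-goes-here",
    "resourceGroup": "ucp-resource-group",
    "location": "westus",
    "subnetName": "ucp-subnet",
    "securityGroupName": "ucp-nsg",
    "vnetName": "ucp-vnet",
    "useInstanceMetadata": true
}
EOF
# Grant read access as described above.
sudo chmod 0644 /etc/kubernetes/azure.json
```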
 ## Considerations for IPAM Configuration

-The subnet and the virtual network associated with the primary interface of
-the Azure virtual machines need to be configured with a large enough address prefix/range.
-The number of required IP addresses depends on the number of pods running
-on each node and the number of nodes in the cluster.
+The subnet and the virtual network associated with the primary interface of the
+Azure virtual machines need to be configured with a large enough address
+prefix/range. The number of required IP addresses depends on the workload and
+the number of nodes in the cluster.

-For example, in a cluster of 256 nodes, to run a maximum of 128 pods
-concurrently on a node, make sure that the address space of the subnet and the
-virtual network can allocate at least 128 * 256 IP addresses, _in addition to_
-initial IP allocations to virtual machine NICs during Azure resource creation.
+For example, in a cluster of 256 nodes, make sure that the address space of the subnet and the
+virtual network can allocate at least 128 * 256 IP addresses, in order to run a maximum of 128 pods
+concurrently on a node. This would be ***in addition to*** initial IP allocations to virtual machine
+NICs (network interfaces) during Azure resource creation.

 Accounting for IP addresses that are allocated to NICs during virtual machine bring-up, set
-the address space of the subnet and virtual network to 10.0.0.0/16. This
+the address space of the subnet and virtual network to `10.0.0.0/16`. This
 ensures that the network can dynamically allocate at least 32768 addresses,
 plus a buffer for initial allocations for primary IP addresses.

 > Azure IPAM, UCP, and Kubernetes
 >
 > The Azure IPAM module queries an Azure virtual machine's metadata to obtain
-> a list of IP addresses that are assigned to the virtual machine's NICs. The
+> a list of IP addresses which are assigned to the virtual machine's NICs. The
 > IPAM module allocates these IP addresses to Kubernetes pods. You configure the
 > IP addresses as `ipConfigurations` in the NICs associated with a virtual machine
 > or scale set member, so that Azure IPAM can provide them to Kubernetes when
 > requested.
 {: .important}

-## Manually provision IP address as part of an Azure virtual machine scale set
+## Manually provision IP address pools as part of an Azure virtual machine scale set

 Configure IP Pools for each member of the virtual machine scale set during provisioning by
 associating multiple `ipConfigurations` with the scale sets
@@ -204,20 +195,109 @@ for each virtual machine in the virtual machine scale set.
 }
 ```

-## Install UCP
+## UCP Installation

-Use the following command to install UCP on the manager node.
+### Adjusting the IP Count Value

-The `--pod-cidr` option maps to the IP address range that you configured for
-the subnets in the previous sections, and the `--host-address` maps to the
-IP address of the master node.
-
-> Note: The `pod-cidr` range must be within an Azure subnet attached to the
-> host.
+If you have manually attached additional IP addresses to the Virtual Machines
+(via an ARM Template, Azure CLI or Azure Portal) or you want to reduce the
+number of IP Addresses automatically provisioned by UCP from the default of 128
+addresses, you can alter the `azure_ip_count` variable in the UCP
+configuration file before installation. If you are happy with 128 addresses per
+Virtual Machine, proceed to [installing UCP](#install-ucp).
+
+Once UCP has been installed, the UCP [configuration
+file](../configure/ucp-configuration-file/) is managed by UCP and populated with
+all of the cluster configuration data, such as AD/LDAP information or networking
+configuration. As there is no Universal Control Plane deployed yet, we are able
+to stage a [configuration file](../configure/ucp-configuration-file/) just
+containing the Azure IP Count value. UCP will populate the rest of the cluster
+variables during and after the installation.
+
+Below are some example configuration files with just the `azure_ip_count`
+variable defined. These 3-line files can be preloaded into a Docker Swarm prior
+to installing UCP in order to override the default `azure_ip_count` value of 128 IP
+addresses per node. See [UCP configuration file](../configure/ucp-configuration-file/)
+to learn more about the configuration file, and other variables that can be staged pre-install.
+
+> Note: Do not set the `azure_ip_count` to a value of less than 6 if you have not
+> manually provisioned additional IP addresses for each Virtual Machine. The UCP
+> installation will need at least 6 IP addresses to allocate to the core UCP components
+> that run as Kubernetes pods. That is in addition to the Virtual
+> Machine's private IP address.
+
+If you have manually provisioned additional IP addresses for each Virtual
+Machine, and want to disallow UCP from dynamically provisioning IP
+addresses for you, then your UCP configuration file would be:
+
+```
+$ vi example-config-1
+
+[cluster_config]
+  azure_ip_count = "0"
+```
+
+If you want to reduce the IP addresses dynamically allocated from 128 to a
+custom value, then your UCP configuration file would be:
+
+```
+$ vi example-config-2
+
+[cluster_config]
+  azure_ip_count = "20" # This value may be different for your environment
+```
+
+See [Considerations for IPAM
+Configuration](#considerations-for-ipam-configuration) to calculate an
+appropriate value.
+
+To preload this configuration file prior to installing UCP:
+
+1. Copy the configuration file to a Virtual Machine that you wish to become a UCP Manager Node.
+2. Initiate a Swarm on that Virtual Machine.
+   ```
+   $ docker swarm init
+   ```
+3. Upload the configuration file to the Swarm, by using a [Docker Swarm Config](/engine/swarm/configs/).
+   This Swarm Config will need to be named `com.docker.ucp.config`.
+   ```
+   $ docker config create com.docker.ucp.config <local-configuration-file>
+   ```
+4. Check that the configuration has been loaded successfully.
+   ```
+   $ docker config list
+   ID                          NAME                    CREATED      UPDATED
+   igca3q30jz9u3e6ecq1ckyofz   com.docker.ucp.config   1 day ago    1 day ago
+   ```
+5. You are now ready to [install UCP](#install-ucp). As you have already staged
+   a UCP configuration file, you will need to add `--existing-config` to the
+   install command below.
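
Swarm configs are immutable, so a mis-staged configuration has to be removed and recreated rather than edited in place. A minimal sketch, using the config name required above and the same `<local-configuration-file>` placeholder:

```bash
# Replace a previously staged UCP configuration before installing.
docker config rm com.docker.ucp.config
docker config create com.docker.ucp.config <local-configuration-file>
```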
+If you need to adjust this value post-installation, see [instructions](../configure/ucp-configuration-file/)
+on how to download the UCP configuration file, change the value, and update the configuration via the API.
+If you reduce the value post-installation, existing virtual machines will not be
+reconciled, and you will have to manually edit the IP count in Azure.
+
+### Install UCP
+
+Run the following command to install UCP on a manager node. The `--pod-cidr`
+option maps to the IP address range that you have configured for the Azure
+subnet, and the `--host-address` maps to the private IP address of the master
+node. Finally, if you have set the [IP Count
+Value](#adjusting-the-ip-count-value), you will need to add `--existing-config`
+to the install command below.
+
+> Note: The `pod-cidr` range must match the Azure Virtual Network's Subnet
+> attached to the hosts. For example, if the Azure Virtual Network had the range
+> `172.0.0.0/16` with Virtual Machines provisioned on an Azure Subnet of
+> `172.0.1.0/24`, then the Pod CIDR should also be `172.0.1.0/24`.

 ```bash
 docker container run --rm -it \
   --name ucp \
-  -v /var/run/docker.sock:/var/run/docker.sock \
+  --volume /var/run/docker.sock:/var/run/docker.sock \
   {{ page.ucp_org }}/{{ page.ucp_repo }}:{{ page.ucp_version }} install \
   --host-address <ucp-ip> \
   --pod-cidr <ip-address-range> \

View File

@@ -20,6 +20,7 @@ containers to be listed as well.

 Click on a container to see more details, like its configurations and logs.

+![](../../images/troubleshoot-with-logs-2.png){: .with-border}

 ## Check the logs from the CLI

@@ -73,7 +74,7 @@ applications won't be affected by this.

 To increase the UCP log level, navigate to the UCP web UI, go to the
 **Admin Settings** tab, and choose **Logs**.

-![](../../images/troubleshoot-with-logs-2.png){: .with-border}
+![](../../images/troubleshoot-with-logs-3.png){: .with-border}

 Once you change the log level to **Debug** the UCP containers restart.
 Now that the UCP components are creating more descriptive logs, you can

Binary file not shown. (Before: 169 KiB, After: 164 KiB)

Binary file not shown. (Before: 76 KiB, After: 119 KiB)

Binary file not shown. (After: 99 KiB)

View File

@@ -3,8 +3,6 @@ title: Interlock architecture
 description: Learn more about the architecture of the layer 7 routing solution
 for Docker swarm services.
 keywords: routing, UCP, interlock, load balancing
-redirect_from:
-  - https://interlock-dev-docs.netlify.com/intro/architecture/
 ---

 This document covers the following considerations:

View File

@@ -2,8 +2,6 @@
 title: Custom templates
 description: Learn how to use a custom extension template
 keywords: routing, proxy, interlock, load balancing
-redirect_from:
-  - https://interlock-dev-docs.netlify.com/ops/custom_template/
 ---

 Use a custom extension if a needed option is not available in the extension configuration.

View File

@@ -2,8 +2,6 @@
 title: Configure HAProxy
 description: Learn how to configure an HAProxy extension
 keywords: routing, proxy, interlock, load balancing
-redirect_from:
-  - https://interlock-dev-docs.netlify.com/config/extensions/haproxy/
 ---

 The following HAProxy configuration options are available:

View File

@@ -6,7 +6,6 @@ keywords: routing, proxy, interlock, load balancing
 redirect_from:
   - /ee/ucp/interlock/usage/host-mode-networking/
   - /ee/ucp/interlock/deploy/host-mode-networking/
-  - https://interlock-dev-docs.netlify.com/usage/host_mode/
 ---

 By default, layer 7 routing components communicate with one another using

@@ -144,10 +143,10 @@ PollInterval = "3s"
 [Extensions]
   [Extensions.default]
-    Image = "interlockpreview/interlock-extension-nginx:2.0.0-preview"
+    Image = "{{ page.ucp_org }}/ucp-interlock-extension:{{ page.ucp_version }}"
     Args = []
     ServiceName = "interlock-ext"
-    ProxyImage = "nginx:alpine"
+    ProxyImage = "{{ page.ucp_org }}/ucp-interlock-proxy:{{ page.ucp_version }}"
     ProxyArgs = []
     ProxyServiceName = "interlock-proxy"
     ProxyConfigPath = "/etc/nginx/nginx.conf"

@@ -178,7 +177,7 @@ $> docker service create \
     --constraint node.role==manager \
     --publish mode=host,target=8080 \
     --config src=service.interlock.conf,target=/config.toml \
-    interlockpreview/interlock:2.0.0-preview -D run -c /config.toml
+    {{ page.ucp_org }}/ucp-interlock:{{ page.ucp_version }} -D run -c /config.toml
 sjpgq7h621exno6svdnsvpv9z
 ```

View File

@@ -5,7 +5,6 @@ keywords: routing, proxy, interlock, load balancing
 redirect_from:
   - /ee/ucp/interlock/deploy/configure/
   - /ee/ucp/interlock/usage/default-service/
-  - https://interlock-dev-docs.netlify.com/config/interlock/
 ---

 To further customize the layer 7 routing solution, you must update the

@@ -174,10 +173,10 @@ DockerURL = "unix:///var/run/docker.sock"
 PollInterval = "3s"

 [Extensions.default]
-  Image = "docker/interlock-extension-nginx:latest"
+  Image = "{{ page.ucp_org }}/interlock-extension-nginx:{{ page.ucp_version }}"
   Args = ["-D"]
   ServiceName = "interlock-ext"
-  ProxyImage = "nginx:alpine"
+  ProxyImage = "{{ page.ucp_org }}/ucp-interlock-proxy:{{ page.ucp_version }}"
   ProxyArgs = []
   ProxyServiceName = "interlock-proxy"
   ProxyConfigPath = "/etc/nginx/nginx.conf"

View File

@@ -2,8 +2,6 @@
 title: Configure Nginx
 description: Learn how to configure an nginx extension
 keywords: routing, proxy, interlock, load balancing
-redirect_from:
-  - https://interlock-dev-docs.netlify.com/config/extensions/nginx/
 ---

 By default, nginx is used as a proxy, so the following configuration options are

View File

@@ -2,8 +2,6 @@
 title: Use application service labels
 description: Learn how applications use service labels for publishing
 keywords: routing, proxy, interlock, load balancing
-redirect_from:
-  - https://interlock-dev-docs.netlify.com/config/service_labels/
 ---

 Service labels define hostnames that are routed to the

View File

@@ -2,8 +2,6 @@
 title: Tune the proxy service
 description: Learn how to tune the proxy service for environment optimization
 keywords: routing, proxy, interlock
-redirect_from:
-  - https://interlock-dev-docs.netlify.com/ops/tuning/
 ---

 ## Constrain the proxy service to multiple dedicated worker nodes

View File

@@ -2,8 +2,6 @@
 title: Update Interlock services
 description: Learn how to update the UCP layer 7 routing solution services
 keywords: routing, proxy, interlock
-redirect_from:
-  - https://interlock-dev-docs.netlify.com/ops/updates/
 ---

 There are two parts to the update process:

@@ -22,16 +20,70 @@ $> docker config create service.interlock.conf.v2 <path-to-new-config>

 Remove the old configuration and specify the new configuration:

 ```bash
-$> docker service update --config-rm service.interlock.conf interlock
-$> docker service update --config-add source=service.interlock.conf.v2,target=/config.toml interlock
+$> docker service update --config-rm service.interlock.conf ucp-interlock
+$> docker service update --config-add source=service.interlock.conf.v2,target=/config.toml ucp-interlock
 ```

-Next, update the Interlock service to use the new image. The following example updates the Interlock core service to use the `sha256:d173014908eb09e9a70d8e5ed845469a61f7cbf4032c28fad0ed9af3fc04ef51`
-version of Interlock. Interlock starts and checks the config object, which has the new extension version, and
+Next, update the Interlock service to use the new image. To pull the latest version of UCP, run the following:
+
+```bash
+$> docker pull docker/ucp:latest
+```
+
+### Example output
+
+```bash
+latest: Pulling from docker/ucp
+cd784148e348: Already exists
+3871e7d70c20: Already exists
+cad04e4a4815: Pull complete
+Digest: sha256:63ca6d3a6c7e94aca60e604b98fccd1295bffd1f69f3d6210031b72fc2467444
+Status: Downloaded newer image for docker/ucp:latest
+docker.io/docker/ucp:latest
+```
+
+Next, list all the latest UCP images. To learn more about `docker/ucp images` and available options,
+see [the reference page](/reference/ucp/3.1/cli/images/).
+
+```bash
+$> docker run --rm docker/ucp images --list
+```
+
+### Example output
+
+```bash
+docker/ucp-agent:{{ page.ucp_version }}
+docker/ucp-auth-store:{{ page.ucp_version }}
+docker/ucp-auth:{{ page.ucp_version }}
+docker/ucp-azure-ip-allocator:{{ page.ucp_version }}
+docker/ucp-calico-cni:{{ page.ucp_version }}
+docker/ucp-calico-kube-controllers:{{ page.ucp_version }}
+docker/ucp-calico-node:{{ page.ucp_version }}
+docker/ucp-cfssl:{{ page.ucp_version }}
+docker/ucp-compose:{{ page.ucp_version }}
+docker/ucp-controller:{{ page.ucp_version }}
+docker/ucp-dsinfo:{{ page.ucp_version }}
+docker/ucp-etcd:{{ page.ucp_version }}
+docker/ucp-hyperkube:{{ page.ucp_version }}
+docker/ucp-interlock-extension:{{ page.ucp_version }}
+docker/ucp-interlock-proxy:{{ page.ucp_version }}
+docker/ucp-interlock:{{ page.ucp_version }}
+docker/ucp-kube-compose-api:{{ page.ucp_version }}
+docker/ucp-kube-compose:{{ page.ucp_version }}
+docker/ucp-kube-dns-dnsmasq-nanny:{{ page.ucp_version }}
+docker/ucp-kube-dns-sidecar:{{ page.ucp_version }}
+docker/ucp-kube-dns:{{ page.ucp_version }}
+docker/ucp-metrics:{{ page.ucp_version }}
+docker/ucp-pause:{{ page.ucp_version }}
+docker/ucp-swarm:{{ page.ucp_version }}
+docker/ucp:{{ page.ucp_version }}
+```
+
+Interlock starts and checks the config object, which has the new extension version, and
 performs a rolling deploy to update all extensions.

 ```bash
 $> docker service update \
-    --image interlockpreview/interlock@sha256:d173014908eb09e9a70d8e5ed845469a61f7cbf4032c28fad0ed9af3fc04ef51 \
-    interlock
+    --image {{ page.ucp_org }}/ucp-interlock:{{ page.ucp_version }} \
+    ucp-interlock
 ```
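
To confirm that the rolling deploy finished, inspect the service's tasks; a minimal sketch, assuming the `ucp-interlock` service name used above:

```bash
# Each task should report the new image and a Running current state.
docker service ps ucp-interlock
```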

View File

@@ -4,7 +4,6 @@ description: Learn the deployment steps for the UCP layer 7 routing solution
 keywords: routing, proxy, interlock
 redirect_from:
   - /ee/ucp/interlock/deploy/configuration-reference/
-  - https://interlock-dev-docs.netlify.com/install/
 ---

 This topic covers deploying a layer 7 routing solution into a Docker Swarm to route traffic to Swarm services. Layer 7 routing is also referred to as an HTTP routing mesh.

@@ -135,9 +134,9 @@ PollInterval = "3s"
 [Extensions]
   [Extensions.default]
-    Image = "interlockpreview/interlock-extension-nginx:2.0.0-preview"
+    Image = "{{ page.ucp_org }}/ucp-interlock-extension:{{ page.ucp_version }}"
     Args = ["-D"]
-    ProxyImage = "nginx:alpine"
+    ProxyImage = "{{ page.ucp_org }}/ucp-interlock-proxy:{{ page.ucp_version }}"
     ProxyArgs = []
     ProxyConfigPath = "/etc/nginx/nginx.conf"
     ProxyReplicas = 1

@@ -179,7 +178,7 @@ $> docker service create \
     --network interlock \
     --constraint node.role==manager \
     --config src=service.interlock.conf,target=/config.toml \
-    interlockpreview/interlock:2.0.0-preview -D run -c /config.toml
+    {{ page.ucp_org }}/ucp-interlock:{{ page.ucp_version }} -D run -c /config.toml
 sjpgq7h621exno6svdnsvpv9z
 ```

@@ -190,8 +189,8 @@ one for the extension service, and one for the proxy service:
 $> docker service ls
 ID            NAME          MODE        REPLICAS  IMAGE                                                       PORTS
 lheajcskcbby  modest_raman  replicated  1/1       nginx:alpine                                                *:80->80/tcp *:443->443/tcp
-oxjvqc6gxf91  keen_clarke   replicated  1/1       interlockpreview/interlock-extension-nginx:2.0.0-preview
-sjpgq7h621ex  interlock     replicated  1/1       interlockpreview/interlock:2.0.0-preview
+oxjvqc6gxf91  keen_clarke   replicated  1/1       {{ page.ucp_org }}/ucp-interlock-extension:{{ page.ucp_version }}
+sjpgq7h621ex  interlock     replicated  1/1       {{ page.ucp_org }}/ucp-interlock:{{ page.ucp_version }}
 ```

 The Interlock traffic layer is now deployed.

View File

@@ -2,8 +2,6 @@
 title: Offline installation considerations
 description: Learn how to install Interlock on a Docker cluster without internet access.
 keywords: routing, proxy, interlock
-redirect_from:
-  - https://interlock-dev-docs.netlify.com/install/offline/
 ---

 To install Interlock on a Docker cluster without internet access, the Docker images must be loaded. This topic describes how to export the images from a local Docker

@@ -12,13 +10,14 @@ engine and then loading them to the Docker Swarm cluster.

 First, using an existing Docker engine, save the images:

 ```bash
-$> docker save docker/interlock:latest > interlock.tar
-$> docker save docker/interlock-extension-nginx:latest > interlock-extension-nginx.tar
-$> docker save nginx:alpine > nginx.tar
+$> docker save {{ page.ucp_org }}/ucp-interlock:{{ page.ucp_version }} > interlock.tar
+$> docker save {{ page.ucp_org }}/ucp-interlock-extension:{{ page.ucp_version }} > interlock-extension-nginx.tar
+$> docker save {{ page.ucp_org }}/ucp-interlock-proxy:{{ page.ucp_version }} > nginx.tar
 ```

-Note: replace `docker/interlock-extension-nginx:latest` and `nginx:alpine` with the corresponding
-extension and proxy image if you are not using Nginx.
+Note: replace `{{ page.ucp_org }}/ucp-interlock-extension:{{ page.ucp_version
+}}` and `{{ page.ucp_org }}/ucp-interlock-proxy:{{ page.ucp_version }}` with the
+corresponding extension and proxy image if you are not using Nginx.
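
On the offline cluster, the transferred archives are then loaded back into each engine with `docker load`; a minimal sketch using the file names produced above:

```bash
# Run on every node that will host Interlock components.
docker load < interlock.tar
docker load < interlock-extension-nginx.tar
docker load < nginx.tar
```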
 You should have the following three files:

View File

@@ -3,8 +3,6 @@ title: Configure layer 7 routing for production
 description: Learn how to configure the layer 7 routing solution for a production
 environment.
 keywords: routing, proxy, interlock
-redirect_from:
-  - https://interlock-dev-docs.netlify.com/install/production/
 ---

 This section includes documentation on configuring Interlock

View File

@@ -2,9 +2,6 @@
 title: Layer 7 routing overview
 description: Learn how to route layer 7 traffic to your Swarm services
 keywords: routing, UCP, interlock, load balancing
-redirect_from:
-  - https://interlock-dev-docs.netlify.com/
-  - https://interlock-dev-docs.netlify.com/intro/about/
 ---

 Application-layer (Layer 7) routing is the application routing and load balancing (ingress routing) system included with Docker Enterprise for Swarm orchestration. Interlock architecture takes advantage of the underlying Swarm components to provide scalable Layer 7 routing and Layer 4 VIP mode functionality.

View File

@@ -2,8 +2,6 @@
 title: Publish Canary application instances
 description: Learn how to do canary deployments for your Docker swarm services
 keywords: routing, proxy
-redirect_from:
-  - https://interlock-dev-docs.netlify.com/usage/canary/
 ---

 The following example publishes a service as a canary instance.

View File

@@ -3,8 +3,6 @@ title: Use context and path-based routing
 description: Learn how to route traffic to your Docker swarm services based
 on a url path.
 keywords: routing, proxy
-redirect_from:
-  - https://interlock-dev-docs.netlify.com/usage/context_root/
 ---

 The following example publishes a service using context or path based routing.

View File

@@ -5,7 +5,6 @@ keywords: routing, proxy
 redirect_from:
   - /ee/ucp/interlock/deploy/configuration-reference/
   - /ee/ucp/interlock/deploy/configure/
-  - https://interlock-dev-docs.netlify.com/usage/hello/
 ---

 After Interlock is deployed, you can launch and publish services and applications.

View File

@@ -3,7 +3,7 @@ title: Specify a routing mode
 description: Learn about task and VIP backend routing modes for Layer 7 routing
 keywords: routing, proxy, interlock
 redirect_from:
-  - https://interlock-dev-docs.netlify.com/usage/default_backend/
+  - /ee/ucp/interlock/usage/default-backend/
 ---

 You can publish services using "vip" and "task" backend routing modes.

View File

@@ -3,8 +3,6 @@ title: Implement application redirects
 description: Learn how to implement redirects using swarm services and the
 layer 7 routing solution for UCP.
 keywords: routing, proxy, redirects, interlock
-redirect_from:
-  - https://interlock-dev-docs.netlify.com/usage/redirects/
 ---

 The following example publishes a service and configures a redirect from `old.local` to `new.local`.

View File

@@ -2,8 +2,6 @@
 title: Implement service clusters
 description: Learn how to route traffic to different proxies using a service cluster.
 keywords: ucp, interlock, load balancing, routing
-redirect_from:
-  - https://interlock-dev-docs.netlify.com/usage/service_clusters/
 ---

 ## Configure Proxy Services

@@ -163,7 +161,7 @@ PollInterval = "3s"
     Image = "{{ page.ucp_org }}/ucp-interlock-extension:{{ page.ucp_version }}"
     Args = []
     ServiceName = "ucp-interlock-extension-us-west"
-    ProxyImage = "docker/ucp-interlock-proxy:3.1.2"
+    ProxyImage = "{{ page.ucp_org }}/ucp-interlock-proxy:{{ page.ucp_version }}"
     ProxyArgs = []
     ProxyServiceName = "ucp-interlock-proxy-us-west"
     ProxyConfigPath = "/etc/nginx/nginx.conf"

View File

@@ -3,8 +3,6 @@ title: Implement persistent (sticky) sessions
 description: Learn how to configure your swarm services with persistent sessions
 using UCP.
 keywords: routing, proxy, cookies, IP hash
-redirect_from:
-  - https://interlock-dev-docs.netlify.com/usage/sessions/
 ---

 You can publish a service and configure the proxy for persistent (sticky) sessions using:

View File

@@ -4,7 +4,6 @@ description: Learn how to configure your swarm services with SSL.
 keywords: routing, proxy, tls, ssl
 redirect_from:
   - /ee/ucp/interlock/usage/ssl/
-  - https://interlock-dev-docs.netlify.com/usage/ssl/
 ---

 This topic covers Swarm services implementation with:

View File

@@ -2,8 +2,6 @@
 title: Use websockets
 description: Learn how to use websockets in your swarm services.
 keywords: routing, proxy, websockets
-redirect_from:
-  - https://interlock-dev-docs.netlify.com/usage/websockets/
 ---

 First, create an overlay network to isolate and secure service traffic:

View File

@@ -15,12 +15,41 @@ docker run -i --rm docker/dtr \
     backup [command options] > backup.tar
 ```

-### Example Usage
+### Example Commands
+
+#### Basic

 ```bash
-docker run -i --rm docker/dtr \
+docker run -i --rm --log-driver none docker/dtr:{{ page.dtr_version }} \
     backup --ucp-ca "$(cat ca.pem)" --existing-replica-id 5eb9459a7832 > backup.tar
 ```

+#### Advanced (with chained commands)
+
+{% raw %}
+```none
+DTR_VERSION=$(docker container inspect $(docker container ps -f \
+  name=dtr-registry -q) | grep -m1 -Po '(?<=DTR_VERSION=)\d.\d.\d'); \
+REPLICA_ID=$(docker ps --filter name=dtr-rethinkdb \
+  --format "{{ .Names }}" | head -1 | sed 's|.*/||' | sed 's/dtr-rethinkdb-//'); \
+read -p 'ucp-url (The UCP URL including domain and port): ' UCP_URL; \
+read -p 'ucp-username (The UCP administrator username): ' UCP_ADMIN; \
+read -sp 'ucp password: ' UCP_PASSWORD; \
+docker run --log-driver none -i --rm \
+  --env UCP_PASSWORD=$UCP_PASSWORD \
+  docker/dtr:$DTR_VERSION backup \
+  --ucp-username $UCP_ADMIN \
+  --ucp-url $UCP_URL \
+  --ucp-ca "$(curl https://${UCP_URL}/ca)" \
+  --existing-replica-id $REPLICA_ID > \
+  dtr-metadata-${DTR_VERSION}-backup-$(date +%Y%m%d-%H_%M_%S).tar
+```
+{% endraw %}
+
+For a detailed explanation of the advanced example, see
+[Back up your DTR metadata](/ee/dtr/admin/disaster-recovery/create-a-backup/#back-up-dtr-metadata).
+To learn more about the `--log-driver` option for `docker run`, see the [docker run reference](/engine/reference/run/#logging-drivers---log-driver).

 ## Description

 This command creates a `tar` file with the contents of the volumes used by
View File

@@ -168,7 +168,7 @@ Consider some scenarios where files in a container are modified.
 However, AUFS works at the file level rather than the block level. This
 means that all copy_up operations copy the entire file, even if the file is
 very large and only a small part of it is being modified. This can have a
-noticeable impact on container write performance. AUFS, which can suffer
+noticeable impact on container write performance. AUFS can suffer
 noticeable latencies when searching for files in images with many layers.
 However, it is worth noting that the copy_up operation only occurs the first
 time a given file is written to. Subsequent writes to the same file operate