Introduce UCP 3.0

This commit is contained in:
Joao Fernandes 2017-11-30 13:43:29 -08:00 committed by Jim Galasyn
parent e707cf9fd4
commit 373cc84e01
268 changed files with 78995 additions and 89 deletions

View File

@@ -141,7 +141,7 @@ defaults:
- scope:
path: "datacenter"
values:
ucp_latest_image: "docker/ucp:2.2.7"
ucp_latest_image: "docker/ucp:3.0.0"
dtr_latest_image: "docker/dtr:2.4.3"
enterprise: true
- scope:
@@ -173,9 +173,16 @@ defaults:
values:
ucp_version: "1.1"
dtr_version: "2.0"
- scope:
path: "datacenter/ucp/3.0"
values:
ucp_org: "docker"
ucp_repo: "ucp"
ucp_version: "3.0.0"
- scope:
path: "datacenter/ucp/2.2"
values:
hide_from_sitemap: true
ucp_org: "docker"
ucp_repo: "ucp"
ucp_version: "2.2.7"

View File

@@ -1392,7 +1392,7 @@ reference:
path: /datacenter/dtr/2.4/reference/cli/
nosync: true
- title: Universal Control Plane CLI
path: /datacenter/ucp/2.2/reference/cli/
path: /datacenter/ucp/3.0/reference/cli/
nosync: true
- sectiontitle: Application Programming Interfaces (APIs)
@@ -1572,186 +1572,334 @@ manuals:
title: Upgrade Docker EE
- sectiontitle: Universal Control Plane
section:
- path: /datacenter/ucp/2.2/guides/
- path: /datacenter/ucp/3.0/guides/
title: Universal Control Plane overview
- path: /datacenter/ucp/2.2/guides/architecture/
- path: /datacenter/ucp/3.0/guides/architecture/
title: Architecture
- sectiontitle: Administration
section:
- sectiontitle: Install
section:
- path: /datacenter/ucp/2.2/guides/admin/install/system-requirements/
- path: /datacenter/ucp/3.0/guides/admin/install/system-requirements/
title: System requirements
- path: /datacenter/ucp/2.2/guides/admin/install/plan-installation/
- path: /datacenter/ucp/3.0/guides/admin/install/plan-installation/
title: Plan your installation
- path: /datacenter/ucp/2.2/guides/admin/install/
- path: /datacenter/ucp/3.0/guides/admin/install/
title: Install
- path: /datacenter/ucp/2.2/guides/admin/install/install-offline/
- path: /datacenter/ucp/3.0/guides/admin/install/install-offline/
title: Install offline
- path: /datacenter/ucp/2.2/guides/admin/install/upgrade/
- path: /datacenter/ucp/3.0/guides/admin/install/upgrade/
title: Upgrade
- path: /datacenter/ucp/2.2/guides/admin/install/upgrade-offline/
- path: /datacenter/ucp/3.0/guides/admin/install/upgrade-offline/
title: Upgrade offline
- path: /datacenter/ucp/2.2/guides/admin/install/uninstall/
- path: /datacenter/ucp/3.0/guides/admin/install/uninstall/
title: Uninstall
- path: /datacenter/ucp/2.2/guides/admin/install/architecture-specific-images/
- path: /datacenter/ucp/3.0/guides/admin/install/architecture-specific-images/
title: Architecture-specific images
- sectiontitle: Configure
section:
- path: /datacenter/ucp/2.2/guides/admin/configure/add-labels-to-cluster-nodes/
- path: /datacenter/ucp/3.0/guides/admin/configure/add-labels-to-cluster-nodes/
title: Add labels to cluster nodes
- path: /datacenter/ucp/2.2/guides/admin/configure/add-sans-to-cluster/
- path: /datacenter/ucp/3.0/guides/admin/configure/add-sans-to-cluster/
title: Add SANs to cluster certificates
- path: /datacenter/ucp/2.2/guides/admin/configure/join-windows-worker-nodes/
- path: /datacenter/ucp/3.0/guides/admin/configure/join-windows-worker-nodes/
title: Join Windows worker nodes to a swarm
- path: /datacenter/ucp/2.2/guides/admin/configure/integrate-with-dtr/
- path: /datacenter/ucp/3.0/guides/admin/configure/integrate-with-dtr/
title: Integrate with Docker Trusted Registry
- path: /datacenter/ucp/2.2/guides/admin/configure/external-auth/
- path: /datacenter/ucp/3.0/guides/admin/configure/external-auth/
title: Integrate with LDAP
- path: /datacenter/ucp/2.2/guides/admin/configure/external-auth/enable-ldap-config-file/
- path: /datacenter/ucp/3.0/guides/admin/configure/external-auth/enable-ldap-config-file/
title: Integrate with LDAP by using a configuration file
- path: /datacenter/ucp/2.2/guides/admin/configure/license-your-installation/
- path: /datacenter/ucp/3.0/guides/admin/configure/license-your-installation/
title: License your installation
- path: /datacenter/ucp/2.2/guides/admin/configure/restrict-services-to-worker-nodes/
- path: /datacenter/ucp/3.0/guides/admin/configure/restrict-services-to-worker-nodes/
title: Restrict services to worker nodes
- path: /datacenter/ucp/2.2/guides/admin/configure/run-only-the-images-you-trust/
- path: /datacenter/ucp/3.0/guides/admin/configure/run-only-the-images-you-trust/
title: Run only the images you trust
- path: /datacenter/ucp/2.2/guides/admin/configure/scale-your-cluster/
- path: /datacenter/ucp/3.0/guides/admin/configure/scale-your-cluster/
title: Scale your cluster
- path: /datacenter/ucp/2.2/guides/admin/configure/set-session-timeout/
- path: /datacenter/ucp/3.0/guides/admin/configure/set-session-timeout/
title: Set the user's session timeout
- path: /datacenter/ucp/2.2/guides/admin/configure/set-up-high-availability/
- path: /datacenter/ucp/3.0/guides/admin/configure/set-up-high-availability/
title: Set up high availability
- path: /datacenter/ucp/2.2/guides/admin/configure/store-logs-in-an-external-system/
- path: /datacenter/ucp/3.0/guides/admin/configure/store-logs-in-an-external-system/
title: Store logs in an external system
- path: /datacenter/ucp/2.2/guides/admin/configure/ucp-configuration-file/
- path: /datacenter/ucp/3.0/guides/admin/configure/ucp-configuration-file/
title: UCP configuration file
- path: /datacenter/ucp/2.2/guides/admin/configure/use-a-load-balancer/
- path: /datacenter/ucp/3.0/guides/admin/configure/use-a-load-balancer/
title: Use a load balancer
- path: /datacenter/ucp/2.2/guides/admin/configure/use-node-local-network-in-swarm/
- path: /datacenter/ucp/3.0/guides/admin/configure/use-node-local-network-in-swarm/
title: Use a local node network in a swarm
- path: /datacenter/ucp/2.2/guides/admin/configure/use-domain-names-to-access-services/
- path: /datacenter/ucp/3.0/guides/admin/configure/use-domain-names-to-access-services/
title: Use domain names to access services
- path: /datacenter/ucp/2.2/guides/admin/configure/use-your-own-tls-certificates/
- path: /datacenter/ucp/3.0/guides/admin/configure/use-your-own-tls-certificates/
title: Use your own TLS certificates
- sectiontitle: Monitor and troubleshoot
section:
- path: /datacenter/ucp/2.2/guides/admin/monitor-and-troubleshoot/
- path: /datacenter/ucp/3.0/guides/admin/monitor-and-troubleshoot/
title: Monitor the cluster status
- path: /datacenter/ucp/2.2/guides/admin/monitor-and-troubleshoot/troubleshoot-node-messages/
- path: /datacenter/ucp/3.0/guides/admin/monitor-and-troubleshoot/troubleshoot-node-messages/
title: Troubleshoot node messages
- path: /datacenter/ucp/2.2/guides/admin/monitor-and-troubleshoot/troubleshoot-with-logs/
- path: /datacenter/ucp/3.0/guides/admin/monitor-and-troubleshoot/troubleshoot-with-logs/
title: Troubleshoot with logs
- path: /datacenter/ucp/2.2/guides/admin/monitor-and-troubleshoot/troubleshoot-configurations/
- path: /datacenter/ucp/3.0/guides/admin/monitor-and-troubleshoot/troubleshoot-configurations/
title: Troubleshoot configurations
- path: /datacenter/ucp/2.2/guides/admin/backups-and-disaster-recovery/
- path: /datacenter/ucp/3.0/guides/admin/backups-and-disaster-recovery/
title: Backups and disaster recovery
- sectiontitle: CLI reference
section:
- path: /datacenter/ucp/2.2/reference/cli/
- path: /datacenter/ucp/3.0/reference/cli/
title: docker/ucp overview
- path: /datacenter/ucp/2.2/reference/cli/backup/
- path: /datacenter/ucp/3.0/reference/cli/backup/
title: backup
- path: /datacenter/ucp/2.2/reference/cli/dump-certs/
- path: /datacenter/ucp/3.0/reference/cli/dump-certs/
title: dump-certs
- path: /datacenter/ucp/2.2/reference/cli/example-config/
- path: /datacenter/ucp/3.0/reference/cli/example-config/
title: example-config
- path: /datacenter/ucp/2.2/reference/cli/id/
- path: /datacenter/ucp/3.0/reference/cli/id/
title: id
- path: /datacenter/ucp/2.2/reference/cli/images/
- path: /datacenter/ucp/3.0/reference/cli/images/
title: images
- path: /datacenter/ucp/2.2/reference/cli/install/
- path: /datacenter/ucp/3.0/reference/cli/install/
title: install
- path: /datacenter/ucp/2.2/reference/cli/restart/
- path: /datacenter/ucp/3.0/reference/cli/restart/
title: restart
- path: /datacenter/ucp/2.2/reference/cli/restore/
- path: /datacenter/ucp/3.0/reference/cli/restore/
title: restore
- path: /datacenter/ucp/2.2/reference/cli/stop/
- path: /datacenter/ucp/3.0/reference/cli/stop/
title: stop
- path: /datacenter/ucp/2.2/reference/cli/support/
- path: /datacenter/ucp/3.0/reference/cli/support/
title: support
- path: /datacenter/ucp/2.2/reference/cli/uninstall-ucp/
- path: /datacenter/ucp/3.0/reference/cli/uninstall-ucp/
title: uninstall-ucp
- path: /datacenter/ucp/2.2/reference/cli/upgrade/
- path: /datacenter/ucp/3.0/reference/cli/upgrade/
title: upgrade
- sectiontitle: Access control
section:
- path: /datacenter/ucp/2.2/guides/access-control/
- path: /datacenter/ucp/3.0/guides/access-control/
title: Access control model
- path: /datacenter/ucp/2.2/guides/access-control/create-and-manage-users/
- path: /datacenter/ucp/3.0/guides/access-control/create-and-manage-users/
title: Create and manage users
- path: /datacenter/ucp/2.2/guides/access-control/create-and-manage-teams/
- path: /datacenter/ucp/3.0/guides/access-control/create-and-manage-teams/
title: Create and manage teams
- path: /datacenter/ucp/2.2/guides/access-control/deploy-view-only-service/
- path: /datacenter/ucp/3.0/guides/access-control/deploy-view-only-service/
title: Deploy a service with view-only access across an organization
- path: /datacenter/ucp/2.2/guides/access-control/grant-permissions/
- path: /datacenter/ucp/3.0/guides/access-control/grant-permissions/
title: Grant permissions to users based on roles
- path: /datacenter/ucp/2.2/guides/access-control/isolate-nodes-between-teams/
- path: /datacenter/ucp/3.0/guides/access-control/isolate-nodes-between-teams/
title: Isolate swarm nodes to a specific team
- path: /datacenter/ucp/2.2/guides/access-control/isolate-volumes-between-teams/
- path: /datacenter/ucp/3.0/guides/access-control/isolate-volumes-between-teams/
title: Isolate volumes between two different teams
- path: /datacenter/ucp/2.2/guides/access-control/manage-access-with-collections/
- path: /datacenter/ucp/3.0/guides/access-control/manage-access-with-collections/
title: Manage access to resources by using collections
- path: /datacenter/ucp/2.2/guides/access-control/access-control-node/
- path: /datacenter/ucp/3.0/guides/access-control/access-control-node/
title: Node access control
- path: /datacenter/ucp/2.2/guides/access-control/permission-levels/
- path: /datacenter/ucp/3.0/guides/access-control/permission-levels/
title: Permission levels
- path: /datacenter/ucp/2.2/guides/access-control/access-control-design-ee-standard/
- path: /datacenter/ucp/3.0/guides/access-control/access-control-design-ee-standard/
title: Access control design with Docker EE Standard
- path: /datacenter/ucp/2.2/guides/access-control/access-control-design-ee-advanced/
- path: /datacenter/ucp/3.0/guides/access-control/access-control-design-ee-advanced/
title: Access control design with Docker EE Advanced
- path: /datacenter/ucp/2.2/guides/access-control/recover-a-user-password/
- path: /datacenter/ucp/3.0/guides/access-control/recover-a-user-password/
title: Recover a user password
- sectiontitle: User guides
section:
- sectiontitle: Access UCP
section:
- path: /datacenter/ucp/2.2/guides/user/access-ucp/
- path: /datacenter/ucp/3.0/guides/user/access-ucp/
title: Web-based access
- path: /datacenter/ucp/2.2/guides/user/access-ucp/cli-based-access/
- path: /datacenter/ucp/3.0/guides/user/access-ucp/cli-based-access/
title: CLI-based access
- sectiontitle: Deploy an application
section:
- path: /datacenter/ucp/2.2/guides/user/services/deploy-a-service/
- path: /datacenter/ucp/3.0/guides/user/services/deploy-a-service/
title: Deploy a service
- path: /datacenter/ucp/2.2/guides/user/services/use-domain-names-to-access-services/
- path: /datacenter/ucp/3.0/guides/user/services/use-domain-names-to-access-services/
title: Use domain names to access services
- path: /datacenter/ucp/2.2/guides/user/services/
- path: /datacenter/ucp/3.0/guides/user/services/
title: Deploy an app from the UI
- path: /datacenter/ucp/2.2/guides/user/services/deploy-app-cli/
- path: /datacenter/ucp/3.0/guides/user/services/deploy-app-cli/
title: Deploy an app from the CLI
- path: /datacenter/ucp/2.2/guides/user/services/deploy-stack-to-collection/
- path: /datacenter/ucp/3.0/guides/user/services/deploy-stack-to-collection/
title: Deploy application resources to a collection
- sectiontitle: Secrets
section:
- path: /datacenter/ucp/2.2/guides/user/secrets/
- path: /datacenter/ucp/3.0/guides/user/secrets/
title: Manage secrets
- path: /datacenter/ucp/2.2/guides/user/secrets/grant-revoke-access/
- path: /datacenter/ucp/3.0/guides/user/secrets/grant-revoke-access/
title: Grant access to secrets
- sectiontitle: Interlock
section:
- title: Interlock beta overview
path: /datacenter/ucp/2.2/guides/interlock/
- title: Architecture
path: /datacenter/ucp/2.2/guides/interlock/architecture/
- title: Deploy Interlock
path: /datacenter/ucp/2.2/guides/interlock/install/
- title: Deploy Interlock offline
path: /datacenter/ucp/2.2/guides/interlock/install/offline/
- title: Route traffic to services
path: /datacenter/ucp/2.2/guides/interlock/use-interlock/
- title: Configuration reference
path: /datacenter/ucp/2.2/guides/interlock/configuration-reference/
- title: Extensions
path: /datacenter/ucp/2.2/guides/interlock/extensions/
- path: /datacenter/ucp/2.2/reference/api/
- path: /datacenter/ucp/3.0/reference/api/
title: API reference
- path: /datacenter/ucp/2.2/guides/release-notes/
- path: /datacenter/ucp/3.0/guides/release-notes/
title: Release notes
- path: /datacenter/ucp/2.2/guides/get-support/
- path: /datacenter/ucp/3.0/guides/get-support/
title: Get support
- sectiontitle: Previous versions
section:
- sectiontitle: Universal Control Plane 2.2
section:
- path: /datacenter/ucp/2.2/guides/
title: Universal Control Plane overview
- path: /datacenter/ucp/2.2/guides/architecture/
title: Architecture
- sectiontitle: Administration
section:
- sectiontitle: Install
section:
- path: /datacenter/ucp/2.2/guides/admin/install/system-requirements/
title: System requirements
- path: /datacenter/ucp/2.2/guides/admin/install/plan-installation/
title: Plan your installation
- path: /datacenter/ucp/2.2/guides/admin/install/
title: Install
- path: /datacenter/ucp/2.2/guides/admin/install/install-offline/
title: Install offline
- path: /datacenter/ucp/2.2/guides/admin/install/upgrade/
title: Upgrade
- path: /datacenter/ucp/2.2/guides/admin/install/upgrade-offline/
title: Upgrade offline
- path: /datacenter/ucp/2.2/guides/admin/install/uninstall/
title: Uninstall
- path: /datacenter/ucp/2.2/guides/admin/install/architecture-specific-images/
title: Architecture-specific images
- sectiontitle: Configure
section:
- path: /datacenter/ucp/2.2/guides/admin/configure/add-labels-to-cluster-nodes/
title: Add labels to cluster nodes
- path: /datacenter/ucp/2.2/guides/admin/configure/add-sans-to-cluster/
title: Add SANs to cluster certificates
- path: /datacenter/ucp/2.2/guides/admin/configure/join-windows-worker-nodes/
title: Join Windows worker nodes to a swarm
- path: /datacenter/ucp/2.2/guides/admin/configure/integrate-with-dtr/
title: Integrate with Docker Trusted Registry
- path: /datacenter/ucp/2.2/guides/admin/configure/external-auth/
title: Integrate with LDAP
- path: /datacenter/ucp/2.2/guides/admin/configure/external-auth/enable-ldap-config-file/
title: Integrate with LDAP by using a configuration file
- path: /datacenter/ucp/2.2/guides/admin/configure/license-your-installation/
title: License your installation
- path: /datacenter/ucp/2.2/guides/admin/configure/restrict-services-to-worker-nodes/
title: Restrict services to worker nodes
- path: /datacenter/ucp/2.2/guides/admin/configure/run-only-the-images-you-trust/
title: Run only the images you trust
- path: /datacenter/ucp/2.2/guides/admin/configure/scale-your-cluster/
title: Scale your cluster
- path: /datacenter/ucp/2.2/guides/admin/configure/set-session-timeout/
title: Set the user's session timeout
- path: /datacenter/ucp/2.2/guides/admin/configure/set-up-high-availability/
title: Set up high availability
- path: /datacenter/ucp/2.2/guides/admin/configure/store-logs-in-an-external-system/
title: Store logs in an external system
- path: /datacenter/ucp/2.2/guides/admin/configure/ucp-configuration-file/
title: UCP configuration file
- path: /datacenter/ucp/2.2/guides/admin/configure/use-a-load-balancer/
title: Use a load balancer
- path: /datacenter/ucp/2.2/guides/admin/configure/use-node-local-network-in-swarm/
title: Use a local node network in a swarm
- path: /datacenter/ucp/2.2/guides/admin/configure/use-domain-names-to-access-services/
title: Use domain names to access services
- path: /datacenter/ucp/2.2/guides/admin/configure/use-your-own-tls-certificates/
title: Use your own TLS certificates
- sectiontitle: Monitor and troubleshoot
section:
- path: /datacenter/ucp/2.2/guides/admin/monitor-and-troubleshoot/
title: Monitor the cluster status
- path: /datacenter/ucp/2.2/guides/admin/monitor-and-troubleshoot/troubleshoot-node-messages/
title: Troubleshoot node messages
- path: /datacenter/ucp/2.2/guides/admin/monitor-and-troubleshoot/troubleshoot-with-logs/
title: Troubleshoot with logs
- path: /datacenter/ucp/2.2/guides/admin/monitor-and-troubleshoot/troubleshoot-configurations/
title: Troubleshoot configurations
- path: /datacenter/ucp/2.2/guides/admin/backups-and-disaster-recovery/
title: Backups and disaster recovery
- sectiontitle: CLI reference
section:
- path: /datacenter/ucp/2.2/reference/cli/
title: docker/ucp overview
- path: /datacenter/ucp/2.2/reference/cli/backup/
title: backup
- path: /datacenter/ucp/2.2/reference/cli/dump-certs/
title: dump-certs
- path: /datacenter/ucp/2.2/reference/cli/example-config/
title: example-config
- path: /datacenter/ucp/2.2/reference/cli/id/
title: id
- path: /datacenter/ucp/2.2/reference/cli/images/
title: images
- path: /datacenter/ucp/2.2/reference/cli/install/
title: install
- path: /datacenter/ucp/2.2/reference/cli/restart/
title: restart
- path: /datacenter/ucp/2.2/reference/cli/restore/
title: restore
- path: /datacenter/ucp/2.2/reference/cli/stop/
title: stop
- path: /datacenter/ucp/2.2/reference/cli/support/
title: support
- path: /datacenter/ucp/2.2/reference/cli/uninstall-ucp/
title: uninstall-ucp
- path: /datacenter/ucp/2.2/reference/cli/upgrade/
title: upgrade
- sectiontitle: Access control
section:
- path: /datacenter/ucp/2.2/guides/access-control/
title: Access control model
- path: /datacenter/ucp/2.2/guides/access-control/create-and-manage-users/
title: Create and manage users
- path: /datacenter/ucp/2.2/guides/access-control/create-and-manage-teams/
title: Create and manage teams
- path: /datacenter/ucp/2.2/guides/access-control/deploy-view-only-service/
title: Deploy a service with view-only access across an organization
- path: /datacenter/ucp/2.2/guides/access-control/grant-permissions/
title: Grant permissions to users based on roles
- path: /datacenter/ucp/2.2/guides/access-control/isolate-nodes-between-teams/
title: Isolate swarm nodes to a specific team
- path: /datacenter/ucp/2.2/guides/access-control/isolate-volumes-between-teams/
title: Isolate volumes between two different teams
- path: /datacenter/ucp/2.2/guides/access-control/manage-access-with-collections/
title: Manage access to resources by using collections
- path: /datacenter/ucp/2.2/guides/access-control/access-control-node/
title: Node access control
- path: /datacenter/ucp/2.2/guides/access-control/permission-levels/
title: Permission levels
- path: /datacenter/ucp/2.2/guides/access-control/access-control-design-ee-standard/
title: Access control design with Docker EE Standard
- path: /datacenter/ucp/2.2/guides/access-control/access-control-design-ee-advanced/
title: Access control design with Docker EE Advanced
- path: /datacenter/ucp/2.2/guides/access-control/recover-a-user-password/
title: Recover a user password
- sectiontitle: User guides
section:
- sectiontitle: Access UCP
section:
- path: /datacenter/ucp/2.2/guides/user/access-ucp/
title: Web-based access
- path: /datacenter/ucp/2.2/guides/user/access-ucp/cli-based-access/
title: CLI-based access
- sectiontitle: Deploy an application
section:
- path: /datacenter/ucp/2.2/guides/user/services/deploy-a-service/
title: Deploy a service
- path: /datacenter/ucp/2.2/guides/user/services/use-domain-names-to-access-services/
title: Use domain names to access services
- path: /datacenter/ucp/2.2/guides/user/services/
title: Deploy an app from the UI
- path: /datacenter/ucp/2.2/guides/user/services/deploy-app-cli/
title: Deploy an app from the CLI
- path: /datacenter/ucp/2.2/guides/user/services/deploy-stack-to-collection/
title: Deploy application resources to a collection
- sectiontitle: Secrets
section:
- path: /datacenter/ucp/2.2/guides/user/secrets/
title: Manage secrets
- path: /datacenter/ucp/2.2/guides/user/secrets/grant-revoke-access/
title: Grant access to secrets
- path: /datacenter/ucp/2.2/reference/api/
title: API reference
- path: /datacenter/ucp/2.2/guides/release-notes/
title: Release notes
- path: /datacenter/ucp/2.2/guides/get-support/
title: Get support
- sectiontitle: Universal Control Plane 2.1
section:
- path: /datacenter/ucp/2.1/guides/

View File

@@ -0,0 +1,127 @@
---
title: Access control design with Docker EE Advanced
description: Learn how to architect multitenancy by using Docker Enterprise Edition Advanced.
keywords: authorize, authentication, users, teams, groups, sync, UCP, role, access control
---
[Collections and grants](index.md) are strong tools that can be used to control
access and visibility to resources in UCP. The previous tutorial,
[Access Control Design with Docker EE Standard](access-control-design-ee-standard.md),
describes a fictional company called OrcaBank that has designed a resource
access architecture that fits the specific security needs of their organization.
Be sure to go through that tutorial before continuing, if you have not already done so.
In this tutorial OrcaBank's deployment model is becoming more advanced.
Instead of moving developed applications directly into production,
OrcaBank will now deploy apps from their dev cluster to a staging zone of
their production cluster. After applications have passed staging, they will
be moved to production. OrcaBank has very stringent security requirements for
production applications. Its security team recently read a blog post about
DevSecOps and is excited to implement some changes. Production applications
aren't permitted to share any physical infrastructure with non-production
workloads.
In this tutorial OrcaBank will use Docker EE Advanced features to segment the
scheduling and access control of applications across disparate physical
infrastructure. [Node Access Control](access-control-node.md) with EE Advanced
licensing allows nodes to be placed in different collections so that resources
can be scheduled and isolated on disparate physical or virtual hardware
resources.
## Team access requirements
As in the [Introductory Multitenancy Tutorial](access-control-design-ee-standard.md)
OrcaBank still has three application teams, `payments`, `mobile`, and `db` that
need to have varying levels of segmentation between them. Their upcoming Access
Control redesign will organize their UCP cluster into two top-level collections,
Staging and Production, which will be completely separate security zones on
separate physical infrastructure.
- `security` should have visibility-only access across all
applications that are in Production. The security team is not
concerned with Staging applications and thus will not have
access to Staging.
- `db` should have the full set of operations against all database
applications that are in Production. `db` does not manage the
databases that are in Staging, which are managed directly by the
application teams.
- `payments` should have the full set of operations to deploy Payments
apps in both Production and Staging and also access some of the shared
services provided by the `db` team.
- `mobile` has the same rights as the `payments` team, with respect to the
Mobile applications.
## Role composition
OrcaBank will use the same roles as in the Introductory Tutorial. An `ops` role
will provide them with the ability to deploy, destroy, and view any kind of
resource. `View Only` will be used by the security team to only view resources
with no edit rights. `View & Use Networks + Secrets` will be used to access
shared resources across collection boundaries, such as the `db` services that
are offered by the `db` collection to the other app teams.
![image](../images/design-access-control-adv-0.png){: .with-border}
## Collection architecture
The previous tutorial had separate collections for each application team.
In this Access Control redesign there will be collections for each zone,
Staging and Production, and also collections within each zone for the
individual applications. Another major change is that Docker nodes will be
segmented themselves so that nodes in Staging are separate from Production
nodes. Within the Production zone, every application will also have its own
dedicated nodes.
The resulting collection architecture takes the following tree representation:
```
/
├── System
├── Shared
├── prod
│   ├── db
│   ├── mobile
│   └── payments
└── staging
    ├── mobile
    └── payments
```
## Grant composition
OrcaBank will now be granting teams diverse roles to different collections.
Multiple grants per team are required to grant this kind of access. Each of
the Payments and Mobile applications will have three grants: one to deploy in
their production zone, one to deploy in their staging zone, and one to share
some resources with the `db` collection.
![image](../images/design-access-control-adv-grant-composition.png){: .with-border}
## OrcaBank access architecture
The resulting access architecture provides the appropriate physical segmentation
between Production and Staging. Applications will be scheduled only on the UCP
Worker nodes in the collection where the application is placed. The production
Mobile and Payments applications use shared resources across collection
boundaries to access the databases in the `/prod/db` collection.
![image](../images/design-access-control-adv-architecture.png){: .with-border}
### DB team
The OrcaBank `db` team is responsible for deploying and managing the full
lifecycle of the databases that are in Production. They have the full set of
operations against all database resources.
![image](../images/design-access-control-adv-db.png){: .with-border}
### Mobile team
The `mobile` team is responsible for deploying their full application stack in
staging. In production they deploy their own applications but utilize the
databases that are provided by the `db` team.
![image](../images/design-access-control-adv-mobile.png){: .with-border}

View File

@@ -0,0 +1,121 @@
---
title: Access control design with Docker EE Standard
description: Learn how to architect multitenancy by using Docker Enterprise Edition Standard.
keywords: authorize, authentication, users, teams, groups, sync, UCP, role, access control
---
[Collections and grants](index.md) are strong tools that can be used to control
access and visibility to resources in UCP. This tutorial describes a fictitious
company named OrcaBank that is designing the access architecture for its two
application teams, Payments and Mobile.
This tutorial introduces many concepts, including collections, grants, centralized
LDAP/AD, and also the ability for resources to be shared between different teams
and across collections.
## Team access requirements
OrcaBank has organized their application teams to specialize more and provide
shared services to other applications. A `db` team was created just to manage
the databases that other applications will utilize. Additionally, OrcaBank
recently read a book about DevOps. They have decided that developers should be
able to deploy and manage the lifecycle of their own applications.
- `security` should have visibility-only access across all applications in the
swarm.
- `db` should have the full set of capabilities against all database
applications and their respective resources.
- `payments` should have the full set of capabilities to deploy Payments apps
and also access some of the shared services provided by the `db` team.
- `mobile` has the same rights as the `payments` team, with respect to the
Mobile applications.
## Role composition
OrcaBank will use a combination of default roles and custom roles that they
have created specifically for their use case. They are using the default
`View Only` role to provide security access to only see but not edit resources.
There is an `ops` role that they created which can do almost all operations
against all types of resources. They also created the
`View & Use Networks + Secrets` role. This type of role will enable application
DevOps teams to use shared resources provided by other teams. It will enable
applications to connect to networks and use secrets that will also be used by
`db` containers, but not to see or change the `db` applications themselves.
![image](../images/design-access-control-adv-0.png){: .with-border}
## Collection architecture
OrcaBank will also create some collections that fit the organizational structure
of the company. Since all applications will share the same physical resources,
all nodes and applications are built into collections underneath the `/Shared`
built-in collection.
- `/Shared/payments` hosts all applications and resources for the Payments
applications.
- `/Shared/mobile` hosts all applications and resources for the Mobile
applications.
Some other collections will be created to enable the shared `db` applications.
- `/Shared/db` will be a top-level collection for all `db` resources.
- `/Shared/db/payments` will be specifically for `db` resources providing
service to the Payments applications.
- `/Shared/db/mobile` will do the same for the Mobile applications.
The following grant composition will show that this collection architecture
allows an app team to access shared `db` resources without providing access
to _all_ `db` resources. At the same time _all_ `db` resources will be managed
by a single `db` team.
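As a concrete sketch, the command below deploys a hypothetical Payments service from a UCP client bundle, attaching it to a network and a secret that the `db` team has already created under `/Shared/db/payments`. The image and resource names are illustrative, and using the `com.docker.ucp.access.label` label to place a service in a collection is an assumption rather than a documented requirement:
```
# Hypothetical example, run from a Payments team member's client bundle.
# Assumes the db team already created the overlay network "payments-db-net"
# and the secret "payments-db-password" under /Shared/db/payments, and that
# the Payments team holds a "View & Use Networks + Secrets" grant on them.
docker service create \
  --name payments-app \
  --label com.docker.ucp.access.label="/Shared/payments" \
  --network payments-db-net \
  --secret payments-db-password \
  orcabank/payments-app:latest
```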
## LDAP/AD integration
OrcaBank has standardized on LDAP for centralized authentication to help their
identity team scale across all the platforms they manage. As a result, LDAP
groups will be mapped directly to UCP teams using UCP's native LDAP/AD
integration. Users can be added to or removed from UCP teams via LDAP, which
OrcaBank's identity team manages centrally. The following grant composition
shows how LDAP groups are mapped to UCP teams.
## Grant composition
Two grants are applied for each application team, allowing each team to fully
manage their own apps in their collection, but also have limited access against
networks and secrets within the `db` collection. This kind of grant composition
provides flexibility to have different roles against different groups of
resources.
![image](../images/design-access-control-adv-1.png){: .with-border}
## OrcaBank access architecture
The resulting access architecture shows applications connecting across
collection boundaries. Multiple grants per team allow Mobile applications and
Databases to connect to the same networks and use the same secrets so they can
securely connect with each other but through a secure and controlled interface.
Note that these resources are still deployed across the same group of UCP
worker nodes. Node segmentation is discussed in the [next tutorial](access-control-design-ee-advanced.md).
![image](../images/design-access-control-adv-2.png){: .with-border}
### DB team
The `db` team is responsible for deploying and managing the full lifecycle
of the databases used by the application teams. They have the full set of
operations against all database resources.
![image](../images/design-access-control-adv-3.png){: .with-border}
### Mobile team
The `mobile` team is responsible for deploying their own application stack,
minus the database tier which is managed by the `db` team.
![image](../images/design-access-control-adv-4.png){: .with-border}
## Where to go next
- [Access control design with Docker EE Advanced](access-control-design-ee-advanced.md)

View File

@@ -0,0 +1,49 @@
---
title: Node access control in Docker EE Advanced
description: Learn how to architect node access by using Docker Enterprise Edition Advanced.
keywords: authorize, authentication, node, UCP, role, access control
---
The ability to segment scheduling and visibility by node is called
*node access control* and is a feature of Docker EE Advanced. By default,
all nodes that aren't infrastructure nodes (UCP & DTR nodes) belong to a
built-in collection called `/Shared`, and all application workloads in the
cluster get scheduled on nodes in the `/Shared` collection. This
includes users that are deploying in their private collections
(`/Shared/Private/`) and in any other collections under `/Shared`. This is
enabled by a built-in grant that grants every UCP user the `scheduler`
capability against the `/Shared` collection.
Node Access Control works by placing nodes into custom collections outside of
`/Shared`. If the `scheduler` capability is granted via a role to a user or
group of users against a collection, then they will be able to schedule
containers and services on these nodes. In the following example, users with
`scheduler` capability against `/collection1` will be able to schedule
applications on those nodes.
Note that in the hierarchy these collections lie outside of the `/Shared`
collection, so users will not have access to them unless they are explicitly
granted access. These users will only be able to deploy
applications on the built-in `/Shared` collection nodes.
![image](../images/design-access-control-adv-custom-grant.png){: .with-border}
The tree representation of this collection structure looks like this:
```
/
├── Shared
├── System
├── collection1
└── collection2
    ├── sub-collection1
    └── sub-collection2
```
With the use of default collections, users, teams, and organizations can be
constrained to what nodes and physical infrastructure they are capable of
deploying on.
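As an illustration, the following sketch moves a worker node into one of those custom collections from the command line. It assumes you are connected through a UCP admin client bundle and that a node's collection can be changed by updating its `com.docker.ucp.access.label` node label; the node and collection names are hypothetical:
```
# Hypothetical example: move the worker node "worker-3" out of /Shared
# and into /collection1 by updating its collection label.
docker node update \
  --label-add com.docker.ucp.access.label="/collection1" \
  worker-3
```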
## Where to go next
- [Isolate swarm nodes to a specific team](isolate-nodes-between-teams.md)

View File

@@ -0,0 +1,118 @@
---
title: Create and manage teams
description: Learn how to create and manage user permissions, using teams in
your Docker Universal Control Plane cluster.
keywords: authorize, authentication, users, teams, groups, sync, UCP, Docker
---
You can extend the user's default permissions by granting them fine-grained
permissions over resources. You do this by adding the user to a team.
To create a new team, go to the UCP web UI, and navigate to the
**Organizations** page.
![](../images/create-and-manage-teams-1.png){: .with-border}
If you want to put the team in a new organization, click
**Create Organization** and give the new organization a name, like
"engineering". Click **Create** to create it.
In the list, click the organization where you want to create the new team.
Name the team, give it an optional description, and click **Create** to
create a new team.
![](../images/create-and-manage-teams-2.png){: .with-border}
## Add users to a team
You can now add and remove users from the team. In the current organization's
teams list, click the new team, and in the details pane, click **Add Users**.
Choose the users that you want to add to the team, and when you're done, click
**Add Users**.
![](../images/create-and-manage-teams-3.png){: .with-border}
## Enable Sync Team Members
If UCP is configured to sync users with your organization's LDAP directory
server, you have the option to enable syncing the new team's members when
creating a new team or when modifying the settings of an existing team. To
sync the team with your organization's LDAP directory, click **Yes**.
[Learn how to configure integration with an LDAP directory](../admin/configure/external-auth/index.md).
Enabling this option expands the form with additional fields for configuring
the sync of team members.
![](../images/create-and-manage-teams-5.png){: .with-border}
There are two methods for matching group members from an LDAP directory:
**Match Group Members**
This option specifies that team members should be synced directly with members
of a group in your organization's LDAP directory. The team's membership will be
synced to match the membership of the group.
| Field | Description |
|:-----------------------|:------------------------------------------------------------------------------------------------------|
| Group DN | This specifies the distinguished name of the group from which to select users. |
| Group Member Attribute | The value of this group attribute corresponds to the distinguished names of the members of the group. |
**Match Search Results**
This option specifies that team members should be synced using a search query
against your organization's LDAP directory. The team's membership will be
synced to match the users in the search results.
| Field | Description |
| :--------------------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------- |
| Search Base DN | The distinguished name of the node in the directory tree where the search should start looking for users. |
| Search Filter | The LDAP search filter used to find users. If you leave this field empty, all existing users in the search scope will be added as members of the team. |
| Search subtree instead of just one level | Whether to perform the LDAP search on a single level of the LDAP tree, or search through the full LDAP tree starting at the Base DN. |
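Before saving the team settings, you may want to validate a Base DN and search filter directly against your directory with `ldapsearch`. The host, bind DN, and group DN below are hypothetical, and the `memberOf` attribute is an assumption about your directory schema:
```
# Hypothetical example: list the users that the team sync would match.
# -x uses simple authentication, -b sets the Search Base DN, and the last
# argument is the Search Filter.
ldapsearch -x -H ldap://ldap.example.com \
  -D "cn=readonly,dc=example,dc=com" -W \
  -b "ou=people,dc=example,dc=com" \
  "(memberOf=cn=ops,ou=groups,dc=example,dc=com)"
```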
**Immediately Sync Team Members**
Select this option to run an LDAP sync operation immediately after saving the
configuration for the team. It may take a moment before the members of the team
are fully synced.
## Manage team permissions
Create a grant to manage the team's permissions.
[Learn how to grant permissions to users based on roles](grant-permissions.md).
In this example, you'll create a collection for the "Data Center" team,
where they can deploy services and resources, and you'll create a grant that
gives the team permission to access the collection.
![](../images/team-grant-diagram.svg){: .with-border}
1. Navigate to the **Organizations & Teams** page.
2. Select **docker-datacenter**, and click **Create Team**. Name the team
"Data Center", and click **Create**.
3. Navigate to the **Collections** page.
4. Click **Create Collection**, name the collection "Data Center Resources",
and click **Create**.
5. Navigate to the **Grants** page, and click **Create Grant**.
6. Find **Swarm** in the collections list, and click **View Children**.
7. Find **Data Center Resources**, and click **Select Collection**.
8. In the left pane, click **Roles** and in the **Role** dropdown, select
**Restricted Control**.
9. In the left pane, click **Subjects** and select the **Organizations**
subject type.
10. In the **Organization** dropdown, select **docker-datacenter**, and in the
**Teams** dropdown, select **Data Center**.
11. Click **Create** to create the grant.
![](../images/create-and-manage-teams-4.png){: .with-border}
In this example, you gave members of the `Data Center` team
`Restricted Control` permissions to create and edit resources in
the `Data Center Resources` collection.
## Where to go next
- [UCP permission levels](permission-levels.md)
- [Isolate volumes between two different teams](isolate-volumes-between-teams.md)
- [Isolate swarm nodes between two different teams](isolate-nodes-between-teams.md)

View File

@@ -0,0 +1,37 @@
---
title: Create and manage users
description: Learn how to create and manage users in your Docker Universal Control
Plane cluster.
keywords: authorize, authentication, users, teams, UCP, Docker
---
Docker Universal Control Plane provides built-in authentication and also
integrates with LDAP directory services. If you want to manage
users and groups from your organization's directory, choose LDAP.
[Learn to integrate with an LDAP directory](../configure/external-auth/index.md).
When using the UCP built-in authentication, you need to create users and
optionally grant them UCP administrator permissions.
Each new user gets a default permission level so that they can access the
swarm.
To create a new user, go to the UCP web UI, and navigate to the
**Users** page.
![](../images/create-users-1.png){: .with-border}
Click the **Create user** button, and fill in the user information.
![](../images/create-users-2.png){: .with-border}
Check the `Is a UCP admin?` option if you want to grant permissions for the
user to change the swarm configuration and manage grants, roles, and
collections.
Finally, click the **Create** button to create the user.
## Where to go next
* [Create and manage teams](create-and-manage-teams.md)
* [UCP permission levels](permission-levels.md)

View File

@ -0,0 +1,100 @@
---
title: Deploy a service with view-only access across an organization
description: Create a grant to control access to a service.
keywords: ucp, grant, role, permission, authentication
---
In this example, your organization is granted access to a new resource
collection that contains one service.
1. Create an organization and a team.
2. Create a collection for the view-only service.
3. Create a grant to manage user access to the collection.
![](../images/view-only-access-diagram.svg)
## Create an organization
In this example, you create an organization and a team, and you add one user
who isn't an administrator to the team.
[Learn how to create and manage teams](create-and-manage-teams.md).
1. Log in to UCP as an administrator.
2. Navigate to the **Organizations & Teams** page and click
**Create Organization**. Name the new organization "engineering" and
click **Create**.
3. Click **Create Team**, name the new team "Dev", and click **Create**.
4. Add a non-admin user to the Dev team.
## Create a collection for the service
1. Navigate to the **Collections** page to view all of the resource
collections in the swarm.
2. Find the **Shared** collection and click **View children**.
3. Click **Create collection** and name the collection "View-only services".
4. Click **Create** to create the collection.
![](../images/deploy-view-only-service-1.png)
The `/Shared/View-only services` collection is ready to use for access
control.
## Deploy a service
Currently, the new collection has no resources assigned to it. To access
resources through this collection, deploy a new service and add it to the
collection.
1. Navigate to the **Services** page and create a new service, named
"WordPress".
2. In the **Image** textbox, enter "wordpress:latest". This identifies the
most recent WordPress image in the Docker Store.
3. In the left pane, click **Collection**. The **Swarm** collection appears.
4. Click **View children** to list all of the collections. In **Shared**,
click **View children**, find the **View-only services** collection, and
select it.
5. Click **Create** to add the "WordPress" service to the collection and
deploy it.
![](../images/deploy-view-only-service-3.png)
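If you prefer working from the CLI, a rough equivalent of the steps above, run through a UCP client bundle, might look like the following sketch. Treat the `com.docker.ucp.access.label` service label as an assumption about how collection placement can be set outside the web UI:
```
# Rough CLI sketch of the steps above, run from a UCP client bundle.
# The label value matches the "View-only services" collection created earlier.
docker service create \
  --name wordpress \
  --label com.docker.ucp.access.label="/Shared/View-only services" \
  wordpress:latest
```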
You're ready to create a grant for controlling access to the "WordPress" service.
## Create a grant
Currently, users who aren't administrators can't access the
`/Shared/View-only services` collection. Create a grant to give the
`engineering` organization view-only access.
1. Navigate to the **Grants** page and click **Create Grant**.
2. In the left pane, click **Collections**, navigate to **/Shared/View-only services**,
and click **Select Collection**.
3. Click **Roles**, and in the dropdown, select **View Only**.
4. Click **Subjects**, and under **Select subject type**, click **Organizations**.
In the dropdown, select **engineering**.
5. Click **Create** to grant permissions to the organization.
![](../images/deploy-view-only-service-4.png)
Everything is in place to show role-based access control in action.
## Verify the user's permissions
Users in the `engineering` organization have view-only access to the
`/Shared/View-only services` collection. You can confirm this by logging in
as a non-admin user in the organization and trying to delete the service.
1. Log in as the user who you assigned to the Dev team.
2. Navigate to the **Services** page and click **WordPress**.
3. In the details pane, confirm that the service's collection is
**/Shared/View-only services**.
![](../images/deploy-view-only-service-2.png)
4. Click the checkbox next to the **WordPress** service, click **Actions**,
and select **Remove**. You get an error message, because the user
doesn't have `Service Delete` access to the collection.
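From the CLI, the same check is simply an attempt to remove the service while using the Dev team member's client bundle; because the grant is View Only, UCP is expected to reject the request:
```
# Run from the Dev team member's client bundle (View Only on the collection).
# This command is expected to be denied.
docker service rm wordpress
```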
## Where to go next
- [Isolate volumes between two different teams](isolate-volumes-between-teams.md)

View File

@ -0,0 +1,47 @@
---
title: Grant permissions to users based on roles
description: Grant access to swarm resources by using role-based access control.
keywords: ucp, grant, role, permission, authentication, authorization
---
If you're a UCP administrator, you can create *grants* to control how users
and organizations access swarm resources.
![](../images/ucp-grant-model-0.svg){: .with-border}
A grant is made up of a *subject*, a *role*, and a *resource collection*.
A grant defines who (subject) has how much access (role)
to a set of resources (collection). Each grant is a 1:1:1 mapping of
subject, role, collection. For example, you can grant the "Prod Team"
"Restricted Control" permissions for the "/Production" collection.
The usual workflow for creating grants has four steps.
1. Set up your users and teams. For example, you might want three teams,
Dev, QA, and Prod.
2. Organize swarm resources into separate collections that each team uses.
3. Optionally, create custom roles for specific permissions to the Docker API.
4. Grant role-based access to collections for your teams.
![](../images/ucp-grant-model.svg){: .with-border}
## Create a grant
When you have your users, collections, and roles set up, you can create
grants. Administrators create grants on the **Manage Grants** page.
1. Click **Create Grant**. All of the collections in the system are listed.
2. Click **Select** on the collection you want to grant access to.
3. In the left pane, click **Roles** and select a role from the dropdown list.
4. In the left pane, click **Subjects**. Click **All Users** to create a grant
for a specific user, or click **Organizations** to create a grant for an
organization or a team.
5. Select a user, team, or organization and click **Create**.
By default, all new users are placed in the `docker-datacenter` organization.
If you want to apply a grant to all UCP users, create a grant with the
`docker-datacenter` org as a subject.
## Where to go next
- [Isolate volumes between two different teams](isolate-volumes-between-teams.md)

View File

@ -0,0 +1,156 @@
---
title: Access control model
description: Manage access to containers, services, volumes, and networks by using role-based access control.
keywords: ucp, grant, role, permission, authentication, authorization
---
With Docker Universal Control Plane, you can control who can create and
edit container resources in your swarm, like services, images, networks,
and volumes. You can grant and manage permissions to enforce fine-grained
access control as needed.
## Grant access to swarm resources
If you're a UCP administrator, you can create *grants* to control how users
and organizations access swarm resources.
A grant is made up of a *subject*, a *role*, and a *resource collection*.
A grant defines who (subject) has how much access (role)
to a set of resources (collection).
[Learn how to grant permissions to users based on roles](grant-permissions.md).
![](../images/ucp-grant-model.svg)
An administrator is a user who can manage grants, subjects, roles, and
collections. An administrator identifies which operations can be performed
against specific resources and who can perform these actions. An administrator
can create and manage role assignments against subjects in the system.
Only an administrator can manage subjects, grants, roles, and collections.
## Subjects
A subject represents a user, team, or organization. A subject is granted a
role for a collection of resources.
- **User**: A person that the authentication backend validates. You can
assign users to one or more teams and one or more organizations.
- **Organization**: A group of users that share a specific set of
permissions, defined by the roles of the organization.
- **Team**: A group of users that share a set of permissions defined in the
team itself. A team exists only as part of an organization, and all of its
members must be members of the organization. Team members share
organization permissions. A team can be in one organization only.
## Roles
A role is a set of permitted API operations that you can assign to a specific
subject and collection by using a grant. UCP administrators view and manage
roles by navigating to the **Roles** page.
[Learn more about roles and permissions](permission-levels.md).
## Resource collections
Docker EE enables controlling access to swarm resources by using
*collections*. A collection is a grouping of swarm cluster resources that you
access by specifying a directory-like path.
Swarm resources that can be placed into a collection include:
- Physical or virtual nodes
- Containers
- Services
- Networks
- Volumes
- Secrets
- Application configs
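As a sketch of what this looks like in practice, the commands below create two of these resource types with a collection label at creation time. The collection path is hypothetical, and using the `com.docker.ucp.access.label` label for placement is an assumption based on how node collections are labeled elsewhere in these docs:
```
# Hypothetical examples: create resources directly into the /prod/mobile
# collection by labeling them at creation time.
docker network create --driver overlay \
  --label com.docker.ucp.access.label="/prod/mobile" mobile-net
docker volume create \
  --label com.docker.ucp.access.label="/prod/mobile" mobile-data
```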
## Collection architecture
Grants tie together who has which kind of access to what resources. Grants
are effectively ACLs which, when grouped together, can provide comprehensive
access policies for an entire organization. However, before grants can be
implemented, collections need to be designed to group resources in a way that
makes sense for an organization.
The following example shows a potential access policy of an organization.
Consider an organization with two application teams, Mobile and Payments, that
will share cluster hardware resources, but still need to segregate access to the
applications. Collections should be designed to map to the organizational
structure desired, in this case the two application teams. Their collection
architecture for a production UCP cluster might look something like this:
```
prod
├── mobile
└── payments
```
> A subject that has access to any level in a collection hierarchy will have
> that same access to any collections below it.
## Role composition
Roles define what operations can be done against cluster resources. An
organization will likely use several different kinds of roles to give the
right kind of access. A given team or user may have different roles provided
to them depending on what resource they are accessing. There are default roles
provided by UCP and also the ability to build custom roles. In this example
three different roles are used:
- Full Control - This is a default role that provides the full list of
operations against cluster resources.
- View Only - This is also a default role that allows a user to see resources,
but not to edit or delete.
- Dev - This is not a default role, but a potential custom role. In this
example "Dev" includes the ability to view containers and also `docker exec`.
This allows developers to run a shell inside their container process but not
see or change any other cluster resources.
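For instance, a member of a team that holds such a "Dev" role could open a shell in one of their own containers from a UCP client bundle, without being able to touch anything else in the cluster; the container name below is hypothetical:
```
# Find one of the team's own containers and open a shell inside it.
docker ps --filter "name=mobile-api"
docker exec -it mobile-api.1.x2b9qk0 sh
```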
## Grant composition
The following four grants define the access policy for the entire organization
for this cluster. They tie together the collections that were created, the
default and custom roles, and also teams of users that are in UCP.
![image](../images/access-control-grant-composition.png){: .with-border}
## Access architecture
The resulting access architecture defined by these grants is depicted below.
![image](../images/access-control-collection-architecture.png){: .with-border}
There are four teams that are given access to cluster resources:
- `security` can see, but not edit, all resources shown, as it has `View Only`
access to the entire `/prod` collection.
- `ops` has `Full Control` against the entire `/prod` collection, giving it the
capability to deploy, view, edit, and remove applications and application
resources.
- `mobile` has the `Dev` role against the `/prod/mobile` collection. This team
is able to see and `exec` into their own applications, but will not see any
of the `payments` applications.
- `payments` has the same type of access but for the `/prod/payments` collection.
[See a deeper tutorial on how to design access control architectures.](access-control-design-ee-standard.md)
[Manage access to resources by using collections](manage-access-with-collections.md).
## Transition from UCP 2.1 access control
- Your existing access labels and permissions are migrated automatically
during an upgrade from UCP 2.1.x.
- Unlabeled "user-owned" resources are migrated into the user's private
collection, in `/Shared/Private/<username>`.
- Old access control labels are migrated into `/Shared/Legacy/<labelname>`.
- When deploying a resource, choose a collection instead of an access label.
- Use grants for access control, instead of unlabeled permissions.
## Where to go next
- [Create and manage users](create-and-manage-users.md)
- [Create and manage teams](create-and-manage-teams.md)
- [Deploy a service with view-only access across an organization](deploy-view-only-service.md)
- [Isolate volumes between two different teams](isolate-volumes-between-teams.md)
- [Isolate swarm nodes between two different teams](isolate-nodes-between-teams.md)

View File

@@ -0,0 +1,174 @@
---
title: Isolate swarm nodes to a specific team
description: Create grants that limit access to nodes to specific teams.
keywords: ucp, grant, role, permission, authentication
---
With Docker EE Advanced, you can enable physical isolation of resources
by organizing nodes into collections and granting `Scheduler` access for
different users. To control access to nodes, move them to dedicated collections
where you can grant access to specific users, teams, and organizations.
In this example, a team gets access to a node collection and a resource
collection, and UCP access control ensures that the team members can't view
or use swarm resources that aren't in their collection.
You need a Docker EE Advanced license and at least two worker nodes to
complete this example.
1. Create an `Ops` team and assign a user to it.
2. Create a `/Prod` collection for the team's node.
3. Assign a worker node to the `/Prod` collection.
4. Grant the `Ops` team access to its collection.
![](../images/isolate-nodes-diagram.svg){: .with-border}
## Create a team
In the web UI, navigate to the **Organizations & Teams** page to create a team
named "Ops" in your organization. Add a user who isn't a UCP administrator to
the team.
[Learn to create and manage teams](create-and-manage-teams.md).
## Create a node collection and a resource collection
In this example, the Ops team uses an assigned group of nodes, which it
accesses through a collection. Also, the team has a separate collection
for its resources.
Create two collections: one for the team's worker nodes and another for the
team's resources.
1. Navigate to the **Collections** page to view all of the resource
collections in the swarm.
2. Click **Create collection** and name the new collection "Prod".
3. Click **Create** to create the collection.
4. Find **Prod** in the list, and click **View children**.
5. Click **Create collection**, and name the child collection
"Webserver". This creates a sub-collection for access control.
You've created two new collections. The `/Prod` collection is for the worker
nodes, and the `/Prod/Webserver` sub-collection is for access control to
an application that you'll deploy on the corresponding worker nodes.
## Move a worker node to a collection
By default, worker nodes are located in the `/Shared` collection.
Worker nodes that are running DTR are assigned to the `/System` collection.
To control access to the team's nodes, move them to a dedicated collection.
Move a worker node by changing the value of its access label key,
`com.docker.ucp.access.label`, to a different collection.
1. Navigate to the **Nodes** page to view all of the nodes in the swarm.
2. Click a worker node, and in the details pane, find its **Collection**.
If it's in the `/System` collection, click another worker node,
because you can't move nodes that are in the `/System` collection. By
default, worker nodes are assigned to the `/Shared` collection.
3. When you've found an available node, in the details pane, click
**Configure**.
4. In the **Labels** section, find `com.docker.ucp.access.label` and change
its value from `/Shared` to `/Prod`.
5. Click **Save** to move the node to the `/Prod` collection.
> Docker EE Advanced required
>
> If you don't have a Docker EE Advanced license, you'll get the following
> error message when you try to change the access label:
> **Nodes must be in either the shared or system collection without an advanced license.**
> [Get a Docker EE Advanced license](https://www.docker.com/pricing).
![](../images/isolate-nodes-1.png){: .with-border}
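If you prefer the CLI, you can make the same change from a UCP client bundle. This is a minimal sketch, where `worker-1` is a placeholder node name; moving nodes out of `/Shared` still requires a Docker EE Advanced license:
```bash
{% raw %}
# Move the worker node into the /Prod collection by updating its access label
docker node update --label-add com.docker.ucp.access.label=/Prod worker-1
# Confirm the node's new collection
docker node inspect --format '{{ index .Spec.Labels "com.docker.ucp.access.label" }}' worker-1
{% endraw %}
```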
## Grant access for a team
You'll need two grants to control access to nodes and container resources:
- Grant the `Ops` team the `Restricted Control` role for the `/Prod/Webserver`
resources.
- Grant the `Ops` team the `Scheduler` role against the nodes in the `/Prod`
collection.
Create two grants for team access to the two collections:
1. Navigate to the **Grants** page and click **Create Grant**.
2. In the left pane, click **Collections**, and in the **Swarm** collection,
click **View Children**.
3. In the **Prod** collection, click **View Children**.
4. In the **Webserver** collection, click **Select Collection**.
5. In the left pane, click **Roles**, and select **Restricted Control**
in the dropdown.
6. Click **Subjects**, and under **Select subject type**, click **Organizations**.
7. Select your organization, and in the **Team** dropdown, select **Ops**.
8. Click **Create** to grant the Ops team access to the `/Prod/Webserver`
collection.
The same steps apply for the nodes in the `/Prod` collection.
1. Navigate to the **Grants** page and click **Create Grant**.
2. In the left pane, click **Collections**, and in the **Swarm** collection,
click **View Children**.
3. In the **Prod** collection, click **Select Collection**.
4. In the left pane, click **Roles**, and in the dropdown, select **Scheduler**.
5. In the left pane, click **Subjects**, and under **Select subject type**, click
**Organizations**.
6. Select your organization, and in the **Team** dropdown, select **Ops**.
7. Click **Create** to grant the Ops team `Scheduler` access to the nodes in the
`/Prod` collection.
![](../images/isolate-nodes-2.png){: .with-border}
## Deploy a service as a team member
Your swarm is ready to show role-based access control in action. When a user
deploys a service, UCP assigns its resources to the user's default collection.
From the target collection of a resource, UCP walks up the ancestor collections
until it finds nodes that the user has `Scheduler` access to. In this example,
UCP assigns the user's service to the `/Prod/Webserver` collection and schedules
tasks on nodes in the `/Prod` collection.
As a user on the Ops team, set your default collection to `/Prod/Webserver`.
1. Log in as a user on the Ops team.
2. Navigate to the **Collections** page, and in the **Prod** collection,
click **View Children**.
3. In the **Webserver** collection, click the **More Options** icon and
select **Set to default**.
Next, deploy a service. UCP schedules it automatically on worker nodes in the `/Prod` collection.
All resources are deployed under the user's default collection,
`/Prod/Webserver`, and the containers are scheduled only on the nodes under
`/Prod`.
1. Navigate to the **Services** page, and click **Create Service**.
2. Name the service "nginx", use the "nginx:latest" image, and click
**Create**.
3. When the **nginx** service status is green, click the service. In the
details view, click **Inspect Resource**, and in the dropdown, select
**Containers**.
4. Click the **nginx** container, and in the details pane, confirm that its
**Collection** is **/Prod/Webserver**.
![](../images/isolate-nodes-3.png){: .with-border}
5. Click **Inspect Resource**, and in the dropdown, select **Nodes**.
6. Click the node, and in the details pane, confirm that its **Collection**
is **/Prod**.
![](../images/isolate-nodes-4.png){: .with-border}
## Alternative: Use a grant instead of the default collection
Another approach is to use a grant instead of changing the user's default
collection. An administrator can create a grant for a role that has the
`Service Create` permission against the `/Prod/Webserver` collection or a child
collection. In this case, the user sets the value of the service's access label,
`com.docker.ucp.access.label`, to the new collection or one of its children
that has a `Service Create` grant for the user.
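From the CLI, that might look like the following sketch, where the service name and collection path are examples:
```bash
# Deploy a service directly into the /Prod/Webserver collection
# by setting its access label explicitly
docker service create \
  --name nginx \
  --label com.docker.ucp.access.label=/Prod/Webserver \
  nginx:latest
```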
## Where to go next
- [Node access control in Docker EE Advanced](access-control-node.md)
- [Isolate volumes between two different teams](isolate-volumes-between-teams.md)
- [Deploy a service with view-only access across an organization](deploy-view-only-service.md)

View File

@ -0,0 +1,97 @@
---
title: Isolate volumes between two different teams
description: Create grants that limit access to volumes to specific teams.
keywords: ucp, grant, role, permission, authentication
---
In this example, two teams are granted access to volumes in two different
resource collections. UCP access control prevents the teams from viewing and
accessing each other's volumes, even though they may be located in the same
nodes.
1. Create two teams.
2. Create two collections, one for each team.
3. Create grants to manage access to the collections.
4. Team members create volumes that are specific to their team.
![](../images/isolate-volumes-diagram.svg){: .with-border}
## Create two teams
Navigate to the **Organizations & Teams** page to create two teams in your
organization, named "Dev" and "Prod". Add a user who's not a UCP administrator
to the Dev team, and add another non-admin user to the Prod team.
[Learn how to create and manage teams](create-and-manage-teams.md).
## Create resource collections
In this example, the Dev and Prod teams use two different volumes, which they
access through two corresponding resource collections. The collections are
placed under the `/Shared` collection.
1. In the left pane, click **Collections** to show all of the resource
collections in the swarm.
2. Find the **/Shared** collection and click **View children**.
3. Click **Create collection** and name the new collection "dev-volumes".
4. Click **Create** to create the collection.
5. Click **Create collection** again, name the new collection "prod-volumes",
and click **Create**.
## Create grants for controlling access to the new volumes
In this example, the Dev team gets access to its volumes from a grant that
associates the team with the `/Shared/dev-volumes` collection, and the Prod
team gets access to its volumes from another grant that associates the team
with the `/Shared/prod-volumes` collection.
1. Navigate to the **Grants** page and click **Create Grant**.
2. In the left pane, click **Collections**, and in the **Swarm** collection,
click **View Children**.
3. In the **Shared** collection, click **View Children**.
4. In the list, find **/Shared/dev-volumes** and click **Select Collection**.
5. Click **Roles**, and in the dropdown, select **Restricted Control**.
6. Click **Subjects**, and under **Select subject type**, click **Organizations**.
In the dropdown, pick your organization, and in the **Team** dropdown,
select **Dev**.
7. Click **Create** to grant permissions to the Dev team.
8. Click **Create Grant** and repeat the previous steps for the **/Shared/prod-volumes**
collection and the Prod team.
![](../images/isolate-volumes-1.png){: .with-border}
With the collections and grants in place, users can sign in and create volumes
in their assigned collections.
## Create a volume as a team member
Team members have permission to create volumes in their assigned collection.
1. Log in as one of the users on the Dev team.
2. Navigate to the **Volumes** page to view all of the volumes in the swarm
that the user can access.
3. Click **Create volume** and name the new volume "dev-data".
4. In the left pane, click **Collections**. The default collection appears.
At the top of the page, click **Shared**, find the **dev-volumes**
collection in the list, and click **Select Collection**.
5. Click **Create** to add the "dev-data" volume to the collection.
6. Log in as one of the users on the Prod team, and repeat the previous steps
to create a "prod-data" volume assigned to the `/Shared/prod-volumes`
collection.
![](../images/isolate-volumes-2.png){: .with-border}
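Team members can also do this from the CLI by labeling the volume explicitly. This is a sketch, assuming a client bundle for a user on the Dev team:
```bash
# Create a volume in the Dev team's collection by setting its access label
docker volume create \
  --label com.docker.ucp.access.label=/Shared/dev-volumes \
  dev-data
```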
Now you can see role-based access control in action for volumes. The user on
the Prod team can't see the Dev team's volumes, and if you log in again as a
user on the Dev team, you won't see the Prod team's volumes.
![](../images/isolate-volumes-3.png){: .with-border}
Sign in with a UCP administrator account, and you see all of the volumes
created by the Dev and Prod users.
![](../images/isolate-volumes-4.png){: .with-border}
## Where to go next
- [Isolate swarm nodes to a specific team](isolate-nodes-between-teams.md)

View File

@ -0,0 +1,148 @@
---
title: Manage access to resources by using collections
description: Use collections to enable access control for worker nodes and container resources.
keywords: ucp, grant, role, permission, authentication, resource collection
---
Docker EE enables controlling access to container resources by using
*collections*. A collection is a group of swarm resources,
like services, containers, volumes, networks, and secrets.
![](../images/collections-and-resources.svg){: .with-border}
Access to collections goes through a directory structure that arranges a
swarm's resources. To assign permissions, administrators create grants
against directory branches.
## Directory paths define access to collections
Access to collections is based on a directory-like structure.
For example, the path to a user's default collection is
`/Shared/Private/<username>`. Every user has a private collection that
has the default permission specified by the UCP administrator.
Each collection has an access label that identifies its path.
For example, the private collection for user "hans" has a label that looks
like this:
```
com.docker.ucp.access.label = /Shared/Private/hans
```
You can nest collections. If a user has a grant against a collection,
the grant applies to all of its child collections.
For a child collection, or for a user who belongs to more than one team,
the system concatenates permissions from multiple roles into an
"effective role" for the user, which specifies the operations that are
allowed against the target.
## Built-in collections
UCP provides a number of built-in collections.
- `/` - The path to the `Swarm` collection. All resources in the
cluster are here. Resources that aren't in a collection are assigned
to the `/` directory.
- `/System` - The system collection, which contains UCP managers, DTR nodes,
and UCP/DTR system services. By default, only admins have access to the
system collection, but you can change this.
- `/Shared` - All worker nodes are here by default, for scheduling.
In a system with a standard-tier license, all worker nodes are under
the `/Shared` collection. With the EE Advanced license, administrators
can move worker nodes to other collections and apply role-based access.
- `/Shared/Private` - User private collections are stored here.
- `/Shared/Legacy` - After updating from UCP 2.1, all legacy access control
labels are stored here.
![](../images/collections-diagram.svg){: .with-border}
This diagram shows the `/System` and `/Shared` collections that are created
by UCP. User private collections are children of the `/Shared/Private`
collection. Also, an admin user has created a `/prod` collection and its
`/webserver` child collection.
## Default collections
A user always has a default collection. The user can select the default
in UI preferences. When a user deploys a resource in the web UI, the
preselected option is the default collection, but this can be changed.
Users can't deploy a resource without a collection. When deploying a
resource from the CLI without an access label, UCP automatically places the
resource in the user's default collection.
[Learn how to add labels to cluster nodes](../admin/configure/add-labels-to-cluster-nodes/).
When using Docker Compose, the system applies default collection labels
across all resources in the stack, unless the `com.docker.ucp.access.label`
has been set explicitly.
> Default collections and collection labels
>
> Setting a default collection is most helpful for users who deploy stacks
> and don't want to edit the contents of their compose files. Also, setting
> a default collection is useful for users who work only on a well-defined
> slice of the system. On the other hand, setting the collection label for
> every resource works best for users who have versatile roles in the system,
> like administrators.
## Collections and labels
Resources are marked as being in a collection by using labels.
Some resource types don't have editable labels, so they can't be moved to a
different collection after they're created. Containers, networks, and volumes
are in this category, while services, nodes, secrets, and configs have labels
that you can update.
For editable resources, like services, secrets, nodes, and configs,
you can change the `com.docker.ucp.access.label` to move resources to
different collections. With the CLI, you can use this label to deploy
resources to a collection other than your default collection. Omitting this
label on the CLI deploys a resource on the user's default resource collection.
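For example, here's a sketch of creating a secret in a specific collection and moving an existing service to another collection; the collection path and resource names are placeholders:
```bash
# Create a secret directly in a non-default collection
echo "s3cr3t" | docker secret create \
  --label com.docker.ucp.access.label=/Prod/Webserver \
  db_password -
# Move an existing service to another collection by updating its access label
docker service update \
  --label-add com.docker.ucp.access.label=/Prod/Webserver \
  my-service
```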
The system uses the additional labels, `com.docker.ucp.collection.*`, to enable
efficient resource lookups. By default, nodes have the
`com.docker.ucp.collection.root`, `com.docker.ucp.collection.shared`, and
`com.docker.ucp.collection.swarm` labels set to `true`. UCP automatically
controls these labels, and you don't need to manage them.
Collections get generic default names, but you can give them meaningful names,
like "Dev", "Test", and "Prod".
A *stack* is a group of resources identified by a label. You can place the
stack's resources in multiple collections. Resources are placed in the user's
default collection unless you specify an explicit `com.docker.ucp.access.label`
within the stack/compose file.
## Control access to nodes
The Docker EE Advanced license enables access control on worker nodes. Admin
users can move worker nodes from the default `/Shared` collection into other
collections and create corresponding grants for scheduling tasks.
In this example, an administrator has moved worker nodes to a `/prod`
collection:
![](../images/containers-and-nodes-diagram.svg)
When you deploy a resource into a collection, UCP implicitly sets a scheduling
constraint based on which nodes the collection and its ancestor collections can access.
The `Scheduler` role allows users to deploy resources on a node.
By default, all users have the `Scheduler` role against the `/Shared`
collection.
When deploying a resource that isn't global, like local volumes, bridge
networks, containers, and services, the system identifies a set of
"schedulable nodes" for the user. The system identifies the target collection
of the resource, like `/Shared/Private/hans`, and it tries to find the parent
that's closest to the root that the user has the `Node Schedule` permission on.
For example, when a user with a default configuration runs `docker container run nginx`,
the system interprets this to mean, "Create an NGINX container under the
user's default collection, which is at `/Shared/Private/hans`, and deploy it
on one of the nodes under `/Shared`."
If you want to isolate nodes from other teams, place those nodes in
new collections, and assign the `Scheduler` role, which contains the
`Node Schedule` permission, to the team.
[Isolate swarm nodes to a specific team](isolate-nodes-between-teams.md).

View File

@ -0,0 +1,79 @@
---
description: Learn about the permission levels available in Docker Universal
Control Plane.
keywords: authorization, authentication, users, teams, UCP
title: Roles and permission levels
---
Docker Universal Control Plane has two types of users: administrators and
regular users. Administrators can make changes to the UCP swarm, while
regular users have permissions that range from no access to full control over
resources like volumes, networks, images, and containers. Users are
grouped into teams and organizations.
![Diagram showing UCP permission levels](../images/role-diagram.svg)
Administrators create *grants* to users, teams, and organizations to give
permissions to swarm resources.
## Administrator users
In Docker UCP, only users with administrator privileges can make changes to
swarm settings. This includes:
* Managing user permissions by creating grants.
* Managing swarm configurations, like adding and removing nodes.
## Roles
A role is a set of permitted API operations on a collection that you
can assign to a specific user, team, or organization by using a grant.
UCP administrators view and manage roles by navigating to the **Roles** page.
The system provides the following default roles:
| Built-in role | Description |
|----------------------|-------------|
| `None` | The user has no access to swarm resources. This maps to the `No Access` role in UCP 2.1.x. |
| `View Only` | The user can view resources like services, volumes, and networks but can't create them. |
| `Restricted Control` | The user can view and edit volumes, networks, and images but can't run a service or container in a way that might affect the node where it's running. The user can't mount a node directory and can't `exec` into containers. Also, the user can't run containers in privileged mode or with additional kernel capabilities. |
| `Scheduler` | The user can view nodes and schedule workloads on them. Worker nodes and manager nodes are affected by `Scheduler` grants. Having `Scheduler` access doesn't allow the user to view workloads on these nodes. They need the appropriate resource permissions, like `Container View`. By default, all users get a grant with the `Scheduler` role against the `/Shared` collection. |
| `Full Control` | The user can view and edit volumes, networks, and images. They can create containers without any restriction but can't see other users' containers. |
![Diagram showing UCP permission levels](../images/permissions-ucp.svg)
Administrators can create a custom role that has Docker API permissions
that specify the API actions that a subject may perform.
The **Roles** page lists the available roles, including the default roles
and any custom roles that administrators have created. In the **Roles**
list, click a role to see the API operations that it uses. For example, the
`Scheduler` role has two of the node operations, `Schedule` and `View`.
## Create a custom role
Click **Create role** to create a custom role and define the API operations
that it uses. When you create a custom role, all of the APIs that you can use
are listed on the **Create Role** page. For example, you can create a custom
role that uses the node operations, `Schedule`, `Update`, and `View`, and you
might give it a name like "Node Operator".
![](../images/custom-role.png){: .with-border}
You can give a role a global name, like "Remove Images", which might enable
the **Remove** and **Force Remove** operations for images. You can apply a
role with the same name to different collections.
Only an administrator can create and remove roles. Roles are always enabled.
Roles can't be edited, so to change a role's API operations, you must delete it
and create it again.
You can't delete a custom role if it's used in a grant. You must first delete
the grants that use the role.
## Where to go next
* [Create and manage users](create-and-manage-users.md)
* [Create and manage teams](create-and-manage-teams.md)
* [Docker Reference Architecture: Securing Docker EE and Security Best Practices](https://success.docker.com/Architecture/Docker_Reference_Architecture%3A_Securing_Docker_EE_and_Security_Best_Practices)

View File

@ -0,0 +1,32 @@
---
title: Reset a user password
description: Learn how to recover your Docker Datacenter credentials
keywords: ucp, authentication
---
If you have administrator credentials to UCP, you can reset the password of
other users.
If that user is being managed using an LDAP service, you need to change the
user password on that system. If the user account is managed using UCP,
log in with administrator credentials to the UCP web UI, navigate to
the **Users** page, and choose the user whose password you want to change.
In the details pane, click **Configure** and select **Security** from the
dropdown.
![](../images/recover-a-user-password-1.png){: .with-border}
Update the user's password and click **Save**.
If you're an administrator and forgot your password, you can ask other users
with administrator credentials to change your password.
If you're the only administrator, use **ssh** to log in to a manager
node managed by UCP, and run:
```none
{% raw %}
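# Extract the --db-addr argument from the ucp-auth-api container and pass it
# to the enzi tool, which runs 'passwd -i' to reset the password interactively.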
docker exec -it ucp-auth-api enzi \
$(docker inspect --format '{{range .Args}}{{if eq "--db-addr=" (printf "%.10s" .)}}{{.}}{{end}}{{end}}' ucp-auth-api) \
passwd -i
{% endraw %}
```

View File

@ -0,0 +1,210 @@
---
description: Learn how to backup your Docker Universal Control Plane swarm, and
to recover your swarm from an existing backup.
keywords: ucp, backup, restore, recovery
title: Backups and disaster recovery
---
When you decide to start using Docker Universal Control Plane in a production
setting, you should
[configure it for high availability](configure/set-up-high-availability.md).
The next step is creating a backup policy and disaster recovery plan.
## Data managed by UCP
UCP maintains data about:
| Data | Description |
| :-------------------- | :------------------------------------------------------------------------------------------------------------------- |
| Configurations | The UCP cluster configurations, as shown by `docker config ls`, including Docker EE license and swarm and client CAs |
| Access control | Permissions for teams to swarm resources, including collections, grants, and roles |
| Certificates and keys | The certificates, public keys, and private keys that are used for authentication and mutual TLS communication |
| Metrics data | Monitoring data gathered by UCP |
| Organizations | Your users, teams, and orgs |
| Volumes | All [UCP named volumes](../architecture/#volumes-used-by-ucp), which include all UCP component certs and data |
This data is persisted on the host running UCP, using named volumes.
[Learn more about UCP named volumes](../architecture.md).
## Backup steps
Back up your Docker EE components in the following order:
1. [Back up your swarm](/engine/swarm/admin_guide/#back-up-the-swarm)
2. Back up UCP
3. [Back up DTR](../../../../dtr/2.3/guides/admin/backups-and-disaster-recovery.md)
## Backup policy
As part of your backup policy you should regularly create backups of UCP.
DTR is backed up independently.
[Learn about DTR backups and recovery](../../../../dtr/2.3/guides/admin/backups-and-disaster-recovery.md).
To create a UCP backup, run the `{{ page.ucp_org }}/{{ page.ucp_repo }}:{{ page.ucp_version }} backup` command
on a single UCP manager. This command creates a tar archive with the
contents of all the [volumes used by UCP](../architecture.md) to persist data
and streams it to stdout. The backup doesn't include the swarm-mode state,
like service definitions and overlay network definitions.
Since UCP stores the same data on all manager nodes, you only need to run the
backup command, and take periodic backups, on a single manager node.
To create a consistent backup, the backup command temporarily stops the UCP
containers running on the node where the backup is being performed. User
resources, such as services, containers, and stacks, are not affected by this
operation and will continue operating as expected. Any long-lasting `exec`,
`logs`, `events`, or `attach` operations on the affected manager node will
be disconnected.
Additionally, if UCP is not configured for high availability, you will be
temporarily unable to:
* Log in to the UCP Web UI
* Perform CLI operations using existing client bundles
To minimize the impact of the backup policy on your business, you should:
* Configure UCP for [high availability](configure/set-up-high-availability.md).
This allows load-balancing user requests across multiple UCP manager nodes.
* Schedule the backup to take place outside business hours.
## Backup command
The example below shows how to create a backup of a UCP manager node and
verify its contents:
```none
# Create a backup and store it in /tmp/backup.tar
$ docker container run --log-driver none --rm -i --name ucp \
-v /var/run/docker.sock:/var/run/docker.sock \
{{ page.ucp_org }}/{{ page.ucp_repo }}:{{ page.ucp_version }} backup --interactive > /tmp/backup.tar
# Ensure the backup is a valid tar and list its contents
# In a valid backup file, over 100 files should appear in the list
# and the `./ucp-node-certs/key.pem` file should be present
$ tar --list -f /tmp/backup.tar
```
A backup file may optionally be encrypted using a passphrase, as in the
following example:
```none
# Create a backup, encrypt it, and store it in /tmp/backup.tar
$ docker container run --log-driver none --rm -i --name ucp \
-v /var/run/docker.sock:/var/run/docker.sock \
{{ page.ucp_org }}/{{ page.ucp_repo }}:{{ page.ucp_version }} backup --interactive \
--passphrase "secret" > /tmp/backup.tar
# Decrypt the backup and list its contents
$ gpg --decrypt /tmp/backup.tar | tar --list
```
### Security-Enhanced Linux (SELinux)
For Docker EE 17.06 or higher, if the Docker engine has SELinux enabled,
which is typical for RHEL hosts, you need to include `--security-opt label=disable`
in the `docker` command:
```bash
$ docker container run --security-opt label=disable --log-driver none --rm -i --name ucp \
-v /var/run/docker.sock:/var/run/docker.sock \
{{ page.ucp_org }}/{{ page.ucp_repo }}:{{ page.ucp_version }} backup --interactive > /tmp/backup.tar
```
To find out whether SELinux is enabled in the engine, view the host's
`/etc/docker/daemon.json` file and search for the string
`"selinux-enabled":"true"`.
## Restore UCP
To restore an existing UCP installation from a backup, you need to
uninstall UCP from the swarm by using the `uninstall-ucp` command.
[Learn to uninstall UCP](install/uninstall.md).
When restoring, make sure you use the same version of the `docker/ucp` image
that you've used to create the backup. The example below shows how to restore
UCP from an existing backup file, presumed to be located at
`/tmp/backup.tar`:
```none
$ docker container run --rm -i --name ucp \
-v /var/run/docker.sock:/var/run/docker.sock \
{{ page.ucp_org }}/{{ page.ucp_repo }}:{{ page.ucp_version }} restore < /tmp/backup.tar
```
If the backup file is encrypted with a passphrase, you will need to provide the
passphrase to the restore operation:
```none
$ docker container run --rm -i --name ucp \
-v /var/run/docker.sock:/var/run/docker.sock \
{{ page.ucp_org }}/{{ page.ucp_repo }}:{{ page.ucp_version }} restore --passphrase "secret" < /tmp/backup.tar
```
The restore command may also be invoked in interactive mode, in which case the
backup file should be mounted to the container rather than streamed through
stdin:
```none
$ docker container run --rm -i --name ucp \
-v /var/run/docker.sock:/var/run/docker.sock \
-v /tmp/backup.tar:/config/backup.tar \
{{ page.ucp_org }}/{{ page.ucp_repo }}:{{ page.ucp_version }} restore -i
```
### UCP and Swarm
UCP restore recovers the following assets from the backup file:
* Users, teams, and permissions.
* All UCP configuration options available under `Admin Settings`, like the
Docker EE subscription license, scheduling options, content trust, and
authentication backends.
UCP restore does not include swarm assets such as cluster membership, services, networks,
secrets, etc. [Learn to backup a swarm](https://docs.docker.com/engine/swarm/admin_guide/#back-up-the-swarm).
There are two ways to restore UCP:
* On a manager node of an existing swarm which does not have UCP installed.
In this case, UCP restore will use the existing swarm.
* On a Docker engine that isn't participating in a swarm. In this case, a new
swarm is created and UCP is restored on top.
## Disaster recovery
If half or more of the manager nodes are lost and can't be recovered
to a healthy state, the swarm has lost quorum and can only be
restored through the following disaster recovery procedure. If your cluster has
lost quorum, you can still take a backup of one of the remaining nodes, but we
recommend making backups regularly.
This procedure is not guaranteed to succeed with
no loss of running services or configuration data. To properly protect against
manager failures, the system should be configured for
[high availability](configure/set-up-high-availability.md).
1. On one of the remaining manager nodes, perform `docker swarm init
--force-new-cluster`. You may also need to specify an
`--advertise-addr` parameter which is equivalent to the `--host-address`
parameter of the `docker/ucp install` operation. This will instantiate a new
single-manager swarm by recovering as much state as possible from the
existing manager. This is a disruptive operation and existing tasks may be
either terminated or suspended.
2. Obtain a backup of one of the remaining manager nodes if one is not already
available.
3. If UCP is still installed on the swarm, uninstall UCP using the
`uninstall-ucp` command.
4. Perform a restore operation on the recovered swarm manager node.
5. Log in to UCP and browse to the nodes page, or use the CLI `docker node ls`
command.
6. If any nodes are listed as `down`, you'll have to manually [remove these
nodes](configure/scale-your-cluster.md) from the swarm and then re-join
them using a `docker swarm join` operation with the swarm's new join-token.
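The commands for steps 1 and 6 might look like the following sketch; the address is a placeholder:
```bash
# Step 1: force a new single-manager swarm on a surviving manager node
docker swarm init --force-new-cluster --advertise-addr <node-ip-address>
# Step 6: print the join token and command for re-joining worker nodes
docker swarm join-token worker
```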
## Where to go next
* [Set up high availability](configure/set-up-high-availability.md)
* [UCP architecture](../architecture.md)

View File

@ -0,0 +1,139 @@
---
title: Add labels to swarm nodes
description: Learn how to add metadata to swarm nodes that can be used to specify constraints when deploying services.
keywords: cluster, node, label, swarm, metadata
---
With Docker UCP, you can add labels to your nodes. Labels are metadata that
describe the node, like its role (development, QA, production), its region
(US, EU, APAC), or the kind of disk (hdd, ssd). Once you have labeled your
nodes, you can add deployment constraints to your services, to ensure they
are scheduled on a node with a specific label.
For example, you can apply labels based on their role in the development
lifecycle, or the hardware resources they have.
![](../../images/add-labels-to-cluster-nodes-1.svg)
Don't create labels for authorization and permissions to resources.
Instead, use collections to organize access to your swarm.
[Learn about managing access with collections](../../access-control/manage-access-with-collections.md).
## Apply labels to a node
In this example, we'll apply a label with the key `disk` and the value `ssd`
to a node. Then we'll deploy a service with a deployment constraint to make
sure the service is always scheduled to run on a node that has that label.
Log in with administrator credentials in the UCP web UI, navigate to the
**Nodes** page, and choose the node you want to apply labels to. In the
details pane, click **Configure**.
In the **Edit Node** page, scroll down to the **Labels** section.
Click **Add Label**, and add a label with the key `disk` and a value of `ssd`.
![](../../images/add-labels-to-cluster-nodes-2.png){: .with-border}
Click **Save** and dismiss the **Edit Node** page. In the node's details
pane, click **Labels** to view the labels that are applied to the node.
You can also do this from the CLI by running:
```bash
$ docker node update --label-add <key>=<value> <node-id>
```
## Deploy a service with constraints
When deploying a service, you can specify constraints, so that the service gets
scheduled only on a node that has a label that fulfills all of the constraints
you specify.
In this example, when users deploy a service, they can add a constraint for the
service to be scheduled only on nodes that have SSD storage.
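If you deploy from the CLI instead of the web UI, you can pass the constraint directly to `docker service create`. This is a minimal sketch with a placeholder service name and image:
```bash
# Create a service that can only be scheduled on nodes labeled disk=ssd
docker service create \
  --name nginx \
  --constraint 'node.labels.disk == ssd' \
  nginx:latest
```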
Navigate to the **Stacks** page. Name the new stack "wordpress", and in the
**Mode** dropdown, select **Services**.
In the **Compose.yml** editor, paste the following stack file.
```
version: "3.1"
services:
db:
image: mysql:5.7
deploy:
placement:
constraints:
- node.labels.disk == ssd
restart_policy:
condition: on-failure
networks:
- wordpress-net
environment:
MYSQL_ROOT_PASSWORD: wordpress
MYSQL_DATABASE: wordpress
MYSQL_USER: wordpress
MYSQL_PASSWORD: wordpress
wordpress:
depends_on:
- db
image: wordpress:latest
deploy:
replicas: 1
placement:
constraints:
- node.labels.disk == ssd
restart_policy:
condition: on-failure
max_attempts: 3
networks:
- wordpress-net
ports:
- "8000:80"
environment:
WORDPRESS_DB_HOST: db:3306
WORDPRESS_DB_PASSWORD: wordpress
networks:
wordpress-net:
```
Click **Create** to deploy the stack, and when the stack deploys,
click **Done**.
![](../../images/use-constraints-in-stack-deployment.png)
Navigate to the **Nodes** page, and click the node that has the
`disk` label. In the details pane, click the **Inspect Resource**
dropdown and select **Containers**. The stack's containers appear in the list,
because the constraint only allows them to be scheduled on nodes with this label.
![](../../images/use-constraints-in-stack-deployment-2.png)
Dismiss the filter and navigate to the **Nodes** page. Click a node that
doesn't have the `disk` label. In the details pane, click the
**Inspect Resource** dropdown and select **Containers**. There are no
WordPress containers scheduled on the node. Dismiss the filter.
## Add a constraint to a service by using the UCP web UI
You can declare the deployment constraints in your docker-stack.yml file or
when you're creating a stack. Also, you can apply them when you're creating
a service.
To check if a service has deployment constraints, navigate to the
**Services** page and choose the service that you want to check.
In the details pane, click **Constraints** to list the constraint labels.
To edit the labels on the service, click **Configure** and select
**Environment**.
![](../../images/add-constraint-to-service.png)
You can add or remove deployment constraints on this page.
## Where to go next
* [Store logs in an external system](store-logs-in-an-external-system.md)

View File

@ -0,0 +1,55 @@
---
title: Add SANs to cluster certificates
description: Learn how to add new SANs to cluster nodes, allowing you to connect to UCP with a different hostname
keywords: cluster, node, label, certificate, SAN
---
UCP always runs with HTTPS enabled. When you connect to UCP, you need to make
sure that the hostname that you use to connect is recognized by UCP's
certificates. If, for instance, you put UCP behind a load balancer that
forwards its traffic to your UCP instance, your requests will be for the load
balancer's hostname or IP address, not UCP's. UCP will reject these requests
unless you include the load balancer's address as a Subject Alternative Name
(or SAN) in UCP's certificates.
If you use your own TLS certificates, make sure that they have the correct SAN
values.
[Learn about using your own TLS certificates](use-your-own-tls-certificates.md).
If you want to use the self-signed certificate that UCP has out of the box, you
can set up the SANs when you install UCP with the `--san` argument. You can
also add them after installation.
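For example, here's a sketch of an install command that adds a SAN at install time; the host address and hostname are placeholders:
```bash
docker container run --rm -it --name ucp \
  -v /var/run/docker.sock:/var/run/docker.sock \
  {{ page.ucp_org }}/{{ page.ucp_repo }}:{{ page.ucp_version }} install \
  --host-address <node-ip-address> \
  --san ucp.example.com \
  --interactive
```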
## Add new SANs to UCP
1. In the UCP web UI, log in with administrator credentials and navigate to
the **Nodes** page.
2. Click on a manager node, and in the details pane, click **Configure**.
3. In the **SANs** section, click **Add SAN**, and enter one or more SANs
for the swarm.
![](../../images/add-sans-to-cluster-1.png){: .with-border}
4. Once you're done, click **Save**.
You will have to do this on every manager node in the swarm, but once you
have done so, the SANs are applied automatically to any new manager nodes
that join the swarm.
You can also do this from the CLI by first running:
```bash
{% raw %}
$ docker node inspect --format '{{ index .Spec.Labels "com.docker.ucp.SANs" }}' <node-id>
default-cs,127.0.0.1,172.17.0.1
{% endraw %}
```
This will get the current set of SANs for the given manager node. Append your
desired SAN to this list, for example `default-cs,127.0.0.1,172.17.0.1,example.com`,
and then run:
```bash
$ docker node update --label-add com.docker.ucp.SANs=<SANs-list> <node-id>
```
`<SANs-list>` is the list of SANs with your new SAN appended at the end. As in
the web UI, you must do this for every manager node.

View File

@ -0,0 +1,68 @@
---
title: Integrate with LDAP by using a configuration file
description: Set up LDAP authentication by using a configuration file.
keywords: UCP, LDAP, config
---
Docker UCP integrates with LDAP directory services, so that you can manage
users and groups from your organization's directory and automatically
propagate this information to UCP and DTR. You can set up your swarm's LDAP
configuration by using the UCP web UI, or you can use a
[UCP configuration file](../ucp-configuration-file.md).
To see an example TOML config file that shows how to configure UCP settings,
run UCP with the `example-config` option.
[Learn about UCP configuration files](../ucp-configuration-file.md).
```bash
$ docker container run --rm {{ page.ucp_org }}/{{ page.ucp_repo }}:{{ page.ucp_version }} example-config
```
## Set up LDAP by using a configuration file
1. Use the following command to extract the name of the currently active
configuration from the `ucp-agent` service.
```bash
{% raw %}
$ CURRENT_CONFIG_NAME=$(docker service inspect --format '{{ range $config := .Spec.TaskTemplate.ContainerSpec.Configs }}{{ $config.ConfigName }}{{ "\n" }}{{ end }}' ucp-agent | grep 'com.docker.ucp.config-')
{% endraw %}
```
2. Get the current configuration and save it to a TOML file.
```bash
{% raw %}
$ docker config inspect --format '{{ printf "%s" .Spec.Data }}' $CURRENT_CONFIG_NAME > config.toml
{% endraw %}
```
3. Use the output of the `example-config` command as a guide to edit your
`config.toml` file. Under the `[auth]` sections, set `backend = "ldap"`
and `[auth.ldap]` to configure LDAP integration the way you want.
4. Once you've finished editing your `config.toml` file, create a new Docker
Config object by using the following command.
```bash
$ NEW_CONFIG_NAME="com.docker.ucp.config-$(( $(cut -d '-' -f 2 <<< "$CURRENT_CONFIG_NAME") + 1 ))"
$ docker config create $NEW_CONFIG_NAME config.toml
```
5. Update the `ucp-agent` service to remove the reference to the old config
and add a reference to the new config.
```bash
$ docker service update --config-rm "$CURRENT_CONFIG_NAME" --config-add "source=${NEW_CONFIG_NAME},target=/etc/ucp/ucp.toml" ucp-agent
```
6. Wait a few moments for the `ucp-agent` service tasks to update across
your swarm. If you set `jit_user_provisioning = true` in the LDAP
configuration, users matching any of your specified search queries will
have their accounts created when they log in with their username and LDAP
password.
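To verify that the tasks picked up the new configuration, you can watch the service and check its config reference; this is a sketch:
```bash
# Watch the ucp-agent tasks update across the swarm
docker service ps ucp-agent
{% raw %}
# Confirm that the service now references the new configuration object
docker service inspect --format '{{ range $config := .Spec.TaskTemplate.ContainerSpec.Configs }}{{ $config.ConfigName }}{{ "\n" }}{{ end }}' ucp-agent
{% endraw %}
```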
## Where to go next
- [Create and manage users](../../../access-control/create-and-manage-users.md)
- [Create and manage teams](../../../access-control/create-and-manage-teams.md)

View File

@ -0,0 +1,226 @@
---
title: Integrate with an LDAP Directory
description: Learn how to integrate UCP with an LDAP service, so that you can
manage users from a single place.
keywords: LDAP, UCP, authentication, user management
---
Docker UCP integrates with LDAP directory services, so that you can manage
users and groups from your organization's directory and automatically
propagate that information to UCP and DTR.
If you enable LDAP, UCP uses a remote directory server to create users
automatically, and all logins are forwarded to the directory server.
When you switch from built-in authentication to LDAP authentication,
all manually created users whose usernames don't match any LDAP search results
are still available.
When you enable LDAP authentication, you can choose whether UCP creates user
accounts only when users log in for the first time. Select the
**Just-In-Time User Provisioning** option to ensure that the only LDAP
accounts that exist in UCP are those that have had a user log in to UCP.
## How UCP integrates with LDAP
You control how UCP integrates with LDAP by creating searches for users.
You can specify multiple search configurations, and you can specify multiple
LDAP servers to integrate with. Searches start with the `Base DN`, which is
the *distinguished name* of the node in the LDAP directory tree where the
search starts looking for users.
Access LDAP settings by navigating to the **Authentication & Authorization**
page in the UCP web UI. There are two sections for controlling LDAP searches
and servers.
- **LDAP user search configurations:** This is the section of the
**Authentication & Authorization** page where you specify search
parameters, like `Base DN`, `scope`, `filter`, the `username` attribute,
and the `full name` attribute. These searches are stored in a list, and
the ordering may be important, depending on your search configuration.
- **LDAP server:** This is the section where you specify the URL of an LDAP
server, TLS configuration, and credentials for doing the search requests.
Also, you provide a domain for all servers but the first one. The first
server is considered the default domain server. Any others are associated
with the domain that you specify in the page.
Here's what happens when UCP synchronizes with LDAP:
1. UCP creates a set of search results by iterating over each of the user
search configs, in the order that you specify.
2. UCP chooses an LDAP server from the list of domain servers by considering the
`Base DN` from the user search config and selecting the domain server that
has the longest domain suffix match.
3. If no domain server has a domain suffix that matches the `Base DN` from the
search config, UCP uses the default domain server.
4. UCP combines the search results into a list of users and creates UCP
accounts for them. If the **Just-In-Time User Provisioning** option is set,
user accounts are created only when users first log in.
The domain server to use is determined by the `Base DN` in each search config.
UCP doesn't perform search requests against each of the domain servers, only
the one which has the longest matching domain suffix, or the default if there's
no match.
Here's an example. Let's say we have three LDAP domain servers:
| Domain | Server URL |
| -------------------------------------- | ---------------------------- |
| *default* | ldaps://ldap.example.com |
| `dc=subsidiary1,dc=com` | ldaps://ldap.subsidiary1.com |
| `dc=subsidiary2,dc=subsidiary1,dc=com` | ldaps://ldap.subsidiary2.com |
Here are three user search configs with the following `Base DNs`:
- baseDN=`ou=people,dc=subsidiary1,dc=com`
For this search config, `dc=subsidiary1,dc=com` is the only server with a
domain which is a suffix, so UCP uses the server `ldaps://ldap.subsidiary1.com`
for the search request.
- baseDN=`ou=product,dc=subsidiary2,dc=subsidiary1,dc=com`
For this search config, two of the domain servers have a domain which is a
suffix of this base DN, but `dc=subsidiary2,dc=subsidiary1,dc=com` is the
longer of the two, so UCP uses the server `ldaps://ldap.subsidiary2.com`
for the search request.
- baseDN=`ou=eng,dc=example,dc=com`
For this search config, there is no server with a domain specified which is
a suffix of this base DN, so UCP uses the default server, `ldaps://ldap.example.com`,
for the search request.
If there are `username` collisions for the search results between domains, UCP
uses only the first search result, so the ordering of the user search configs
may be important. For example, if both the first and third user search configs
result in a record with the username `jane.doe`, the first has higher
precedence and the later result is ignored. For this reason, it's important to choose
a `username` attribute that's unique for your users across all domains.
Because names may collide, it's a good idea to use something unique to the
subsidiary, like the email address for each person. Users can log in with the
email address, for example, `jane.doe@subsidiary1.com`.
## Configure the LDAP integration
To configure UCP to create and authenticate users by using an LDAP directory,
go to the UCP web UI, navigate to the **Admin Settings** page and click
**Authentication & Authorization** to select the method used to create and
authenticate users.
![](../../../images/authentication-authorization.png)
In the **LDAP Enabled** section, click **Yes**. The LDAP settings appear.
Now configure your LDAP directory integration.
## Default role for all private collections
Use this setting to change the default permissions of new users.
Click the dropdown to select the permission level that UCP assigns by default
to the private collections of new users. For example, if you change the value
to `View Only`, all users who log in for the first time after the setting is
changed have `View Only` access to their private collections, but permissions
remain unchanged for all existing users.
[Learn more about permission levels](../../../access-control/permission-levels.md).
## LDAP enabled
Click **Yes** to enable integrating UCP users and teams with LDAP servers.
## LDAP server
| Field | Description |
| :-------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| LDAP server URL | The URL where the LDAP server can be reached. |
| Reader DN | The distinguished name of the LDAP account used for searching entries in the LDAP server. As a best practice, this should be an LDAP read-only user. |
| Reader password | The password of the account used for searching entries in the LDAP server. |
| Use Start TLS | Whether to authenticate/encrypt the connection after connecting to the LDAP server over TCP. If you set the LDAP Server URL field with `ldaps://`, this field is ignored. |
| Skip TLS verification | Whether to verify the LDAP server certificate when using TLS. The connection is still encrypted but vulnerable to man-in-the-middle attacks. |
| No simple pagination | If your LDAP server doesn't support pagination. |
| Just-In-Time User Provisioning | Whether to create user accounts only when users log in for the first time. The default value of `true` is recommended. If you upgraded from UCP 2.0.x, the default is `false`. |
![](../../../images/ldap-integration-1.png){: .with-border}
Click **Confirm** to add your LDAP domain.
To integrate with more LDAP servers, click **Add LDAP Domain**.
## LDAP user search configurations
| Field | Description |
| :--------------------------------------- | :---------- |
| Base DN | The distinguished name of the node in the directory tree where the search should start looking for users. |
| Username attribute | The LDAP attribute to use as username on UCP. Only user entries with a valid username will be created. A valid username is no longer than 100 characters and does not contain any unprintable characters, whitespace characters, or any of the following characters: `/` `\` `[` `]` `:` `;` `\|` `=` `,` `+` `*` `?` `<` `>` `'` `"`. |
| Full name attribute | The LDAP attribute to use as the user's full name for display purposes. If left empty, UCP will not create new users with a full name value. |
| Filter | The LDAP search filter used to find users. If you leave this field empty, all directory entries in the search scope with valid username attributes are created as users. |
| Search subtree instead of just one level | Whether to perform the LDAP search on a single level of the LDAP tree, or search through the full LDAP tree starting at the Base DN. |
| Select Group Members | Whether to further filter users by selecting those who are also members of a specific group on the directory server. This feature is helpful if the LDAP server does not support `memberOf` search filters. |
| Iterate through group members | If `Select Group Members` is selected, this option searches for users by first iterating over the target group's membership, making a separate LDAP query for each member, as opposed to first querying for all users which match the above search query and intersecting those with the set of group members. This option can be more efficient in situations where the number of members of the target group is significantly smaller than the number of users which would match the above search filter, or if your directory server does not support simple pagination of search results. |
| Group DN | If `Select Group Members` is selected, this specifies the distinguished name of the group from which to select users. |
| Group Member Attribute | If `Select Group Members` is selected, the value of this group attribute corresponds to the distinguished names of the members of the group. |
![](../../../images/ldap-integration-2.png){: .with-border}
To configure more user search queries, click **Add LDAP User Search Configuration**
again. This is useful in cases where users may be found in multiple distinct
subtrees of your organization's directory. Any user entry which matches at
least one of the search configurations will be synced as a user.
## LDAP test login
| Field | Description |
| :------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| Username | An LDAP username for testing authentication to this application. This value corresponds with the **Username Attribute** specified in the **LDAP user search configurations** section. |
| Password | The user's password used to authenticate (BIND) to the directory server. |
Before you save the configuration changes, you should test that the integration
is correctly configured. You can do this by providing the credentials of an
LDAP user, and clicking the **Test** button.
## LDAP sync configuration
| Field | Description |
| :------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| Sync interval | The interval, in hours, to synchronize users between UCP and the LDAP server. When the synchronization job runs, new users found in the LDAP server are created in UCP with the default permission level. UCP users that don't exist in the LDAP server become inactive. |
| Enable sync of admin users | This option specifies that system admins should be synced directly with members of a group in your organization's LDAP directory. The admins will be synced to match the membership of the group. The configured recovery admin user will also remain a system admin. |
Once you've configured the LDAP integration, UCP synchronizes users based on
the interval you've defined starting at the top of the hour. When the
synchronization runs, UCP stores logs that can help you troubleshoot when
something goes wrong.
You can also manually synchronize users by clicking **Sync Now**.
## Revoke user access
When a user is removed from LDAP, the effect on the user's UCP account depends
on the **Just-In-Time User Provisioning** setting:
- **Just-In-Time User Provisioning** is `false`: Users deleted from LDAP become
inactive in UCP after the next LDAP synchronization runs.
- **Just-In-Time User Provisioning** is `true`: Users deleted from LDAP can't
authenticate, but their UCP accounts remain active. This means that they can
use their client bundles to run commands. To prevent this, deactivate their
UCP user accounts.
## Data synced from your organization's LDAP directory
UCP saves a minimum amount of user data required to operate. This includes
the value of the username and full name attributes that you have specified in
the configuration as well as the distinguished name of each synced user.
UCP does not store any additional data from the directory server.
## Sync teams
UCP enables syncing teams with a search query or group in your organization's
LDAP directory.
[Sync team members with your organization's LDAP directory](../../../access-control/create-and-manage-teams.md).
## Where to go next
- [Create and manage users](../../../access-control/create-and-manage-users.md)
- [Create and manage teams](../../../access-control/create-and-manage-teams.md)
- [UCP permission levels](../../../access-control/permission-levels.md)
- [Enable LDAP integration by using a configuration file](enable-ldap-config-file.md)

View File

@ -0,0 +1,143 @@
---
title: Integrate with Docker Trusted Registry
description: Integrate UCP with Docker Trusted Registry
keywords: trust, registry, integrate, UCP, DTR
---
Once you deploy Docker Trusted Registry (DTR), you can use it to store your
Docker images and deploy services to UCP using these images.
[Learn how to deploy DTR](/datacenter/dtr/2.3/guides/admin/install/index.md).
Docker UCP integrates out of the box with Docker Trusted Registry (DTR).
This means that you can deploy services from the UCP web UI, using Docker
images that are stored in DTR. You can also use a
[UCP client bundle](../access-ucp/cli-based-access.md) to do the same from the
CLI.
If you've configured DTR to use TLS certificates issued by a globally-trusted
certificate authority, you can skip this section, since all clients will automatically
trust the TLS certificates used by DTR.
If you're using the DTR default configurations or configured DTR to use
self-signed certificates, you need to configure all hosts that want to push
or pull Docker images from DTR. This includes:
* All UCP nodes
* Your local computer or any other that wants to push or pull Docker images
from DTR
If your host isn't configured to trust the DTR TLS certificates, you'll get an
error like:
```none
docker login dtr.example.org
x509: certificate signed by unknown authority
```
## 1. Configure your local computer
If you want to use your local computer to interact with DTR, you need to
configure it to trust the DTR TLS certificates. This depends on the operating
system:
* For macOS:
In your browser navigate to `https://<dtr-url>/ca` to download the TLS
certificate used by DTR. Then
[add this certificate to the macOS trust store](https://support.apple.com/kb/PH18677?locale=en_US).
* For Windows:
In your browser navigate to `https://<dtr-url>/ca` to download the TLS
certificate used by DTR. Then
[add this certificate to the Windows trust store](https://technet.microsoft.com/en-us/library/cc754841(v=ws.11).aspx).
* For Ubuntu:
```bash
# Download the DTR CA certificate
$ sudo curl -k https://<dtr-url>/ca -o /usr/local/share/ca-certificates/<dtr-domain-name>.crt
# Refresh the list of certificates to trust
$ sudo update-ca-certificates
# Restart the Docker daemon
$ sudo service docker restart
```
* For CentOS or RHEL:
```bash
# Download the DTR CA certificate
$ sudo curl -k https://<dtr-url>/ca -o /etc/pki/ca-trust/source/anchors/<dtr-domain-name>.crt
# Refresh the list of certificates to trust
$ sudo update-ca-trust
# Restart the Docker daemon
$ sudo /bin/systemctl restart docker.service
```
## 2. Test your local setup
To confirm that your computer is configured correctly, try to pull and push
images from your local Docker installation to DTR.
1. Create a test repository on DTR.
Navigate to the **DTR web UI**, and create a new **hello-world** repository
so that you can push and pull images. Set it as **private**, and save
the changes.
![](/datacenter/ucp/3.0/guides/images/dtr-integration-1.png)
2. Pull the `hello-world` image from Docker Store, re-tag it, and push it to the
DTR repository you created.
```none
# Pull hello-world from Docker Store
docker image pull hello-world:latest
# Re-tag it
docker tag hello-world:latest <dtr-domain>/<user>/hello-world:latest
# Log into DTR
docker login <dtr-domain>
# Push your image to DTR
docker image push <dtr-domain>/<user>/hello-world:latest
```
3. Validate that your image is now stored in DTR.
When the image is pushed successfully, you should see a result like:
```none
The push refers to a repository [dtr/username/hello-world]
5f70bf18a086: Pushed
33e7801ac047: Pushed
latest: digest: sha256:7d9e482c0cc9e68c7f07bf76e0aafcb1869d32446547909200db990e7bc5461a size: 1930
```
You can also check that the tag exists on the DTR web UI.
![](/datacenter/ucp/3.0/guides/images/dtr-integration-2.png)
## 3. Configure UCP Docker Engines
You also need to configure the Docker Engine on every UCP node to trust the
DTR TLS certificates. This allows you to deploy services to UCP using images
that are stored in DTR.
For each UCP node:
1. Log into the node as an administrator, using ssh.
2. Configure the system to trust the DTR TLS certificates, following the same
steps as you used to configure your local computer.
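A minimal sketch of repeating the Ubuntu steps above on every node over SSH, assuming a hypothetical `nodes.txt` file with one node address per line and an `admin` user on each host:

```bash
# Configure every UCP node to trust the DTR CA certificate (Ubuntu example)
while read -r node; do
  ssh -n "admin@${node}" "sudo curl -k https://<dtr-url>/ca -o /usr/local/share/ca-certificates/<dtr-domain-name>.crt && sudo update-ca-certificates && sudo service docker restart"
done < nodes.txt
```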
## Where to go next
* [Use your own TLS certificates](use-your-own-tls-certificates.md)

View File

@ -0,0 +1,220 @@
---
title: Join Windows worker nodes to a swarm
description: Join worker nodes that are running on Windows Server 2016 to a swarm managed by UCP.
keywords: UCP, swarm, Windows, cluster
---
UCP supports worker nodes that run on Windows Server 2016. Only worker nodes
are supported on Windows, and all manager nodes in the swarm must run on Linux.
Follow these steps to enable a worker node on Windows.
1. Install UCP on a Linux distribution.
2. Install Docker Enterprise Edition (*Docker EE*) on Windows Server 2016.
3. Configure the Windows node.
4. Join the Windows node to the swarm.
## Install UCP
Install UCP on a Linux distribution.
[Learn how to install UCP on production](../install/index.md).
UCP requires Docker EE version 17.06 or later.
## Install Docker EE on Windows Server 2016
[Install Docker EE](/docker-ee-for-windows/install/#using-a-script-to-install-docker-ee)
on a Windows Server 2016 instance to enable joining a swarm that's managed by
UCP.
## Configure the Windows node
Follow these steps to configure the docker daemon and the Windows environment.
1. Pull the Windows-specific image of `ucp-agent`, which is named `ucp-agent-win`.
2. Run the Windows worker setup script provided with `ucp-agent-win`.
3. Join the swarm with the token provided by the UCP web UI.
### Pull the Windows-specific images
On a manager node, run the following command to list the images that are required
on Windows nodes.
```bash
docker container run --rm {{ page.ucp_org }}/{{ page.ucp_repo }}:{{ page.ucp_version }} images --list --enable-windows
{{ page.ucp_org }}/ucp-agent-win:{{ page.ucp_version }}
{{ page.ucp_org }}/ucp-dsinfo-win:{{ page.ucp_version }}
```
On Windows Server 2016, in a PowerShell terminal running as Administrator,
log in to Docker Hub with the `docker login` command and pull the listed images.
```powershell
docker image pull {{ page.ucp_org }}/ucp-agent-win:{{ page.ucp_version }}
docker image pull {{ page.ucp_org }}/ucp-dsinfo-win:{{ page.ucp_version }}
```
### Run the Windows node setup script
You need to open ports 2376 and 12376, and create certificates
for the Docker daemon to communicate securely. Run this command:
```powershell
docker container run --rm {{ page.ucp_org }}/ucp-agent-win:{{ page.ucp_version }} windows-script | powershell -noprofile -noninteractive -command 'Invoke-Expression -Command $input'
```
> Docker daemon restart
>
> When you run `windows-script`, the Docker service is unavailable temporarily.
The Windows node is ready to join the swarm. Run the setup script on each
instance of Windows Server that will be a worker node.
### Compatibility with daemon.json
The script may be incompatible with installations that use a config file at
`C:\ProgramData\docker\config\daemon.json`. If you use such a file, make sure
that the daemon runs on port 2376 and that it uses certificates located in
`C:\ProgramData\docker\daemoncerts`. If certificates don't exist in this
directory, run `ucp-agent-win generate-certs`, as shown in Step 2 of the
[Set up certs for the dockerd service](#set-up-certs-for-the-dockerd-service)
procedure.
In the daemon.json file, set the `tlscacert`, `tlscert`, and `tlskey` options
to the corresponding files in `C:\ProgramData\docker\daemoncerts`:
```json
{
...
"debug": true,
"tls": true,
"tlscacert": "C:\ProgramData\docker\daemoncerts\ca.pem",
"tlscert": "C:\ProgramData\docker\daemoncerts\cert.pem",
"tlskey": "C:\ProgramData\docker\daemoncerts\key.pem",
"tlsverify": true,
...
}
```
## Join the Windows node to the swarm
Now you can join the UCP cluster by using the `docker swarm join` command that's
provided by the UCP web UI. [Learn to add nodes to your swarm](scale-your-cluster.md).
The command looks similar to the following.
```powershell
docker swarm join --token <token> <ucp-manager-ip>
```
Run the `docker swarm join` command on each instance of Windows Server that
will be a worker node.
## Configure a Windows worker node manually
The following sections describe how to run the commands in the setup script
manually to configure the `dockerd` service and the Windows environment.
The script opens ports in the firewall and sets up certificates for `dockerd`.
To see the script, you can run the `windows-script` command without piping
to the `Invoke-Expression` cmdlet.
```powershell
docker container run --rm {{ page.ucp_org }}/ucp-agent-win:{{ page.ucp_version }} windows-script
```
### Open ports in the Windows firewall
UCP and Docker EE require that ports 2376 and 12376 are open for inbound
TCP traffic.
In a PowerShell terminal running as Administrator, run these commands
to add rules to the Windows firewall.
```powershell
netsh advfirewall firewall add rule name="docker_local" dir=in action=allow protocol=TCP localport=2376
netsh advfirewall firewall add rule name="docker_proxy" dir=in action=allow protocol=TCP localport=12376
```
### Set up certs for the dockerd service
1. Create the directory `C:\ProgramData\docker\daemoncerts`.
2. In a PowerShell terminal running as Administrator, run the following command
to generate certificates.
```powershell
docker container run --rm -v C:\ProgramData\docker\daemoncerts:C:\certs {{ page.ucp_org }}/ucp-agent-win:{{ page.ucp_version }} generate-certs
```
3. To set up certificates, run the following commands to stop and unregister the
`dockerd` service, register the service with the certificates, and restart the service.
```powershell
Stop-Service docker
dockerd --unregister-service
dockerd -H npipe:// -H 0.0.0.0:2376 --tlsverify --tlscacert=C:\ProgramData\docker\daemoncerts\ca.pem --tlscert=C:\ProgramData\docker\daemoncerts\cert.pem --tlskey=C:\ProgramData\docker\daemoncerts\key.pem --register-service
Start-Service docker
```
The `dockerd` service and the Windows environment are now configured to join a UCP swarm.
> **Tip:** If the TLS certificates aren't set up correctly, the UCP web UI shows the
> following warning.
```
Node WIN-NOOQV2PJGTE is a Windows node that cannot connect to its local Docker daemon.
```
## Uninstall UCP from Windows Server
The following steps return the Docker Engine to its original configuration:
1. Unregister the docker service and register it again without the TLS
certificates:
```powershell
Stop-Service docker
dockerd --unregister-service
dockerd -H npipe:// --register-service
Start-Service docker
```
2. Remove the `certs` directory for the docker service:
```powershell
Remove-Item -Recurse C:\ProgramData\docker\daemoncerts
```
3. Remove the firewall rules:
```powershell
netsh advfirewall firewall delete rule name="docker_2376_in"
netsh advfirewall firewall delete rule name="docker_12376_in"
netsh advfirewall firewall delete rule name="docker_2377_in"
netsh advfirewall firewall delete rule name="docker_4789_in"
netsh advfirewall firewall delete rule name="docker_4789_out"
netsh advfirewall firewall delete rule name="docker_7946_in"
netsh advfirewall firewall delete rule name="docker_7946_out"
```
## Windows nodes limitations
Some features are not yet supported on Windows nodes:
* Networking
* The swarm mode routing mesh can't be used on Windows nodes. You can expose
a port for your service in the host where it is running, and use the HTTP
routing mesh to make your service accessible using a domain name.
* Encrypted networks are not supported. If you've upgraded from a previous
version, you'll also need to recreate the `ucp-hrm` network to make it
unencrypted.
* Secrets
* When using secrets with Windows services, Windows stores temporary secret
files on disk. You can use BitLocker on the volume containing the Docker
root directory to encrypt the secret data at rest.
* When creating a service which uses Windows containers, the options to
specify UID, GID, and mode are not supported for secrets. Secrets are
currently only accessible by administrators and users with system access
within the container.
* Mounts
* On Windows, Docker can't listen on a Unix socket. Use TCP or a named pipe
instead.

View File

@ -0,0 +1,33 @@
---
title: License your installation
description: Learn how to license your Docker Universal Control Plane installation.
keywords: Universal Control Plane, UCP, install, license
---
After installing Docker Universal Control Plane, you need to license your
installation. Here's how to do it.
## Download your license
Go to [Docker Store](https://www.docker.com/enterprise-edition) and
download your UCP license, or get a free trial license.
![](../../images/license-ucp-1.png){: .with-border}
## License your installation
Once you've downloaded the license file, you can apply it to your UCP
installation.
In the UCP web UI, log in with administrator credentials and
navigate to the **Admin Settings** page.
In the left pane, click **License** and click **Upload License**. The
license refreshes immediately, and you don't need to click **Save**.
![](../../images/license-ucp-2.png){: .with-border}
## Where to go next
* [Install UCP](../install/index.md)
* [Install UCP offline](../install/install-offline.md)

View File

@ -0,0 +1,29 @@
---
title: Restrict services to worker nodes
description: Learn how to configure Universal Control Plane to only allow running services in worker nodes.
keywords: ucp, configuration, worker
---
You can configure UCP to allow users to deploy and run services only in
worker nodes. This ensures all cluster management functionality stays
performant, and makes the cluster more secure.
If a user deploys a malicious service that can affect the node where it
is running, it won't be able to affect other nodes in the cluster, or
any cluster management functionality.
To restrict users from deploying to manager nodes, log in with administrator
credentials to the UCP web UI, navigate to the **Admin Settings**
page, and choose **Scheduler**.
![](../../images/restrict-services-to-worker-nodes-1.png){: .with-border}
You can then choose whether user services should be allowed to run on manager nodes.
Having a grant with the `Scheduler` role against the `/` collection takes
precedence over any other grants with `Node Schedule` on subcollections.
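If you manage UCP with a configuration file instead of the web UI, the same behavior is controlled by the `scheduling_configuration` table described in the UCP configuration file topic. A minimal sketch of the relevant settings, written to a standalone file for illustration:

```bash
# Illustrative fragment only; in practice, edit these keys in the exported
# ucp-config.toml rather than creating a separate file
cat > scheduling-example.toml <<'EOF'
[scheduling_configuration]
  enable_admin_ucp_scheduling = false
  enable_user_ucp_scheduling = false
EOF
```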
## Where to go next
* [Use domain names to access your services](use-domain-names-to-access-services.md)

View File

@ -0,0 +1,71 @@
---
title: Run only the images you trust
description: Configure a Docker UCP swarm to only allow running applications that use images you trust.
keywords: ucp, dtr, security, trust
---
With Docker Universal Control Plane you can require that applications only use
Docker images signed by UCP users you trust. When a user tries to deploy an
application to the cluster, UCP checks whether the application uses a Docker
image that isn't trusted, and won't continue with the deployment if that's the case.
![Enforce image signing](../../images/run-only-the-images-you-trust-1.svg)
By signing and verifying the Docker images, you ensure that the images being
used in your cluster are the ones you trust and haven't been altered, either in
the image registry or on their way from the image registry to your UCP swarm.
## Example workflow
Here's an example of a typical workflow:
1. A developer makes changes to a service and pushes their changes to a version
control system.
2. A CI system creates a build, runs tests, and pushes an image to DTR with the
new changes.
3. The quality engineering team pulls the image and runs more tests. If
everything looks good they sign and push the image.
4. The IT operations team deploys a service. If the image used for the service
was signed by the QA team, UCP deploys it. Otherwise UCP refuses to deploy.
## Configure UCP
To configure UCP to only allow running services that use Docker images you
trust, go to the UCP web UI, navigate to the **Admin Settings** page, and in
the left pane, click **Docker Content Trust**.
Select the **Run Only Signed Images** option to only allow deploying
applications if they use images you trust.
![UCP settings](../../images/run-only-the-images-you-trust-2.png){: .with-border}
With this setting, UCP allows deploying any image as long as the image has
been signed. It doesn't matter who signed the image.
To enforce that the image needs to be signed by specific teams, click the
dropdown and select those teams from the list.
> Team must be in docker-datacenter
>
> You need to select a team that's part of the `docker-datacenter` organization
> in order to use this feature.
{: .important}
![UCP settings](../../images/run-only-the-images-you-trust-3.png){: .with-border}
If you specify multiple teams, the image must be signed by a member of each
team, or by someone who is a member of all of those teams. These teams must
also be part of the `docker-datacenter` organization.
> Signing with teams
>
> Teams used for signing policy enforcement must be in the `docker-datacenter`
> organization.
Click **Save** for UCP to start enforcing the policy. From now on, existing
services will continue running and can be restarted if needed, but UCP will only
allow deploying new services that use a trusted image.
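As a minimal sketch of what this means for users, a member of one of the selected teams can sign an image while pushing it by enabling Docker Content Trust in their shell. The repository name is a placeholder, and this assumes the user's signing keys are already set up (see the link below):

```bash
# Enable Docker Content Trust so the push also signs the image
export DOCKER_CONTENT_TRUST=1

# Push (and sign) the image; UCP can then deploy services that use it
docker image push <dtr-domain>/<user>/hello-world:latest
```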
## Where to go next
* [Sign and push images to DTR](/datacenter/dtr/2.3/guides/user/manage-images/sign-images/index.md)

View File

@ -0,0 +1,157 @@
---
title: Scale your cluster
description: Learn how to scale a Docker Universal Control Plane cluster by adding and removing nodes.
keywords: UCP, cluster, scale
---
Docker UCP is designed for scaling horizontally as your applications grow in
size and usage. You can add or remove nodes from the UCP cluster to make it
scale to your needs.
![](../../images/scale-your-cluster-0.svg)
Since UCP leverages the clustering functionality provided by Docker Engine,
you use the [docker swarm join](/engine/swarm/swarm-tutorial/add-nodes.md)
command to add more nodes to your cluster. When joining new nodes, the UCP
services automatically start running on that node.
When joining a node to a cluster you can specify its role: manager or worker.
* **Manager nodes**
Manager nodes are responsible for swarm management functionality and
dispatching tasks to worker nodes. Having multiple manager nodes allows
your swarm to be highly-available and tolerate node failures.
Manager nodes also run all UCP components in a replicated way, so by adding
additional manager nodes, you're also making UCP highly available.
[Learn more about the UCP architecture.](../../architecture.md)
* **Worker nodes**
Worker nodes receive and execute your services and applications. Having
multiple worker nodes allows you to scale the computing capacity of your
cluster.
When deploying Docker Trusted Registry in your cluster, you deploy it to a
worker node.
## Join nodes to the cluster
To join nodes to the swarm, go to the UCP web UI and navigate to the **Nodes**
page.
![](../../images/scale-your-cluster-1.png){: .with-border}
Click **Add Node** to add a new node.
![](../../../../../images/try-ddc-3.png){: .with-border}
- Click **Manager** if you want to add the node as a manager.
- Check the **Use a custom listen address** option to specify the
IP address of the host that you'll be joining to the cluster.
- Check the **Use a custom advertise address** option to specify the
IP address that's advertised to all members of the swarm for API access.
Copy the displayed command, use ssh to log into the host that you want to
join to the cluster, and run the `docker swarm join` command on the host.
To add a Windows node, click **Windows** and follow the instructions in
[Join Windows worker nodes to a swarm](join-windows-worker-nodes.md).
After you run the join command in the node, the node is displayed in the UCP
web UI.
![](../../images/scale-your-cluster-2.png){: .with-border}
## Remove nodes from the cluster
1. If the target node is a manager, you will need to first demote the node into
a worker before proceeding with the removal:
* From the UCP web UI, navigate to the **Nodes** page. Select the node you
wish to remove and switch its role to **Worker**, wait until the operation
completes, and confirm that the node is no longer a manager.
* From the CLI, perform `docker node ls` and identify the nodeID or hostname
of the target node. Then, run `docker node demote <nodeID or hostname>`.
2. If the status of the worker node is `Ready`, you'll need to manually force
the node to leave the swarm. To do this, connect to the target node through
SSH and run `docker swarm leave --force` directly against the local docker
engine.
> Loss of quorum
>
> Do not perform this step if the node is still a manager, as
> this may cause loss of quorum.
3. Now that the status of the node is reported as `Down`, you may remove the
node:
* From the UCP web UI, browse to the **Nodes** page and select the node.
In the details pane, click **Actions** and select **Remove**.
Click **Confirm** when you're prompted.
* From the CLI, perform `docker node rm <nodeID or hostname>`.
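For reference, the same removal flow using only the CLI, assuming a hypothetical node named `node2`:

```bash
# 1. If node2 is a manager, demote it first (run on a manager node)
docker node demote node2

# 2. On node2 itself, force it to leave the swarm
docker swarm leave --force

# 3. Back on a manager, remove the node once its status is reported as Down
docker node rm node2
```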
## Pause and drain nodes
Once a node is part of the cluster, you can change its role, making a manager
node into a worker and vice versa. You can also configure the node availability
so that it is:
* Active: the node can receive and execute tasks.
* Paused: the node continues running existing tasks, but doesn't receive new ones.
* Drained: the node won't receive new tasks. Existing tasks are stopped and
replica tasks are launched in active nodes.
In the UCP web UI, browse to the **Nodes** page and select the node. In the details pane, click **Configure** to open the **Edit Node** page.
![](../../images/scale-your-cluster-3.png){: .with-border}
If you're load balancing user requests to UCP across multiple manager nodes,
don't forget to remove demoted nodes from your load-balancing pool.
## Scale your cluster from the CLI
You can also use the command line to do all of the above operations. To get the
join token, run the following command on a manager node:
```bash
$ docker swarm join-token worker
```
If you want to add a new manager node instead of a worker node, use
`docker swarm join-token manager` instead. If you want to use a custom listen
address, add the `--listen-addr` arg:
```bash
$ docker swarm join \
--token SWMTKN-1-2o5ra9t7022neymg4u15f3jjfh0qh3yof817nunoioxa9i7lsp-dkmt01ebwp2m0wce1u31h6lmj \
--listen-addr 234.234.234.234 \
192.168.99.100:2377
```
Once your node is added, you can see it by running `docker node ls` on a manager:
```bash
$ docker node ls
```
To change the node's availability, use:
```bash
$ docker node update --availability drain node2
```
You can set the availability to `active`, `pause`, or `drain`.
To remove the node, use:
```bash
$ docker node rm <node-hostname>
```
## Where to go next
* [Use your own TLS certificates](use-your-own-tls-certificates.md)
* [Set up high availability](set-up-high-availability.md)

View File

@ -0,0 +1,21 @@
---
title: Set the user's session timeout
description: Learn how to set the session timeout for users and other session properties.
keywords: UCP, authorization, authentication, security, session, timeout
---
Docker Universal Control Plane enables setting properties of user sessions,
like session timeout and number of concurrent sessions.
To configure UCP login sessions, go to the UCP web UI, navigate to the
**Admin Settings** page and click **Authentication & Authorization**.
![](../../images/authentication-authorization.png)
## Login session controls
| Field | Description |
| :---------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| Lifetime Hours | The initial lifetime of a login session, from the time UCP generates it. When this time expires, UCP invalidates the session, and the user must authenticate again to establish a new session. The default is 72 hours. |
| Renewal Threshold Hours | The time before session expiration when UCP extends an active session. UCP extends the session by the number of hours specified in **Lifetime Hours**. The threshold value can't be greater than **Lifetime Hours**. The default is 24 hours. To specify that sessions are extended with every use, set the threshold equal to the lifetime. To specify that sessions are never extended, set the threshold to zero. This may cause users to be logged out unexpectedly while using the UCP web UI. |
| Per User Limit | The maximum number of simultaneous logins for a user. If creating a new session exceeds this limit, UCP deletes the least recently used session. To disable the limit, set the value to zero. |

View File

@ -0,0 +1,41 @@
---
title: Set up high availability
description: Docker Universal Control Plane has support for high availability. Learn how to set up your installation to ensure it tolerates failures.
keywords: ucp, high availability, replica
---
Docker Universal Control Plane is designed for high availability (HA). You can
join multiple manager nodes to the swarm, so that if one manager node fails,
another can automatically take its place without impact to the swarm.
Having multiple manager nodes in your cluster allows you to:
* Handle manager node failures,
* Load-balance user requests across all manager nodes.
## Size your deployment
To make the swarm tolerate more failures, add additional manager nodes to
your swarm.
| Manager nodes | Failures tolerated |
|:-------------:|:------------------:|
| 1 | 0 |
| 3 | 1 |
| 5 | 2 |
For production-grade deployments, follow these rules of thumb:
* When a manager node fails, the number of failures tolerated by your swarm
decreases. Don't leave that node offline for too long.
* You should distribute your manager nodes across different availability
zones. This way your cluster can continue working even if an entire
availability zone goes down.
* Adding many manager nodes to the cluster might lead to performance
degradation, as changes to configurations need to be replicated across all
manager nodes. The maximum advisable is seven manager nodes.
## Where to go next
* [Scale your cluster](scale-your-cluster.md)
* [Use a load balancer](use-a-load-balancer.md)

View File

@ -0,0 +1,56 @@
---
title: Configure UCP logging
description: Learn how to configure Docker Universal Control Plane to store your logs on an external log system.
keywords: ucp, integrate, logs
---
You can configure UCP to send logs to a remote logging service:
1. Log in to UCP with an administrator account.
2. Navigate to the **Admin Settings** page.
3. Set the information about your logging server, and click
**Enable Remote Logging**.
![](../../images/configure-logs-1.png){: .with-border}
## Example: Setting up an ELK stack
One popular logging stack is composed of Elasticsearch, Logstash, and
Kibana. The following example demonstrates how to set up a deployment
that can be used for logging.
```none
docker volume create --name orca-elasticsearch-data
docker container run -d \
--name elasticsearch \
-v orca-elasticsearch-data:/usr/share/elasticsearch/data \
elasticsearch elasticsearch -Enetwork.host=0.0.0.0
docker container run -d \
-p 514:514 \
--name logstash \
--link elasticsearch:es \
logstash \
sh -c "logstash -e 'input { syslog { } } output { stdout { } elasticsearch { hosts => [ \"es\" ] } } filter { json { source => \"message\" } }'"
docker container run -d \
--name kibana \
--link elasticsearch:elasticsearch \
-p 5601:5601 \
kibana
```
Once you have these containers running, configure UCP to send logs to
the IP address of the Logstash container. You can then browse to port 5601 on
the system running Kibana to view log and event entries. When prompted, specify
the "time" field for indexing.
When deployed in a production environment, you should secure your ELK
stack. UCP does not do this itself, but there are a number of third-party
options that can accomplish this, like the Shield plug-in for Kibana.
## Where to go next
* [Restrict services to worker nodes](restrict-services-to-worker-nodes.md)

View File

@ -0,0 +1,234 @@
---
title: UCP configuration file
description: Configure UCP deployments.
keywords: docker enterprise edition, ucp, universal control plane, swarm, configuration, deploy
---
Override the default UCP settings by providing a configuration file when you
create UCP manager nodes. This is useful for scripted installations.
## UCP configuration file
The `ucp-agent` service uses a configuration file to set up UCP.
You can use the configuration file in different ways to set up your UCP
swarms.
- Install one swarm and use the UCP web UI to configure it as desired,
extract the configuration file, edit it as needed, and use the edited
config file to set up multiple other swarms.
- Install a UCP swarm, extract and edit the configuration file, and use the
CLI to apply the new configuration to the same swarm.
- Run the `example-config` command, edit the example configuration file, and
apply the file at install time or after installation.
Specify your configuration settings in a TOML file.
[Learn about Tom's Obvious, Minimal Language](https://github.com/toml-lang/toml/blob/master/README.md).
The configuration has a versioned naming convention, with a trailing decimal
number that increases with each version, like `com.docker.ucp.config-1`. The
`ucp-agent` service maps the configuration to the file at `/etc/ucp/ucp.toml`.
## Inspect and modify existing configuration
Use the `docker config inspect` command to view the current settings and emit
them to a file.
```bash
{% raw %}
# CURRENT_CONFIG_NAME will be the name of the currently active UCP configuration
CURRENT_CONFIG_NAME=$(docker service inspect ucp-agent --format '{{range .Spec.TaskTemplate.ContainerSpec.Configs}}{{if eq "/etc/ucp/ucp.toml" .File.Name}}{{.ConfigName}}{{end}}{{end}}')
# Collect the current config with `docker config inspect`
docker config inspect --format '{{ printf "%s" .Spec.Data }}' $CURRENT_CONFIG_NAME > ucp-config.toml
{% endraw %}
```
Edit the file, then use the `docker config create` and `docker service update`
commands to create and apply the configuration from the file.
```bash
# NEXT_CONFIG_NAME will be the name of the new UCP configuration
NEXT_CONFIG_NAME=${CURRENT_CONFIG_NAME%%-*}-$((${CURRENT_CONFIG_NAME##*-}+1))
# Create the new swarm configuration from the file ucp-config.toml
docker config create $NEXT_CONFIG_NAME ucp-config.toml
# Use the `docker service update` command to remove the current configuration
# and apply the new configuration to the `ucp-agent` service.
docker service update --config-rm $CURRENT_CONFIG_NAME --config-add source=$NEXT_CONFIG_NAME,target=/etc/ucp/ucp.toml ucp-agent
```
## Example configuration file
You can see an example TOML config file that shows how to configure UCP
settings. From the command line, run UCP with the `example-config` option:
```bash
$ docker container run --rm {{ page.ucp_org }}/{{ page.ucp_repo }}:{{ page.ucp_version }} example-config
```
## Configuration file and web UI
Admin users can open the UCP web UI, navigate to **Admin Settings**,
and change UCP settings there. In most cases, the web UI is a front end
for modifying this config file.
## auth table
| Parameter | Required | Description |
| ----------------------- | -------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `backend` | no | The name of the authorization backend to use, either `managed` or `ldap`. The default is `managed`. |
| `default_new_user_role` | no | The role that new users get for their private collections. Values are `admin`, `viewonly`, `scheduler`, `restrictedcontrol`, or `fullcontrol`. The default is `restrictedcontrol`. |
## auth.sessions
| Parameter | Required | Description |
| --------------------------- | -------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `lifetime_minutes` | no | The initial session lifetime, in minutes. The default is 4320, which is 72 hours. |
| `renewal_threshold_minutes` | no | The length of time, in minutes, before the expiration of a session where, if used, a session will be extended by the current configured lifetime from then. A zero value disables session extension. The default is 1440, which is 24 hours. |
| `per_user_limit` | no | The maximum number of sessions that a user can have active simultaneously. If creating a new session would put a user over this limit, the least recently used session will be deleted. A value of zero disables limiting the number of sessions that users may have. The default is 5. |
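For example, here is a minimal sketch of how these two tables can look in a configuration file, using the documented defaults and written to a standalone file for illustration:

```bash
# Illustrative fragment only; in practice, edit these tables in the
# ucp-config.toml file you exported with `docker config inspect`
cat > auth-example.toml <<'EOF'
[auth]
  backend = "managed"
  default_new_user_role = "restrictedcontrol"

[auth.sessions]
  lifetime_minutes = 4320
  renewal_threshold_minutes = 1440
  per_user_limit = 5
EOF
```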
## auth.ldap (optional)
| Parameter | Required | Description |
| ----------------------- | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `server_url` | no | The URL of the LDAP server. |
| `no_simple_pagination` | no | Set to `true` if the LDAP server doesn't support the Simple Paged Results control extension (RFC 2696). The default is `false`. |
| `start_tls` | no | Set to `true` to use StartTLS to secure the connection to the server, ignored if the server URL scheme is 'ldaps://'. The default is `false`. |
| `root_certs` | no | A root certificate PEM bundle to use when establishing a TLS connection to the server. |
| `tls_skip_verify` | no | Set to `true` to skip verifying the server's certificate when establishing a TLS connection, which isn't recommended unless testing on a secure network. The default is `false`. |
| `reader_dn` | no | The distinguished name the system uses to bind to the LDAP server when performing searches. |
| `reader_password` | no | The password that the system uses to bind to the LDAP server when performing searches. |
| `sync_schedule` | no | The scheduled time for automatic LDAP sync jobs, in CRON format. Needs to have the seconds field set to zero. The default is @hourly if empty or omitted. |
| `jit_user_provisioning` | no | Whether to only create user accounts upon first login (recommended). The default is `true`. |
## auth.ldap.additional_domains array (optional)
A list of additional LDAP domains and corresponding server configs from which
to sync users and team members. This is an advanced feature which most
environments don't need.
| Parameter | Required | Description |
| ---------------------- | -------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `domain` | no | The root domain component of this server, for example, `dc=example,dc=com`. A longest-suffix match of the base DN for LDAP searches is used to select which LDAP server to use for search requests. If no matching domain is found, the default LDAP server config is used. |
| `server_url` | no | The URL of the LDAP server for the current additional domain. |
| `no_simple_pagination` | no | Set to true if the LDAP server for this additional domain does not support the Simple Paged Results control extension (RFC 2696). The default is `false`. |
| `start_tls` | no | Whether to use StartTLS to secure the connection to the server, ignored if the server URL scheme is 'ldaps://'. |
| `root_certs` | no | A root certificate PEM bundle to use when establishing a TLS connection to the server for the current additional domain. |
| `tls_skip_verify` | no | Whether to skip verifying the additional domain server's certificate when establishing a TLS connection, not recommended unless testing on a secure network. The default is `true`. |
| `reader_dn` | no | The distinguished name the system uses to bind to the LDAP server when performing searches under the additional domain. |
| `reader_password` | no | The password that the system uses to bind to the LDAP server when performing searches under the additional domain. |
## auth.ldap.user_search_configs array (optional)
Settings for syncing users.
| Parameter | Required | Description |
| ------------------------- | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `base_dn` | no | The distinguished name of the element from which the LDAP server will search for users, for example, `ou=people,dc=example,dc=com`. |
| `scope_subtree` | no | Set to `true` to search for users in the entire subtree of the base DN. Set to `false` to search only one level under the base DN. The default is `false`. |
| `username_attr` | no | The name of the attribute of the LDAP user element which should be selected as the username. The default is `uid`. |
| `full_name_attr` | no | The name of the attribute of the LDAP user element which should be selected as the full name of the user. The default is `cn`. |
| `filter` | no | The LDAP search filter used to select user elements, for example, `(&(objectClass=person)(objectClass=user))`. May be left blank. |
| `match_group` | no | Whether to additionally filter users to those who are direct members of a group. The default is `true`. |
| `match_group_dn` | no | The distinguished name of the LDAP group, for example, `cn=ddc-users,ou=groups,dc=example,dc=com`. Required if `match_group` is `true`. |
| `match_group_member_attr` | no | The name of the LDAP group entry attribute which corresponds to distinguished names of members. Required if `match_group` is `true`. The default is `member`. |
| `match_group_iterate` | no | Set to `true` to get all of the user attributes by iterating through the group members and performing a lookup for each one separately. Use this instead of searching users first, then applying the group selection filter. Ignored if `match_group` is `false`. The default is `false`. |
## auth.ldap.admin_sync_opts (optional)
Settings for syncing system administrator users.
| Parameter | Required | Description |
| ---------------------- | -------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `enable_sync` | no | Set to `true` to enable syncing admins. If `false`, all other fields in this table are ignored. The default is `true`. |
| `select_group_members` | no | Set to `true` to sync using a group DN and member attribute selection. Set to `false` to use a search filter. The default is `true`. |
| `group_dn` | no | The distinguished name of the LDAP group, for example, `cn=ddc-admins,ou=groups,dc=example,dc=com`. Required if `select_group_members` is `true`. |
| `group_member_attr` | no | The name of the LDAP group entry attribute which corresponds to distinguished names of members. Required if `select_group_members` is `true`. The default is `member`. |
| `search_base_dn` | no | The distinguished name of the element from which the LDAP server will search for users, for example, `ou=people,dc=example,dc=com`. Required if `select_group_members` is `false`. |
| `search_scope_subtree` | no | Set to `true` to search for users in the entire subtree of the base DN. Set to `false` to search only one level under the base DN. The default is `false`. Required if `select_group_members` is `false`. |
| `search_filter` | no | The LDAP search filter used to select users if `select_group_members` is `false`, for example, `(memberOf=cn=ddc-admins,ou=groups,dc=example,dc=com)`. May be left blank. |
## registries array (required)
An array of tables that specifies the DTR instances that the current UCP instance manages.
| Parameter | Required | Description |
| -------------- | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `host_address` | yes | The address for connecting to the DTR instance tied to this UCP cluster. |
| `service_id` | yes | The DTR instance's OpenID Connect Client ID, as registered with the Docker authentication provider. |
| `ca_bundle` | no | If you're using a custom certificate authority (CA), the `ca_bundle` setting specifies the root CA bundle for the DTR instance. The value is a string with the contents of a `ca.pem` file. |
## scheduling_configuration table (optional)
Specifies the users who can schedule containers on manager nodes.
| Parameter | Required | Description |
| ----------------------------- | -------- | -------------------------------------------------------------------------------------------------- |
| `enable_admin_ucp_scheduling` | no | Set to `true` to allow admins to schedule containers on manager nodes. The default is `false`. |
| `enable_user_ucp_scheduling` | no | Set to `true` to allow non-admin users to schedule containers on managers. The default is `false`. |
## tracking_configuration table (optional)
Specifies the analytics data that UCP collects.
| Parameter | Required | Description |
| -------------------- | -------- | --------------------------------------------------------------------------------------- |
| `disable_usageinfo` | no | Set to `true` to disable analytics of usage information. The default is `false`. |
| `disable_tracking` | no | Set to `true` to disable analytics of API call information. The default is `false`. |
| `anonymize_tracking` | no | Anonymize analytic data. Set to `true` to hide your license ID. The default is `false`. |
## trust_configuration table (optional)
Specifies whether DTR images require signing.
| Parameter | Required | Description |
| ------------------------ | -------- | ----------------------------------------------------------------------------------- |
| `require_content_trust` | no | Set to `true` to require images be signed by content trust. The default is `false`. |
| `require_signature_from` | no | A string array that specifies users or teams which must sign images. |
## log_configuration table (optional)
Configures the logging options for UCP components.
| Parameter | Required | Description |
| ---------- | -------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `protocol` | no | The protocol to use for remote logging. Values are `tcp` and `udp`. The default is `tcp`. |
| `host` | no | Specifies a remote syslog server to send UCP controller logs to. If omitted, controller logs are sent through the default docker daemon logging driver from the `ucp-controller` container. |
| `level` | no | The logging level for UCP components. Values are [syslog priority levels](https://linux.die.net/man/5/syslog.conf): `debug`, `info`, `notice`, `warning`, `err`, `crit`, `alert`, and `emerg`. |
## license_configuration table (optional)
Specifies whether your UCP license is automatically renewed.
| Parameter | Required | Description |
| -------------- | -------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `auto_refresh` | no | Set to `true` to enable attempted automatic license renewal when the license nears expiration. If disabled, you must manually upload renewed license after expiration. The default is `true`. |
## cluster_config table (required)
Configures the swarm cluster that the current UCP instance manages.
The `dns`, `dns_opt`, and `dns_search` settings configure the DNS settings for UCP
components. Assigning these values overrides the settings in a container's
`/etc/resolv.conf` file. For more info, see
[Configure container DNS](/engine/userguide/networking/default_network/configure-dns/).
| Parameter | Required | Description |
| ----------------------------- | -------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
| `controller_port` | yes | Configures the port that the `ucp-controller` listens on. The default is `443`. |
| `swarm_port` | yes | Configures the port that the `ucp-swarm-manager` listens on. The default is `2376`. |
| `swarm_strategy` | no | Configures placement strategy for container scheduling. This doesn't affect swarm-mode services. Values are `spread`, `binpack`, and `random`. |
| `dns` | yes | Array of IP addresses to add as nameservers. |
| `dns_opt` | yes | Array of options used by DNS resolvers. |
| `dns_search` | yes | Array of domain names to search when a bare unqualified hostname is used inside of a container. |
| `profiling_enabled` | no | Set to `true` to enable specialized debugging endpoints for profiling UCP performance. The default is `false`. |
| `kv_timeout` | no | Sets the key-value store timeout setting, in milliseconds. The default is `5000`. |
| `kv_snapshot_count` | no | Sets the key-value store snapshot count setting. The default is `20000`. |
| `external_service_lb` | no | Specifies an optional external load balancer for default links to services with exposed ports in the web UI. |
| `metrics_retention_time` | no | Adjusts the metrics retention time. |
| `metrics_scrape_interval` | no | Sets the interval for how frequently managers gather metrics from nodes in the cluster. |
| `metrics_disk_usage_interval` | no | Sets the interval for how frequently storage metrics are gathered. This operation can be expensive when large volumes are present. |

View File

@ -0,0 +1,215 @@
---
title: Use a load balancer
description: Learn how to set up a load balancer to access the UCP web UI using a hostname.
keywords: UCP, high-availability, load balancer
---
Once you've joined multiple manager nodes for high-availability, you can
configure your own load balancer to balance user requests across all
manager nodes.
![](../../images/use-a-load-balancer-1.svg)
This allows users to access UCP using a centralized domain name. If
a manager node goes down, the load balancer can detect that and stop forwarding
requests to that node, so that the failure goes unnoticed by users.
## Load-balancing on UCP
Since Docker UCP uses mutual TLS, make sure you configure your load balancer to:
* Load-balance TCP traffic on port 443,
* Not terminate HTTPS connections,
* Use the `/_ping` endpoint on each manager node, to check if the node
is healthy and if it should remain on the load balancing pool or not.
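For example, a manual check against a single manager can be as simple as the following sketch, where `<UCP_MANAGER_IP>` is a placeholder:

```bash
# A healthy manager answers on the /_ping endpoint; -k skips certificate
# verification, which you may not need if your CA is already trusted
curl -k https://<UCP_MANAGER_IP>/_ping
```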
## Load balancing UCP and DTR
By default, both UCP and DTR use port 443. If you plan on deploying UCP and DTR,
your load balancer needs to distinguish traffic between the two by IP address
or port number.
* If you want to configure your load balancer to listen on port 443:
* Use one load balancer for UCP, and another for DTR,
* Use the same load balancer with multiple virtual IPs.
* Configure your load balancer to expose UCP or DTR on a port other than 443.
## Configuration examples
Use the following examples to configure your load balancer for UCP.
<ul class="nav nav-tabs">
<li class="active"><a data-toggle="tab" data-target="#nginx" data-group="nginx">NGINX</a></li>
<li><a data-toggle="tab" data-target="#haproxy" data-group="haproxy">HAProxy</a></li>
<li><a data-toggle="tab" data-target="#aws">AWS LB</a></li>
</ul>
<div class="tab-content">
<div id="nginx" class="tab-pane fade in active" markdown="1">
```conf
user nginx;
worker_processes 1;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
stream {
upstream ucp_443 {
server <UCP_MANAGER_1_IP>:443 max_fails=2 fail_timeout=30s;
server <UCP_MANAGER_2_IP>:443 max_fails=2 fail_timeout=30s;
server <UCP_MANAGER_N_IP>:443 max_fails=2 fail_timeout=30s;
}
server {
listen 443;
proxy_pass ucp_443;
}
}
```
</div>
<div id="haproxy" class="tab-pane fade" markdown="1">
```conf
global
log /dev/log local0
log /dev/log local1 notice
defaults
mode tcp
option dontlognull
timeout connect 5000
timeout client 50000
timeout server 50000
### frontends
# Optional HAProxy Stats Page accessible at http://<host-ip>:8181/haproxy?stats
frontend ucp_stats
mode http
bind 0.0.0.0:8181
default_backend ucp_stats
frontend ucp_443
mode tcp
bind 0.0.0.0:443
default_backend ucp_upstream_servers_443
### backends
backend ucp_stats
mode http
option httplog
stats enable
stats admin if TRUE
stats refresh 5m
backend ucp_upstream_servers_443
mode tcp
option httpchk GET /_ping HTTP/1.1\r\nHost:\ <UCP_FQDN>
server node01 <UCP_MANAGER_1_IP>:443 weight 100 check check-ssl verify none
server node02 <UCP_MANAGER_2_IP>:443 weight 100 check check-ssl verify none
server node03 <UCP_MANAGER_N_IP>:443 weight 100 check check-ssl verify none
```
</div>
<div id="aws" class="tab-pane fade" markdown="1">
```json
{
"Subnets": [
"subnet-XXXXXXXX",
"subnet-YYYYYYYY",
"subnet-ZZZZZZZZ"
],
"CanonicalHostedZoneNameID": "XXXXXXXXXXX",
"CanonicalHostedZoneName": "XXXXXXXXX.us-west-XXX.elb.amazonaws.com",
"ListenerDescriptions": [
{
"Listener": {
"InstancePort": 443,
"LoadBalancerPort": 443,
"Protocol": "TCP",
"InstanceProtocol": "TCP"
},
"PolicyNames": []
}
],
"HealthCheck": {
"HealthyThreshold": 2,
"Interval": 10,
"Target": "HTTPS:443/_ping",
"Timeout": 2,
"UnhealthyThreshold": 4
},
"VPCId": "vpc-XXXXXX",
"BackendServerDescriptions": [],
"Instances": [
{
"InstanceId": "i-XXXXXXXXX"
},
{
"InstanceId": "i-XXXXXXXXX"
},
{
"InstanceId": "i-XXXXXXXXX"
}
],
"DNSName": "XXXXXXXXXXXX.us-west-2.elb.amazonaws.com",
"SecurityGroups": [
"sg-XXXXXXXXX"
],
"Policies": {
"LBCookieStickinessPolicies": [],
"AppCookieStickinessPolicies": [],
"OtherPolicies": []
},
"LoadBalancerName": "ELB-UCP",
"CreatedTime": "2017-02-13T21:40:15.400Z",
"AvailabilityZones": [
"us-west-2c",
"us-west-2a",
"us-west-2b"
],
"Scheme": "internet-facing",
"SourceSecurityGroup": {
"OwnerAlias": "XXXXXXXXXXXX",
"GroupName": "XXXXXXXXXXXX"
}
}
```
</div>
</div>
You can deploy your load balancer using:
<ul class="nav nav-tabs">
<li class="active"><a data-toggle="tab" data-target="#nginx-2" data-group="nginx">NGINX</a></li>
<li><a data-toggle="tab" data-target="#haproxy-2" data-group="haproxy">HAProxy</a></li>
</ul>
<div class="tab-content">
<div id="nginx-2" class="tab-pane fade in active" markdown="1">
```bash
# Create the nginx.conf file, then
# deploy the load balancer
docker run --detach \
--name ucp-lb \
--restart=unless-stopped \
--publish 443:443 \
--volume ${PWD}/nginx.conf:/etc/nginx/nginx.conf:ro \
nginx:stable-alpine
```
</div>
<div id="haproxy-2" class="tab-pane fade" markdown="1">
```bash
# Create the haproxy.cfg file, then
# deploy the load balancer
docker run --detach \
--name ucp-lb \
--publish 443:443 \
--publish 8181:8181 \
--restart=unless-stopped \
--volume ${PWD}/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro \
haproxy:1.7-alpine haproxy -d -f /usr/local/etc/haproxy/haproxy.cfg
```
</div>
</div>
## Where to go next
* [Add labels to cluster nodes](add-labels-to-cluster-nodes.md)

View File

@ -0,0 +1,82 @@
---
title: Enable using domain names to access services
description: Docker Universal Control Plane has an HTTP routing mesh that allows you to make your services accessible through a domain name.
keywords: ucp, services, http, https, dns, routing
---
Docker has a transport-layer load balancer, also known as an L4 load balancer.
This allows you to access your services independently of the node where they are
running.
![swarm routing mesh](../../images/use-domain-names-1.svg)
In this example, the WordPress service is being served on port 8000.
Users can access WordPress using the IP address of any node in the swarm
and port 8000. If WordPress is not running on that node, the
request is redirected to a node that is.
UCP extends this and provides an HTTP routing mesh for application-layer
load balancing. This allows you to access services with HTTP and HTTPS
endpoints using a domain name instead of an IP.
![http routing mesh](../../images/use-domain-names-2.svg)
In this example, the WordPress service listens on port 8000 and is attached to
the `ucp-hrm` network. There's also a DNS entry mapping `wordpress.example.org`
to the IP addresses of the UCP nodes.
When users access `wordpress.example.org:8000`, the HTTP routing mesh routes
the request to the service running WordPress in a way that is transparent to
the user.
## Enable the HTTP routing mesh
To enable the HTTP routing mesh, log in as an administrator, go to the
UCP web UI, navigate to the **Admin Settings** page, and click the
**Routing Mesh** option. Check the **Enable routing mesh** option.
![http routing mesh](../../images/use-domain-names-3.png){: .with-border}
By default, the HTTP routing mesh service listens on port 80 for HTTP and port
8443 for HTTPS. Change the ports if you already have services that are using
them.
## Under the hood
Once you enable the HTTP routing mesh, UCP deploys:
| Name | What | Description |
|:----------|:--------|:------------------------------------------------------------------------------|
| `ucp-hrm` | Service | Receive HTTP and HTTPS requests and send them to the right service |
| `ucp-hrm` | Network | The network used to communicate with the services using the HTTP routing mesh |
You then deploy a service that exposes a port, attach that service to the
`ucp-hrm` network, and create a DNS entry to map a domain name to the IP
address of the UCP nodes.
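For example, the WordPress service described above could be deployed from the CLI roughly as follows. This is a sketch: the routing mesh label syntax shown here is an assumption, and the image, hostname, and ports are placeholders.

```bash
# Deploy a service attached to the ucp-hrm network; the label (syntax assumed)
# tells the routing mesh which external hostname maps to which internal port
docker service create \
  --name wordpress \
  --network ucp-hrm \
  --label com.docker.ucp.mesh.http.8000=external_route=http://wordpress.example.org,internal_port=8000 \
  wordpress:latest
```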
When a user tries to access an HTTP service from that domain name:
1. The DNS resolution will point them to the IP of one of the UCP nodes
2. The HTTP routing mesh looks at the Hostname header in the HTTP request
3. If there's a service that maps to that hostname, the request is routed to the
port where the service is listening
4. If not, the user receives an `HTTP 503` (service unavailable) error.
For services exposing HTTPS, things are similar. The HTTP routing mesh doesn't
terminate the TLS connection; instead, it leverages an extension to TLS called
Server Name Indication, which allows a client to announce, in cleartext, the
domain name it is trying to reach.
When receiving a connection in the HTTPS port, the routing mesh looks at the
Server Name Indication header and routes the request to the right service.
The service is responsible for terminating the HTTPS connection. Note that
the routing mesh uses the SSL session ID to make sure that a single SSL
session always goes to the same task for the service. This is done for
performance reasons so that the same SSL session can be maintained across
requests.
## Where to go next
- [Use your own TLS certificates](use-your-own-tls-certificates.md)
- [Run only the images you trust](run-only-the-images-you-trust.md)

View File

@ -0,0 +1,45 @@
---
title: Use a local node network in a swarm
description: Learn how to use a local node network, like MAC VLAN, in a UCP swarm.
keywords: ucp, network, macvlan
---
Docker Universal Control Plane can use your local networking drivers to
orchestrate your swarm. You can create a *config* network, with a driver like
MAC VLAN, and use it like any other named network in UCP. If the network is set
up as attachable, you can attach containers to it.
> Security
>
> Encrypting communication between containers on different nodes works only on
> overlay networks.
## Use UCP to create node-specific networks
Always use UCP to create node-specific networks. You can use the UCP web UI
or the CLI (with an admin bundle). If you create the networks without UCP,
the networks won't have the right access labels and won't be available in UCP.
## Create a MAC VLAN network
1. Log in as an administrator.
2. Navigate to **Networks** and click **Create Network**.
3. Name the network "macvlan".
4. In the **Driver** dropdown, select **Macvlan**.
5. In the **Macvlan Configure** section, select **Config Only** or **Config From**.
Create all of the config-only networks before you create the config-from
network.
- **Config Only**: Prefix the `config-only` network name with a node hostname
prefix, like `node1/my-cfg-network`, `node2/my-cfg-network`, *etc*. This is
necessary to ensure that the access labels are applied consistently to all of
the back-end config-only networks. UCP routes the config-only network creation
to the appropriate node based on the node hostname prefix. All config-only
networks with the same name must belong in the same collection, or UCP returns
an error. Leaving the access label empty puts the network in the admin's default
collection, which is `/` in a new UCP installation.
- **Config From**: Create the network from a Docker config. Don't set up an
access label for the config-from network. The labels of the network and its
collection placement are inherited from the related config-only networks.
6. Click **Create** to create the network.
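For reference, a rough CLI equivalent using an admin client bundle. This is a sketch: the subnet, parent interface, and network names are hypothetical, and the node-prefix handling follows the convention described above.

```bash
# Create a config-only network per node, prefixed with the node hostname so
# UCP routes each creation to the right node (values are hypothetical)
docker network create --config-only \
  --subnet 192.168.10.0/24 --gateway 192.168.10.1 \
  -o parent=eth0 node1/my-cfg-network

docker network create --config-only \
  --subnet 192.168.20.0/24 --gateway 192.168.20.1 \
  -o parent=eth0 node2/my-cfg-network

# Create the swarm-scoped macvlan network from the config-only networks
docker network create -d macvlan --scope swarm \
  --config-from my-cfg-network macvlan
```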

View File

@ -0,0 +1,149 @@
---
description: Set up and configure content trust and signing policy for use with a continuous integration system
keywords: ucp, trust, notary, security, continuous integration
title: Use trusted images for continuous integration
---
This document provides a minimal example of setting up Docker Content Trust (DCT) in
Universal Control Plane (UCP) for use with a Continuous Integration (CI) system. It
covers setting up the necessary accounts and trust delegations so that only images
built by your CI system can be deployed to your UCP-managed cluster.
## Set up UCP accounts and teams
The first step is to create a user account for your CI system. For the purposes of
this document we will assume you are using Jenkins as your CI system and will therefore
name the account "jenkins". As an admin user logged in to UCP, navigate to "User Management"
and select "Add User". Create a user with the name "jenkins" and set a strong password.
Next, create a team called "CI" and add the "jenkins" user to this team. All signing
policy is team based, so if we want only a single user to be able to sign images
destined to be deployed on the cluster, we must create a team for this one user.
## Set up the signing policy
While still logged in as an admin, navigate to "Admin Settings" and select the "Content Trust"
subsection. Select the checkbox to enable content trust and in the select box that appears,
select the "CI" team we have just created. Save the settings.
This policy requires that every image referenced in a `docker image pull`,
`docker container run`, or `docker service create` command be signed by a key
corresponding to a member of the "CI" team. In this case, the only member is the
"jenkins" user.
## Create keys for the Jenkins user
The signing policy implementation uses the certificates issued in user client bundles
to connect a signature to a user. Using an incognito browser window (or otherwise),
log in to the "jenkins" user account you created earlier. Download a client bundle for
this user. It is also recommended to change the description associated with the public
key stored in UCP so that you can later identify which key is being used for
signing.
Each time a user retrieves a new client bundle, a new keypair is generated. It is therefore
necessary to keep track of a specific bundle that a user chooses to designate as their signing bundle.
Once you have decompressed the client bundle, the only two files you need for the purposes
of signing are `cert.pem` and `key.pem`. These represent the public and private parts of
the user's signing identity respectively. We will load the `key.pem` file onto the Jenkins
servers, and use `cert.pem` to create delegations for the "jenkins" user in our
Trusted Collection.
## Prepare the Jenkins server
### Load `key.pem` on Jenkins
You will need to use the notary client to load keys onto your Jenkins server. Simply run
`notary -d /path/to/.docker/trust key import /path/to/key.pem`. You will be asked to set
a password to encrypt the key on disk. For automated signing, this password can be configured
into the environment under the variable name `DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE`. The `-d`
flag to the command specifies the path to the `trust` subdirectory within the server's `docker`
configuration directory. Typically this is found at `~/.docker/trust`.
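For example, assuming you extracted the client bundle to `~/ucp-bundle-jenkins` (a placeholder path), you could import the key and set the passphrase for unattended builds like this:
```
# Import the jenkins user's private key into the server's trust data
notary -d ~/.docker/trust key import ~/ucp-bundle-jenkins/key.pem
# Export the passphrase you chose, so automated builds can sign without prompting
export DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE='example-passphrase'
```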
### Enable content trust
There are two ways to enable content trust: globally, and per operation. To enable content
trust globally, set the environment variable `DOCKER_CONTENT_TRUST=1`. To enable on a per
operation basis, wherever you run `docker image push` in your Jenkins scripts, add the flag
`--disable-content-trust=false`. You may wish to use this second option if you only want
to sign some images.
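As a sketch, a shell step in your Jenkins job could enable trust in either of these ways (the image name is a placeholder):
```
# Enable content trust for every Docker command in this shell
export DOCKER_CONTENT_TRUST=1
docker image push my_dtr:4443/ci/my_image:latest

# Or enable it for a single push only
docker image push --disable-content-trust=false my_dtr:4443/ci/my_image:latest
```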
The Jenkins server is now prepared to sign images, but we need to create delegations referencing
the key to give it the necessary permissions.
## Initialize a repository
Any commands displayed in this section should _not_ be run from the Jenkins server. You
will most likely want to run them from your local system.
If this is a new repository, create it in Docker Trusted Registry (DTR) or Docker Hub,
depending on which you use to store your images, before proceeding further.
We will now initialize the trust data and create the delegation that provides the Jenkins
key with permissions to sign content. The following commands initialize the trust data and
rotate snapshotting responsibilities to the server. This is necessary to ensure human involvement
is not required to publish new content.
```
notary -s https://my_notary_server.com -d ~/.docker/trust init my_repository
notary -s https://my_notary_server.com -d ~/.docker/trust key rotate my_repository snapshot -r
notary -s https://my_notary_server.com -d ~/.docker/trust publish my_repository
```
The `-s` flag specifies the server hosting a notary service. If you are operating against
Docker Hub, this will be `https://notary.docker.io`. If you are operating against your own DTR
instance, this is the same hostname you use in image names when running docker commands,
preceded by the `https://` scheme. For example, if you run `docker image push my_dtr:4443/me/an_image`,
the value of the `-s` flag is `https://my_dtr:4443`.
If you are using DTR, the name of the repository should be identical to the full name you use
in a `docker image push` command. If, however, you use Docker Hub, the name you use in a
`docker image push` must be prefixed with `docker.io/` in the notary commands. For example,
if you run `docker image push me/alpine`, you would run `notary init docker.io/me/alpine`.
For brevity, we exclude the `-s` and `-d` flags from subsequent commands, but be aware you
will still need to provide them for the commands to work correctly.
Now that the repository is initialized, we need to create the delegations for Jenkins. Docker
Content Trust treats a delegation role called `targets/releases` specially. It considers this
delegation to contain the canonical list of published images for the repository. It is therefore
generally desirable to add all users to this delegation with the following command:
```
notary delegation add my_repository targets/releases --all-paths /path/to/cert.pem
```
This solves a number of prioritization problems that would result from needing to determine
which delegation should ultimately be trusted for a specific image. However, because it
is anticipated that any user will be able to sign the `targets/releases` role, it is not trusted
in determining if a signing policy has been met. Therefore it is also necessary to create a
delegation specifically for Jenkins:
```
notary delegation add my_repository targets/jenkins --all-paths /path/to/cert.pem
```
We will then publish both these updates (remember to add the correct `-s` and `-d` flags):
```
notary publish my_repository
```
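To confirm that both delegations exist, you can list them (again, include the appropriate `-s` and `-d` flags):
```
notary delegation list my_repository
```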
Informational (Advanced): If we included the `targets/releases` role in determining if a signing policy
had been met, we would run into the situation of images being opportunistically deployed when
an appropriate user signs. In the scenario we have described so far, only images signed by
the "CI" team (containing only the "jenkins" user) should be deployable. If a user "Moby" could
also sign images but was not part of the "CI" team, they might sign and publish a new `targets/releases`
that contained their image. UCP would refuse to deploy this image because it was not signed
by the "CI" team. However, the next time Jenkins published an image, it would update and sign
the `targets/releases` role as a whole, enabling "Moby" to deploy their image.
## Conclusion
With the Trusted Collection initialized, and delegations created, the Jenkins server will
now use the key we imported to sign any images we push to this repository.
Through either the Docker CLI, or the UCP browser interface, we will find that any images
that do not meet our signing policy cannot be used. The signing policy we set up requires
that the "CI" team must have signed any image we attempt to `docker image pull`, `docker container run`,
or `docker service create`, and the only member of that team is the "jenkins" user. This
restricts us to only running images that were published by our Jenkins CI system.

View File

@ -0,0 +1,60 @@
---
title: Use your own TLS certificates
description: Learn how to configure Docker Universal Control Plane to use your own certificates.
keywords: Universal Control Plane, UCP, certificate, authentication, tls
---
All UCP services are exposed using HTTPS, to ensure all communications between
clients and UCP are encrypted. By default, this is done using self-signed TLS
certificates that are not trusted by client tools like web browsers. So when
you try to access UCP, your browser warns that it doesn't trust UCP or that
UCP has an invalid certificate.
![invalid certificate](../../images/use-externally-signed-certs-1.png)
The same happens with other client tools.
```none
$ curl https://ucp.example.org
SSL certificate problem: Invalid certificate chain
```
You can configure UCP to use your own TLS certificates, so that it is
automatically trusted by your browser and client tools.
To ensure minimal impact to your business, you should plan for this change to
happen outside business peak hours. Your applications will continue running
normally, but existing UCP client certificates will become invalid, so users
will have to download new ones to [access UCP from the CLI](../../user/access-ucp/cli-based-access.md).
## Configure UCP to use your own TLS certificates and keys
In the UCP web UI, log in with administrator credentials and
navigate to the **Admin Settings** page.
In the left pane, click **Certificates**.
![](../../images/use-externally-signed-certs-2.png)
Upload your certificates and keys:
* A `ca.pem` file with the root CA public certificate.
* A `cert.pem` file with the TLS certificate for your domain and any intermediate public
certificates, in this order.
* A `key.pem` file with the TLS private key. Make sure it is not encrypted with a password.
Encrypted keys have the string `ENCRYPTED` in their first line.
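Before uploading, you can sanity-check the files with `openssl`. This is only a quick sketch and assumes an RSA key with the file names listed above:
```bash
# An encrypted key shows ENCRYPTED in its first line
head -n 1 key.pem
# Confirm the certificate and private key match by comparing their moduli
openssl x509 -noout -modulus -in cert.pem | openssl md5
openssl rsa -noout -modulus -in key.pem | openssl md5
```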
Finally, click **Save** for the changes to take effect.
After replacing the TLS certificates, your users won't be able to authenticate
with their old client certificate bundles. Ask your users to go to the UCP
web UI and [get new client certificate bundles](../../user/access-ucp/cli-based-access.md).
If you deployed Docker Trusted Registry, you'll also need to reconfigure it
to trust the new UCP TLS certificates.
[Learn how to configure DTR](/datacenter/dtr/2.3/reference/cli/reconfigure.md).
## Where to go next
* [Access UCP from the CLI](../../user/access-ucp/cli-based-access.md)

View File

@ -0,0 +1,43 @@
---
title: Architecture-specific images
description: Learn how to use images that are specific to particular hardware architectures in Docker Universal Control Plane.
keywords: UCP, Docker EE, image, IBM z, Windows
---
Docker Universal Control Plane deploys images for a number of different
hardware architectures, including IBM z systems. Some architectures require
pulling images that have specific tags or names indicating the target
architecture.
## Tag for IBM z Systems
Append the string `-s390x` to a UCP system image tag to pull the appropriate
image for IBM z Systems. For example, you can modify the CLI command for getting
a [UCP support dump](../../get-support.md) to use an environment variable
that indicates the current architecture:
```bash
{% raw %}
[[ $(docker info --format='{{.Architecture}}') == s390x ]] && export _ARCH='-s390x' || export _ARCH=''
{% endraw %}
docker container run --rm \
--name ucp \
-v /var/run/docker.sock:/var/run/docker.sock \
--log-driver none \
{{ page.ucp_org }}/{{ page.ucp_repo }}:{{ page.ucp_version }}${_ARCH} \
support > docker-support.tgz
```
In this example, the environment variable is named `_ARCH`, but you can use any
valid shell variable name.
## OS-specific component names
Some UCP component names depend on the node's operating system. Use the
following table to ensure that you're pulling the right images for each node.
| UCP component base name | Windows name | IBM z Systems name |
|-------------------------|----------------|--------------------|
| ucp-agent | ucp-agent-win | ucp-agent-s390x |
| ucp-dsinfo | ucp-dsinfo-win | ucp-dsinfo-s390x |
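For example, if you mirror images for Windows or IBM z Systems nodes, you might pull the agent variants explicitly. This is a sketch; adjust the image names and tags to the components you actually need:
```bash
docker image pull {{ page.ucp_org }}/ucp-agent-win:{{ page.ucp_version }}
docker image pull {{ page.ucp_org }}/ucp-agent-s390x:{{ page.ucp_version }}
```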

View File

@ -0,0 +1,147 @@
---
title: Install UCP for production
description: Learn how to install Docker Universal Control Plane on production.
keywords: Universal Control Plane, UCP, install, Docker EE
---
Docker Universal Control Plane (UCP) is a containerized application that you
can install on-premise or on a cloud infrastructure.
## Step 1: Validate the system requirements
The first step to installing UCP is ensuring that your infrastructure has all
of the [requirements UCP needs to run](system-requirements.md).
Also, you need to ensure that all nodes, physical and virtual, are running
the same version of Docker Enterprise Edition.
## Step 2: Install Docker EE on all nodes
UCP is a containerized application that requires the commercially supported
Docker Engine to run.
Install Docker EE on each host that you plan to manage with UCP.
View the [supported platforms](/engine/installation/#supported-platforms)
and click on your platform to get platform-specific instructions for installing
Docker EE.
Make sure you install the same Docker EE version on all the nodes. Also,
if you're creating virtual machine templates with Docker EE already
installed, make sure the `/etc/docker/key.json` file is not included in the
virtual machine image. When provisioning the virtual machine, restart the Docker
daemon to generate a new `/etc/docker/key.json` file.
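For example, on a systemd-based template host, the cleanup might look like this (a sketch; the exact commands depend on your distribution):
```bash
# Remove the Engine ID file from the template so each clone generates its own
sudo rm /etc/docker/key.json
# On the provisioned VM, restart the daemon to generate a new key.json
sudo systemctl restart docker
```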
## Step 3: Customize named volumes
Skip this step if you want to use the defaults provided by UCP.
Docker UCP uses named volumes to persist data. If you want
to customize the drivers used to manage these volumes, you can create the
volumes before installing UCP. When you install UCP, the installer
will notice that the volumes already exist, and will start using them.
[Learn about the named volumes used by UCP](../../architecture.md).
If these volumes don't exist, they'll be automatically created when installing
UCP.
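For example, you might pre-create the `ucp-kv` volume with specific driver options before installing UCP. The driver and options below are purely illustrative:
```bash
docker volume create --driver local \
  --opt type=none \
  --opt o=bind \
  --opt device=/mnt/ucp-kv \
  ucp-kv
```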
## Step 4: Install UCP
To install UCP, you use the `docker/ucp` image, which has commands to install
and manage UCP.
Make sure you follow the [UCP System requirements](system-requirements.md) in regards to networking ports.
Ensure that your hardware or software firewalls are open appropriately or disabled.
To install UCP:
1. Use ssh to log in to the host where you want to install UCP.
2. Run the following command:
```none
# Pull the latest version of UCP
$ docker image pull {{ page.ucp_org }}/{{ page.ucp_repo }}:{{ page.ucp_version }}
# Install UCP
$ docker container run --rm -it --name ucp \
-v /var/run/docker.sock:/var/run/docker.sock \
{{ page.ucp_org }}/{{ page.ucp_repo }}:{{ page.ucp_version }} install \
--host-address <node-ip-address> \
--interactive
```
This runs the install command in interactive mode, so that you're
prompted for any necessary configuration values.
To find what other options are available in the install command, check the
[reference documentation](../../../reference/cli/install.md).
## Step 5: License your installation
Now that UCP is installed, you need to license it.
1. Go to the
[Docker Store](https://www.docker.com/enterprise-edition)
and buy a Docker EE subscription, or get a free trial license.
2. In your browser, navigate to the UCP web UI, log in with your
administrator credentials and upload your license. Navigate to the
**Admin Settings** page and in the left pane, click **License**.
![](../../images/license-ucp.png){: .with-border}
3. Click **Upload License** and navigate to your license (.lic) file.
When you're finished selecting the license, UCP updates with the new
settings.
## Step 6: Join manager nodes
Skip this step if you don't want UCP to be highly available.
To make your Docker swarm and UCP fault-tolerant and highly available, you can
join more manager nodes to it. Manager nodes are the nodes in the swarm
that perform the orchestration and swarm management tasks, and dispatch tasks
for worker nodes to execute.
To join manager nodes to the swarm:
1. In the UCP web UI, navigate to the **Nodes** page, and click the
**Add Node** button to add a new node.
![](../../images/nodes-page-ucp.png){: .with-border}
2. In the **Add Node** page, check **Add node as a manager** to turn this node
into a manager and replicate UCP for high-availability.
3. If you want to customize the network and port where the new node listens
for swarm management traffic, click **Use a custom listen address**. Enter
the IP address and port for the node to listen for inbound cluster
management traffic. The format is `interface:port` or `ip:port`.
The default is `0.0.0.0:2377`.
4. If you want to customize the network and port that the new node advertises
to other swarm members for API access, click
**Use a custom advertise address** and enter the IP address and port.
By default, this is also the outbound address used by the new node to
contact UCP. The joining node should be able to contact itself at this
address. The format is `interface:port` or `ip:port`.
Click the copy icon ![](../../images/copy-swarm-token.png) to copy the
`docker swarm join` command that nodes use to join the swarm.
![](../../images/add-node-ucp.png){: .with-border}
5. For each manager node that you want to join to the swarm, log in using
ssh and run the join command that you copied. After the join command
completes, the node appears on the **Nodes** page in the UCP web UI.
## Step 7: Join worker nodes
Skip this step if you don't want to add more nodes to run and scale your apps.
To add more computational resources to your swarm, you can join worker nodes.
These nodes execute tasks assigned to them by the manager nodes. Follow the
same steps as before, but don't check the **Add node as a manager** option.
## Where to go next
* [Use your own TLS certificates](../configure/use-your-own-tls-certificates.md)
* [Scale your cluster](../configure/scale-your-cluster.md)

View File

@ -0,0 +1,74 @@
---
title: Install UCP offline
description: Learn how to install Docker Universal Control Plane on a machine with no internet access.
keywords: UCP, install, offline, Docker EE
---
The procedure to install Docker Universal Control Plane on a host is the same,
whether the host has access to the internet or not.
The only difference when installing on an offline host is that instead of
pulling the UCP images from Docker Hub, you use a computer that's connected
to the internet to download a single package with all the images. Then you
copy this package to the host where you install UCP. The offline installation
process works only if one of the following is true:
- All of the swarm nodes, managers and workers alike, have internet access
to Docker Hub, or
- None of the nodes, managers and workers alike, have internet access to
Docker Hub.
If the managers have access to Docker Hub while the workers don't,
installation will fail.
## Versions available
Use a computer with internet access to download the UCP package from the
following links.
{% include components/ddc_url_list_2.html product="ucp" version="3.0" %}
## Download the offline package
You can also use these links to get the UCP package from the command
line:
```bash
$ wget <ucp-package-url> -O ucp.tar.gz
```
Now that you have the package in your local machine, you can transfer it to
the machines where you want to install UCP.
For each machine that you want to manage with UCP:
1. Copy the UCP package to the machine.
```bash
$ scp ucp.tar.gz <user>@<host>
```
2. Use ssh to log in to the hosts where you transferred the package.
3. Load the UCP images.
Once the package is transferred to the hosts, you can use the
`docker load` command to load the Docker images from the tar archive:
```bash
$ docker load < ucp.tar.gz
```
Follow the same steps for the DTR images, if you're also installing DTR offline.
## Install UCP
Now that the offline hosts have all the images needed to install UCP,
you can [install Docker UCP on one of the manager nodes](index.md).
## Where to go next
* [Install UCP](index.md).
* [System requirements](system-requirements.md)

View File

@ -0,0 +1,106 @@
---
title: Plan a production UCP installation
description: Learn about the Docker Universal Control Plane architecture, and the requirements to install it on production.
keywords: UCP, install, Docker EE
---
Docker Universal Control Plane helps you manage your container swarm from a
centralized place. This article explains what you need to consider before
deploying Docker Universal Control Plane for production.
## System requirements
Before installing UCP you should make sure that all nodes (physical or virtual
machines) that you'll manage with UCP:
* [Comply with the system requirements](system-requirements.md), and
* Are running the same version of Docker Engine.
## Hostname strategy
Docker UCP requires Docker Enterprise Edition. Before installing Docker EE on
your swarm nodes, you should plan for a common hostname strategy.
Decide if you want to use short hostnames, like `engine01`, or Fully Qualified
Domain Names (FQDN), like `engine01.docker.vm`. Whichever you choose,
ensure that your naming strategy is consistent across the cluster, because
Docker Engine and UCP use hostnames.
For example, if your swarm has three hosts, you can name them:
```none
node1.company.example.org
node2.company.example.org
node3.company.example.org
```
## Static IP addresses
Docker UCP requires each node on the cluster to have a static IP address.
Before installing UCP, ensure your network and nodes are configured to support
this.
## Time synchronization
In distributed systems like Docker UCP, time synchronization is critical
to ensure proper operation. As a best practice to ensure consistency between
the engines in a UCP swarm, all engines should regularly synchronize time
with a Network Time Protocol (NTP) server. If a server's clock is skewed,
you may see unexpected behavior, ranging from poor performance to outright failures.
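To check whether a node's clock is synchronized, you can query the host's NTP client. For example, on hosts that use chrony or systemd:
```bash
# With chrony
chronyc tracking
# On systemd hosts
timedatectl status
```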
## Load balancing strategy
Docker UCP doesn't include a load balancer. You can configure your own
load balancer to balance user requests across all manager nodes.
If you plan to use a load balancer, you need to decide whether you'll
add the nodes to the load balancer using their IP addresses or their FQDNs.
Whichever you choose, be consistent across nodes. When this is decided,
take note of all IPs or FQDNs before starting the installation.
## Load balancing UCP and DTR
By default, UCP and DTR both use port 443. If you plan on deploying UCP and
DTR, your load balancer needs to distinguish traffic between the two by IP
address or port number.
* If you want to configure your load balancer to listen on port 443:
* Use one load balancer for UCP and another for DTR, or
* Use the same load balancer with multiple virtual IPs.
* Configure your load balancer to expose UCP or DTR on a port other than 443.
If you want to install UCP in a high-availability configuration that uses
a load balancer in front of your UCP controllers, include the appropriate IP
address and FQDN of the load balancer's VIP by using
one or more `--san` flags in the [install command](../../../reference/cli/install.md)
or when you're asked for additional SANs in interactive mode.
[Learn about high availability](../configure/set-up-high-availability.md).
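For example, you might pass the load balancer's FQDN and VIP as additional SANs when running the installer. This is a sketch; the addresses are placeholders:
```bash
docker container run --rm -it --name ucp \
  -v /var/run/docker.sock:/var/run/docker.sock \
  {{ page.ucp_org }}/{{ page.ucp_repo }}:{{ page.ucp_version }} install \
  --host-address <node-ip-address> \
  --san ucp.company.example.org \
  --san <load-balancer-vip> \
  --interactive
```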
## Use an external Certificate Authority
You can customize UCP to use certificates signed by an external Certificate
Authority. When using your own certificates, you need to have a certificate
bundle that has:
* A ca.pem file with the root CA public certificate,
* A cert.pem file with the server certificate and any intermediate CA public
certificates. This certificate should also have SANs for all addresses used to
reach the UCP manager,
* A key.pem file with the server private key.
You can have a certificate for each manager, with a common SAN. For
example, on a three-node cluster, you can have:
* node1.company.example.org with SAN ucp.company.org
* node2.company.example.org with SAN ucp.company.org
* node3.company.example.org with SAN ucp.company.org
You can also install UCP with a single externally-signed certificate
for all managers, rather than one for each manager node. In this case,
the certificate files are copied automatically to any new
manager nodes joining the cluster or being promoted to a manager role.
## Where to go next
* [UCP system requirements](system-requirements.md)
* [Install UCP](index.md)

View File

@ -0,0 +1,110 @@
---
title: UCP System requirements
description: Learn about the system requirements for installing Docker Universal Control Plane.
keywords: UCP, architecture, requirements, Docker EE
---
Docker Universal Control Plane can be installed on-premises or on the cloud.
Before installing, be sure your infrastructure has these requirements.
## Hardware and software requirements
You can install UCP on-premises or on a cloud provider. To install UCP,
all nodes must have:
* [Docker Enterprise Edition](/engine/installation/index.md) version 17.06 or higher
* Linux kernel version 3.10 or higher
* 8GB of RAM for manager nodes or nodes running DTR
* 4GB of RAM for worker nodes
* 3GB of free disk space
* A static IP address
Also, make sure the nodes are running one of these operating systems:
* A maintained version of CentOS 7. Archived versions aren't supported or tested.
* Red Hat Enterprise Linux 7.0, 7.1, 7.2, 7.3, or 7.4
* Ubuntu 14.04 LTS or 16.04 LTS
* SUSE Linux Enterprise 12
* Oracle Linux 7.3
For highly-available installations, you also need a way to transfer files
between hosts.
> Workloads on manager nodes
>
> These requirements assume that manager nodes won't run regular workloads.
> If you plan to run additional workloads on manager nodes, you may need to
> provision more powerful nodes. If manager nodes become overloaded, the
> swarm may experience issues.
## Ports used
When installing UCP on a host, make sure the following ports are open:
| Hosts | Direction | Port | Purpose |
|:------------------|:---------:|:------------------------|:----------------------------------------------------------------------------------|
| managers, workers | in | TCP 443 (configurable) | Port for the UCP web UI and API |
| managers | in | TCP 2376 (configurable) | Port for the Docker Swarm manager. Used for backwards compatibility |
| managers, workers | in | TCP 2377 (configurable) | Port for communication between swarm nodes |
| workers | out | TCP 2377 (configurable) | Port for communication between swarm nodes |
| managers, workers | in, out | UDP 4789 | Port for overlay networking |
| managers, workers | in, out | TCP, UDP 7946 | Port for gossip-based clustering |
| managers, workers | in | TCP 12376 | Port for a TLS proxy that provides access to UCP, Docker Engine, and Docker Swarm |
| managers | in | TCP 12379 | Port for internal node configuration, cluster configuration, and HA |
| managers | in | TCP 12380 | Port for internal node configuration, cluster configuration, and HA |
| managers | in | TCP 12381 | Port for the certificate authority |
| managers | in | TCP 12382 | Port for the UCP certificate authority |
| managers | in | TCP 12383 | Port for the authentication storage backend |
| managers | in | TCP 12384 | Port for the authentication storage backend for replication across managers |
| managers | in | TCP 12385 | Port for the authentication service API |
| managers | in | TCP 12386 | Port for the authentication worker |
| managers | in | TCP 12387 | Port for the metrics service |
For overlay networks with encryption to work, you need to ensure that
IP protocol 50 (ESP) traffic is allowed.
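As an illustration, on a manager node that uses `firewalld` you might open the ports like this. This is only a sketch; adapt the port list, zones, and rules to your environment and security policies:
```bash
sudo firewall-cmd --permanent --add-port=443/tcp
sudo firewall-cmd --permanent --add-port=2376-2377/tcp
sudo firewall-cmd --permanent --add-port=4789/udp
sudo firewall-cmd --permanent --add-port=7946/tcp --add-port=7946/udp
sudo firewall-cmd --permanent --add-port=12376-12387/tcp
# Allow ESP (IP protocol 50) for encrypted overlay networks
sudo firewall-cmd --permanent --add-rich-rule='rule protocol value="esp" accept'
sudo firewall-cmd --reload
```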
Also, make sure the networks you're using allow the UCP components enough time
to communicate before they time out.
| Component | Timeout (ms) | Configurable |
|:---------------------------------------|:-------------|:-------------|
| Raft consensus between manager nodes | 3000 | no |
| Gossip protocol for overlay networking | 5000 | no |
| etcd | 500 | yes |
| RethinkDB | 10000 | no |
| Stand-alone swarm | 90000 | no |
## Time Synchronization
In distributed systems like Docker UCP, time synchronization is critical
to ensure proper operation. As a best practice to ensure consistency between
the engines in a UCP swarm, all engines should regularly synchronize time
with a Network Time Protocol (NTP) server. If a server's clock is skewed,
you may see unexpected behavior, ranging from poor performance to outright failures.
## Compatibility and maintenance lifecycle
Docker EE is a software subscription that includes three products:
* Docker Engine with enterprise-grade support,
* Docker Trusted Registry,
* Docker Universal Control Plane.
[Learn more about the maintenance lifecycle for these products](http://success.docker.com/Get_Help/Compatibility_Matrix_and_Maintenance_Lifecycle).
## Version compatibility
UCP {{ page.ucp_version }} requires minimum versions of the following Docker components:
- Docker Engine 17.06 or higher
- DTR 2.3 or higher
<!--
- Docker Remote API 1.25
- Compose 1.9
-->
## Where to go next
* [UCP architecture](../../architecture.md)
* [Plan your installation](plan-installation.md)

View File

@ -0,0 +1,54 @@
---
title: Uninstall UCP
description: Learn how to uninstall a Docker Universal Control Plane swarm.
keywords: UCP, uninstall, install, Docker EE
---
Docker UCP is designed to scale as your applications grow in size and usage.
You can [add and remove nodes](../configure/scale-your-cluster.md) from the
cluster to make it scale to your needs.
You can also uninstall Docker Universal Control Plane from your cluster. In this
case the UCP services are stopped and removed, but your Docker Engines will
continue running in swarm mode. Your applications will continue running normally.
If you wish to remove a single node from the UCP cluster, you should instead
[remove that node from the cluster](../configure/scale-your-cluster.md).
After you uninstall UCP from the cluster, you'll no longer be able to enforce
role-based access control to the cluster, or have a centralized way to monitor
and manage the cluster.
After uninstalling UCP from the cluster, you will no longer be able to join new
nodes using `docker swarm join`, unless you reinstall UCP.
To uninstall UCP, log in to a manager node using ssh, and run the following
command:
```bash
$ docker container run --rm -it \
-v /var/run/docker.sock:/var/run/docker.sock \
--name ucp \
{{ page.ucp_org }}/{{ page.ucp_repo }}:{{ page.ucp_version }} uninstall-ucp --interactive
```
This runs the uninstall command in interactive mode, so that you are prompted
for any necessary configuration values. Running this command on a single manager
node will uninstall UCP from the entire cluster. [Check the reference
documentation](../../../reference/cli/index.md) to learn the options available
in the `uninstall-ucp` command.
## Swarm mode CA
After uninstalling UCP, the nodes in your cluster will still be in swarm mode,
but you can't join new nodes until you reinstall UCP, because swarm mode
relies on UCP to provide the CA certificates that allow nodes in the cluster
to identify one another. Also, since swarm mode is no longer controlling its
own certificates, if the certificates expire after you uninstall UCP, the nodes
in the swarm won't be able to communicate at all. To fix this, either reinstall
UCP before the certificates expire or disable swarm mode by running
`docker swarm leave --force` on every node.
## Where to go next
* [Scale your cluster](../configure/scale-your-cluster.md)

View File

@ -0,0 +1,62 @@
---
title: Upgrade UCP offline
description: Learn how to upgrade Docker Universal Control Plane on a machine with no internet access.
keywords: ucp, upgrade, offline
---
Upgrading Universal Control Plane is the same, whether your hosts have access to
the internet or not.
The only difference when upgrading on an offline host is that instead of
pulling the UCP images from Docker Hub, you use a computer that's connected
to the internet to download a single package with all the images. Then you
copy this package to the host where you upgrade UCP.
## Versions available
Use a computer with internet access to download the UCP package from the
following links.
{% include components/ddc_url_list_2.html product="ucp" version="3.0" %}
## Download the offline package
You can also use these links to get the UCP package from the command
line:
```bash
$ wget <ucp-package-url> -O ucp.tar.gz
```
Now that you have the package in your local machine, you can transfer it to
the machines where you want to upgrade UCP.
For each machine that you want to manage with UCP:
1. Copy the offline package to the machine.
```bash
$ scp ucp.tar.gz <user>@<host>
```
2. Use ssh to log in to the hosts where you transferred the package.
3. Load the UCP images.
Once the package is transferred to the hosts, you can use the
`docker load` command to load the Docker images from the tar archive:
```bash
$ docker load < ucp.tar.gz
```
## Upgrade UCP
Now that the offline hosts have all the images needed to upgrade UCP,
you can [upgrade Docker UCP](upgrade.md).
## Where to go next
* [Upgrade UCP](upgrade.md)
* [Release Notes](release-notes.md)

View File

@ -0,0 +1,106 @@
---
title: Upgrade to UCP 3.0
description: Learn how to upgrade Docker Universal Control Plane with minimal impact to your users.
keywords: UCP, upgrade, update
---
This page guides you in upgrading Docker Universal Control Plane (UCP) to
version {{ page.ucp_version }}.
Before upgrading to a new version of UCP, check the
[release notes](../../release-notes/index.md) for this version.
There you'll find information about the new features, breaking changes, and
other relevant information for upgrading to a particular version.
## Plan the upgrade
As part of the upgrade process, you'll be upgrading the Docker Engine
installed in each node of the swarm to version 17.06 Enterprise Edition.
You should plan for the upgrade to take place outside of business hours,
to ensure there's minimal impact to your users.
Also, don't make changes to UCP configurations while you're upgrading it.
This can lead to misconfigurations that are difficult to troubleshoot.
## Back up your swarm
Before starting an upgrade, make sure that your swarm is healthy. If a problem
occurs, this makes it easier to find and troubleshoot it.
[Create a backup](../backups-and-disaster-recovery.md) of your swarm.
This allows you to recover if something goes wrong during the upgrade process.
> Upgrading and backup archives
>
> The backup archive is version-specific, so you can't use it during the
> upgrade process. For example, if you create a backup archive for a UCP 2.2
> swarm, you can't use the archive file after you upgrade to UCP 3.0.
## Upgrade Docker Engine
For each node that is part of your swarm, upgrade the Docker Engine
installed on that node to Docker Engine version 17.06 or higher. Be sure
to install the Docker Enterprise Edition.
Starting with the manager nodes, and then worker nodes:
1. Log into the node using ssh.
2. Upgrade the Docker Engine to version 17.06 or higher.
3. Make sure the node is healthy.
In your browser, navigate to the **Nodes** page in the UCP web UI,
and check that the node is healthy and is part of the swarm.
## Upgrade UCP
You can upgrade UCP from the web UI or the CLI.
### Use the UI to perform an upgrade
When an upgrade is available for a UCP installation, a banner appears.
![](../../images/upgrade-ucp-1.png){: .with-border}
Clicking this message takes an admin user directly to the upgrade process.
It can be found under the **Cluster Configuration** tab of the **Admin
Settings** section.
![](../../images/upgrade-ucp-2.png){: .with-border}
Select a version to upgrade to using the **Available UCP Versions** dropdown,
then click to upgrade.
Before the upgrade happens, a confirmation dialog along with important
information regarding swarm and UI availability is displayed.
![](../../images/upgrade-ucp-3.png){: .with-border}
During the upgrade, the UI will be unavailable, and you should wait
until completion before continuing to interact with it. When the upgrade
completes, you'll see a notification that a newer version of the UI
is available and a browser refresh is required to see the latest UI.
### Use the CLI to perform an upgrade
To upgrade from the CLI, log into a UCP manager node using ssh, and run:
```
# Get the latest version of UCP
$ docker image pull {{ page.ucp_org }}/{{ page.ucp_repo }}:{{ page.ucp_version }}
$ docker container run --rm -it \
--name ucp \
-v /var/run/docker.sock:/var/run/docker.sock \
{{ page.ucp_org }}/{{ page.ucp_repo }}:{{ page.ucp_version }} \
upgrade --interactive
```
This runs the upgrade command in interactive mode, so that you are prompted
for any necessary configuration values.
Once the upgrade finishes, navigate to the UCP web UI and make sure that
all the nodes managed by UCP are healthy.
## Where to go next
* [UCP release notes](../../release-notes/index.md)

View File

@ -0,0 +1,72 @@
---
title: Monitor the swarm status
description: Monitor your Docker Universal Control Plane installation, and learn how to troubleshoot it.
keywords: UCP, troubleshoot, health, swarm
---
You can monitor the status of UCP by using the web UI or the CLI.
You can also use the `_ping` endpoint to build monitoring automation.
## Check status from the UI
The first place to check the status of UCP is the UCP web UI, since it
shows warnings for situations that require your immediate attention.
Administrators might see more warnings than regular users.
![UCP dashboard](../../images/monitor-ucp-0.png){: .with-border}
You can also navigate to the **Nodes** page, to see if all the nodes
managed by UCP are healthy or not.
![UCP dashboard](../../images/monitor-ucp-1.png){: .with-border}
Each node has a status message explaining any problems with the node.
In this example, a Windows worker node is down.
[Learn more about node status](troubleshoot-node-messages.md).
Click the node to get more info on its status. In the details pane, click
**Actions** and select **Agent logs** to see the log entries from the
node.
## Check status from the CLI
You can also monitor the status of a UCP cluster using the Docker CLI client.
Download [a UCP client certificate bundle](../../user/access-ucp/cli-based-access.md)
and then run:
```none
$ docker node ls
```
As a rule of thumb, if the status message starts with `[Pending]`, then the
current state is transient and the node is expected to correct itself back
into a healthy state. [Learn more about node status](troubleshoot-node-messages.md).
## Monitoring automation
You can use the `https://<ucp-manager-url>/_ping` endpoint to check the health
of a single UCP manager node. When you access this endpoint, the UCP manager
validates that all its internal components are working, and returns one of the
following HTTP error codes:
* 200, if all components are healthy
* 500, if one or more components are not healthy
If an administrator client certificate is used as a TLS client certificate for
the `_ping` endpoint, a detailed error message is returned if any component is
unhealthy.
If you're accessing the `_ping` endpoint through a load balancer, you'll have no
way of knowing which UCP manager node is not healthy, since any manager node
might be serving your request. Make sure you're connecting directly to the
URL of a manager node, and not a load balancer. Also note that a `HEAD` request
to this endpoint returns a 404 error code; use `GET` instead.
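For example, assuming you've downloaded an admin client bundle and `DOCKER_CERT_PATH` points at its certificates, you can query a manager directly (the hostname is a placeholder):
```bash
curl --cert ${DOCKER_CERT_PATH}/cert.pem \
  --key ${DOCKER_CERT_PATH}/key.pem \
  --cacert ${DOCKER_CERT_PATH}/ca.pem \
  -w '\n%{http_code}\n' \
  https://ucp-manager.example.org/_ping
```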
## Where to go next
* [Troubleshoot with logs](troubleshoot-with-logs.md)
* [Troubleshoot node states](./troubleshoot-node-messages.md)

View File

@ -0,0 +1,148 @@
---
title: Troubleshoot swarm configurations
description: Learn how to troubleshoot your Docker Universal Control Plane cluster.
keywords: troubleshoot, etcd, rethinkdb, key, value, store, database, ucp, health, swarm
---
UCP automatically tries to heal itself by monitoring its internal
components and trying to bring them to a healthy state.
In most cases, if a single UCP component is in a failed state persistently,
you should be able to restore the cluster to a healthy state by
removing the unhealthy node from the cluster and joining it again.
[Learn how to remove and join nodes](../configure/scale-your-cluster.md).
## Troubleshoot the etcd key-value store
UCP persists configuration data on an [etcd](https://coreos.com/etcd/)
key-value store and [RethinkDB](https://rethinkdb.com/) database that are
replicated on all manager nodes of the UCP swarm. These data stores are for
internal use only and should not be used by other applications.
### With the HTTP API
In this example we'll use `curl` for making requests to the key-value
store REST API, and `jq` to process the responses.
You can install these tools on an Ubuntu distribution by running:
```bash
$ sudo apt-get update && sudo apt-get install curl jq
```
1. Use a client bundle to authenticate your requests.
[Learn more](../../user/access-ucp/cli-based-access.md).
2. Use the REST API to access the cluster configurations. The `$DOCKER_HOST`
and `$DOCKER_CERT_PATH` environment variables are set when using the client
bundle.
```bash
$ export KV_URL="https://$(echo $DOCKER_HOST | cut -f3 -d/ | cut -f1 -d:):12379"
$ curl -s \
--cert ${DOCKER_CERT_PATH}/cert.pem \
--key ${DOCKER_CERT_PATH}/key.pem \
--cacert ${DOCKER_CERT_PATH}/ca.pem \
${KV_URL}/v2/keys | jq "."
```
To learn more about the key-value store REST API, check the
[etcd official documentation](https://coreos.com/etcd/docs/latest/).
### With the CLI client
The containers running the key-value store include `etcdctl`, a command-line
client for etcd. You can run it using the `docker exec` command.
The examples below assume you are logged in with ssh into a UCP manager node.
```bash
$ docker exec -it ucp-kv etcdctl \
--endpoint https://127.0.0.1:2379 \
--ca-file /etc/docker/ssl/ca.pem \
--cert-file /etc/docker/ssl/cert.pem \
--key-file /etc/docker/ssl/key.pem \
cluster-health
member 16c9ae1872e8b1f0 is healthy: got healthy result from https://192.168.122.64:12379
member c5a24cfdb4263e72 is healthy: got healthy result from https://192.168.122.196:12379
member ca3c1bb18f1b30bf is healthy: got healthy result from https://192.168.122.223:12379
cluster is healthy
```
On failure, the command exits with an error code and no output.
To learn more about the `etcdctl` utility, check the
[etcd official documentation](https://coreos.com/etcd/docs/latest/).
## RethinkDB Database
User and organization data for Docker Enterprise Edition is stored in a
RethinkDB database which is replicated across all manager nodes in the UCP
swarm.
Replication and failover of this database is typically handled automatically by
UCP's own configuration management processes, but detailed database status and
manual reconfiguration of database replication are available through a
command-line tool included with UCP.
The examples below assume you are logged in with ssh into a UCP manager node.
### Check the status of the database
```bash
{% raw %}
# NODE_ADDRESS will be the IP address of this Docker Swarm manager node
NODE_ADDRESS=$(docker info --format '{{.Swarm.NodeAddr}}')
# VERSION will be your most recent version of the docker/ucp-auth image
VERSION=$(docker image ls --format '{{.Tag}}' docker/ucp-auth | head -n 1)
# This command will output detailed status of all servers and database tables
# in the RethinkDB cluster.
docker container run --rm -v ucp-auth-store-certs:/tls docker/ucp-auth:${VERSION} --db-addr=${NODE_ADDRESS}:12383 db-status
Server Status: [
{
"ID": "ffa9cd5a-3370-4ccd-a21f-d7437c90e900",
"Name": "ucp_auth_store_192_168_1_25",
"Network": {
"CanonicalAddresses": [
{
"Host": "192.168.1.25",
"Port": 12384
}
],
"TimeConnected": "2017-07-14T17:21:44.198Z"
}
}
]
...
{% endraw %}
```
### Manually reconfigure database replication
```bash
{% raw %}
# NODE_ADDRESS will be the IP address of this Docker Swarm manager node
NODE_ADDRESS=$(docker info --format '{{.Swarm.NodeAddr}}')
# NUM_MANAGERS will be the current number of manager nodes in the cluster
NUM_MANAGERS=$(docker node ls --filter role=manager -q | wc -l)
# VERSION will be your most recent version of the docker/ucp-auth image
VERSION=$(docker image ls --format '{{.Tag}}' docker/ucp-auth | head -n 1)
# This reconfigure-db command will repair the RethinkDB cluster to have a
# number of replicas equal to the number of manager nodes in the cluster.
docker container run --rm -v ucp-auth-store-certs:/tls docker/ucp-auth:${VERSION} --db-addr=${NODE_ADDRESS}:12383 --debug reconfigure-db --num-replicas ${NUM_MANAGERS} --emergency-repair
time="2017-07-14T20:46:09Z" level=debug msg="Connecting to db ..."
time="2017-07-14T20:46:09Z" level=debug msg="connecting to DB Addrs: [192.168.1.25:12383]"
time="2017-07-14T20:46:09Z" level=debug msg="Reconfiguring number of replicas to 1"
time="2017-07-14T20:46:09Z" level=debug msg="(00/16) Emergency Repairing Tables..."
time="2017-07-14T20:46:09Z" level=debug msg="(01/16) Emergency Repaired Table \"grant_objects\""
...
{% endraw %}
```
## Where to go next
* [Get support](../../get-support.md)

View File

@ -0,0 +1,29 @@
---
title: Troubleshoot UCP node states
description: Learn how to troubleshoot individual UCP nodes.
keywords: UCP, troubleshoot, health, swarm
---
There are several cases in the lifecycle of UCP when a node is actively
transitioning from one state to another, such as when a new node is joining the
swarm or during node promotion and demotion. In these cases, the current step
of the transition will be reported by UCP as a node message. You can view the
state of each individual node by following the same steps required to [monitor
cluster status](index.md).
## UCP node states
The following table lists all possible node states that may be reported for a
UCP node, their explanation, and the expected duration of a given step.
| Message | Description | Typical step duration |
|:-----------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:----------------------|
| Completing node registration | Waiting for the node to appear in KV node inventory. This is expected to occur when a node first joins the UCP swarm. | 5 - 30 seconds |
| The ucp-agent task is `state` | The `ucp-agent` task on the target node is not in a running state yet. This is an expected message when configuration has been updated, or when a new node was first joined to the UCP swarm. This step may take a longer time duration than expected if the UCP images need to be pulled from Docker Hub on the affected node. | 1-10 seconds |
| Unable to determine node state | The `ucp-reconcile` container on the target node just started running and we are not able to determine its state. | 1-10 seconds |
| Node is being reconfigured | The `ucp-reconcile` container is currently converging the current state of the node to the desired state. This process may involve issuing certificates, pulling missing images, and starting containers, depending on the current node state. | 1 - 60 seconds |
| Reconfiguration pending | The target node is expected to be a manager but the `ucp-reconcile` container has not been started yet. | 1 - 10 seconds |
| Unhealthy UCP Controller: node is unreachable | Other manager nodes of the cluster have not received a heartbeat message from the affected node within a predetermined timeout. This usually indicates that there's either a temporary or permanent interruption in the network link to that manager node. Ensure the underlying networking infrastructure is operational, and [contact support](../../get-support.md) if the symptom persists. | Until resolved |
| Unhealthy UCP Controller: unable to reach controller | The controller that we are currently communicating with is not reachable within a predetermined timeout. Please refresh the node listing to see if the symptom persists. If the symptom appears intermittently, this could indicate latency spikes between manager nodes, which can lead to temporary loss in the availability of UCP itself. Please ensure the underlying networking infrastructure is operational, and [contact support](../../get-support.md) if the symptom persists. | Until resolved |
| Unhealthy UCP Controller: Docker Swarm Cluster: Local node `<ip>` has status Pending | The Engine ID of an engine is not unique in the swarm. When a node first joins the cluster, it's added to the node inventory and discovered as `Pending` by Docker Swarm. The engine is "validated" if a `ucp-swarm-manager` container can connect to it via TLS, and if its Engine ID is unique in the swarm. If you see this issue repeatedly, make sure that your engines don't have duplicate IDs. Use `docker info` to see the Engine ID. Refresh the ID by removing the `/etc/docker/key.json` file and restarting the daemon. | Until resolved |

View File

@ -0,0 +1,102 @@
---
title: Troubleshoot your swarm
description: Learn how to troubleshoot your Docker Universal Control Plane cluster.
keywords: ucp, troubleshoot, health, swarm
---
If you detect problems in your UCP cluster, you can start your troubleshooting
session by checking the logs of the
[individual UCP components](../../architecture.md). Only administrator users can
see information about UCP system containers.
## Check the logs from the UI
To see the logs of the UCP system containers, navigate to the **Containers**
page of UCP. By default, the UCP system containers are hidden. Click
**Settings** and check **Show system containers** for the UCP system containers
to be listed as well.
![](../../images/troubleshoot-with-logs-1.png){: .with-border}
Click on a container to see more details, like its configurations and logs.
## Check the logs from the CLI
You can also check the logs of UCP system containers from the CLI. This is
especially useful if the UCP web application is not working.
1. Get a client certificate bundle.
When using the Docker CLI client, you need to authenticate using client
certificates.
[Learn how to use client certificates](../../user/access-ucp/cli-based-access.md).
If your client certificate bundle is for a non-admin user, you won't have
permissions to see the UCP system containers.
2. Check the logs of UCP system containers. By default, system containers
aren't displayed. Use the `-a` flag to display them.
```bash
$ docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
8b77cfa87889 docker/ucp-agent:latest "/bin/ucp-agent re..." 3 hours ago Exited (0) 3 hours ago ucp-reconcile
b844cf76a7a5 docker/ucp-agent:latest "/bin/ucp-agent agent" 3 hours ago Up 3 hours 2376/tcp ucp-agent.tahzo3m4xjwhtsn6l3n8oc2bf.xx2hf6dg4zrphgvy2eohtpns9
de5b45871acb docker/ucp-controller:latest "/bin/controller s..." 3 hours ago Up 3 hours (unhealthy) 0.0.0.0:443->8080/tcp ucp-controller
...
```
3. Get the log from a UCP container by using the `docker logs <ucp container ID>`
command. For example, the following command emits the log for the
`ucp-controller` container listed above.
```bash
$ docker logs de5b45871acb
{"level":"info","license_key":"PUagrRqOXhMH02UgxWYiKtg0kErLY8oLZf1GO4Pw8M6B","msg":"/v1.22/containers/ucp/ucp-controller/json",
"remote_addr":"192.168.10.1:59546","tags":["api","v1.22","get"],"time":"2016-04-25T23:49:27Z","type":"api","username":"dave.lauper"}
{"level":"info","license_key":"PUagrRqOXhMH02UgxWYiKtg0kErLY8oLZf1GO4Pw8M6B","msg":"/v1.22/containers/ucp/ucp-controller/logs",
"remote_addr":"192.168.10.1:59546","tags":["api","v1.22","get"],"time":"2016-04-25T23:49:27Z","type":"api","username":"dave.lauper"}
```
## Get a support dump
Before making any changes to UCP, download a [support dump](../../get-support.md).
This allows you to troubleshoot problems which were already happening before
changing UCP configurations.
Then you can increase the UCP log level to debug, making it easier to understand
the status of the UCP cluster. Changing the UCP log level restarts all UCP
system components and introduces a small downtime window to UCP. Your
applications won't be affected by this.
To increase the UCP log level, navigate to the UCP web UI, go to the
**Admin Settings** tab, and choose **Logs**.
![](../../images/troubleshoot-with-logs-2.png){: .with-border}
Once you change the log level to **Debug** the UCP containers restart.
Now that the UCP components are creating more descriptive logs, you can
download a support dump and use it to troubleshoot the component causing the
problem.
Depending on the problem you're experiencing, it's more likely that you'll
find related messages in the logs of specific components on manager nodes:
* If the problem occurs after a node was added or removed, check the logs
of the `ucp-reconcile` container.
* If the problem occurs in the normal state of the system, check the logs
of the `ucp-controller` container.
* If you are able to visit the UCP web UI but unable to log in, check the
logs of the `ucp-auth-api` and `ucp-auth-store` containers.
It's normal for the `ucp-reconcile` container to be in a stopped state. This
container starts only when the `ucp-agent` detects that a node needs to
transition to a different state. The `ucp-reconcile` container is responsible
for creating and removing containers, issuing certificates, and pulling
missing images.
## Where to go next
* [Troubleshoot configurations](troubleshoot-configurations.md)

View File

@ -0,0 +1,152 @@
---
title: UCP architecture
description: Learn about the architecture of Docker Universal Control Plane.
keywords: ucp, architecture
---
Universal Control Plane is a containerized application that runs on
[Docker Enterprise Edition](/enterprise/index.md) and extends its functionality
to make it easier to deploy, configure, and monitor your applications at scale.
UCP also secures Docker with role-based access control so that only authorized
users can make changes and deploy applications to your Docker cluster.
![](images/architecture-1.svg)
Once a Universal Control Plane (UCP) instance is deployed, developers and IT
operations no longer interact with Docker Engine directly, but interact with
UCP instead. Since UCP exposes the standard Docker API, this is all done
transparently, so that you can use the tools you already know and love, like
the Docker CLI client and Docker Compose.
## Under the hood
Docker UCP leverages the clustering and orchestration functionality provided
by Docker.
![](images/architecture-2.svg)
A swarm is a collection of nodes that are in the same Docker cluster.
[Nodes](/engine/swarm/key-concepts.md) in a Docker swarm operate in one of two
modes: Manager or Worker. If nodes are not already running in a swarm when
you install UCP, they will be configured to run in swarm mode.
When you deploy UCP, it starts running a globally scheduled service called
`ucp-agent`. This service monitors the node where it's running and starts
and stops UCP services, based on whether the node is a
[manager or a worker node](/engine/swarm/key-concepts.md).
If the node is a:
* **Manager**: the `ucp-agent` service automatically starts serving all UCP
components, including the UCP web UI and data stores used by UCP. The
`ucp-agent` accomplishes this by
[deploying several containers](#ucp-components-in-manager-nodes)
on the node. By promoting a node to manager, UCP automatically becomes
highly available and fault tolerant.
* **Worker**: on worker nodes, the `ucp-agent` service starts serving a proxy
service that ensures only authorized users and other UCP services can run
Docker commands in that node. The `ucp-agent` deploys a
[subset of containers](#ucp-components-in-worker-nodes) on worker nodes.
## UCP internal components
The core component of UCP is a globally-scheduled service called `ucp-agent`.
When you install UCP on a node, or join a node to a swarm that's being managed
by UCP, the `ucp-agent` service starts running on that node.
Once this service is running, it deploys containers with other UCP components,
and it ensures they keep running. The UCP components that are deployed
on a node depend on whether the node is a manager or a worker.
> OS-specific component names
>
> Some UCP component names depend on the node's operating system. For example,
> on Windows, the `ucp-agent` component is named `ucp-agent-win`.
> [Learn about architecture-specific images](admin/install/architecture-specific-images.md).
### UCP components in manager nodes
Manager nodes run all UCP services, including the web UI and data stores that
persist the state of UCP. These are the UCP services running on manager nodes:
| UCP component | Description |
| :------------------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| ucp-agent | Monitors the node and ensures the right UCP services are running |
| ucp-reconcile | When ucp-agent detects that the node is not running the right UCP components, it starts the ucp-reconcile container to converge the node to its desired state. It is expected for the ucp-reconcile container to remain in an exited state when the node is healthy. |
| ucp-auth-api | The centralized service for identity and authentication used by UCP and DTR |
| ucp-auth-store | Stores authentication configurations and data for users, organizations, and teams |
| ucp-auth-worker | Performs scheduled LDAP synchronizations and cleans authentication and authorization data |
| ucp-client-root-ca | A certificate authority to sign client bundles |
| ucp-cluster-root-ca | A certificate authority used for TLS communication between UCP components |
| ucp-controller | The UCP web server |
| ucp-dsinfo | Docker system information collection script to assist with troubleshooting |
| ucp-kv | Used to store the UCP configurations. Don't use it in your applications, since it's for internal use only |
| ucp-metrics | Used to collect and process metrics for a node, like the disk space available |
| ucp-proxy | A TLS proxy that provides UCP components with secure access to the local Docker Engine |
| ucp-swarm-manager | Used to provide backwards-compatibility with Docker Swarm |
### UCP components in worker nodes
Worker nodes are the ones where you run your applications. These are the UCP
services running on worker nodes:
| UCP component | Description |
| :------------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| ucp-agent | Monitors the node and ensures the right UCP services are running |
| ucp-dsinfo | Docker system information collection script to assist with troubleshooting |
| ucp-reconcile | When ucp-agent detects that the node is not running the right UCP components, it starts the ucp-reconcile container to converge the node to its desired state. It is expected for the ucp-reconcile container to remain in an exited state when the node is healthy. |
| ucp-proxy | A TLS proxy that gives UCP components secure access to the local Docker Engine |
## Volumes used by UCP
Docker UCP uses these named volumes to persist data in all nodes where it runs:
| Volume name | Description |
|:----------------------------|:-----------------------------------------------------------------------------------------|
| ucp-auth-api-certs | Certificate and keys for the authentication and authorization service |
| ucp-auth-store-certs | Certificate and keys for the authentication and authorization store |
| ucp-auth-store-data | Data of the authentication and authorization store, replicated across managers |
| ucp-auth-worker-certs | Certificate and keys for authentication worker |
| ucp-auth-worker-data | Data of the authentication worker |
| ucp-client-root-ca | Root key material for the UCP root CA that issues client certificates |
| ucp-cluster-root-ca | Root key material for the UCP root CA that issues certificates for swarm members |
| ucp-controller-client-certs | Certificate and keys used by the UCP web server to communicate with other UCP components |
| ucp-controller-server-certs | Certificate and keys for the UCP web server running in the node |
| ucp-kv | UCP configuration data, replicated across managers |
| ucp-kv-certs | Certificates and keys for the key-value store |
| ucp-metrics-data | Monitoring data gathered by UCP |
| ucp-metrics-inventory | Configuration file used by the ucp-metrics service |
| ucp-node-certs | Certificate and keys for node communication |
You can customize the volume driver used for these volumes by creating the
volumes before installing UCP. During the installation, UCP checks which
volumes don't exist on the node and creates them using the default volume
driver.
By default, the data for these volumes can be found at
`/var/lib/docker/volumes/<volume-name>/_data`.
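As a minimal sketch of pre-creating one of these volumes with a different driver before installing UCP (the driver name below is only a placeholder for whichever volume plugin you use):
```none
# Pre-create a UCP volume with a non-default driver so the installer reuses it.
# "local" is only a placeholder; replace it with the volume plugin you want.
docker volume create --driver local ucp-controller-server-certs
# Check which UCP volumes already exist on this node
docker volume ls --filter name=ucp
```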
## How you interact with UCP
There are two ways to interact with UCP: the web UI or the CLI.
You can use the UCP web UI to manage your swarm, grant and revoke user
permissions, and deploy, configure, manage, and monitor your applications.
![](images/architecture-3.svg)
UCP also exposes the standard Docker API, so you can continue using existing
tools like the Docker CLI client. Since UCP secures your cluster with role-based
access control, you need to configure your Docker CLI client and other client
tools to authenticate your requests using
[client certificates](user/access-ucp/index.md) that you can download
from your UCP profile page.
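As an example of the CLI workflow, once you download a client bundle you can load it into your shell and point your Docker CLI at UCP. The bundle filename below is only an illustration, since it depends on your username, and the bundle typically includes an `env.sh` script that sets the required environment variables:
```none
# Extract the client bundle and load the environment variables it defines
unzip ucp-bundle-admin.zip -d ucp-bundle
cd ucp-bundle
eval "$(<env.sh)"
# Requests now go to UCP and are authenticated with your client certificates
docker version
```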
## Where to go next
* [System requirements](admin/install/system-requirements.md)
* [Plan your installation](admin/install/plan-installation.md)

View File

@@ -0,0 +1,54 @@
---
title: Get support
description: Your Docker EE subscription gives you access to prioritized support. You can file tickets via email or the support portal.
keywords: support, help
---
Your Docker Enterprise Edition subscription gives you access to prioritized
support. The service levels depend on your subscription.
If you need help, you can file a ticket via:
* [Email](mailto:support@docker.com)
* [Docker support page](https://support.docker.com/)
Be sure to use your company email when filing tickets.
Docker Support engineers may ask you to provide a UCP support dump, which is an
archive that contains UCP system logs and diagnostic information. To obtain a
support dump:
## From the UI
1. Log into the UCP web UI with an administrator account.
2. In the top-left menu, click your username and choose
**Download Logs**.
![](images/get-support-1.png){: .with-border}
## From the CLI
To get the support dump from the CLI, use SSH to log into a UCP manager node
and run:
```none
docker container run --rm \
--name ucp \
-v /var/run/docker.sock:/var/run/docker.sock \
--log-driver none \
{{ page.ucp_org }}/{{ page.ucp_repo }}:{{ page.ucp_version }} \
support > docker-support.tgz
```
This support dump only contains logs for the node where you're running the
command. If your UCP is highly available, you should collect support dumps
from all of the manager nodes.
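If you need to gather dumps from several managers in one pass, a minimal sketch, assuming hypothetical manager hostnames and SSH access to each, could look like this:
```none
# Hypothetical manager hostnames; replace them with your own
for node in manager-1 manager-2 manager-3; do
  ssh "$node" \
    "docker container run --rm --name ucp -v /var/run/docker.sock:/var/run/docker.sock --log-driver none {{ page.ucp_org }}/{{ page.ucp_repo }}:{{ page.ucp_version }} support" \
    > "docker-support-$node.tgz"
done
```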
On Windows worker nodes, run the following command to generate a local support dump:
```ps
PS> docker container run --name windowssupport `
    -v 'C:\ProgramData\docker\daemoncerts:C:\ProgramData\docker\daemoncerts' `
    -v 'C:\Windows\system32\winevt\logs:C:\eventlogs:ro' `
    {{ page.ucp_org }}/ucp-dsinfo-win:{{ page.ucp_version }}
PS> docker cp windowssupport:'C:\dsinfo' .
PS> docker rm -f windowssupport
```
This command creates a directory named `dsinfo` in your current directory.
If you want an archive file, you need to create it from the `dsinfo` directory.

View File

@@ -0,0 +1,139 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg width="740px" height="250px" viewBox="0 0 740 250" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<!-- Generator: Sketch 40.1 (33804) - http://www.bohemiancoding.com/sketch -->
<title>add-labels-1</title>
<desc>Created with Sketch.</desc>
<defs></defs>
<g id="ucp-diagrams" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
<g id="add-labels-1">
<text id="Docker-swarm" font-family="OpenSans-Semibold, Open Sans" font-size="10" font-weight="500" fill="#82949E">
<tspan x="18.025" y="239.009524">Docker swarm</tspan>
</text>
<g id="nodes" transform="translate(107.000000, 15.000000)">
<g id="workers" transform="translate(0.000000, 118.000000)">
<g id="node-1-copy-3" transform="translate(428.000000, 0.000000)">
<g id="node">
<g id="node-label">
<rect id="Rectangle-127" fill="#445D6E" x="0" y="0" width="71" height="21.2904762" rx="2"></rect>
<text id="worker-node" font-family="OpenSans, Open Sans" font-size="8" font-weight="normal" fill="#FFFFFF">
<tspan x="6" y="14">worker node</tspan>
</text>
</g>
<text id="storage=ssd" font-family="OpenSans-Semibold, Open Sans" font-size="10" font-weight="500" fill="#82949E">
<tspan x="3" y="93">storage=ssd</tspan>
</text>
<text id="environment=prod" font-family="OpenSans-Semibold, Open Sans" font-size="10" font-weight="500" fill="#82949E">
<tspan x="3" y="79">environment=prod</tspan>
</text>
</g>
<rect id="node-border" stroke="#445D6E" stroke-width="2" x="0" y="0" width="97" height="102" rx="2"></rect>
</g>
<g id="node-1-copy-4" transform="translate(321.000000, 0.000000)">
<g id="node">
<g id="node-label">
<rect id="Rectangle-127" fill="#445D6E" x="0" y="0" width="71" height="21.2904762" rx="2"></rect>
<text id="worker-node" font-family="OpenSans, Open Sans" font-size="8" font-weight="normal" fill="#FFFFFF">
<tspan x="6" y="14">worker node</tspan>
</text>
</g>
<text id="storage=ssd" font-family="OpenSans-Semibold, Open Sans" font-size="10" font-weight="500" fill="#82949E">
<tspan x="3" y="93">storage=ssd</tspan>
</text>
<text id="environment=prod" font-family="OpenSans-Semibold, Open Sans" font-size="10" font-weight="500" fill="#82949E">
<tspan x="3" y="79">environment=prod</tspan>
</text>
</g>
<rect id="node-border" stroke="#445D6E" stroke-width="2" x="0" y="0" width="97" height="102" rx="2"></rect>
</g>
<g id="node-1-copy-5" transform="translate(214.000000, 0.000000)">
<g id="node">
<g id="node-label">
<rect id="Rectangle-127" fill="#445D6E" x="0" y="0" width="71" height="21.2904762" rx="2"></rect>
<text id="worker-node" font-family="OpenSans, Open Sans" font-size="8" font-weight="normal" fill="#FFFFFF">
<tspan x="6" y="14">worker node</tspan>
</text>
</g>
<text id="storage=ssd" font-family="OpenSans-Semibold, Open Sans" font-size="10" font-weight="500" fill="#82949E">
<tspan x="3" y="93">storage=ssd</tspan>
</text>
<text id="environment=test" font-family="OpenSans-Semibold, Open Sans" font-size="10" font-weight="500" fill="#82949E">
<tspan x="3" y="79">environment=test</tspan>
</text>
</g>
<rect id="node-border" stroke="#445D6E" stroke-width="2" x="0" y="0" width="97" height="102" rx="2"></rect>
</g>
<g id="node-1-copy-6" transform="translate(107.000000, 0.000000)">
<g id="node">
<g id="node-label">
<rect id="Rectangle-127" fill="#445D6E" x="0" y="0" width="71" height="21.2904762" rx="2"></rect>
<text id="worker-node" font-family="OpenSans, Open Sans" font-size="8" font-weight="normal" fill="#FFFFFF">
<tspan x="6" y="14">worker node</tspan>
</text>
</g>
<text id="storage=disk" font-family="OpenSans-Semibold, Open Sans" font-size="10" font-weight="500" fill="#82949E">
<tspan x="3" y="93">storage=disk</tspan>
</text>
<text id="environment=dev" font-family="OpenSans-Semibold, Open Sans" font-size="10" font-weight="500" fill="#82949E">
<tspan x="3" y="79">environment=dev</tspan>
</text>
</g>
<rect id="node-border" stroke="#445D6E" stroke-width="2" x="0" y="0" width="97" height="102" rx="2"></rect>
</g>
<g id="node-1-copy-7">
<g id="node">
<g id="node-label">
<rect id="Rectangle-127" fill="#445D6E" x="0" y="0" width="71" height="21.2904762" rx="2"></rect>
<text id="worker-node" font-family="OpenSans, Open Sans" font-size="8" font-weight="normal" fill="#FFFFFF">
<tspan x="6" y="14">worker node</tspan>
</text>
</g>
<text id="storage=disk" font-family="OpenSans-Semibold, Open Sans" font-size="10" font-weight="500" fill="#82949E">
<tspan x="3" y="93">storage=disk</tspan>
</text>
<text id="environment=dev" font-family="OpenSans-Semibold, Open Sans" font-size="10" font-weight="500" fill="#82949E">
<tspan x="3" y="79">environment=dev</tspan>
</text>
</g>
<rect id="node-border" stroke="#445D6E" stroke-width="2" x="0" y="0" width="97" height="102" rx="2"></rect>
</g>
</g>
<g id="managers" transform="translate(108.000000, 0.000000)">
<g id="node-1-copy-2" transform="translate(214.000000, 0.000000)">
<g id="node">
<g id="node-label">
<rect id="Rectangle-127" fill="#445D6E" x="0" y="0" width="71" height="21.2904762" rx="2"></rect>
<text id="manager-node" font-family="OpenSans, Open Sans" font-size="8" font-weight="normal" fill="#FFFFFF">
<tspan x="6" y="14">manager node</tspan>
</text>
</g>
</g>
<rect id="node-border" stroke="#445D6E" stroke-width="2" x="0" y="0" width="97" height="102" rx="2"></rect>
</g>
<g id="node-1-copy" transform="translate(107.000000, 0.000000)">
<g id="node">
<g id="node-label">
<rect id="Rectangle-127" fill="#445D6E" x="0" y="0" width="71" height="21.2904762" rx="2"></rect>
<text id="manager-node" font-family="OpenSans, Open Sans" font-size="8" font-weight="normal" fill="#FFFFFF">
<tspan x="6" y="14">manager node</tspan>
</text>
</g>
</g>
<rect id="node-border" stroke="#445D6E" stroke-width="2" x="0" y="0" width="97" height="102" rx="2"></rect>
</g>
<g id="node-1">
<g id="node">
<g id="node-label">
<rect id="Rectangle-127" fill="#445D6E" x="0" y="0" width="71" height="21.2904762" rx="2"></rect>
<text id="manager-node" font-family="OpenSans, Open Sans" font-size="8" font-weight="normal" fill="#FFFFFF">
<tspan x="6" y="14">manager node</tspan>
</text>
</g>
</g>
<rect id="node-border" stroke="#445D6E" stroke-width="2" x="0" y="0" width="97" height="102" rx="2"></rect>
</g>
</g>
</g>
<rect id="group" stroke="#82949E" stroke-width="2" stroke-dasharray="5,5,5,5" x="11" y="3" width="718" height="245" rx="2"></rect>
</g>
</g>
</svg>

View File

@@ -0,0 +1,71 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg width="740px" height="250px" viewBox="0 0 740 250" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<!-- Generator: Sketch 40.1 (33804) - http://www.bohemiancoding.com/sketch -->
<title>architecture-1</title>
<desc>Created with Sketch.</desc>
<defs></defs>
<g id="architecture-diagrams" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
<g id="architecture-1">
<g id="Group" transform="translate(28.000000, 51.000000)">
<g id="stack">
<g id="servers" transform="translate(0.000000, 114.000000)">
<g id="cloud">
<rect id="Rectangle-138" fill="#82949E" x="0" y="0" width="172" height="34" rx="2"></rect>
<text id="cloud-servers" font-family="OpenSans, Open Sans" font-size="14" font-weight="normal" fill="#FFFFFF">
<tspan x="42.5097656" y="23">cloud servers</tspan>
</text>
</g>
<g id="virtual" transform="translate(176.000000, 0.000000)">
<rect id="Rectangle-138" fill="#82949E" x="0" y="0" width="172" height="34" rx="2"></rect>
<text id="virtual-servers" font-family="OpenSans, Open Sans" font-size="14" font-weight="normal" fill="#FFFFFF">
<tspan x="39.8608398" y="23">virtual servers</tspan>
</text>
</g>
<g id="physical" transform="translate(352.000000, 0.000000)">
<rect id="Rectangle-138" fill="#82949E" x="0" y="0" width="172" height="34" rx="2"></rect>
<text id="physical-servers" font-family="OpenSans, Open Sans" font-size="14" font-weight="normal" fill="#FFFFFF">
<tspan x="34.2075195" y="23">physical servers</tspan>
</text>
</g>
</g>
<g id="docker" transform="translate(0.000000, 76.000000)">
<rect id="Rectangle-138" fill="#1488C6" x="0" y="0" width="524" height="34" rx="2"></rect>
<text id="Docker-Enterprise-Edition" font-family="OpenSans, Open Sans" font-size="14" font-weight="normal" fill="#FFFFFF">
<tspan x="225.464355" y="23">Docker EE</tspan>
</text>
</g>
<g id="ucp" transform="translate(0.000000, 38.000000)">
<rect id="Rectangle-138" fill="#1488C6" x="0" y="0" width="524" height="34" rx="2"></rect>
<text id="Universal-Control-Pl" font-family="OpenSans, Open Sans" font-size="14" font-weight="normal" fill="#FFFFFF">
<tspan x="185.536621" y="23">Universal Control Plane</tspan>
</text>
</g>
<g id="dtr">
<rect id="Rectangle-138" fill="#1488C6" x="0" y="0" width="172" height="34" rx="2"></rect>
<text id="Docker-Trusted-Regis" font-family="OpenSans, Open Sans" font-size="14" font-weight="normal" fill="#FFFFFF">
<tspan x="8.13183594" y="23">Docker Trusted Registry</tspan>
</text>
</g>
<g id="your-apps" transform="translate(176.000000, 0.000000)">
<rect id="Rectangle-138" fill="#00B6B5" x="0" y="0" width="348" height="34" rx="2"></rect>
<text id="your-applications" font-family="OpenSans, Open Sans" font-size="14" font-weight="normal" fill="#FFFFFF">
<tspan x="118.428223" y="23">your applications</tspan>
</text>
</g>
</g>
<g id="user" transform="translate(528.000000, 44.000000)">
<g transform="translate(44.000000, 0.000000)" fill="#82949E">
<text id="deploy-and-manage" font-family="OpenSans, Open Sans" font-size="12" font-weight="normal">
<tspan x="0" y="39">deploy and manage</tspan>
</text>
<path d="M56,13 C59.59125,13 62.5,10.083125 62.5,6.5 C62.5,2.90875 59.59125,0 56,0 C52.40875,0 49.5,2.90875 49.5,6.5 C49.5,10.083125 52.40875,13 56,13 L56,13 Z M56,16.25 C51.669375,16.25 43,18.419375 43,22.75 L43,26 L69,26 L69,22.75 C69,18.419375 60.330625,16.25 56,16.25 L56,16.25 Z" id="Shape"></path>
</g>
<g id="arrow" transform="translate(0.000000, 8.000000)">
<path d="M1.5,5 L60,5" id="Line" stroke="#82949E" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"></path>
<circle id="Oval" fill="#8F9EA8" cx="5" cy="5" r="5"></circle>
</g>
</g>
</g>
</g>
</g>
</svg>

View File

@@ -0,0 +1,166 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg width="740px" height="250px" viewBox="0 0 740 250" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<!-- Generator: Sketch 40.1 (33804) - http://www.bohemiancoding.com/sketch -->
<title>architecture-2</title>
<desc>Created with Sketch.</desc>
<defs></defs>
<g id="architecture-diagrams" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
<g id="architecture-2">
<text id="Docker-swarm" font-family="OpenSans-Semibold, Open Sans" font-size="10" font-weight="500" fill="#82949E">
<tspan x="178.025" y="239.009524">Docker swarm</tspan>
</text>
<g id="nodes" transform="translate(215.000000, 15.000000)">
<g id="workers" transform="translate(53.000000, 118.000000)">
<g id="node-1">
<g id="node">
<g id="node-label">
<rect id="Rectangle-127" fill="#445D6E" x="0" y="0" width="71" height="21.2904762" rx="2"></rect>
<text id="worker-node" font-family="OpenSans, Open Sans" font-size="8" font-weight="normal" fill="#FFFFFF">
<tspan x="6" y="14">worker node</tspan>
</text>
</g>
</g>
<g id="engine" transform="translate(1.000000, 79.000000)">
<rect id="Rectangle-138" fill="#1488C6" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="Docker-Enterprise-Engine" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="24" y="15">Docker EE</tspan>
</text>
</g>
<g id="ucp" transform="translate(1.000000, 56.000000)">
<rect id="Rectangle-138" fill="#1488C6" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="UCP-agent" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="23.7373047" y="15">UCP agent</tspan>
</text>
</g>
<g id="app" transform="translate(1.000000, 33.000000)">
<rect id="Rectangle-138" fill="#FFB463" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="UCP-worker" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="20.4755859" y="15">UCP worker</tspan>
</text>
</g>
<rect id="node-border" stroke="#445D6E" stroke-width="2" x="0" y="0" width="97" height="102" rx="2"></rect>
</g>
<g id="node-1-copy-3" transform="translate(107.000000, 0.000000)">
<g id="node">
<g id="node-label">
<rect id="Rectangle-127" fill="#445D6E" x="0" y="0" width="71" height="21.2904762" rx="2"></rect>
<text id="worker-node" font-family="OpenSans, Open Sans" font-size="8" font-weight="normal" fill="#FFFFFF">
<tspan x="6" y="14">worker node</tspan>
</text>
</g>
</g>
<g id="engine" transform="translate(1.000000, 79.000000)">
<rect id="Rectangle-138" fill="#1488C6" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="Docker-Enterprise-Engine" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="24" y="15">Docker EE</tspan>
</text>
</g>
<g id="ucp" transform="translate(1.000000, 56.000000)">
<rect id="Rectangle-138" fill="#1488C6" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="UCP-agent" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="23.7373047" y="15">UCP agent</tspan>
</text>
</g>
<g id="app" transform="translate(1.000000, 33.000000)">
<rect id="Rectangle-138" fill="#FFB463" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="UCP-worker" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="20.4755859" y="15">UCP worker</tspan>
</text>
</g>
<rect id="node-border" stroke="#445D6E" stroke-width="2" x="0" y="0" width="97" height="102" rx="2"></rect>
</g>
</g>
<g id="managers">
<g id="node-1">
<g id="node">
<g id="node-label">
<rect id="Rectangle-127" fill="#445D6E" x="0" y="0" width="71" height="21.2904762" rx="2"></rect>
<text id="manager-node" font-family="OpenSans, Open Sans" font-size="8" font-weight="normal" fill="#FFFFFF">
<tspan x="6" y="14">manager node</tspan>
</text>
</g>
</g>
<g id="engine" transform="translate(1.000000, 79.000000)">
<rect id="Rectangle-138" fill="#1488C6" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="Docker-Enterprise-Engine" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="24" y="15">Docker EE</tspan>
</text>
</g>
<g id="ucp" transform="translate(1.000000, 56.000000)">
<rect id="Rectangle-138" fill="#1488C6" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="UCP-agent" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="23.7373047" y="15">UCP agent</tspan>
</text>
</g>
<g id="app" transform="translate(1.000000, 33.000000)">
<rect id="Rectangle-138" fill="#00B6B5" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="UCP-manager" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="16.0297852" y="15">UCP manager</tspan>
</text>
</g>
<rect id="node-border" stroke="#445D6E" stroke-width="2" x="0" y="0" width="97" height="102" rx="2"></rect>
</g>
<g id="node-1-copy" transform="translate(107.000000, 0.000000)">
<g id="node">
<g id="node-label">
<rect id="Rectangle-127" fill="#445D6E" x="0" y="0" width="71" height="21.2904762" rx="2"></rect>
<text id="manager-node" font-family="OpenSans, Open Sans" font-size="8" font-weight="normal" fill="#FFFFFF">
<tspan x="6" y="14">manager node</tspan>
</text>
</g>
</g>
<g id="engine" transform="translate(1.000000, 79.000000)">
<rect id="Rectangle-138" fill="#1488C6" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="Docker-Enterprise-Engine" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="24" y="15">Docker EE</tspan>
</text>
</g>
<g id="ucp" transform="translate(1.000000, 56.000000)">
<rect id="Rectangle-138" fill="#1488C6" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="UCP-agent" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="23.7373047" y="15">UCP agent</tspan>
</text>
</g>
<g id="app" transform="translate(1.000000, 33.000000)">
<rect id="Rectangle-138" fill="#00B6B5" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="UCP-manager" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="16.0297852" y="15">UCP manager</tspan>
</text>
</g>
<rect id="node-border" stroke="#445D6E" stroke-width="2" x="0" y="0" width="97" height="102" rx="2"></rect>
</g>
<g id="node-1-copy-2" transform="translate(214.000000, 0.000000)">
<g id="node">
<g id="node-label">
<rect id="Rectangle-127" fill="#445D6E" x="0" y="0" width="71" height="21.2904762" rx="2"></rect>
<text id="manager-node" font-family="OpenSans, Open Sans" font-size="8" font-weight="normal" fill="#FFFFFF">
<tspan x="6" y="14">manager node</tspan>
</text>
</g>
</g>
<g id="engine" transform="translate(1.000000, 79.000000)">
<rect id="Rectangle-138" fill="#1488C6" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="Docker-Enterprise-Engine" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="24" y="15">Docker EE</tspan>
</text>
</g>
<g id="ucp" transform="translate(1.000000, 56.000000)">
<rect id="Rectangle-138" fill="#1488C6" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="UCP-agent" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="23.7373047" y="15">UCP agent</tspan>
</text>
</g>
<g id="app" transform="translate(1.000000, 33.000000)">
<rect id="Rectangle-138" fill="#00B6B5" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="UCP-manager" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="16.0297852" y="15">UCP manager</tspan>
</text>
</g>
<rect id="node-border" stroke="#445D6E" stroke-width="2" x="0" y="0" width="97" height="102" rx="2"></rect>
</g>
</g>
</g>
<rect id="group" stroke="#82949E" stroke-width="2" stroke-dasharray="5,5,5,5" x="169" y="3" width="401" height="245" rx="2"></rect>
</g>
</g>
</svg>

View File

@@ -0,0 +1,233 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg width="740px" height="350px" viewBox="0 0 740 350" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<!-- Generator: Sketch 40.1 (33804) - http://www.bohemiancoding.com/sketch -->
<title>architecture-3</title>
<desc>Created with Sketch.</desc>
<defs>
<circle id="path-1" cx="4" cy="4" r="4"></circle>
<mask id="mask-2" maskContentUnits="userSpaceOnUse" maskUnits="objectBoundingBox" x="-2" y="-2" width="12" height="12">
<rect x="-2" y="-2" width="12" height="12" fill="white"></rect>
<use xlink:href="#path-1" fill="black"></use>
</mask>
<circle id="path-3" cx="4" cy="4" r="4"></circle>
<mask id="mask-4" maskContentUnits="userSpaceOnUse" maskUnits="objectBoundingBox" x="-2" y="-2" width="12" height="12">
<rect x="-2" y="-2" width="12" height="12" fill="white"></rect>
<use xlink:href="#path-3" fill="black"></use>
</mask>
<circle id="path-5" cx="4" cy="4" r="4"></circle>
<mask id="mask-6" maskContentUnits="userSpaceOnUse" maskUnits="objectBoundingBox" x="-2" y="-2" width="12" height="12">
<rect x="-2" y="-2" width="12" height="12" fill="white"></rect>
<use xlink:href="#path-5" fill="black"></use>
</mask>
</defs>
<g id="architecture-diagrams" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
<g id="architecture-3">
<g id="ddc" transform="translate(169.000000, 56.000000)">
<g id="swarm-group" transform="translate(0.000000, 42.000000)">
<text id="Docker-swarm" font-family="OpenSans-Semibold, Open Sans" font-size="10" font-weight="500" fill="#E0E4E7">
<tspan x="9.025" y="236.009524">Docker swarm</tspan>
</text>
<rect id="group" stroke="#E0E4E7" stroke-width="2" stroke-dasharray="5,5,5,5" x="0" y="0" width="401" height="245" rx="2"></rect>
</g>
<g id="load-balancer" transform="translate(47.000000, 0.000000)">
<g id="main">
<rect id="Rectangle-138" fill="#445D6E" x="0" y="0" width="309" height="22" rx="2"></rect>
<text id="your-load-balancer" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="111.437988" y="15">your load balancer</tspan>
</text>
</g>
<g id="arrows" transform="translate(43.000000, 21.000000)">
<g id="arrow-copy-3" transform="translate(218.500000, 17.000000) scale(1, -1) rotate(-90.000000) translate(-218.500000, -17.000000) translate(202.000000, 13.000000)">
<path d="M2,4 L33,4" id="Line" stroke="#445D6E" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"></path>
<g id="Oval">
<use fill="#445D6E" fill-rule="evenodd" xlink:href="#path-1"></use>
<use stroke="#F7F8F9" mask="url(#mask-2)" stroke-width="4" xlink:href="#path-1"></use>
</g>
</g>
<g id="arrow-copy" transform="translate(111.500000, 17.000000) scale(1, -1) rotate(-90.000000) translate(-111.500000, -17.000000) translate(95.000000, 13.000000)">
<path d="M2,4 L33,4" id="Line" stroke="#445D6E" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"></path>
<g id="Oval">
<use fill="#445D6E" fill-rule="evenodd" xlink:href="#path-3"></use>
<use stroke="#F7F8F9" mask="url(#mask-4)" stroke-width="4" xlink:href="#path-3"></use>
</g>
</g>
<g id="arrow-copy-2" transform="translate(4.500000, 17.000000) scale(1, -1) rotate(-90.000000) translate(-4.500000, -17.000000) translate(-12.000000, 13.000000)">
<path d="M2,4 L33,4" id="Line" stroke="#445D6E" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"></path>
<g id="Oval">
<use fill="#445D6E" fill-rule="evenodd" xlink:href="#path-5"></use>
<use stroke="#F7F8F9" mask="url(#mask-6)" stroke-width="4" xlink:href="#path-5"></use>
</g>
</g>
</g>
</g>
<g id="swam" transform="translate(46.000000, 54.000000)">
<g id="nodes">
<g id="workers" transform="translate(53.000000, 118.000000)">
<g id="node-1">
<g id="node">
<g id="node-label">
<rect id="Rectangle-127" fill="#E0E4E7" x="0" y="0" width="71" height="21.2904762" rx="2"></rect>
<text id="worker-node" font-family="OpenSans, Open Sans" font-size="8" font-weight="normal" fill="#FFFFFF">
<tspan x="6" y="14">worker node</tspan>
</text>
</g>
</g>
<g id="engine" transform="translate(1.000000, 79.000000)">
<rect id="Rectangle-138" fill="#A1CFE8" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="Docker-Enterprise-Edition" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="24" y="15">Docker EE</tspan>
</text>
</g>
<g id="ucp" transform="translate(1.000000, 56.000000)">
<rect id="Rectangle-138" fill="#A1CFE8" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="UCP-agent" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="23.7373047" y="15">UCP agent</tspan>
</text>
</g>
<g id="app" transform="translate(1.000000, 33.000000)">
<rect id="Rectangle-138" fill="#FFE1C0" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="UCP-worker" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="20.4755859" y="15">UCP worker</tspan>
</text>
</g>
<rect id="node-border" stroke="#E0E4E7" stroke-width="2" x="0" y="0" width="97" height="102" rx="2"></rect>
</g>
<g id="node-1-copy-3" transform="translate(107.000000, 0.000000)">
<g id="node">
<g id="node-label">
<rect id="Rectangle-127" fill="#E0E4E7" x="0" y="0" width="71" height="21.2904762" rx="2"></rect>
<text id="worker-node" font-family="OpenSans, Open Sans" font-size="8" font-weight="normal" fill="#FFFFFF">
<tspan x="6" y="14">worker node</tspan>
</text>
</g>
</g>
<g id="engine" transform="translate(1.000000, 79.000000)">
<rect id="Rectangle-138" fill="#A1CFE8" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="Docker-Enterprise-Edition" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="24" y="15">Docker EE</tspan>
</text>
</g>
<g id="ucp" transform="translate(1.000000, 56.000000)">
<rect id="Rectangle-138" fill="#A1CFE8" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="UCP-agent" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="23.7373047" y="15">UCP agent</tspan>
</text>
</g>
<g id="app" transform="translate(1.000000, 33.000000)">
<rect id="Rectangle-138" fill="#FFE1C0" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="UCP-worker" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="20.4755859" y="15">UCP worker</tspan>
</text>
</g>
<rect id="node-border" stroke="#E0E4E7" stroke-width="2" x="0" y="0" width="97" height="102" rx="2"></rect>
</g>
</g>
<g id="managers">
<g id="node-1-copy-2" transform="translate(214.000000, 0.000000)">
<g id="node">
<g id="node-label">
<rect id="Rectangle-127" fill="#445D6E" x="0" y="0" width="71" height="21.2904762" rx="2"></rect>
<text id="manager-node" font-family="OpenSans, Open Sans" font-size="8" font-weight="normal" fill="#FFFFFF">
<tspan x="6" y="14">manager node</tspan>
</text>
</g>
</g>
<g id="engine" transform="translate(1.000000, 79.000000)">
<rect id="Rectangle-138" fill="#1488C6" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="Docker-Enterprise-Edition" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="24" y="15">Docker EE</tspan>
</text>
</g>
<g id="ucp" transform="translate(1.000000, 56.000000)">
<rect id="Rectangle-138" fill="#1488C6" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="UCP-agent" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="23.7373047" y="15">UCP agent</tspan>
</text>
</g>
<g id="app" transform="translate(1.000000, 33.000000)">
<rect id="Rectangle-138" fill="#00B6B5" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="UCP-manager" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="16.0297852" y="15">UCP manager</tspan>
</text>
</g>
<rect id="node-border" stroke="#445D6E" stroke-width="2" x="0" y="0" width="97" height="102" rx="2"></rect>
</g>
<g id="node-1-copy" transform="translate(107.000000, 0.000000)">
<g id="node">
<g id="node-label">
<rect id="Rectangle-127" fill="#445D6E" x="0" y="0" width="71" height="21.2904762" rx="2"></rect>
<text id="manager-node" font-family="OpenSans, Open Sans" font-size="8" font-weight="normal" fill="#FFFFFF">
<tspan x="6" y="14">manager node</tspan>
</text>
</g>
</g>
<g id="engine" transform="translate(1.000000, 79.000000)">
<rect id="Rectangle-138" fill="#1488C6" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="Docker-Enterprise-Edition" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="24" y="15">Docker EE</tspan>
</text>
</g>
<g id="ucp" transform="translate(1.000000, 56.000000)">
<rect id="Rectangle-138" fill="#1488C6" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="UCP-agent" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="23.7373047" y="15">UCP agent</tspan>
</text>
</g>
<g id="app" transform="translate(1.000000, 33.000000)">
<rect id="Rectangle-138" fill="#00B6B5" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="UCP-manager" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="16.0297852" y="15">UCP manager</tspan>
</text>
</g>
<rect id="node-border" stroke="#445D6E" stroke-width="2" x="0" y="0" width="97" height="102" rx="2"></rect>
</g>
<g id="node-1">
<g id="node">
<g id="node-label">
<rect id="Rectangle-127" fill="#445D6E" x="0" y="0" width="71" height="21.2904762" rx="2"></rect>
<text id="manager-node" font-family="OpenSans, Open Sans" font-size="8" font-weight="normal" fill="#FFFFFF">
<tspan x="6" y="14">manager node</tspan>
</text>
</g>
</g>
<g id="engine" transform="translate(1.000000, 79.000000)">
<rect id="Rectangle-138" fill="#1488C6" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="Docker-Enterprise-Edition" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="24" y="15">Docker EE</tspan>
</text>
</g>
<g id="ucp" transform="translate(1.000000, 56.000000)">
<rect id="Rectangle-138" fill="#1488C6" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="UCP-agent" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="23.7373047" y="15">UCP agent</tspan>
</text>
</g>
<g id="app" transform="translate(1.000000, 33.000000)">
<rect id="Rectangle-138" fill="#00B6B5" x="0" y="0" width="95" height="22" rx="2"></rect>
<text id="UCP-manager" font-family="OpenSans, Open Sans" font-size="10" font-weight="normal" fill="#FFFFFF">
<tspan x="16.0297852" y="15">UCP manager</tspan>
</text>
</g>
<rect id="node-border" stroke="#445D6E" stroke-width="2" x="0" y="0" width="97" height="102" rx="2"></rect>
</g>
</g>
</g>
</g>
</g>
<g id="user" transform="translate(337.000000, 5.000000)" fill="#82949E">
<g>
<text id="UI" font-family="OpenSans, Open Sans" font-size="12" font-weight="normal">
<tspan x="7" y="41">UI</tspan>
</text>
<path d="M13,13 C16.59125,13 19.5,10.083125 19.5,6.5 C19.5,2.90875 16.59125,0 13,0 C9.40875,0 6.5,2.90875 6.5,6.5 C6.5,10.083125 9.40875,13 13,13 L13,13 Z M13,16.25 C8.669375,16.25 0,18.419375 0,22.75 L0,26 L26,26 L26,22.75 C26,18.419375 17.330625,16.25 13,16.25 L13,16.25 Z" id="Shape"></path>
</g>
<g id="user-copy" transform="translate(42.000000, 0.000000)">
<text id="CLI" font-family="OpenSans, Open Sans" font-size="12" font-weight="normal">
<tspan x="4" y="41">CLI</tspan>
</text>
<path d="M13,13 C16.59125,13 19.5,10.083125 19.5,6.5 C19.5,2.90875 16.59125,0 13,0 C9.40875,0 6.5,2.90875 6.5,6.5 C6.5,10.083125 9.40875,13 13,13 L13,13 Z M13,16.25 C8.669375,16.25 0,18.419375 0,22.75 L0,26 L26,26 L26,22.75 C26,18.419375 17.330625,16.25 13,16.25 L13,16.25 Z" id="Shape"></path>
</g>
</g>
</g>
</g>
</svg>

After

Width:  |  Height:  |  Size: 18 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 30 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 70 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 26 KiB

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 24 KiB

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 31 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 26 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 54 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 68 KiB

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 40 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 299 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 30 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 18 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 44 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 35 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 33 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 36 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 19 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 37 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 30 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 34 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 75 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 187 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 226 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 42 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 82 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 283 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 266 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 226 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 262 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 56 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 48 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 53 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 24 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 34 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 67 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 251 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 184 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 162 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 165 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 188 KiB

Some files were not shown because too many files have changed in this diff Show More