From b55460ffbc7a7f6923803105acaec8e84f812a8d Mon Sep 17 00:00:00 2001 From: Jim Galasyn Date: Tue, 20 Jun 2017 17:43:30 -0700 Subject: [PATCH] First drafts of rbac topics (#94) * First drafts of rbac topics * Incorporate feedback for rbac topics --- _data/toc.yaml | 8 ++ .../manage-users/deploy-view-only-service.md | 44 ++++++ .../2.2/guides/admin/manage-users/index.md | 129 +++++++++++++----- .../isolate-nodes-between-teams.md | 123 +++++++++++++++++ .../isolate-volumes-between-teams.md | 66 +++++++++ .../manage-access-with-collections.md | 119 ++++++++++++++++ 6 files changed, 454 insertions(+), 35 deletions(-) create mode 100644 datacenter/ucp/2.2/guides/admin/manage-users/deploy-view-only-service.md create mode 100644 datacenter/ucp/2.2/guides/admin/manage-users/isolate-nodes-between-teams.md create mode 100644 datacenter/ucp/2.2/guides/admin/manage-users/isolate-volumes-between-teams.md create mode 100644 datacenter/ucp/2.2/guides/admin/manage-users/manage-access-with-collections.md diff --git a/_data/toc.yaml b/_data/toc.yaml index 9c25fbb0dd..8b936edee8 100644 --- a/_data/toc.yaml +++ b/_data/toc.yaml @@ -1644,6 +1644,14 @@ manuals: title: Create and manage users - path: /datacenter/ucp/2.2/guides/admin/manage-users/create-and-manage-teams/ title: Create and manage teams + - path: /datacenter/ucp/2.2/guides/admin/manage-users/deploy-view-only-service/ + title: Deploy a service with view-only access across an organization + - path: /datacenter/ucp/2.2/guides/admin/manage-users/isolate-nodes-between-teams/ + title: Isolate swarm nodes between two different teams + - path: /datacenter/ucp/2.2/guides/admin/manage-users/isolate-volumes-between-teams/ + title: Isolate volumes between two different teams + - path: /datacenter/ucp/2.2/guides/admin/manage-users/manage-access-with-collections/ + title: Manage access to resources by using collections - path: /datacenter/ucp/2.2/guides/admin/manage-users/permission-levels/ title: Permission levels - path: /datacenter/ucp/2.2/guides/admin/manage-users/recover-a-user-password/ diff --git a/datacenter/ucp/2.2/guides/admin/manage-users/deploy-view-only-service.md b/datacenter/ucp/2.2/guides/admin/manage-users/deploy-view-only-service.md new file mode 100644 index 0000000000..c51f8f1c24 --- /dev/null +++ b/datacenter/ucp/2.2/guides/admin/manage-users/deploy-view-only-service.md @@ -0,0 +1,44 @@ +--- +title: Deploy a service with view-only access across an organization +description: Create a grant to control access to a service. +keywords: ucp, grant, role, permission, authentication +--- + +In this example, your organization is granted access to a new resource +collection that contains one service. If you don't have an organization +already, create one by navigating to **User Management > Organizations** +and clicking **Create organization**. + +1. In the left pane, click **Collections** to show all of the resource + collections in the swarm. +2. Find the **Shared** collection and click **View collection**. +2. Click **Create collection**, and in the **Collection Name** textbox, enter + a name that describes the resources that you want to group. In this example, + name the collection "View-only services". +3. Click **Create** to create the collection. + +Currently, the new collection is empty. To populate it, deploy a new service +and add it to the collection. + +1. In the left pane, click **Services** to show all of the services running + in the swarm. +2. Click **Create service**, and in the **Name** textbox, enter "WordPress". +3. 
In the **Image** textbox, enter "wordpress". This identifies the latest
+   `wordpress` image in the Docker Store.
+4. In the left pane, click **Collections**. The user's default collection
+   appears. Click **Selected** to list all of the collections. Click **Shared**,
+   find the **View-only services** collection in the list, and click
+   **Select**.
+5. Click **Deploy** to add the "WordPress" service to the collection and
+   deploy it.
+
+You're ready to create a grant for controlling access to the "WordPress" service.
+
+1. Navigate to **User Management > Manage Grants** and click **Create grant**.
+2. In the left pane, click **Collections**, navigate to **/Shared/View-only services**,
+   and click **Select**.
+3. Click **Roles**, and select **View Only** in the dropdown list.
+4. Click **Subjects**, and under **Select subject type**, click **Organizations**.
+   In the dropdown, pick the organization that you want to associate with this grant.
+5. Click **Create** to grant permissions to the organization.
diff --git a/datacenter/ucp/2.2/guides/admin/manage-users/index.md b/datacenter/ucp/2.2/guides/admin/manage-users/index.md
index 544e096534..c71d0f1e2a 100644
--- a/datacenter/ucp/2.2/guides/admin/manage-users/index.md
+++ b/datacenter/ucp/2.2/guides/admin/manage-users/index.md
@@ -1,55 +1,114 @@
 ---
-description: Learn how to manage permissions in Docker Universal Control Plane.
-keywords: authorization, authentication, users, teams, UCP
 title: Authentication and authorization
+description: Manage access to containers, services, volumes, and networks by using role-based access control
+keywords: ucp, grant, role, permission, authentication, authorization
 ---

-With Docker Universal Control Plane you get to control who can create and edit
-resources like services, images, networks, and volumes in your cluster.
+With Docker Universal Control Plane, you get to control who can create and
+edit container resources in your swarm, like services, images, networks,
+and volumes. You can grant and manage permissions to enforce fine-grained
+access control as needed.

-By default no one can make changes to your cluster. You can then grant and
-manage permissions to enforce fine-grained access control. For that:
+## Grant access to swarm resources

-* Start by creating a user and assigning them a default permission.
+If you're a UCP administrator, you can create *grants* to control how users
+and organizations access swarm resources.

-  Default permissions specify the permission a user has to create and edit
-  resources. You can choose from four permission levels that range from
-  no access to full control over the resources.
+A grant is made up of a *subject*, a *role*, and a *resource collection*.
+A grant defines who (subject) has how much access (role)
+to a set of resources (collection). Each grant is a 1:1:1 mapping of
+subject, role, and collection. For example, you can create a grant that
+specifies that the "Prod Team" gains "View Only" permission against
+resources in the "/Production" collection.

-* Extend the user permissions by adding users to a team.
+The usual workflow for creating grants has three steps.

-  You can extend the user's default permissions by adding the user to a team.
-  A team defines the permissions users have for a collection of labels, and
-  thus the resources that have those labels applied to them.
+1. Set up your users and teams. For example, you might want three teams,
+   Dev, QA, and Prod.
+2. Organize swarm resources into separate collections that each team will use.
+3.
Grant access against resource collections for your teams. -## Users and teams +## Subjects -When users create services or networks with no label, those resources are only -visible to them and administrative users. -For a team of users to be able to see and edit the same resources, the -resources needs to have the `com.docker.ucp.access.label` label applied. +A subject represents a user, team, or organization. A subject is granted a role +for a collection of resources. -![](../../images/secure-your-infrastructure-1.svg) +- **User**: A person that the authentication backend validates. You can + assign users to one or more teams and one or more organizations. +- **Organization**: A group of users that share a specific set of + permissions, defined by the roles of the organization. +- **Team**: A group of users that share a set of permissions defined in the + team itself. A team exists only as part of an organization, and all of its + members must be members of the organization. Team members share + organization permissions. A team can be in one organization only. +- **Administrator**: A person who identifies which operations can be + performed against specific resources and who can perform these actions. + An administrator can create and manage role assignments against any user, + team, and organization in the system. Only administrators can manage + grants. -In the example above, we have two sets of containers. One set has all containers -labeled with `com.docker.ucp.access.label=crm`, the other has all containers -labeled with `com.docker.ucp.access.label=billing`. +## Roles -You can now create different teams, and tune the permission level each -team has for those containers. +A role is a set of permitted API operations that you can assign to a specific +subject and collection by using a grant. UCP administrators view and manage +roles by navigating to the **User Management > Roles** page. -![](../../images/secure-your-infrastructure-2.svg) +The system provides the following default roles: -As an example you can create three different teams: +| Built-in role | Description | +|----------------------|-------------| +| `View Only` | The user can view resources like services, volumes, and networks but can't create them. | +| `Restricted Control` | The user can view and edit volumes, networks, and images but can't run a service or container in a way that might affect the node where it's running. The user can't mount a node directory and can't `exec` into containers. Also, The user can't run containers in privileged mode or with additional kernel capabilities. | +| `Full Control` | The user can view and edit volumes, networks, and images, They can create containers without any restriction, but can't see other users' containers. | +| `Scheduler` | The user can schedule and view workloads on worker nodes. By default, all users get a grant with the `Scheduler` role against the `/Shared` collection. | +| `Admin` | The user has full access to all resources, like volumes, networks, images, and containers. | -* The team that's developing the CRM app has access to create and edit -containers with the label `com.docker.ucp.access.label=crm`. -* The team that's developing the Billing app, has access to create and edit -containers with the label `com.docker.ucp.access.label=billing`. -* And of course, the operations team has access to create and edit containers -with any of the two labels. 
+Administrators can create a custom role that has Docker API permissions +that specify the API actions that a subject may perform. + +The **Roles** page lists the available roles, including the default roles +and any custom roles that administrators have created. In the **Roles** +list, click a role to see the API operations that it uses. For example, the +`Scheduler` role has two of the node operations, `Schedule` and `View`. + +Click **Create role** to create a custom role and define the API operations +that it uses. When you create a custom role, all of the APIs that you can use +are listed on the **Create Role** page. For example, you can create a custom +role that uses all of the node operations, `Join Token`, `Schedule`, +`Update`, and `View`, and you might name it "Node Operator". + +You can give a role a global name, like "Remove Images", which might enable +the **Remove** and **Force Remove** operations for images. You can apply a +role with the same name to different resource collections. + +Only an administrator can create and remove roles. An administrator +can enable and disable roles in the system. Roles can't be edited, so +to change a role's API operations, you must delete it and recreate it. + +You can't delete a custom role if it's used in a grant. You must first delete +the grants that use the role. + +## Resource collections + +Docker EE enables controlling access to container resources by using +*collections*. A collection is a grouping of container resources, like +volumes, networks, secrets, and services, that you access by specifying +a directory-like path. For more info, see +[Manage access to resources by using collections](manage-access-with-collections.md). + +## Transition from UCP 2.1 access control + +- Your existing access labels and permissions are migrated automatically + during an upgrade from UCP 2.1.x. +- Unlabeled "user-owned" resources are migrated into the user's private + collection, in `/Shared/Private/`. +- Old access control labels are migrated into `/Shared/Legacy/`. +- When deploying a resource, choose a collection instead of an access label. +- Use grants for access control, instead of unlabeled permissions. ## Where to go next -* [Create and manage users](create-and-manage-users.md) -* [Create and manage teams](create-and-manage-teams.md) +- [Deploy a service with view-only access across an organization](deploy-view-only-service.md) +- [Isolate volumes between two different teams](isolate-volumes-between-teams.md) +- [Isolate swarm nodes between two different teams](isolate-nodes-between-teams.md) + diff --git a/datacenter/ucp/2.2/guides/admin/manage-users/isolate-nodes-between-teams.md b/datacenter/ucp/2.2/guides/admin/manage-users/isolate-nodes-between-teams.md new file mode 100644 index 0000000000..37a328d1b6 --- /dev/null +++ b/datacenter/ucp/2.2/guides/admin/manage-users/isolate-nodes-between-teams.md @@ -0,0 +1,123 @@ +--- +title: Isolate swarm nodes between two different teams +description: Create grants that limit access to nodes to specific teams. +keywords: ucp, grant, role, permission, authentication +--- + +With Docker EE Advanced, you can enable physical isolation of resources +by organizing nodes into collections and granting `Scheduler` access for +different users. + +In this example, two teams get access to two different node collections, +and UCP access control ensures that the teams can't view or use each other's +container resources. You need at least two worker nodes to complete this +example. + +1. 
Create an `Ops` team and a `QA` team. +2. Create `/Prod` and `/Staging` collections for the two teams. +3. Assign worker nodes to one collection or the other. +4. Grant the `Ops` and `QA` teams access against their + corresponding collections. + +## Create two teams + +Click the **Organizations** link in the web UI to create two teams in your +organization, named "Ops" and "QA". For more info, see +[Create and manage teams](create-and-manage-teams.md). + +## Create resource collections + +In this example, the Ops and QA teams use two different node groups, +which they access through corresponding resource collections. + +1. In the left pane, click **Collections** to show all of the resource + collections in the swarm. +2. Click **Create collection**, and in the **Collection Name** textbox, enter + "Prod". +3. Click **Create** to create the collection. +4. Find **Prod** in the list, and click **View collection**. +5. Click **Create collection**, and in the **Collection Name** textbox, enter + "ApplicationA". This creates a sub-collection for access control. +6. Navigate to the collections list by clicking **Collections** in the left pane + or at the top of the page. +7. Click **Create collection** again, and in the **Collection Name** textbox, enter + "Staging". Also, create a sub-collection named "ApplicationA". + +You've created four new collections. The `/Prod` and `/Staging` collections +are for the worker nodes, and the `/Prod/ApplicationA` and `/Staging/ApplicationA` +sub-collections are for access control to an application that will be deployed on the corresponding worker nodes. + +## Move worker nodes to collections + +By default, worker nodes are located in the `/Shared` collection. To control +access to nodes, move them to dedicated collections where you can grant +access to specific users, teams, and organizations. + +Move worker nodes by changing the value of the access label key, + `com.docker.ucp.access.label`, to a different collection. + +1. In the left pane, click **Nodes** to view all of the nodes in the swarm. +2. Click a worker node, and in the details pane on the right, click **Edit**. +3. In the **Labels** section, find the access label with the value `/Shared` and + change it to `/Prod`. +4. Click **Save** to move the node to the `/Prod` collection. +5. Repeat the previous steps for another worker node, and move it to the + `/Staging` collection. + +> Note: If you're not running Docker EE Advanced, you'll get the following +> error message when you try to change the access label: +> Nodes must be in either the shared or system collection without an advanced license. + +## Grant access for specific teams + +You'll need four grants to control access to nodes and container resources: + +- Grant the `Ops` team the `Scheduler` role against the `/Prod` nodes. +- Grant the `Ops` team the `Restricted Control` role against the `/Prod/ApplicationA` resources. +- Grant the `QA` team the `Scheduler` role against the `/Staging` nodes. +- Grant the `QA` team the `Restricted Control` role against the `/Staging/ApplicationA` resources. + +These are the steps for creating the grants for the resource collections. + +1. Navigate to **User Management > Manage Grants** and click **Create grant**. +2. In the left pane, click **Collections**, navigate to **/Prod/ApplicationA**, + and click **Select**. +3. Click **Roles**, and select **Restricted Control** in the dropdown list. +4. Click **Subjects**, and under **Select subject type**, click **Organizations**. 
+   Select **Ops** from the **Team** dropdown.
+5. Click **Create** to grant permissions to the Ops team.
+6. Click **Create grant** and repeat the previous steps for the **/Staging/ApplicationA**
+   collection and the QA team.
+
+The same workflow applies for creating the grants against the node collections.
+Apply the `Scheduler` role to the `/Prod` and `/Staging` collections.
+
+With these four grants in place, members of the QA team won't be able
+to view or use the `/Prod` nodes, and members of the Ops team won't be able
+to view or use the `/Staging` nodes.
+
+## Access control in action
+
+You can see access control in action with the following two scenarios.
+
+### Create production workloads
+
+Users on the Ops team have permission to create workloads on the `/Prod`
+nodes.
+
+1. Log in as a user on the Ops team.
+2. Change the user's default collection to `/Prod/ApplicationA`.
+3. Run `docker stack deploy` with any compose/stack file.
+4. All resources are deployed under `/Prod/ApplicationA`, and the
+   containers are scheduled only on the nodes under `/Prod`.
+
+### New users can't inspect isolated nodes and container resources
+
+1. Create a new user.
+2. Log in as the new user.
+3. Ensure that the `/Shared` collection has at least one worker node.
+4. As the new user, run `docker stack deploy` with any compose/stack file.
+5. The new workload is scheduled on the nodes under `/Shared`, and its
+   resources are placed in the user's private collection.
+6. The new user can't view any of the nodes under `/Prod` or `/Staging`.
+
diff --git a/datacenter/ucp/2.2/guides/admin/manage-users/isolate-volumes-between-teams.md b/datacenter/ucp/2.2/guides/admin/manage-users/isolate-volumes-between-teams.md
new file mode 100644
index 0000000000..2fc815881f
--- /dev/null
+++ b/datacenter/ucp/2.2/guides/admin/manage-users/isolate-volumes-between-teams.md
@@ -0,0 +1,66 @@
+---
+title: Isolate volumes between two different teams
+description: Create grants that limit access to volumes to specific teams.
+keywords: ucp, grant, role, permission, authentication
+---
+
+In this example, two teams are granted access to volumes in two different
+resource collections. UCP access control prevents the teams from viewing and
+accessing each other's volumes, even though they may be located on the same
+nodes.
+
+The procedure has the following steps.
+
+1. Create two teams.
+2. Create resource collections.
+3. Create grants.
+4. Create a volume as a team member.
+
+## Create two teams
+
+Use the **Organizations** web UI to create two teams in your organization,
+named "Dev" and "Prod".
+[Learn how to create and manage teams](create-and-manage-teams.md).
+
+## Create resource collections
+
+In this example, the Dev and Prod teams use two different volumes, which they
+access through two corresponding resource collections. The collections are
+placed under the `/Shared` collection.
+
+1. In the left pane, click **Collections** to show all of the resource
+   collections in the swarm.
+2. Find the **/Shared** collection and click **View collection**.
+3. Click **Create collection**, and in the **Collection Name** input, enter
+   "dev-volumes".
+4. Click **Create** to create the collection.
+5. Click **Create collection** again, and in the **Collection Name** input, enter
+   "prod-volumes", and click **Create**.
+
+## Create grants for controlling access to the new volumes
+
+1. Navigate to **User Management > Manage Grants** and click **Create grant**.
+2. In the left pane, click **Collections**, navigate to **/Shared/dev-volumes**,
+   and click **Select**.
+3.
Click **Roles**, and select **Restricted Control** in the dropdown list.
+4. Click **Subjects**, and under **Select subject type**, click **Organizations**.
+   In the dropdown, pick the organization that you want to associate with this grant.
+   Also, pick **Dev** from the **Team** dropdown.
+5. Click **Create** to grant permissions to the Dev team.
+6. Click **Create grant** and repeat the previous steps for the **/Shared/prod-volumes**
+   collection and the Prod team.
+
+## Create a volume as a team member
+
+Team members have permission to create volumes in their assigned collection.
+
+1. Log in as one of the users on the Dev team.
+2. In the left pane, click **Volumes** to show all of the
+   volumes in the swarm that the user can access.
+3. Click **Create volume** and name the new volume "dev-data".
+4. In the left pane, click **Collections**. The default collection appears.
+   At the top of the page, click **Shared**, find the **dev-volumes**
+   collection in the list, and click **Select**.
+5. Click **Create** to add the "dev-data" volume to the collection.
+
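+If you'd rather script this step, the following sketch shows roughly the same
+operation from the CLI. It assumes a client bundle for a user on the Dev team
+and relies on the `com.docker.ucp.access.label` label described in
+[Manage access to resources by using collections](manage-access-with-collections.md);
+the exact label handling may vary with your UCP version.
+
+```
+# Create the "dev-data" volume and place it in the /Shared/dev-volumes
+# collection by setting the UCP access label explicitly.
+docker volume create \
+  --label com.docker.ucp.access.label="/Shared/dev-volumes" \
+  dev-data
+```
+
+A user on the Prod team can create a "prod-data" volume the same way, pointing
+the label at `/Shared/prod-volumes` instead.
+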
diff --git a/datacenter/ucp/2.2/guides/admin/manage-users/manage-access-with-collections.md b/datacenter/ucp/2.2/guides/admin/manage-users/manage-access-with-collections.md
new file mode 100644
index 0000000000..1fea22dac1
--- /dev/null
+++ b/datacenter/ucp/2.2/guides/admin/manage-users/manage-access-with-collections.md
@@ -0,0 +1,119 @@
+---
+title: Manage access to resources by using collections
+description: Use collections to enable access control for worker nodes and container resources.
+keywords: ucp, grant, role, permission, authentication, resource collection
+---
+
+Docker EE enables controlling access to container resources by using
+*collections*. A collection is a group of swarm resources,
+like services, containers, volumes, networks, and secrets.
+
+Access to collections goes through a directory structure that arranges a
+swarm's resources. To assign permissions, administrators create grants
+against directory branches.
+
+## Directory paths define access to collections
+
+Access to collections is based on a directory-like structure.
+For example, the path to a user's default collection is
+`/Shared/Private/<username>`. Every user has a private collection that
+has the default permission specified by the UCP administrator.
+
+Each collection has an access label that identifies its path.
+For example, the private collection for user "hans" has a label that looks
+like this:
+
+```
+com.docker.ucp.access.label = /Shared/Private/hans
+```
+
+You can nest collections. If a user has a grant against a collection,
+the grant applies to all of its child collections.
+
+For a child collection, or for a user who belongs to more than one team,
+the system combines permissions from multiple roles into an
+"effective role" for the user, which specifies the operations that are
+allowed against the target.
+
+## Built-in collections
+
+UCP provides a number of built-in collections.
+
+- `/` or `/Swarm` - The root swarm collection. All resources in the
+  cluster are here. Resources that aren't in a collection are assigned
+  to the root `/Swarm` directory.
+- `/System` - The system collection, which contains UCP managers, DTR nodes,
+  and UCP/DTR system services. By default, only admins have access to the
+  system collection, but you can change this.
+- `/Shared` - All worker nodes are here by default, for scheduling.
+  In a system with a standard-tier license, all worker nodes are under
+  the `/Shared` collection. With the EE Advanced license, administrators
+  can move worker nodes to other collections and apply role-based access.
+- `/Shared/Private` - User private collections are stored here.
+- `/Shared/Legacy` - After updating from UCP 2.1, all legacy access control
+  labels are stored here.
+
+## Default collections
+
+A user always has a default collection. The user can select the default
+in UI preferences. When a user deploys a resource in the web UI, the
+preselected option is the default collection, but this can be changed.
+
+Users can't deploy a resource without a collection. When deploying a
+resource in the CLI without an access label, UCP automatically puts the
+user's default collection label on the resource.
+
+When using Docker Compose, the system applies default collection labels
+across all resources in the stack, unless the `com.docker.ucp.access.label`
+label has been set explicitly.
+
+## Collections and labels
+
+Label limitations apply to collections. For containers, networks, and
+volumes, you can't modify the collection after the resource is created.
+You can update labels for services, nodes, secrets, and configs.
+
+For editable resources, like services, secrets, nodes, and configs,
+you can change the `com.docker.ucp.access.label` label to move resources to
+different collections. With the CLI, you can use this label to deploy
+resources to a collection other than your default collection. Omitting this
+label on the CLI deploys a resource to the user's default collection.
+
+The system uses the additional labels, `com.docker.ucp.collection.*`, to enable
+efficient resource lookups. By default, nodes have the
+`com.docker.ucp.collection.root`, `com.docker.ucp.collection.shared`, and
+`com.docker.ucp.collection.swarm` labels set to `true`. UCP automatically
+controls these labels, and you don't need to manage them.
+
+Collections get generic default names, but you can give them meaningful names,
+like "Dev", "Test", and "Prod".
+
+## Control access to nodes
+
+The Docker EE Advanced license enables access control on worker nodes.
+
+When you deploy a resource with a collection, UCP implicitly sets a scheduling
+constraint based on what nodes the collection, and any ancestor collections,
+can access. The `Scheduler` role allows users to deploy resources on a node.
+By default, all users have the `Scheduler` role against the `/Shared`
+collection.
+
+When deploying a resource that isn't global, like local volumes, bridge
+networks, containers, and services, the system identifies a set of
+"schedulable nodes" for the user. The system identifies the target collection
+of the resource, like `/Shared/Private/hans`, and finds the parent collection,
+closest to the root, on which the user has the `Node Schedule` permission.
+
+For example, when a user with a default configuration runs `docker run nginx`,
+the system interprets this to mean, "Create an `nginx` container under the
+user's default collection, which is at `/Shared/Private/hans`, and deploy it
+on one of the nodes under `/Shared`."
+
+If you want to isolate nodes from other teams, place these nodes in
+new collections, and assign the `Scheduler` role, which contains the
+`Node Schedule` permission, to the appropriate teams. For more info, see
+[Isolate swarm nodes between two different teams](isolate-nodes-between-teams.md).
+
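+To make the label mechanics concrete, here is a hedged CLI sketch of deploying
+into a specific collection instead of the default one. It assumes a client
+bundle for a user who has a grant with the `Scheduler` role against `/Prod`
+and a grant with sufficient access against `/Prod/ApplicationA`; the
+collection paths and image are examples, and the exact label behavior may
+differ between UCP versions.
+
+```
+# Deploy a service into the /Prod/ApplicationA collection by setting the
+# access label explicitly. Per the scheduling rules above, its tasks land
+# on nodes that the collection (through its parent /Prod) can access.
+docker service create \
+  --name app-a \
+  --label com.docker.ucp.access.label="/Prod/ApplicationA" \
+  nginx:latest
+
+# Omitting the label deploys the service to the user's default collection,
+# as described in "Default collections" above.
+```
+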