diff --git a/_data/toc.yaml b/_data/toc.yaml index f4c9696041..a89245d67e 100644 --- a/_data/toc.yaml +++ b/_data/toc.yaml @@ -2569,6 +2569,201 @@ manuals: title: Release notes - sectiontitle: Previous versions section: + - sectiontitle: Docker Trusted Registry 2.6 + section: + - path: /datacenter/dtr/2.6/guides/ + title: Docker Trusted Registry overview + - path: /datacenter/dtr/2.6/guides/architecture/ + title: Architecture + - sectiontitle: Administration + section: + - sectiontitle: Install + section: + - path: /datacenter/dtr/2.6/guides/admin/install/system-requirements/ + title: System requirements + - path: /datacenter/dtr/2.6/guides/admin/install/ + title: Install + - path: /datacenter/dtr/2.6/guides/admin/install/install-offline/ + title: Install offline + - path: /datacenter/dtr/2.6/guides/admin/upgrade/ + title: Upgrade + - path: /datacenter/dtr/2.6/guides/admin/install/uninstall/ + title: Uninstall + - sectiontitle: Configure + section: + - path: /datacenter/dtr/2.6/guides/admin/configure/license-your-installation/ + title: License your installation + - path: /datacenter/dtr/2.6/guides/admin/configure/use-your-own-tls-certificates/ + title: Use your own TLS certificates + - path: /datacenter/dtr/2.6/guides/admin/configure/enable-single-sign-on/ + title: Enable single sign-on + - sectiontitle: External storage + section: + - path: /datacenter/dtr/2.6/guides/admin/configure/external-storage/ + title: Overview + - path: /datacenter/dtr/2.6/guides/admin/configure/external-storage/storage-backend-migration/ + title: Switch storage backends + - path: /datacenter/dtr/2.6/guides/admin/configure/external-storage/s3/ + title: S3 + - path: /datacenter/dtr/2.6/guides/admin/configure/external-storage/nfs/ + title: NFS + - path: /datacenter/dtr/2.6/guides/admin/configure/set-up-high-availability/ + title: Set up high availability + - path: /datacenter/dtr/2.6/guides/admin/configure/use-a-load-balancer/ + title: Use a load balancer + - path: /datacenter/dtr/2.6/guides/admin/configure/set-up-vulnerability-scans/ + title: Set up vulnerability scans + - sectiontitle: Deploy caches + section: + - title: Cache overview + path: /datacenter/dtr/2.6/guides/admin/configure/deploy-caches/ + - title: Cache deployment strategy + path: /datacenter/dtr/2.6/guides/admin/configure/deploy-caches/strategy/ + - title: Deploy a DTR cache with Docker Swarm + path: /datacenter/dtr/2.6/guides/admin/configure/deploy-caches/simple/ + - title: Deploy a DTR cache with Kubernetes + path: /datacenter/dtr/2.6/guides/admin/configure/deploy-caches/simple-kube/ + - title: Configure caches for high availability + path: /datacenter/dtr/2.6/guides/admin/configure/deploy-caches/high-availability/ + - title: Cache configuration reference + path: /datacenter/dtr/2.6/guides/admin/configure/deploy-caches/configuration-reference/ + - path: /datacenter/dtr/2.6/guides/admin/configure/garbage-collection/ + title: Garbage collection + - title: Allow users to create repositories when pushing + path: /datacenter/dtr/2.6/guides/admin/configure/allow-creation-on-push/ + - path: /datacenter/dtr/2.6/guides/admin/configure/use-a-web-proxy/ + title: Use a web proxy + - sectiontitle: Manage users + section: + - path: /datacenter/dtr/2.6/guides/admin/manage-users/ + title: Authentication and authorization + - path: /datacenter/dtr/2.6/guides/admin/manage-users/create-and-manage-users/ + title: Create and manage users + - path: /datacenter/dtr/2.6/guides/admin/manage-users/create-and-manage-teams/ + title: Create and manage teams + - path: 
/datacenter/dtr/2.6/guides/admin/manage-users/create-and-manage-orgs/ + title: Create and manage organizations + - path: /datacenter/dtr/2.6/guides/admin/manage-users/permission-levels/ + title: Permission levels + - sectiontitle: Manage webhooks + section: + - title: Create and manage webhooks + path: /datacenter/dtr/2.6/guides/admin/manage-webhooks/ + - title: Use the web interface + path: /datacenter/dtr/2.6/guides/admin/manage-webhooks/use-the-web-ui/ + - title: Use the API + path: /datacenter/dtr/2.6/guides/admin/manage-webhooks/use-the-api/ + - sectiontitle: Manage jobs + section: + - path: /datacenter/dtr/2.6/guides/admin/manage-jobs/job-queue/ + title: Job Queue + - path: /datacenter/dtr/2.6/guides/admin/manage-jobs/audit-jobs-via-ui/ + title: Audit Jobs with the Web Interface + - path: /datacenter/dtr/2.6/guides/admin/manage-jobs/audit-jobs-via-api/ + title: Audit Jobs with the API + - path: /datacenter/dtr/2.6/guides/admin/manage-jobs/auto-delete-job-logs/ + title: Enable Auto-Deletion of Job Logs + - sectiontitle: Monitor and troubleshoot + section: + - path: /datacenter/dtr/2.6/guides/admin/monitor-and-troubleshoot/ + title: Monitor the cluster status + - path: /datacenter/dtr/2.6/guides/admin/monitor-and-troubleshoot/notary-audit-logs/ + title: Check Notary audit logs + - path: /datacenter/dtr/2.6/guides/admin/monitor-and-troubleshoot/troubleshoot-dtr/ + title: Troubleshoot Docker Trusted Registry + - sectiontitle: Disaster recovery + section: + - title: Overview + path: /datacenter/dtr/2.6/guides/admin/disaster-recovery/ + - title: Repair a single replica + path: /datacenter/dtr/2.6/guides/admin/disaster-recovery/repair-a-single-replica/ + - title: Repair a cluster + path: /datacenter/dtr/2.6/guides/admin/disaster-recovery/repair-a-cluster/ + - title: Create a backup + path: /datacenter/dtr/2.6/guides/admin/disaster-recovery/create-a-backup/ + - title: Restore from a backup + path: /datacenter/dtr/2.6/guides/admin/disaster-recovery/restore-from-backup/ + - sectiontitle: CLI Reference + section: + - path: /reference/dtr/2.6/cli/ + title: Overview + - path: /reference/dtr/2.6/cli/backup/ + title: backup + - path: /reference/dtr/2.6/cli/destroy/ + title: destroy + - path: /reference/dtr/2.6/cli/emergency-repair/ + title: emergency-repair + - path: /reference/dtr/2.6/cli/install/ + title: install + - path: /reference/dtr/2.6/cli/join/ + title: join + - path: /reference/dtr/2.6/cli/reconfigure/ + title: reconfigure + - path: /reference/dtr/2.6/cli/remove/ + title: remove + - path: /reference/dtr/2.6/cli/restore/ + title: restore + - path: /reference/dtr/2.6/cli/upgrade/ + title: upgrade + - sectiontitle: User guides + section: + - sectiontitle: Access DTR + section: + - path: /datacenter/dtr/2.6/guides/user/access-dtr/ + title: Configure your Docker daemon + - path: /datacenter/dtr/2.6/guides/user/access-dtr/configure-your-notary-client/ + title: Configure your Notary client + - path: /datacenter/dtr/2.6/guides/user/access-dtr/use-a-cache/ + title: Use a cache + - sectiontitle: Manage images + section: + - path: /datacenter/dtr/2.6/guides/user/manage-images/ + title: Create a repository + - path: /datacenter/dtr/2.6/guides/user/manage-images/review-repository-info/ + title: Review repository info + - path: /datacenter/dtr/2.6/guides/user/manage-images/pull-and-push-images/ + title: Pull and push images + - path: /datacenter/dtr/2.6/guides/user/manage-images/delete-images/ + title: Delete images + - path: 
/datacenter/dtr/2.6/guides/user/manage-images/scan-images-for-vulnerabilities/ + title: Scan images for vulnerabilities + - title: Override a vulnerability + path: /datacenter/dtr/2.6/guides/user/manage-images/override-a-vulnerability/ + - path: /datacenter/dtr/2.6/guides/user/manage-images/prevent-tags-from-being-overwritten/ + title: Prevent tags from being overwritten + - sectiontitle: Sign images + section: + - path: /datacenter/dtr/2.6/guides/user/manage-images/sign-images/ + title: Sign an image + - path: /datacenter/dtr/2.6/guides/user/manage-images/sign-images/trust-with-remote-ucp/ + title: Trust with a Remote UCP + - sectiontitle: Promotion policies and mirroring + section: + - title: Overview + path: /datacenter/dtr/2.6/guides/user/promotion-policies/ + - title: Promote an image using policies + path: /datacenter/dtr/2.6/guides/user/promotion-policies/internal-promotion/ + - title: Mirror images to another registry + path: /datacenter/dtr/2.6/guides/user/promotion-policies/push-mirror/ + - title: Mirror images from another registry + path: /datacenter/dtr/2.6/guides/user/promotion-policies/pull-mirror/ + - title: Template reference + path: /datacenter/dtr/2.6/guides/user/promotion-policies/templates/ + - sectiontitle: Manage repository events + section: + - title: Audit repository events + path: /datacenter/dtr/2.6/guides/user/audit-repository-events/ + - title: Auto-delete repository events + path: /datacenter/dtr/2.6/guides/admin/configure/auto-delete-repo-events/ + - title: Manage access tokens + path: /datacenter/dtr/2.6/guides/user/access-tokens/ + - title: Tag pruning + path: /datacenter/dtr/2.6/guides/user/tag-pruning/ + - title: API reference + path: /datacenter/dtr/2.6/reference/api/ + nosync: true + - path: /datacenter/dtr/2.6/guides/release-notes/ + title: Release notes - sectiontitle: Docker Trusted Registry 2.5 section: - path: /datacenter/dtr/2.5/guides/ diff --git a/datacenter/dtr/2.6/guides/admin/configure/allow-creation-on-push.md b/datacenter/dtr/2.6/guides/admin/configure/allow-creation-on-push.md new file mode 100644 index 0000000000..6eab296d20 --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/configure/allow-creation-on-push.md @@ -0,0 +1,35 @@ +--- +title: Allow users to create repositories when pushing +description: By default Docker Trusted Registry only allows pushing images to + existing repositories. Learn how to change that. +keywords: dtr, repository +redirect_from: + - /datacenter/dtr/2.5/guides/admin/configure/allow-creation-on-push/ +--- + +By default DTR only allows pushing images if the repository exists, and you +have write access to the repository. + +As an example, if you try to push to `dtr.example.org/library/java:9`, and the +`library/java` repository doesn't exist yet, your push fails. + +You can configure DTR to allow pushing to repositories that don't exist yet. +As an administrator, log into the **DTR web UI**, navigate to the **Settings** +page, and enable **Create repository on push**. + +![DTR settings page](../../images/create-on-push-1.png){: .with-border} + +From now on, when a user pushes to their personal sandbox +(`/`), or if the user is an administrator for the +organization (`/`), DTR will create a repository if it doesn't +exist yet. In that case, the repository is created as private. 
+ +## Use the CLI to enable pushing to repositories that don't exist yet + +```bash +curl --user : \ +--request POST "/api/v0/meta/settings" \ +--header "accept: application/json" \ +--header "content-type: application/json" \ +--data "{ \"createRepositoryOnPush\": true}" +``` diff --git a/datacenter/dtr/2.6/guides/admin/configure/auto-delete-repo-events.md b/datacenter/dtr/2.6/guides/admin/configure/auto-delete-repo-events.md new file mode 100644 index 0000000000..bff0a4b6a7 --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/configure/auto-delete-repo-events.md @@ -0,0 +1,43 @@ +--- +title: Enable Auto-Deletion of Repository Events +description: Enable auto-deletion of image events within a repository for maintenance. +keywords: registry, events, log, activity stream +--- + +## Overview + +Docker Trusted Registry has a global setting for repository event auto-deletion. This allows event records to be removed as part of [garbage collection](../admin/configure/garbage-collection.md). DTR administrators can enable auto-deletion of repository events in DTR 2.6 based on specified conditions which are covered below. + +## Steps + +1. In your browser, navigate to `https://` and log in with your admin credentials. + +2. Select **System** from the left navigation pane which displays the **Settings** page by default. + +3. Scroll down to **Repository Events** and turn on ***Auto-Deletion***. + + ![](../../images/auto-delete-repo-events-0.png){: .img-fluid .with-border} + +4. Specify the conditions with which an event auto-deletion will be triggered. + + ![](../../images/auto-delete-repo-events-1.png){: .img-fluid .with-border} + + DTR allows you to set your auto-deletion conditions based on the following optional repository event attributes: + + | Name | Description | Example | + |:----------------|:---------------------------------------------------| :----------------| + | Age | Lets you remove events older than your specified number of hours, days, weeks or months| `2 months` | + | Max number of events | Lets you specify the maximum number of events allowed in the repositories. | `6000` | + +If you check and specify both, events in your repositories will be removed during garbage collection if either condition is met. You should see a confirmation message right away. + +5. Click **Start GC** if you're ready. Read more about [garbage collection](../admin/configure/garbage-collection/#under-the-hood) if you're unsure about this operation. + +6. Navigate to **System > Job Logs** to confirm that `onlinegc` has happened. + + ![](../../images/auto-delete-repo-events-2.png){: .img-fluid .with-border} + +## Where to go next + +- [Manage job logs](/ee/dtr/admin/manage-jobs/audit-jobs-via-ui/) + diff --git a/datacenter/dtr/2.6/guides/admin/configure/deploy-caches/configuration-reference.md b/datacenter/dtr/2.6/guides/admin/configure/deploy-caches/configuration-reference.md new file mode 100644 index 0000000000..988cc65b1f --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/configure/deploy-caches/configuration-reference.md @@ -0,0 +1,80 @@ +--- +title: DTR cache configuration reference +description: Learn about the different configuration options for DTR caches. +keywords: DTR, cache +--- + +DTR caches are based on Docker Registry, and use the same configuration +file format. +[Learn more about the configuration options](/registry/configuration.md). 
+ +The DTR cache extends the Docker Registry configuration file format by +introducing a new middleware called `downstream` that has three configuration +options: `blobttl`, `upstreams`, and `cas`: + +```none +# Settings that you would include in a +# Docker Registry configuration file followed by + +middleware: + registry: + - name: downstream + options: + blobttl: 24h + upstreams: + - + cas: + - +``` + +Below you can find the description for each parameter, specific to DTR caches. + + + + + + + + + + + + + + + + + + + + + + +
| Parameter   | Required | Description |
|:------------|:---------|:------------|
| `blobttl`   | no       | A positive integer and an optional unit of time suffix to determine the TTL (Time to Live) value for blobs in the cache. If `blobttl` is configured, `storage.delete.enabled` must be set to `true`. Acceptable units of time are: `ns` (nanoseconds), `us` (microseconds), `ms` (milliseconds), `s` (seconds), `m` (minutes), and `h` (hours). If you omit the suffix, the system interprets the value as nanoseconds. |
| `cas`       | no       | An optional list of absolute paths to PEM-encoded CA certificates of upstream registries or content caches. |
| `upstreams` | yes      | A list of externally-reachable addresses for upstream registries of content caches. If more than one host is specified, it will pull from registries in round-robin order. |
diff --git a/datacenter/dtr/2.6/guides/admin/configure/deploy-caches/high-availability.md b/datacenter/dtr/2.6/guides/admin/configure/deploy-caches/high-availability.md new file mode 100644 index 0000000000..02358e9dc2 --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/configure/deploy-caches/high-availability.md @@ -0,0 +1,76 @@ +--- +title: Configure caches for high availability +description: Learn how to deploy a DTR cache with fault tolerance and high + availability. +keywords: DTR, cache +--- + +If you're deploying a DTR cache in a zone with few users and with no uptime +SLAs, a [single cache service is enough for you](simple.md). + +But if you want to make sure your DTR cache is always available to users +and is highly performant, you should configure your cache deployment for +high availability. + +![Highly-available cache](../../../images/deploy-caches-ha-1.svg) + +## System requirements + +* Multiple nodes, one for each cache replica. +* A load balancer. +* Shared storage system that has read-after-write consistency. + +The way you deploy a DTR cache is the same, whether you're deploying a single +replica or multiple ones. The difference is that you should configure the +replicas to store data using a shared storage system. + +When using a shared storage system, once an image layer is cached, any replica +is able to serve it to users without having to fetch a new copy from DTR. + +DTR caches support the following storage systems: +* Alibaba Cloud Object Storage Service +* Amazon S3 +* Azure Blob Storage +* Google Cloud Storage +* NFS +* Openstack Swift + +If you're using NFS as a shared storage system, make sure the shared +directory is configured with: + +``` +/dtr-cache *(rw,root_squash,no_wdelay) +``` + +This ensures read-after-write consistency for NFS. + +You should also mount the NFS directory on each node where you'll deploy a +DTR cache replica. + +## Label the DTR cache nodes + +Use SSH to log in to a manager node of the swarm where you want to deploy +the DTR cache. + +If you're using UCP to manage that swarm you can also use a client bundle to +configure your Docker CLI client to connect to that swarm. + +Label each node that is going to run the cache replica, by running: + +``` +docker node update --label-add dtr.cache=true +``` + +## Configure and deploy the cache + +Create the cache configuration files by following the +[instructions for deploying a single cache replica](simple.md#prepare-the-cache-deployment). + +Make sure you adapt the `storage` object, using the +[configuration options for the shared storage](/registry/configuration.md#storage) +of your choice. + +## Configure your load balancer + +The last step is to deploy a load balancer of your choice to load-balance +requests across the multiple replicas you deployed. diff --git a/datacenter/dtr/2.6/guides/admin/configure/deploy-caches/index.md b/datacenter/dtr/2.6/guides/admin/configure/deploy-caches/index.md new file mode 100644 index 0000000000..1c5f35974c --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/configure/deploy-caches/index.md @@ -0,0 +1,72 @@ +--- +title: DTR cache overview +description: Deploy DTR caches in different geographical locations for users to + pull images faster. +keywords: DTR, cache +--- + +The further away you are from the geographical location where DTR is deployed, +the longer it will take to pull and push images. +This happens because the files being transferred from DTR to your machine +need to travel a longer distance, across multiple networks. 
+ +![Slow pull](../../../images/deploy-caches-1.svg) + +To decrease the time to pull an image, you can deploy DTR caches geographically +closer to users. + +Caches are transparent to users, since users still log in and pull images using +the DTR URL address. DTR checks if users are authorized to pull the image, and redirects the +request to the cache. + +![Pull with cache](../../../images/deploy-caches-2.svg) + +In this example, DTR is deployed on a datacenter in the United States, and +a cache is deployed in the Asia office. + +Users in the Asia office update their user profile within DTR to fetch from +the cache in their office. They pull an image using: + +``` +# Log in to DTR +docker login dtr.example.org + +# Pull image +docker image pull dtr.example.org/website/ui:3-stable +``` + +DTR authenticates the request and checks if the user has permission to pull the +image they are requesting. If they have permissions, they get an image +manifest containing the list of image layers to pull and redirecting them +to pull the images from the Asia cache. + +When users request those image layers from the Asia cache, the cache pulls +them from DTR and keeps a copy that can be used to serve to other users without +having to pull the image layers from DTR again. + +## Caches or mirroring policies + +Use caches if you: + +* Want to make image pulls faster for users in different geographical regions. +* Want to manage user permissions from a central place. + +If you need users to be able to push images faster, or you want to implement +RBAC policies based on different regions, do not use caches. +Instead, deploy multiple DTR clusters and implement mirroring policies between +them. + +![Mirroring policies](../../../images/deploy-caches-3.svg) + +With mirroring policies you can set up a development pipeline where images +are automatically pushed between different DTR repositories, or across +DTR deployments. + +As an example you can set up a development pipeline with three different stages. +Developers can push and pull images from the development environment, +only pull from QA, and have no access to Production. + +With multiple DTR deployments you can control the permissions developers have +for each deployment, and you can create policies to automatically push images +from one deployment to the next. +[Learn more about deployment policies](../../../user/promotion-policies/index.md). diff --git a/datacenter/dtr/2.6/guides/admin/configure/deploy-caches/simple-kube.md b/datacenter/dtr/2.6/guides/admin/configure/deploy-caches/simple-kube.md new file mode 100644 index 0000000000..0236edb6bc --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/configure/deploy-caches/simple-kube.md @@ -0,0 +1,343 @@ +--- +title: Deploy a DTR cache with Kubernetes +description: Deploy a DTR cache to allow users in remote geographical locations to pull images faster. +keywords: DTR, cache, kubernetes +--- + +This example guides you through deploying a DTR cache, assuming that you've got +a DTR deployment up and running. The below guide has been tested on +Universal Control Plane 3.1, however it should work on any Kubernetes Cluster +1.8 or higher. + +The DTR cache is going to be deployed as a Kubernetes Deployment, so that +Kubernetes automatically takes care of scheduling and restarting the service if +something goes wrong. + +We'll manage the cache configuration using a Kubernetes Config Map, and the TLS +certificates using Kubernetes secrets. 
This allows you to manage the +configurations securely and independently of the node where the cache is +actually running. + +## Prepare the cache deployment + +At the end of this exercise you should have the following file structure on your +workstation: + +``` +├── dtrcache.yaml # Yaml file to deploy cache with a single command +├── config.yaml # The cache configuration file +└── certs +    ├── cache.cert.pem # The cache public key certificate, including any intermediaries +    ├── cache.key.pem # The cache private key +    └── dtr.cert.pem # DTR CA certificate +``` + +### Create the DTR Cache certificates + +The DTR cache will be deployed with a TLS endpoint. For this you will need to +generate a TLS ceritificate and key from a certificate authority. The way you +expose the DTR Cache will change the SANs required for +this certificate. + +For example: + + - If you are deploying the DTR Cache with an + [Ingress Object](https://kubernetes.io/docs/concepts/services-networking/ingress/) + you will need to use an external DTR cache address which resolves to your + ingress controller as part of your certificate. + - If you are exposing the DTR cache through a Kubernetes + [Cloud Provider](https://kubernetes.io/docs/concepts/services-networking/#loadbalancer) + then you will need the external Loadbalancer address as part of your + certificate. + - If you are exposing the DTR Cache through a + [Node Port](https://kubernetes.io/docs/concepts/services-networking/#nodeport) + or a Host Port you will need to use a node's FQDN as a SAN in your + certificate. + +On your workstation, create a directory called `certs`. Within it place the +newly created certificate `cache.cert.pem` and key `cache.key.pem` for your DTR +cache. Also place the certificate authority (including any intermedite +certificate authorities) of the certificate from your DTR deployment. This could +be sourced from the main DTR deployment using curl. + +``` +$ curl -s https:///ca -o certs/dtr.cert.pem`. +``` + +### Create the DTR Config + +The DTR Cache will take its configuration from a file mounted into the container. +Below is an example configuration file for the DTR Cache. This yaml should be +customised for your environment with the relevant external dtr cache, worker +node or external loadbalancer FQDN. + +With this configuration, the cache fetches image layers from DTR and keeps a +local copy for 24 hours. After that, if a user requests that image layer, the +cache will fetch it again from DTR. + +The cache, by default, is configured to store image data inside its container. +Therefore if something goes wrong with the cache service, and Kubernetes deploys +a new pod, cached data is not persisted. Data will not be lost as it is still +stored in the primary DTR. You can +[customize the storage parameters](/registry/configuration/#storage), +if you want the cached images to be backended by persistent storage. + +> **Note**: Kubernetes Peristent Volumes or Persistent Volume Claims would have to be +> used to provide persistent backend storage capabilities for the cache. 
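For example, persistent storage could be backed by a PersistentVolumeClaim that you mount into the cache Deployment (defined later in this guide) at `/var/lib/registry`. This is only a sketch: the claim name, the requested size, and the reliance on a default StorageClass are assumptions, not part of the official deployment.

```
# Hypothetical claim backing the cache's /var/lib/registry directory.
# Assumes your cluster has a default StorageClass; the size is illustrative.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: dtr-cache-storage
  namespace: dtr
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Gi
```

You would then add a `persistentVolumeClaim` volume and a matching `volumeMount` for `/var/lib/registry` to the cache Deployment manifest shown below.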
+ +``` +cat > config.yaml < # Could be DTR Cache / Loadbalancer / Worker Node external FQDN + tls: + certificate: /certs/cache.cert.pem + key: /certs/cache.key.pem +middleware: + registry: + - name: downstream + options: + blobttl: 24h + upstreams: + - https:// # URL of the Main DTR Deployment + cas: + - /certs/dtr.cert.pem +EOF +``` + +See [Configuration Options](/registry/configuration/#list-of-configuration-options) for a full list of registry configuration options. + +### Define Kubernetes Resources + +The Kubernetes Manifest file to deploy the DTR Cache is independent of how you +choose to expose the DTR cache within your environment. The below example has +been tested to work on Universal Control Plane 3.1, however it should work on +any Kubernetes Cluster 1.8 or higher. + +``` +cat > dtrcache.yaml <` and / or `kubectl -n dtr logs `. + +### Exposing the DTR Cache + +For external access to the DTR cache we need to expose the Cache Pods to the +outside world. In Kubernetes there are multiple ways for you to expose a service, +dependent on your infrastructure and your environment. For more information, +see [Publishing services - service types +](https://kubernetes.io/docs/concepts/services-networking/#publishing-services-service-types) on the Kubernetes docs. +It is important though that you are consistent in exposing the cache through the +same interface you created a certificate for [previously](#create-the-dtr-cache-certificates). +Otherwise the TLS certificate may not be valid through this alternative +interface. + +> #### DTR Cache Exposure +> +> You only need to expose your DTR cache through ***one*** external interface. + +#### NodePort + +The first example exposes the DTR cache through **NodePort**. In this example you would +have added a worker node's FQDN to the TLS Certificate in [step 1](#create-the-dtr-cache-certificates). +Here you will be accessing the DTR cache through an exposed port on a worker +node's FQDN. + +``` +cat > dtrcacheservice.yaml <:/v2/_catalog +{"repositories":[]} +``` + +#### Ingress Controller + +This second example will expose the DTR cache through an **ingress** object. In +this example you will need to create a DNS rule in your environment that will +resolve a DTR cache external FQDN address to the address of your ingress +controller. You should have also specified the same DTR cache external FQDN +address within the DTR cache certificate in [step 1](#create-the-dtr-cache-certificates). + +> Note an ingress controller is a prerequisite for this example. If you have not +> deployed an ingress controller on your cluster, see [Layer 7 Routing for UCP](/ee/ucp/kubernetes/layer-7-routing). This +> ingress controller will also need to support SSL passthrough. + +``` +cat > dtrcacheservice.yaml < # Replace this value with your external DTR Cache address + rules: + - host: # Replace this value with your external DTR Cache address + http: + paths: + - backend: + serviceName: dtr-cache + servicePort: 443 +EOF + +kubectl create -f dtrcacheservice.yaml +``` + +You can test that your DTR cache is externally reachable by using curl to hit +the API endpoint. The address should be the one you have defined above in the +serivce definition file. 
+ +``` +curl -X GET https://external-dtr-cache-fqdn/v2/_catalog +{"repositories":[]} +``` + +## Next Steps + +[Integrate your cache into DTR and configure users](simple#register-the-cache-with-dtr) diff --git a/datacenter/dtr/2.6/guides/admin/configure/deploy-caches/simple.md b/datacenter/dtr/2.6/guides/admin/configure/deploy-caches/simple.md new file mode 100644 index 0000000000..4c7bea82c1 --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/configure/deploy-caches/simple.md @@ -0,0 +1,272 @@ +--- +title: Deploy a DTR cache with Swarm +description: Deploy a DTR cache to make users in remove geographical locations + pull images faster. +keywords: DTR, cache +--- + +This example guides you in deploying a DTR cache, assuming that you've got +a DTR deployment up and running. It also assumes that you've provisioned +[multiple nodes and joined them into a swarm](strategy.md#system-requirements). + +![Cache for Asia](../../../images/deploy-caches-simple-1.svg) + +The DTR cache is going to be deployed as a Docker service, so that Docker +automatically takes care of scheduling and restarting the service if +something goes wrong. + +We'll manage the cache configuration using a Docker configuration, and the TLS +certificates using Docker secrets. This allows you to manage the configurations +securely and independently of the node where the cache is actually running. + +## Dedicate a node for the cache + +To make sure the DTR cache is performant, it should be deployed on a node +dedicated just for it. Start by labelling the node where you want +to deploy the cache, so that you target the deployment to that node. + +Use SSH to log in to a manager node of the swarm where you want to deploy +the DTR cache. If you're using UCP to manage that swarm, use a client bundle to +configure your Docker CLI client to connect to the swarm. + +``` +docker node update --label-add dtr.cache=true +``` + +[Learn more about labelling nodes](/engine/swarm/manage-nodes.md#add-or-remove-label-metadata). + + +## Prepare the cache deployment + +Create a file structure that looks like this: + +``` +├── docker-stack.yml # Stack file to deploy cache with a single command +├── config.yml # The cache configuration file +└── certs +    ├── cache.cert.pem # The cache public key certificate +    ├── cache.key.pem # The cache private key +    └── dtr.cert.pem # DTR CA certificate +``` + +Then add the following content to each of the files: + + +
`docker-stack.yml`:

```
version: "3.3"
services:
  cache:
    image: {{ page.dtr_org }}/{{ page.dtr_repo }}-content-cache:{{ page.dtr_version }}
    entrypoint:
      - /start.sh
      - "/config.yml"
    ports:
      - 443:443
    deploy:
      replicas: 1
      placement:
        constraints: [node.labels.dtr.cache == true]
      restart_policy:
        condition: on-failure
    configs:
      - config.yml
    secrets:
      - dtr.cert.pem
      - cache.cert.pem
      - cache.key.pem
configs:
  config.yml:
    file: ./config.yml
secrets:
  dtr.cert.pem:
    file: ./certs/dtr.cert.pem
  cache.cert.pem:
    file: ./certs/cache.cert.pem
  cache.key.pem:
    file: ./certs/cache.key.pem
```
`config.yml`:

```
version: 0.1
log:
  level: info
storage:
  delete:
    enabled: true
  filesystem:
    rootdirectory: /var/lib/registry
http:
  addr: 0.0.0.0:443
  secret: generate-random-secret
  host: https://<cache-url>
  tls:
    certificate: /run/secrets/cache.cert.pem
    key: /run/secrets/cache.key.pem
middleware:
  registry:
      - name: downstream
        options:
          blobttl: 24h
          upstreams:
            - https://<dtr-url>:<dtr-port>
          cas:
            - /run/secrets/dtr.cert.pem
```
`certs/cache.cert.pem`:

Add the public key certificate for the cache here. If the certificate has been
signed by an intermediate certificate authority, append its public key
certificate at the end of the file.
`certs/cache.key.pem`:

Add the unencrypted private key for the cache here.
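If you don't have a certificate and key for the cache yet, a self-signed pair is enough for basic testing. The following is only a sketch: it assumes OpenSSL 1.1.1 or later, and `cache.example.org` is a placeholder for your cache's external address. For production, use certificates issued by a certificate authority that your clients trust.

```
# Generate a self-signed certificate and key for the cache (testing only).
# Replace cache.example.org with the address users will use to reach the cache.
openssl req -x509 -newkey rsa:4096 -sha256 -days 365 -nodes \
  -keyout certs/cache.key.pem \
  -out certs/cache.cert.pem \
  -subj "/CN=cache.example.org" \
  -addext "subjectAltName=DNS:cache.example.org"
```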
`certs/dtr.cert.pem`:

The cache communicates with DTR using TLS. If you've customized DTR
to use TLS certificates issued by a globally trusted certificate authority,
the cache automatically trusts DTR.

But if you're using the default DTR configuration, or DTR is using TLS
certificates signed by your own certificate authority, you need to configure
the cache to trust DTR.

Add the DTR CA certificate to the `certs/dtr.cert.pem` file. You can
do this by running:

```
curl -sk https://<dtr-url>/ca > certs/dtr.cert.pem
```
+ +With this configuration, the cache fetches image layers from DTR and keeps +a local copy for 24 hours. After that, if a user requests that image layer, +the cache fetches it again from DTR. + +The cache is configured to persist data inside its container. +If something goes wrong with the cache service, Docker automatically redeploys a +new container, but previously cached data is not persisted. +You can [customize the storage parameters](/registry/configuration.md#storage), +if you want to store the image layers using a persistent storage backend. + +Also, the cache is configured to use port 443. If you're already using that +port in the swarm, update the deployment and configuration files to use another +port. Don't forget to create firewall rules for the port you choose. + +## Deploy the cache + +Now that everything is set up, you can deploy the cache by running: + +``` +docker stack deploy --compose-file docker-stack.yml dtr-cache +``` + +You can check if the cache has been successfully deployed by running: + +``` +docker stack ps dtr-cache +``` + +Docker should show the dtr-cache stack is running. + +## Register the cache with DTR + +Now that you've deployed a cache, you need to configure DTR to know about it. +This is done using the `POST /api/v0/content_caches` API. You can use the +DTR interactive API documentation to use this API. + +In the DTR web UI, click the top-right menu, and choose **API docs**. + +![](../../../images/deploy-caches-simple-2.png){: .with-border} + +Navigate to the `POST /api/v0/content_caches` line and click it to expand. +In the **body** field include: + +``` +{ + "name": "region-asia", + "host": "https://:" +} +``` + +Click the **Try it out!** button to make the API call. + +![](../../../images/deploy-caches-simple-3.png){: .with-border} + +## Configure your user account + +Now that you've registered the cache with DTR, users can configure +their user profile to pull images from DTR or the cache. + +In the DTR web UI, navigate to your **Account**, click the **Settings** +tab, and change the **Content Cache** settings to use the cache you deployed. + +![](../../../images/deploy-caches-simple-4.png){: .with-border} + +If you need to set this for multiple users at the same time, use the +`/api/v0/accounts/{username}/settings` API endpoint. + +Now when you pull images, you'll be using the cache. + +## Test that the cache is working + +To validate that the cache is working as expected: + +1. [Push an image to DTR](../../../user/manage-images/pull-and-push-images.md). +2. Make sure your user account is configured to use the cache. +3. Delete the image from your local system. +4. Pull the image from DTR. + + +To validate that the cache is actually serving your request, and to +troubleshoot misconfigurations, check the logs for the cache service +by running: + +``` +docker service logs --follow dtr-cache_cache +``` + +The most common causes of configuration are due to TLS authentication: +* DTR not trusting the cache TLS certificates. +* The cache not trusting DTR TLS certificates. +* Your machine not trusting DTR or the cache. + +When this happens, check the cache logs to troubleshoot the +misconfiguration. + +## Clean up sensitive files + +The certificates and private keys are now managed by Docker in a secure way. 
+Don't forget to delete sensitive files you've created on disk, like the +private keys for the cache: + +``` +rm -rf certs +``` diff --git a/datacenter/dtr/2.6/guides/admin/configure/deploy-caches/strategy.md b/datacenter/dtr/2.6/guides/admin/configure/deploy-caches/strategy.md new file mode 100644 index 0000000000..3da20d2e28 --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/configure/deploy-caches/strategy.md @@ -0,0 +1,58 @@ +--- +title: Cache deployment strategy +description: Learn how to deploy DTR caches across multiple datacenters to make + image pulls faster. +keywords: DTR, cache +--- + +The main reason to use a DTR cache is so that users can pull images from +a service that's geographically closer to them. + +In this example a company has developers spread across three locations: United +States, Asia, and Europe. Developers working in the US office can pull their +images from DTR without problem, but developers in the Asia and Europe offices +complain that it takes them a long time to pulls images. + +![Offices](../../../images/deploy-caches-strategy-1.svg) + +To address that, you can deploy DTR caches in the Asia and Europe offices, so +that developers working from there can pull images much faster. + +## Deployment overview + +To deploy the DTR caches for this scenario, you need three datacenters: +* The US datacenter runs DTR configured for high availability. +* The Asia datacenter runs a DTR cache. +* The Europe datacenter runs another DTR cache. + +![Offices](../../../images/deploy-caches-strategy-2.svg) + +Both caches are configured to fetch images from DTR. + +## System requirements + +Before deploying a DTR cache in a datacenter, make sure you: + +* Provision multiple nodes and install Docker on them. +* Join the nodes into a Swarm. +* Have one or more dedicated worker nodes just for running the DTR cache. +* Have TLS certificates to use for securing the cache. +* Have a shared storage system, if you want the cache to be highly available. + +If you only plan on running a DTR cache on this datacenter, you just need +[Docker EE Basic](https://www.docker.com/pricing), which only includes the +Docker Engine. + +If you plan on running other workloads on this datacenter, consider deploying +[Docker EE Standard or Advanced](https://www.docker.com/pricing). +This way you can enforce fine-grain control over cluster resources, and makes it +easier to monitor and manage your applications. + +## Ports used + +You can customize the port used by the DTR cache, so you'll have to configure +your firewall rules to make sure users can access the cache using the port +you chose. + +By default the documentation guides you in deploying caches that are exposed +on port 443/TCP using the swarm routing mesh. diff --git a/datacenter/dtr/2.6/guides/admin/configure/enable-single-sign-on.md b/datacenter/dtr/2.6/guides/admin/configure/enable-single-sign-on.md new file mode 100644 index 0000000000..595a01eb4a --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/configure/enable-single-sign-on.md @@ -0,0 +1,45 @@ +--- +title: Enable single sign-on +description: Learn how to set up single sign-on between UCP and DTR, so that your users only have to authenticate once +keywords: dtr, login, sso +--- + +By default, users are shared between UCP and DTR, but you have to authenticate +separately on the web UI of both applications. + +You can configure DTR to have single sign-on (SSO) with UCP, so that users only +have to authenticate once. 
+ +> **Note**: After configuring single sign-on with DTR, users accessing DTR via +> `docker login` should create an [access token](/ee/dtr/user/access-tokens/) and use it to authenticate. + +## At installation time + +When installing DTR, use the `docker/dtr install --dtr-external-url ` +option to enable SSO. When accessing the DTR web UI, users are redirected to the +UCP login page, and once they are authenticated, they're redirected to the URL +you provided to `--dtr-external-url`. + +Use the domain name of DTR, or the domain name of a load balancer, if you're +using one, to load-balance requests across multiple DTR replicas. + +## After install + +In your browser, navigate to the DTR web UI, and choose **Settings**. In the +**General** tab, scroll to **Domain & proxies**. + +Update the **Load balancer / public address** field to the url where users +should be redirected once they are logged in. +Use the domain name of DTR, or the domain name of a load balancer, if you're +using one, to load-balance requests across multiple DTR replicas. + +Then enable **Use single sign-on**. + +![](../../images/enable-sso-1.png){: .with-border} + +Once you save, users are redirected to UCP for logging in, and redirected back to +DTR once they are authenticated. + +## Where to go next + +- [Use your own TLS certificates](use-your-own-tls-certificates.md) diff --git a/datacenter/dtr/2.6/guides/admin/configure/external-storage/index.md b/datacenter/dtr/2.6/guides/admin/configure/external-storage/index.md new file mode 100644 index 0000000000..8d5b0086e5 --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/configure/external-storage/index.md @@ -0,0 +1,95 @@ +--- +title: Configure DTR image storage +description: Storage configuration for Docker Trusted Registry +keywords: dtr, storage drivers, NFS, Azure, S3 +--- + +## Configure your storage backend + +By default DTR uses the local filesystem of the node where it is running to +store your Docker images. You can configure DTR to use an external storage +backend, for improved performance or high availability. + +![architecture diagram](../../../images/configure-external-storage-1.svg) + +If your DTR deployment has a single replica, you can continue using the +local filesystem for storing your Docker images. If your DTR deployment has +multiple replicas, make sure all replicas are +using the same storage backend for high availability. Whenever a user pulls an image, the DTR +node serving the request needs to have access to that image. + +DTR supports the following storage systems: + +* Local filesystem + * [NFS](nfs.md) + * [Bind Mount](/storage/bind-mounts/) + * [Volume](/storage/volumes/) +* Cloud Storage Providers + * [Amazon S3](s3.md) + * [Microsoft Azure](/registry/storage-drivers/azure/) + * [OpenStack Swift](/registry/storage-drivers/swift/) + * [Google Cloud Storage](/registry/storage-drivers/gcs/) + +> **Note**: Some of the previous links are meant to be informative and are not representative of DTR's implementation of these storage systems. + +To configure the storage backend, log in to the DTR web interface +as an admin, and navigate to **System > Storage**. + +![dtr settings](../../../images/configure-external-storage-2.png){: .with-border} + +The storage configuration page gives you the most +common configuration options, but you have the option to upload a configuration file in `.yml`, `.yaml`, or `.txt` format. + +See [Docker Registry Configuration](/registry/configuration.md) for configuration options. 
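As an illustration, a file you upload for an S3-compatible backend could look like the following sketch, which uses the Docker Registry storage driver options linked above. The bucket name, region, and credentials are placeholders:

```
storage:
  s3:
    accesskey: <aws-access-key>
    secretkey: <aws-secret-key>
    region: us-east-1
    bucket: my-dtr-images
    rootdirectory: /
```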
+ +## Local filesystem + +By default, DTR creates a volume named `dtr-registry-` to store +your images using the local filesystem. You can customize the name and path of +the volume by using `docker/dtr install --dtr-storage-volume` or `docker/dtr reconfigure --dtr-storage-volume`. + +> When running DTR 2.5 (with experimental online garbage collection) and 2.6.0 to 2.6.3, there is an issue with [reconfiguring DTR with `--nfs-storage-url`](/ee/dtr/release-notes#version-26) which leads to erased tags. Make sure to [back up your DTR metadata](/ee/dtr/admin/disaster-recovery/create-a-backup/#back-up-dtr-metadata) before you proceed. To work around the `--nfs-storage-url` flag issue, manually create a storage volume on each DTR node. If DTR is already installed in your cluster, [reconfigure DTR](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#reconfigureusingalocalnfsvolume) with the `--dtr-storage-volume` flag using your newly-created volume. +{: .warning} + +If you're deploying DTR with high-availability, you need to use NFS or any other +centralized storage backend so that all your DTR replicas have access to the +same images. + +To check how much space your images are utilizing in the local filesystem, SSH into the DTR node and run: + +```bash +{% raw %} +# Find the path to the volume +docker volume inspect dtr-registry- + +# Check the disk usage +sudo du -hs \ +$(dirname $(docker volume inspect --format '{{.Mountpoint}}' dtr-registry-)) +{% endraw %} +``` + +### NFS + +You can configure your DTR replicas to store images on an NFS partition, so that +all replicas can share the same storage backend. + +[Learn how to configure DTR with NFS](nfs.md). + +## Cloud Storage + +### Amazon S3 + +DTR supports Amazon S3 or other storage systems that are S3-compatible like Minio. +[Learn how to configure DTR with Amazon S3](s3.md). + + + +## Where to go next + +- [Switch storage backends](storage-backend-migration.md) +- [Use NFS](nfs.md) +- [Use S3](s3.md) +- CLI reference pages + - [docker/dtr install](/reference/dtr/2.6/cli/install/) + - [docker/dtr reconfigure](/reference/dtr/2.6/cli/reconfigure/) + - [docker/dtr restore](/reference/dtr/2.6/cli/restore/) diff --git a/datacenter/dtr/2.6/guides/admin/configure/external-storage/nfs.md b/datacenter/dtr/2.6/guides/admin/configure/external-storage/nfs.md new file mode 100644 index 0000000000..8fa0656688 --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/configure/external-storage/nfs.md @@ -0,0 +1,87 @@ +--- +title: Use NFS +description: Learn how to integrate Docker Trusted Registry with NFS +keywords: registry, dtr, storage, nfs +--- + +You can configure DTR to store Docker images in an NFS directory. Starting in DTR 2.6, +changing storage backends involves initializing a new metadatastore instead of reusing an existing volume. +This helps facilitate [online garbage collection](/ee/dtr/admin/configure/garbage-collection/#under-the-hood). +See [changes to NFS reconfiguration below](/ee/dtr/admin/configure/external-storage/nfs/#reconfigure-dtr-to-use-nfs) if you have previously configured DTR to use NFS. 
+ +Before installing or configuring DTR to use an NFS directory, make sure that: + +* The NFS server has been correctly configured +* The NFS server has a fixed IP address +* All hosts running DTR have the correct NFS libraries installed + + +To confirm that the hosts can connect to the NFS server, try to list the +directories exported by your NFS server: + +```bash +showmount -e +``` + +You should also try to mount one of the exported directories: + +```bash +mkdir /tmp/mydir && sudo mount -t nfs : /tmp/mydir +``` + +## Install DTR with NFS + +One way to configure DTR to use an NFS directory is at install time: + +```bash +docker run -it --rm {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} install \ + --nfs-storage-url \ + +``` + +Use the format `nfs:///` for the NFS storage URL. To support **NFS v4**, you can now specify additional options when running [docker/dtr install](/reference/dtr/2.6/cli/install/) with `--nfs-storage-url`. + +When joining replicas to a DTR cluster, the replicas will pick up your storage +configuration, so you will not need to specify it again. + +### Reconfigure DTR to use NFS + +To support **NFS v4**, more NFS options have been added to the CLI. See [New Features for 2.6.0 - CLI](/ee/dtr/release-notes/#260) for updates to [docker/dtr reconfigure](/reference/dtr/2.6/cli/reconfigure/). + +> When running DTR 2.5 (with experimental online garbage collection) and 2.6.0 to 2.6.3, there is an issue with [reconfiguring and restoring DTR with `--nfs-storage-url`](/ee/dtr/release-notes#version-26) which leads to erased tags. Make sure to [back up your DTR metadata](/ee/dtr/admin/disaster-recovery/create-a-backup/#back-up-dtr-metadata) before you proceed. To work around the `--nfs-storage-url` flag issue, manually create a storage volume. If DTR is already installed in your cluster, [reconfigure DTR](/reference/dtr/2.6/cli/reconfigure/) with the `--dtr-storage-volume` flag using your newly-created volume. +> +> See [Reconfigure Using a Local NFS Volume]( https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#reconfigureusingalocalnfsvolume) for Docker's recommended recovery strategy. +{: .warning} + +#### DTR 2.6.4 + +In DTR 2.6.4, a new flag, `--storage-migrated`, [has been added to `docker/dtr reconfigure`](/reference/dtr/2.6/cli/reconfigure/) which lets you indicate the migration status of your storage data during a reconfigure. [Upgrade to 2.6.4](/reference/dtr/2.6/cli/upgrade/) and follow [Best practice for data migration in 2.6.4](/ee/dtr/admin/configure/external-storage/storage-backend-migration/#best-practice-for-data-migration) when switching storage backends. 
The following shows you how to reconfigure DTR using an NFSv4 volume as a storage backend: + +```bash +docker run --rm -it \ + docker/dtr:{{ page.dtr_version}} reconfigure \ + --ucp-url \ + --ucp-username \ + --nfs-storage-url + --async-nfs + --storage-migrated +``` + +To reconfigure DTR to stop using NFS storage, leave the `--nfs-storage-url` option +blank: + +```bash +docker run -it --rm {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version}} reconfigure \ + --nfs-storage-url "" +``` + +## Where to go next + +- [Switch storage backends](storage-backend-migration.md) +- [Create a backup](/ee/dtr/admin/disaster-recovery/create-a-backup/) +- [Restore from a backup](/ee/dtr/admin/disaster-recovery/restore-from-backup/) +- [Configure where images are stored](index.md) +- CLI reference pages + - [docker/dtr install](/reference/dtr/2.6/cli/install/) + - [docker/dtr reconfigure](/reference/dtr/2.6/cli/reconfigure/) + - [docker/dtr restore](/reference/dtr/2.6/cli/restore/) diff --git a/datacenter/dtr/2.6/guides/admin/configure/external-storage/s3.md b/datacenter/dtr/2.6/guides/admin/configure/external-storage/s3.md new file mode 100644 index 0000000000..a47ebea055 --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/configure/external-storage/s3.md @@ -0,0 +1,154 @@ +--- +title: Store images on Amazon S3 +description: Learn how to configure Docker Trusted Registry to store Docker images on + Amazon S3 +keywords: dtr, storage driver, s3 +--- + +You can configure DTR to store Docker images on Amazon S3, or other file servers +with an S3-compatible API like Cleversafe or Minio. + +Amazon S3 and compatible services store files in "buckets", and users have +permissions to read, write, and delete files from those buckets. When you +integrate DTR with Amazon S3, DTR sends all read and write operations to the +S3 bucket so that the images are persisted there. + +## Create a bucket on Amazon S3 + +Before configuring DTR you need to create a bucket on Amazon S3. +To get faster pulls and pushes, you should create the S3 bucket on a region +that's physically close to the servers where DTR is running. + +Start by +[creating a bucket](http://docs.aws.amazon.com/AmazonS3/latest/gsg/CreatingABucket.html). +Then, as a best practice you should +[create a new IAM user](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html) +just for the DTR +integration and apply an IAM policy that ensures the user has limited permissions. + +This user only needs permissions to access the bucket that you'll use to store +images, and be able to read, write, and delete files. + +Here's an example of a policy like that: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "s3:ListAllMyBuckets", + "Resource": "arn:aws:s3:::*" + }, + { + "Effect": "Allow", + "Action": [ + "s3:ListBucket", + "s3:GetBucketLocation", + "s3:ListBucketMultipartUploads" + ], + "Resource": "arn:aws:s3:::" + }, + { + "Effect": "Allow", + "Action": [ + "s3:PutObject", + "s3:GetObject", + "s3:DeleteObject", + "s3:ListBucketMultipartUploads" + ], + "Resource": "arn:aws:s3:::/*" + } + ] +} + +``` + +## Configure DTR + +Once you've created a bucket and user, you can configure DTR to use it. +In your browser, navigate to `https:// Storage**. + +![](../../../images/configure-external-storage-2.png){: .with-border} + +Select the **S3** option, and fill-in the information about the bucket and +user. 
+ +| Field | Description | +|:----------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Root directory | The path in the bucket where images are stored | +| AWS Region name | The region where the bucket is. [Learn more](http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) | +| S3 bucket name | The name of the bucket to store the images | +| AWS access key | The access key to use to access the S3 bucket. This can be left empty if you're using an IAM policy. [Learn more](http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html) | +| AWS secret key | The secret key to use to access the S3 bucket. This can be left empty if you're using an IAM policy | +| Region endpoint | The endpoint name for the region you're using. [Learn more](http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) | + +There are also some advanced settings. + +| Field | Description | +|:-------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------| +| Signature version 4 auth | Authenticate the requests using AWS signature version 4. [Learn more](http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) | +| Use HTTPS | Secure all requests with HTTPS, or make requests in an insecure way | +| Skip TLS verification | Encrypt all traffic, but don't verify the TLS certificate used by the storage backend | +| Root CA certificate | The public key certificate of the root certificate authority that issued the storage backend certificate | + +Once you click **Save**, DTR validates the configurations and saves the changes. + +## Configure your clients + +If you're using a TLS certificate in your storage backend that's not globally +trusted, you'll have to configure all Docker Engines that push or pull from DTR +to trust that certificate. When you push or pull an image DTR redirects the +requests to the storage backend, so if clients don't trust the TLS certificates +of both DTR and the storage backend, they won't be able to push or pull images. +[Learn how to configure the Docker client](../../../user/access-dtr/index.md). + +And if you've configured DTR to skip TLS verification, you also need to +configure all Docker Engines that push or pull from DTR to skip TLS +verification. You do this by adding DTR to +the [list of insecure registries when starting Docker](/engine/reference/commandline/dockerd.md). + +## Supported regions + +DTR supports the following S3 regions: + +| Region | +|:---------------| +| us-east-1 | +| us-east-2 | +| us-west-1 | +| us-west-2 | +| eu-west-1 | +| eu-west-2 | +| eu-central-1 | +| ap-south-1 | +| ap-southeast-1 | +| ap-southeast-2 | +| ap-northeast-1 | +| ap-northeast-2 | +| sa-east-1 | +| cn-north-1 | +| us-gov-west-1 | +| ca-central-1 | + +## Update your S3 settings on the web interface + +When running 2.5.x (with experimental garbage collection) or 2.6.0-2.6.4, there is an issue with [changing your S3 settings on the web interface](/ee/dtr/release-notes#version-26) which leads to erased metadata. Make sure to [back up your DTR metadata](/ee/dtr/admin/disaster-recovery/create-a-backup/#back-up-dtr-metadata) before you proceed. 
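For reference, a metadata backup is typically taken with the `docker/dtr backup` command before making a change like this. The command below is only a sketch with placeholder values; check [docker/dtr backup](/reference/dtr/2.6/cli/backup/) for the full set of options:

```
# Back up DTR metadata before changing storage settings (values are placeholders).
docker run -i --rm {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} backup \
  --ucp-url <ucp-url> \
  --ucp-username <admin-user> \
  --existing-replica-id <replica-id> \
  > dtr-metadata-backup.tar
```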
+ +## Restore DTR with S3 + +To [restore DTR using your previously configured S3 settings](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#restoretocloudstorage), use `docker/dtr restore` with `--dtr-use-default-storage` to keep your metadata. + +## Where to go next + +- [Create a backup](/ee/dtr/admin/disaster-recovery/create-a-backup/) +- [Restore from a backup](/ee/dtr/admin/disaster-recovery/restore-from-backup/) +- [Configure where images are stored](index.md) +- CLI reference pages + - [docker/dtr install](/reference/dtr/2.6/cli/install/) + - [docker/dtr reconfigure](/reference/dtr/2.6/cli/reconfigure/) + - [docker/dtr restore](/reference/dtr/2.6/cli/restore/) + + + + diff --git a/datacenter/dtr/2.6/guides/admin/configure/external-storage/storage-backend-migration.md b/datacenter/dtr/2.6/guides/admin/configure/external-storage/storage-backend-migration.md new file mode 100644 index 0000000000..b8eca97dde --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/configure/external-storage/storage-backend-migration.md @@ -0,0 +1,67 @@ +--- +title: Switch storage backends +description: Storage backend migration for Docker Trusted Registry +keywords: dtr, storage drivers, local volume, NFS, Azure, S3, +--- + +Starting in DTR 2.6, switching storage backends initializes a new metadata store and erases your existing tags. This helps facilitate online garbage collection, which has been introduced in 2.5 as an experimental feature. In earlier versions, DTR would subsequently start a `tagmigration` job to rebuild tag metadata from the file layout in the image layer store. This job has been discontinued for DTR 2.5.x (with garbage collection) and DTR 2.6, as your storage backend could get out of sync with your DTR metadata, like your manifests and existing repositories. As best practice, DTR storage backends and metadata should always be moved, backed up, and restored together. + +## DTR 2.6.4 and above + +In DTR 2.6.4, a new flag, `--storage-migrated`, [has been added to `docker/dtr reconfigure`](/reference/dtr/2.6/cli/reconfigure/) which lets you indicate the migration status of your storage data during a reconfigure. If you are not worried about losing your existing tags, you can skip the recommended steps below and [perform a reconfigure](/reference/dtr/2.6/cli/reconfigure/). + +### Best practice for data migration + +Docker recommends the following steps for your storage backend and metadata migration: + +1. Disable garbage collection by selecting "Never" under **System > Garbage Collection**, so blobs referenced in the backup that you create continue to exist. See [Garbage collection](/ee/dtr/admin/configure/garbage-collection/) for more details. Make sure to keep it disabled while you're performing the metadata backup and migrating your storage data. + + ![](/ee/dtr/images/garbage-collection-0.png){: .img-fluid .with-border} + +2. [Back up your existing metadata](/ee/dtr/admin/disaster-recovery/create-a-backup/#back-up-dtr-metadata). See [docker/dtr backup](/reference/dtr/2.6/cli/backup/) for CLI command description and options. + +3. Migrate the contents of your current storage backend to the new one you are switching to. For example, upload your current storage data to your new NFS server. + +4. [Restore DTR from your backup](/ee/dtr/admin/disaster-recovery/restore-from-backup/) and specify your new storage backend. 
See [docker/dtr destroy](/reference/dtr/2.6/cli/destroy/) and [docker/dtr restore](/reference/dtr/2.6/cli/restore/) for CLI command descriptions and options.
+
+5. With DTR restored from your backup and your storage data migrated to your new backend, garbage collect any dangling blobs using the following API request:
+
+   ```bash
+   curl -u <username>:$TOKEN -X POST "https://<dtr-url>/api/v0/jobs" -H "accept: application/json" -H "content-type: application/json" -d "{ \"action\": \"onlinegc_blobs\" }"
+   ```
+
+   On success, you should get a `202 Accepted` response with a job `id` and other related details. This ensures any blobs which are not referenced in your previously created backup get destroyed.
+
+### Alternative option for data migration
+
+If you have a long maintenance window, you can skip some steps from above and do the following:
+
+1. Put DTR in "read-only" mode using the following API request:
+
+   ```bash
+   curl -u <username>:$TOKEN -X POST "https://<dtr-url>/api/v0/meta/settings" -H "accept: application/json" -H "content-type: application/json" -d "{ \"readOnlyRegistry\": true }"
+   ```
+   On success, you should get a `202 Accepted` response.
+
+2. Migrate the contents of your current storage backend to the new one you are switching to. For example, upload your current storage data to your new NFS server.
+
+3. [Reconfigure DTR](/reference/dtr/2.6/cli/reconfigure) while specifying the `--storage-migrated` flag to preserve your existing tags.
+
+
+## DTR 2.6.0-2.6.4 and DTR 2.5 (with experimental garbage collection)
+
+Make sure to [perform a backup](/ee/dtr/admin/disaster-recovery/create-a-backup/#back-up-dtr-data) before you change your storage backend when running DTR 2.5 (with online garbage collection) and 2.6.0-2.6.3. If you encounter an issue with lost tags, refer to the following resources:
+ * For changes to reconfigure and restore options in DTR 2.6, see [docker/dtr reconfigure](/reference/dtr/2.6/cli/reconfigure/) and [docker/dtr restore](/reference/dtr/2.6/cli/restore).
+ * For Docker's recommended recovery strategies, see [DTR 2.6 lost tags after reconfiguring storage](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage).
+ * For NFS-specific changes, see [Use NFS](nfs.md).
+ * For S3-specific changes, see [Learn how to configure DTR with Amazon S3](s3.md).
+
+Upgrade to [DTR 2.6.4](#dtr-264-and-above) and follow [best practice for data migration](#best-practice-for-data-migration) to avoid the wiped tags issue when moving from one NFS server to another.
+
+## Where to go next
+
+- [Use NFS](nfs.md)
+- [Use S3](s3.md)
+- CLI reference pages
+  - [docker/dtr install](/reference/dtr/2.6/cli/install/)
+  - [docker/dtr reconfigure](/reference/dtr/2.6/cli/reconfigure/)
diff --git a/datacenter/dtr/2.6/guides/admin/configure/garbage-collection.md b/datacenter/dtr/2.6/guides/admin/configure/garbage-collection.md
new file mode 100644
index 0000000000..c393175d12
--- /dev/null
+++ b/datacenter/dtr/2.6/guides/admin/configure/garbage-collection.md
@@ -0,0 +1,105 @@
+---
+title: Garbage collection
+description: Save disk space by configuring the garbage collection settings in
+  Docker Trusted Registry
+keywords: registry, online garbage collection, gc, space, disk space
+---
+
+You can configure the Docker Trusted Registry (DTR) to automatically delete unused image
+layers, thus saving you disk space. This process is also known as garbage collection.
+
+## How DTR deletes unused layers
+
+First you configure DTR to run a garbage collection job on a fixed schedule.
At +the scheduled time, DTR: + +1. Identifies and marks unused image layers. +2. Deletes the marked image layers. + +Starting in DTR 2.5, we introduced an experimental feature which lets you run garbage collection jobs +without putting DTR in read-only mode. As of v2.6, online garbage collection is no longer in +experimental mode. This means that the registry no longer has to be in read-only mode (or offline) +during garbage collection. + + +## Schedule garbage collection + +In your browser, navigate to `https://` and log in with your credentials. Select **System** on the left navigation pane, and then click +the **Garbage collection** tab to schedule garbage collection. + +![](../../images/garbage-collection-0.png){: .img-fluid .with-border} + +Select for how long the garbage collection job should run: +* Until done: Run the job until all unused image layers are deleted. +* For x minutes: Only run the garbage collection job for a maximum of x minutes +at a time. +* Never: Never delete unused image layers. + +If you select ***Until done*** or ***For x minutes***, you can specify a recurring schedule in UTC (Coordinated Universal Time) with the following options: +* Custom cron schedule - (Hour, Day of Month, Month, Weekday) +* Daily at midnight UTC +* Every Saturday at 1am UTC +* Every Sunday at 1am UTC +* Do not repeat + +![](../../images/garbage-collection-1.png){: .with-border} + +Once everything is configured you can choose to **Save & Start** to +run the garbage collection job immediately, or just **Save** to run the job on the next +scheduled interval. + +## Review the garbage collection job log + +In v2.5, you were notified with a banner under main navigation that no one can push images while a garbage collection job is running. With v2.6, this is no longer the case since garbage collection now happens while DTR is online and writable. + +If you clicked **Save & Start** previously, verify that the garbage collection routine started by navigating to **Job Logs**. + +![](../../images/garbage-collection-2.png){: .with-border} + +## Under the hood + +Each image stored in DTR is made up of multiple files: + +* A list of image layers that are unioned which represents the image filesystem +* A configuration file that contains the architecture of the image and other +metadata +* A manifest file containing the list of all layers and configuration file for +an image + +All these files are tracked in DTR's metadata store in RethinkDB. These files +are tracked in a content-addressable way such that a file corresponds to +a cryptographic hash of the file's content. This means that if two image tags hold exactly the same content, +DTR only stores the image content once while making hash collisions nearly impossible, +even if the tag name is different. + +As an example, if `wordpress:4.8` and `wordpress:latest` have the same content, +the content will only be stored once. If you delete one of these tags, the other won't +be deleted. + +This means that when you delete an image tag, DTR cannot delete the underlying +files of that image tag since other tags may also use the same files. + +To facilitate online garbage collection, DTR makes a couple of changes to how it uses the storage +backend: +1. Layer links – the references within repository directories to +their associated blobs – are no longer in the storage backend. That is because DTR stores these references in RethinkDB instead to enumerate through +them during the marking phase of garbage collection. + +2. 
Any layers created after an upgrade to 2.6 are no longer content-addressed in +the storage backend. Many cloud provider backends do not give the sequential +consistency guarantees required to deal with the simultaneous deleting and +re-pushing of a layer in a predictable manner. To account for this, DTR assigns +each newly pushed layer a unique ID and performs the translation from content hash +to ID in RethinkDB. + +To delete unused files, DTR does the following: +1. Establish a cutoff time +2. Mark each referenced manifest file with a timestamp. When manifest files +are pushed to DTR, they are also marked with a timestamp +3. Sweep each manifest file that does not have a timestamp after the cutoff time +4. If a file is never referenced – which means no image tag uses it – delete the file +5. Repeat the process for blob links and blob descriptors. + +## Where to go next + +- [Deploy DTR caches](deploy-caches/index.md) diff --git a/datacenter/dtr/2.6/guides/admin/configure/license-your-installation.md b/datacenter/dtr/2.6/guides/admin/configure/license-your-installation.md new file mode 100644 index 0000000000..8a148c1f0e --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/configure/license-your-installation.md @@ -0,0 +1,38 @@ +--- +title: License your installation +description: Learn how to license your Docker Trusted Registry installation. +keywords: dtr, install, license +--- + +By default, you don't need to license your Docker Trusted Registry. When +installing DTR, it automatically starts using the same license file used on +your Docker Universal Control Plane cluster. + +However, there are some situations when you have to manually license your +DTR installation: + +* When upgrading to a new major version, +* When your current license expires. + + +## Download your license + +Go to [Docker Hub](https://hub.docker.com/editions/enterprise/docker-ee-trial) +to download a trial license. + +![](../../images/license-1.png){: .with-border} + + +## License your installation + +Once you've downloaded the license file, you can apply it to your DTR +installation. Navigate to the **DTR web UI**, and then go to the **Settings +page**. + +![](../../images/license-2.png){: .with-border} + +Click the **Apply new license** button, and upload your new license file. + +## Where to go next + +- [Enable single sign-on](enable-single-sign-on.md) diff --git a/datacenter/dtr/2.6/guides/admin/configure/set-up-high-availability.md b/datacenter/dtr/2.6/guides/admin/configure/set-up-high-availability.md new file mode 100644 index 0000000000..5cfb9eef06 --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/configure/set-up-high-availability.md @@ -0,0 +1,92 @@ +--- +title: Set up high availability +description: Lean how to scale Docker Trusted Registry by adding and removing replicas. +keywords: dtr, install, deploy +--- + +Docker Trusted Registry is designed to scale horizontally as your usage +increases. You can add more replicas to make DTR scale to your demand and for +high availability. + +All DTR replicas run the same set of services and changes to their configuration +are automatically propagated to other replicas. + +![](../../images/set-up-high-availability-1.svg) + +To make DTR tolerant to failures, add additional replicas to the DTR cluster. 
+ +| DTR replicas | Failures tolerated | +|:------------:|:------------------:| +| 1 | 0 | +| 3 | 1 | +| 5 | 2 | +| 7 | 3 | + + +When sizing your DTR installation for high-availability, +follow these rules of thumb: + +* Don't create a DTR cluster with just two replicas. Your cluster +won't tolerate any failures, and it's possible that you experience performance +degradation. +* When a replica fails, the number of failures tolerated by your cluster +decreases. Don't leave that replica offline for long. +* Adding too many replicas to the cluster might also lead to performance +degradation, as data needs to be replicated across all replicas. + +To have high-availability on UCP and DTR, you need a minimum of: + +* 3 dedicated nodes to install UCP with high availability, +* 3 dedicated nodes to install DTR with high availability, +* As many nodes as you want for running your containers and applications. + +You also need to configure the DTR replicas to share the +[same object storage](external-storage/index.md). + +## Join more DTR replicas + +To add replicas to an existing DTR deployment: + +1. Use ssh to log into any node that is already part of UCP. + +2. Run the DTR join command: + + ```bash + docker run -it --rm \ + {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} join \ + --ucp-node \ + --ucp-insecure-tls + ``` + + Where the `--ucp-node` is the hostname of the UCP node where you want to + deploy the DTR replica. `--ucp-insecure-tls` tells the command to trust the + certificates used by UCP. + +3. If you have a load balancer, add this DTR replica to the load balancing pool. + +## Remove existing replicas + +To remove a DTR replica from your deployment: + +1. Use ssh to log into any node that is part of UCP. +2. Run the DTR remove command: + +```bash +docker run -it --rm \ + {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} remove \ + --ucp-insecure-tls +``` + +You will be prompted for: + +* Existing replica id: the id of any healthy DTR replica of that cluster +* Replica id: the id of the DTR replica you want to remove. It can be the id of an +unhealthy replica +* UCP username and password: the administrator credentials for UCP + +If you're load-balancing user requests across multiple DTR replicas, don't +forget to remove this replica from the load balancing pool. + +## Where to go next + +- [Set up vulnerability scans](set-up-vulnerability-scans.md) diff --git a/datacenter/dtr/2.6/guides/admin/configure/set-up-vulnerability-scans.md b/datacenter/dtr/2.6/guides/admin/configure/set-up-vulnerability-scans.md new file mode 100644 index 0000000000..30cbc5bd91 --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/configure/set-up-vulnerability-scans.md @@ -0,0 +1,192 @@ +--- +title: Set up Security Scanning in DTR +description: Enable and configure Docker Security Scanning for Docker Trusted Registry. +keywords: registry, scanning, security scan, vulnerability, CVE +redirect_from: + - /datacenter/dtr/2.2/guides/admin/configure/set-up-vulnerability-scans/ +--- + +This page explains how to set up and enable Docker Security Scanning on an +existing installation of Docker Trusted Registry. + +## Prerequisites + +These instructions assume that you have already installed Docker Trusted +Registry, and have access to an account on the DTR instance with administrator +access. 
+ +Before you begin, make sure that you or your organization has purchased a DTR +license that includes Docker Security Scanning, and that your Docker ID can +access and download this license from the Docker Hub. + +If you are using a license associated with an individual account, no additional +action is needed. If you are using a license associated with an organization +account, you may need to make sure your Docker ID is a member of the `Owners` +team. Only `Owners` team members can download license files for an Organization. + +If you will be allowing the Security Scanning database to update itself +automatically, make sure that the server hosting your DTR instance can access +`https://dss-cve-updates.docker.com/` on the standard https port 443. + +## Get the security scanning license. + +If your DTR instance already has a license that includes Security Scanning, skip +this step and proceed to [enable DTR Security Scanning](#enable-dtr-security-scanning). + +> **Tip**: To check if your existing DTR license includes scanning, navigate +> to the DTR **Settings** page, and click **Security**. If an "Enable scanning" +> toggle appears, the license includes scanning. + +If your current DTR license doesn't include scanning, you must first download +the new license. + +1. Log in to the Docker Hub using a Docker ID with access to the license you need. +2. In the top right corner, click your user account icon, and select **My Content**. +3. Locate **Docker Enterprise Edition** in the content list, and click **Setup**. +4. Click **License Key** to download the license. + ![](../../images/security-scanning-setup-1.png){: .with-border} + +Next, install the new license on the DTR instance. + +7. Log in to your DTR instance using an administrator account. +8. Click **Settings** in the left navigation. +9. On the **General** tab click **Apply new license**. + + A file browser dialog appears. + +10. Navigate to where you saved the license key (`.lic`) file, select it, and click **Open**. + +![](../../images/security-scanning-setup-2.png){: .with-border} + +Proceed to [enable DTR Security Scanning](#enable-dtr-security-scanning). + +## Enable DTR security scanning + +To enable security scanning in DTR: + +1. Log in to your DTR instance with an administrator account. +2. Click **Settings** in the left navigation. +3. Click the **Security** tab. + + > **Note**: If you see a message on this tab telling you to contact your Docker sales representative, then the license installed on this DTR instance does not include Docker Security Scanning. Check that you have purchased Security Scanning, and that the DTR instance is using the latest license file. + +4. Click the **Enable scanning** toggle so that it turns blue and says "on". + ![](../../images/security-scanning-setup-3.png){: .with-border} +5. Next, provide a security database for the scanner. **Security scanning will not function until DTR has a security database to use.** + + By default, security scanning is enabled in **Online** mode. In this mode, + DTR attempts to download a security database from a Docker server. If your + installation cannot access `https://dss-cve-updates.docker.com/` you must + manually upload a `.tar` file containing the security database. + + - If you are using `Online` mode, the DTR instance will contact a Docker server, download the latest vulnerability database, and install it. Scanning can begin once this process completes. 
+ - If you are using `Offline` mode, use the instructions in [Update scanning database - offline mode](#update-cve-database---offline-mode) to upload an initial security database. + +By default when Security Scanning is enabled, new repositories will automatically scan on `docker push`. If you had existing repositories before you enabled security scanning, you might want to [change repository scanning behavior](#set-repository-scanning-mode). + +## Set repository scanning mode + +Two modes are available when Security Scanning is enabled: + +- `Scan on push & Scan manually`: the image is re-scanned on each `docker push` to the repository, and whenever a user with `write` access clicks the **Start Scan** links or **Scan** button. +- `Scan manually`: the image is scanned only when a user with `write` access clicks the **Start Scan** links or **Scan** button. + +By default, _new_ repositories are set to `Scan on push & Scan manually`, but +you can change this setting during repository creation. + +![](../../images/security-scanning-setup-4.png){: .with-border} + +Any repositories that existed before scanning was enabled are set to `Scan manually` mode by default. If these repositories are still in use, you can change this setting from each repository's **Settings** page. + +> **Note**: To change an individual repository's scanning mode, you must have +`write` or `admin` access to the repo. + +To change an individual repository's scanning mode: + +1. Navigate to the repository, and click the **Settings** tab. +2. Scroll down to the **Image scanning** section. +3. Select the desired scanning mode. +![](../../images/security-scanning-setup-5.png){: .with-border} + +## Update the CVE scanning database + +Docker Security Scanning indexes the components in your DTR images and compares +them against a known CVE database. When new vulnerabilities are reported, Docker +Security Scanning matches the components in new CVE reports to the indexed +components in your images, and quickly generates an updated report. + +Users with administrator access to DTR can check when the CVE database was last updated from the **Security** tab in the DTR **Settings** pages. + +### Update CVE database - online mode + +By default Docker Security Scanning checks automatically for updates to the +vulnerability database, and downloads them when available. If your installation +does not have access to the public internet, use the [Offline mode instructions below](#update-cve-database-offline-mode). + +To ensure that DTR can access these updates, make sure that the host can reach +`https://dss-cve-updates.docker.com/` on port 443 using https. + +DTR checks for new CVE database updates at 3:00 AM UTC every day. If an update +is found it is downloaded and applied without interrupting any scans in +progress. Once the update is complete, the security scanning system looks for +new vulnerabilities in the indexed components. + +To set the update mode to Online: + +1. Log in to DTR as a user with administrator rights. +2. Click **Settings** in the left navigation and click **Security**. +3. Click **Online**. + +Your choice is saved automatically. + +> **Tip**: DTR also checks for CVE database updates when scanning is first enabled, and when you switch update modes. If you need to check for a CVE database update immediately, you can briefly switch modes from online to offline and back again. 
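+
+If automatic updates don't appear to arrive, it can help to first confirm that the host
+running DTR can actually reach the update server over HTTPS. A minimal check, run on the
+DTR node (the `grep` only trims curl's verbose output and is not required):
+
+```bash
+# Any HTTP response (even 403 or 404) means connectivity and TLS are working;
+# a timeout or certificate error usually points at a firewall or proxy issue.
+curl -svo /dev/null https://dss-cve-updates.docker.com/ 2>&1 | grep -E 'Connected to|SSL connection|HTTP/'
+```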
+ +### Update CVE database - offline mode + +To update the CVE database for your DTR instance when it cannot contact the update server, you download and install a `.tar` file that contains the database updates. To download the file: + +1. Log in to [Docker Hub](https://hub.docker.com/). + + If you are a member of an Organization managing licenses using Docker Hub, + make sure your account is a member of the `Owners` team. Only Owners can + view and manage licenses and other entitlements for Organizations from + Docker Hub. + +2. In the top right corner, click your user account icon, and select **My Content**. +3. If necessary, select an organization account from the **Accounts** menu at the upper right. +4. Locate your Docker EE Advanced subscription or trial. +5. Click **Setup** button. + + ![](../../images/security-scanning-setup-6.png){: .with-border} + +6. Click **Download CVE Vulnerability Database** link to download the database file. + + ![](../../images/security-scanning-setup-7.png){: .with-border} + + If you run into problems, contact us at nautilus-feedback@docker.com for the file. + +To manually update the DTR CVE database from a `.tar` file: + +1. Log in to DTR as a user with administrator rights. +2. Click **Settings** in the left navigation and click **Security**. +4. Click **Upload .tar database file**. +5. Browse to the latest `.tar` file that you received, and click **Open**. + +DTR installs the new CVE database, and begins checking already indexed images +for components that match new or updated vulnerabilities. + +> **Tip**: The Upload button is unavailable while DTR applies CVE database updates. + +## Enable or disable automatic database updates + +To change the update mode: + +1. Log in to DTR as a user with administrator rights. +2. Click **Settings** in the left navigation and click **Security**. +3. Click **Online/Offline**. + +Your choice is saved automatically. + +## Where to go next + +- [Deploy DTR caches](deploy-caches/index.md) diff --git a/datacenter/dtr/2.6/guides/admin/configure/use-a-load-balancer.md b/datacenter/dtr/2.6/guides/admin/configure/use-a-load-balancer.md new file mode 100644 index 0000000000..5351e917fb --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/configure/use-a-load-balancer.md @@ -0,0 +1,280 @@ +--- +title: Use a load balancer +description: Learn how to configure a load balancer to balance user requests across multiple Docker Trusted Registry replicas. +keywords: dtr, load balancer +--- + +Once you’ve joined multiple DTR replicas nodes for +[high-availability](set-up-high-availability.md), you can configure your own +load balancer to balance user requests across all replicas. + +![](../../images/use-a-load-balancer-1.svg) + + +This allows users to access DTR using a centralized domain name. If a replica +goes down, the load balancer can detect that and stop forwarding requests to +it, so that the failure goes unnoticed by users. + +DTR exposes several endpoints you can use to assess if a DTR replica is healthy +or not: + +* `/_ping`: Is an unauthenticated endpoint that checks if the DTR replica is +healthy. This is useful for load balancing or other automated health check tasks. +* `/nginx_status`: Returns the number of connections being handled by the +NGINX front-end used by DTR. +* `/api/v0/meta/cluster_status`: Returns extensive information about all DTR +replicas. + +## Load balance DTR + +DTR does not provide a load balancing service. 
You can use an on-premises +or cloud-based load balancer to balance requests across multiple DTR replicas. + +> Additional load balancer requirements for UCP +> +> If you are also using UCP, there are [additional requirements](https://docs.docker.com/ee/ucp/admin/configure/join-nodes/use-a-load-balancer/#load-balancing-ucp-and-dtr) if you plan to load balance both UCP and DTR using the same load balancer. +> +>{: .important} + +You can use the unauthenticated `/_ping` endpoint on each DTR replica, +to check if the replica is healthy and if it should remain in the load balancing +pool or not. + +Also, make sure you configure your load balancer to: + +* Load balance TCP traffic on ports 80 and 443. +* Not terminate HTTPS connections. +* Not buffer requests. +* Forward the `Host` HTTP header correctly. +* Have no timeout for idle connections, or set it to more than 10 minutes. + +The `/_ping` endpoint returns a JSON object for the replica being queried of +the form: + +```json +{ + "Error": "error message", + "Healthy": true +} +``` + +A response of `"Healthy": true` means the replica is suitable for taking +requests. It is also sufficient to check whether the HTTP status code is 200. + +An unhealthy replica will return 503 as the status code and populate `"Error"` +with more details on any one of these services: + +* Storage container (registry) +* Authorization (garant) +* Metadata persistence (rethinkdb) +* Content trust (notary) + +Note that this endpoint is for checking the health of a single replica. To get +the health of every replica in a cluster, querying each replica individually is +the preferred way to do it in real time. + + +## Configuration examples + +Use the following examples to configure your load balancer for DTR. + + +
+
+
```conf
user nginx;
worker_processes 1;

error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

stream {
    upstream dtr_80 {
        server <DTR_REPLICA_1_IP>:80 max_fails=2 fail_timeout=30s;
        server <DTR_REPLICA_2_IP>:80 max_fails=2 fail_timeout=30s;
        server <DTR_REPLICA_3_IP>:80 max_fails=2 fail_timeout=30s;
    }
    upstream dtr_443 {
        server <DTR_REPLICA_1_IP>:443 max_fails=2 fail_timeout=30s;
        server <DTR_REPLICA_2_IP>:443 max_fails=2 fail_timeout=30s;
        server <DTR_REPLICA_3_IP>:443 max_fails=2 fail_timeout=30s;
    }
    server {
        listen 443;
        proxy_pass dtr_443;
    }

    server {
        listen 80;
        proxy_pass dtr_80;
    }
}
```
+
+
```conf
global
  log /dev/log local0
  log /dev/log local1 notice

defaults
  mode tcp
  option dontlognull
  timeout connect 5s
  timeout client 50s
  timeout server 50s
  timeout tunnel 1h
  timeout client-fin 50s
### frontends
# Optional HAProxy Stats Page accessible at http://<dtr-domain-name>:8181/haproxy?stats
frontend dtr_stats
  mode http
  bind 0.0.0.0:8181
  default_backend dtr_stats
frontend dtr_80
  mode tcp
  bind 0.0.0.0:80
  default_backend dtr_upstream_servers_80
frontend dtr_443
  mode tcp
  bind 0.0.0.0:443
  default_backend dtr_upstream_servers_443
### backends
backend dtr_stats
  mode http
  option httplog
  stats enable
  stats admin if TRUE
  stats refresh 5m
backend dtr_upstream_servers_80
  mode tcp
  option httpchk GET /_ping HTTP/1.1\r\nHost:\ <dtr-domain-name>
  server node01 <DTR_REPLICA_1_IP>:80 check weight 100
  server node02 <DTR_REPLICA_2_IP>:80 check weight 100
  server node03 <DTR_REPLICA_3_IP>:80 check weight 100
backend dtr_upstream_servers_443
  mode tcp
  option httpchk GET /_ping HTTP/1.1\r\nHost:\ <dtr-domain-name>
  server node01 <DTR_REPLICA_1_IP>:443 weight 100 check check-ssl verify none
  server node02 <DTR_REPLICA_2_IP>:443 weight 100 check check-ssl verify none
  server node03 <DTR_REPLICA_3_IP>:443 weight 100 check check-ssl verify none
```
+
+```json +{ + "Subnets": [ + "subnet-XXXXXXXX", + "subnet-YYYYYYYY", + "subnet-ZZZZZZZZ" + ], + "CanonicalHostedZoneNameID": "XXXXXXXXXXX", + "CanonicalHostedZoneName": "XXXXXXXXX.us-west-XXX.elb.amazonaws.com", + "ListenerDescriptions": [ + { + "Listener": { + "InstancePort": 443, + "LoadBalancerPort": 443, + "Protocol": "TCP", + "InstanceProtocol": "TCP" + }, + "PolicyNames": [] + } + ], + "HealthCheck": { + "HealthyThreshold": 2, + "Interval": 10, + "Target": "HTTPS:443/_ping", + "Timeout": 2, + "UnhealthyThreshold": 4 + }, + "VPCId": "vpc-XXXXXX", + "BackendServerDescriptions": [], + "Instances": [ + { + "InstanceId": "i-XXXXXXXXX" + }, + { + "InstanceId": "i-XXXXXXXXX" + }, + { + "InstanceId": "i-XXXXXXXXX" + } + ], + "DNSName": "XXXXXXXXXXXX.us-west-2.elb.amazonaws.com", + "SecurityGroups": [ + "sg-XXXXXXXXX" + ], + "Policies": { + "LBCookieStickinessPolicies": [], + "AppCookieStickinessPolicies": [], + "OtherPolicies": [] + }, + "LoadBalancerName": "ELB-DTR", + "CreatedTime": "2017-02-13T21:40:15.400Z", + "AvailabilityZones": [ + "us-west-2c", + "us-west-2a", + "us-west-2b" + ], + "Scheme": "internet-facing", + "SourceSecurityGroup": { + "OwnerAlias": "XXXXXXXXXXXX", + "GroupName": "XXXXXXXXXXXX" + } +} +``` +
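+
+If you manage the load balancer with the AWS CLI instead of the console, the health check
+from the listing above can be applied with a command along these lines. This is a sketch:
+`ELB-DTR` matches the `LoadBalancerName` in the example, and the thresholds should be
+adjusted to your environment.
+
+```bash
+# Point a Classic ELB health check at DTR's unauthenticated /_ping endpoint.
+# Values mirror the HealthCheck block in the example configuration above.
+aws elb configure-health-check \
+  --load-balancer-name ELB-DTR \
+  --health-check Target=HTTPS:443/_ping,Interval=10,Timeout=2,UnhealthyThreshold=4,HealthyThreshold=2
+```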
+
+ + +You can deploy your load balancer using: + + +
+
+```conf +# Create the nginx.conf file, then +# deploy the load balancer + +docker run --detach \ + --name dtr-lb \ + --restart=unless-stopped \ + --publish 80:80 \ + --publish 443:443 \ + --volume ${PWD}/nginx.conf:/etc/nginx/nginx.conf:ro \ + nginx:stable-alpine +``` +
+
+```conf +# Create the haproxy.cfg file, then +# deploy the load balancer + +docker run --detach \ + --name dtr-lb \ + --publish 443:443 \ + --publish 80:80 \ + --publish 8181:8181 \ + --restart=unless-stopped \ + --volume ${PWD}/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro \ + haproxy:1.7-alpine haproxy -d -f /usr/local/etc/haproxy/haproxy.cfg +``` +
+
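+
+Once the load balancer is deployed, you can spot-check every replica behind it with the
+unauthenticated `/_ping` endpoint described earlier. A minimal sketch follows; the replica
+addresses are placeholders for your own nodes, and `-k` is only needed while DTR still
+uses self-signed certificates:
+
+```bash
+# Print the /_ping HTTP status for each DTR replica.
+# 200 means the replica is healthy; 503 means it should be removed from
+# the load balancing pool until it is repaired.
+for replica in dtr-replica-1.example.com dtr-replica-2.example.com dtr-replica-3.example.com; do
+  code=$(curl -ks -o /dev/null -w '%{http_code}' "https://${replica}/_ping")
+  echo "${replica}: HTTP ${code}"
+done
+```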
+ +## Where to go next + +- [Backups and disaster recovery](../disaster-recovery/index.md) +- [Monitor and troubleshoot](../monitor-and-troubleshoot/index.md) diff --git a/datacenter/dtr/2.6/guides/admin/configure/use-a-web-proxy.md b/datacenter/dtr/2.6/guides/admin/configure/use-a-web-proxy.md new file mode 100644 index 0000000000..56f8c86a51 --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/configure/use-a-web-proxy.md @@ -0,0 +1,46 @@ +--- +title: Use a web proxy +description: Learn how to configure Docker Content Trust to use a web proxy to + reach external services. +keywords: dtr, configure, http, proxy +--- + +Docker Trusted Registry makes outgoing connections to check for new versions, +automatically renew its license, and update its vulnerability database. +If DTR can't access the internet, then you'll have to manually apply updates. + +One option to keep your environment secure while still allowing DTR access to +the internet is to use a web proxy. If you have an HTTP or HTTPS proxy, you +can configure DTR to use it. To avoid downtime you should do this configuration +outside business peak hours. + +As an administrator, log into a node where DTR is deployed, and run: + +```bash +docker run -it --rm \ + {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} reconfigure \ + --http-proxy http://: \ + --https-proxy https://: \ + --ucp-insecure-tls +``` + +To confirm how DTR is configured, check the **Settings** page on the web UI. + +![DTR settings](../../images/use-a-web-proxy-1.png){: .with-border} + +If by chance the web proxy requires authentication you can submit the username +and password, in the command, as shown below: + +```bash +docker run -it --rm \ + {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} reconfigure \ + --http-proxy username:password@: \ + --https-proxy username:password@: \ + --ucp-insecure-tls +``` + +> **Note**: DTR will hide the password portion of the URL, when it is displayed in the DTR UI. + +## Where to go next + +- [Configure garbage collection](garbage-collection.md) diff --git a/datacenter/dtr/2.6/guides/admin/configure/use-your-own-tls-certificates.md b/datacenter/dtr/2.6/guides/admin/configure/use-your-own-tls-certificates.md new file mode 100644 index 0000000000..ee175edc04 --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/configure/use-your-own-tls-certificates.md @@ -0,0 +1,44 @@ +--- +title: Use your own TLS certificates +description: Learn how to configure Docker Trusted Registry with your own TLS certificates. +keywords: dtr, tls +--- + +By default the DTR services are exposed using HTTPS, to ensure all +communications between clients and DTR is encrypted. Since DTR +replicas use self-signed certificates for this, when a client accesses +DTR, their browsers won't trust this certificate, so the browser displays a +warning message. + +You can configure DTR to use your own certificates, so that it is automatically +trusted by your users' browser and client tools. + +## Replace the server certificates + +To configure DTR to use your own certificates and keys, go to the +**DTR web UI**, navigate to the **Settings** page, and scroll down to the +**Domain** section. + +![](../../images/use-your-certificates-1.png){: .with-border} + + +Set the DTR domain name and upload the certificates and key: + +* Load balancer/public address, is the domain name clients will use to access DTR. +* TLS certificate, is the server certificate and any intermediate CA public +certificates. 
This certificate needs to be valid for the DTR public address, +and have SANs for all addresses used to reach the DTR replicas, including load +balancers. +* TLS private key is the server private key. +* TLS CA is the root CA public certificate. + +Finally, click **Save** for the changes to take effect. + +If you're using certificates issued by a globally trusted certificate authority, +any web browser or client tool should now trust DTR. If you're using an internal +certificate authority, you'll need to configure your system to trust that +certificate authority. + +## Where to go next + +- [Set up external storage](external-storage/index.md) diff --git a/datacenter/dtr/2.6/guides/admin/disaster-recovery/create-a-backup.md b/datacenter/dtr/2.6/guides/admin/disaster-recovery/create-a-backup.md new file mode 100644 index 0000000000..75787441db --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/disaster-recovery/create-a-backup.md @@ -0,0 +1,223 @@ +--- +title: Create a backup +description: Learn how to create a backup of Docker Trusted Registry, for disaster recovery. +keywords: dtr, disaster recovery +toc_max_header: 3 +--- + +{% assign metadata_backup_file = "dtr-metadata-backup.tar" %} +{% assign image_backup_file = "dtr-image-backup.tar" %} + + +## Data managed by DTR + +Docker Trusted Registry maintains data about: + +| Data | Description | +|:-----------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------| +| Configurations | The DTR cluster configurations | +| Repository metadata | The metadata about the repositories and images deployed | +| Access control to repos and images | Permissions for teams and repositories | +| Notary data | Notary tags and signatures | +| Scan results | Security scanning results for images | +| Certificates and keys | The certificates, public keys, and private keys that are used for mutual TLS communication | +| Images content | The images you push to DTR. This can be stored on the file system of the node running DTR, or other storage system, depending on the configuration | + +This data is persisted on the host running DTR, using named volumes. +[Learn more about DTR named volumes](../../architecture.md). + +To perform a backup of a DTR node, run the [docker/dtr backup](/reference/dtr/2.6/cli/backup/) command. This +command backs up the following data: + +| Data | Backed up | Description | +|:-----------------------------------|:----------|:---------------------------------------------------------------| +| Configurations | yes | DTR settings | +| Repository metadata | yes | Metadata such as image architecture and size | +| Access control to repos and images | yes | Data about who has access to which images | +| Notary data | yes | Signatures and digests for images that are signed | +| Scan results | yes | Information about vulnerabilities in your images | +| Certificates and keys | yes | TLS certificates and keys used by DTR | +| Image content | no | Needs to be backed up separately, depends on DTR configuration | +| Users, orgs, teams | no | Create a UCP backup to back up this data | +| Vulnerability database | no | Can be redownloaded after a restore | + + +## Back up DTR data + +To create a backup of DTR, you need to: + +1. Back up image content +2. Back up DTR metadata + +You should always create backups from the same DTR replica, to ensure a smoother +restore. 
If you have not previously performed a backup, the web interface displays a warning for you to do so: + +![](/ee/dtr/images/backup-warning.png) + +#### Find your replica ID + +Since you need your DTR replica ID during a backup, the following covers a few ways for you to determine your replica ID: + +##### UCP web interface + +You can find the list of replicas by navigating to **Shared Resources > Stacks** or **Swarm > Volumes** (when using [swarm mode](/engine/swarm/)) on the UCP web interface. + +##### UCP client bundle + +From a terminal [using a UCP client bundle]((/ee/ucp/user-access/cli/)), run: + +{% raw %} +```bash +docker ps --format "{{.Names}}" | grep dtr + +# The list of DTR containers with /-, e.g. +# node-1/dtr-api-a1640e1c15b6 +``` +{% endraw %} + + +##### SSH access + +Another way to determine the replica ID is to log into a DTR node using SSH and run the following: + +{% raw %} +```bash +REPLICA_ID=$(docker ps --format '{{.Names}}' -f name=dtr-rethink | cut -f 3 -d '-') +&& echo $REPLICA_ID +``` +{% endraw %} + +### Back up image content + +Since you can configure the storage backend that DTR uses to store images, +the way you back up images depends on the storage backend you're using. + +If you've configured DTR to store images on the local file system or NFS mount, +you can back up the images by using SSH to log into a DTR node, +and creating a `tar` archive of the [dtr-registry volume](../../architecture.md): + +#### Example backup command + +##### Local images + +{% raw %} +```none +sudo tar -cf dtr-image-backup-$(date +%Y%m%d-%H_%M_%S).tar \ +/var/lib/docker/volumes/dtr-registry-$(docker ps --format '{{.Names}}' -f name=dtr-rethink | cut -f 3 -d '-') +``` +{% endraw %} + +###### Expected output +```bash +tar: Removing leading `/' from member names +``` + +If you're using a different storage backend, follow the best practices +recommended for that system. + + +### Back up DTR metadata + +To create a DTR backup, load your UCP client bundle, and run the following +command. + +#### Chained commands (Linux only) + +{% raw %} +```none +DTR_VERSION=$(docker container inspect $(docker container ps -f name=dtr-registry -q) | \ + grep -m1 -Po '(?<=DTR_VERSION=)\d.\d.\d'); \ +REPLICA_ID=$(docker ps --format '{{.Names}}' -f name=dtr-rethink | cut -f 3 -d '-'); \ +read -p 'ucp-url (The UCP URL including domain and port): ' UCP_URL; \ +read -p 'ucp-username (The UCP administrator username): ' UCP_ADMIN; \ +read -sp 'ucp password: ' UCP_PASSWORD; \ +docker run --log-driver none -i --rm \ + --env UCP_PASSWORD=$UCP_PASSWORD \ + docker/dtr:$DTR_VERSION backup \ + --ucp-username $UCP_ADMIN \ + --ucp-url $UCP_URL \ + --ucp-ca "$(curl https://${UCP_URL}/ca)" \ + --existing-replica-id $REPLICA_ID > dtr-metadata-${DTR_VERSION}-backup-$(date +%Y%m%d-%H_%M_%S).tar +``` +{% endraw %} + +#### UCP field prompts + +* `` is the URL you use to access UCP. +* `` is the username of a UCP administrator. +* `` is the DTR replica ID to back up. + +The above chained commands run through the following tasks: +1. Sets your DTR version and replica ID. To back up +a specific replica, set the replica ID manually by modifying the +`--existing-replica-id` flag in the backup command. +2. Prompts you for your UCP URL (domain and port) and admin username. +3. Prompts you for your UCP password without saving it to your disk or printing it on the terminal. +4. Retrieves the CA certificate for your specified UCP URL. To skip TLS verification, replace the `--ucp-ca` +flag with `--ucp-insecure-tls`. 
Docker does not recommend this flag for production environments. +5. Includes DTR version and timestamp to your `tar` backup file. + +You can learn more about the supported flags in +the [DTR backup reference documentation](/reference/dtr/2.6/cli/backup.md). + +By default, the backup command does not pause the DTR replica being backed up to +prevent interruptions of user access to DTR. Since the replica +is not stopped, changes that happen during the backup may not be saved. +Use the `--offline-backup` flag to stop the DTR replica during the backup procedure. If you set this flag, +remove the replica from the load balancing pool to avoid user interruption. + +Also, the backup contains sensitive information +like private keys, so you can encrypt the backup by running: + +```none +gpg --symmetric {{ metadata_backup_file }} +``` + +This prompts you for a password to encrypt the backup, copies the backup file +and encrypts it. + + +### Test your backups + +To validate that the backup was correctly performed, you can print the contents +of the tar file created. The backup of the images should look like: + +```none +tar -tf {{ metadata_backup_file }} + +dtr-backup-v{{ page.dtr_version }}/ +dtr-backup-v{{ page.dtr_version }}/rethink/ +dtr-backup-v{{ page.dtr_version }}/rethink/layers/ +``` + +And the backup of the DTR metadata should look like: + +```none +tar -tf {{ metadata_backup_file }} + +# The archive should look like this +dtr-backup-v{{ page.dtr_version }}/ +dtr-backup-v{{ page.dtr_version }}/rethink/ +dtr-backup-v{{ page.dtr_version }}/rethink/properties/ +dtr-backup-v{{ page.dtr_version }}/rethink/properties/0 +``` + +If you've encrypted the metadata backup, you can use: + +```none +gpg -d {{ metadata_backup_file }} | tar -t +``` + +You can also create a backup of a UCP cluster and restore it into a new +cluster. Then restore DTR on that new cluster to confirm that everything is +working as expected. + +## Where to go next +- [Configure your storage backend](/ee/dtr/admin/configure/external-storage/index.md) +- [Switch your storage backend](/ee/dtr/admin/configure/external-storage/storage-backend-migration.md) +- [Use NFS](/ee/dtr/admin/configure/external-storage/nfs.md) +- [Use S3](/ee/dtr/admin/configure/external-storage/s3.md) +- CLI reference pages + - [docker/dtr install](/reference/dtr/2.6/cli/install/) + - [docker/dtr reconfigure](/reference/dtr/2.6/cli/reconfigure/) + - [docker/dtr restore](/reference/dtr/2.6/cli/restore/) diff --git a/datacenter/dtr/2.6/guides/admin/disaster-recovery/index.md b/datacenter/dtr/2.6/guides/admin/disaster-recovery/index.md new file mode 100644 index 0000000000..6e66181e94 --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/disaster-recovery/index.md @@ -0,0 +1,60 @@ +--- +title: DTR disaster recovery overview +description: Learn the multiple disaster recovery strategies you can use with + Docker Trusted Registry. +keywords: dtr, disaster recovery +--- + +Docker Trusted Registry is a clustered application. You can join multiple +replicas for high availability. + +For a DTR cluster to be healthy, a majority of its replicas (n/2 + 1) need to +be healthy and be able to communicate with the other replicas. This is also +known as maintaining quorum. + +This means that there are three failure scenarios possible. + +## Replica is unhealthy but cluster maintains quorum + +One or more replicas are unhealthy, but the overall majority (n/2 + 1) is still +healthy and able to communicate with one another. 
+ +![Failure scenario 1](../../images/dr-overview-1.svg) + +In this example the DTR cluster has five replicas but one of the nodes stopped +working, and the other has problems with the DTR overlay network. + +Even though these two replicas are unhealthy the DTR cluster has a majority +of replicas still working, which means that the cluster is healthy. + +In this case you should repair the unhealthy replicas, or remove them from +the cluster and join new ones. + +[Learn how to repair a replica](repair-a-single-replica.md). + +## The majority of replicas are unhealthy + +A majority of replicas are unhealthy, making the cluster lose quorum, but at +least one replica is still healthy, or at least the data volumes for DTR are +accessible from that replica. + +![Failure scenario 2](../../images/dr-overview-2.svg) + +In this example the DTR cluster is unhealthy but since one replica is still +running it's possible to repair the cluster without having to restore from +a backup. This minimizes the amount of data loss. + +[Learn how to do an emergency repair](repair-a-cluster.md). + +## All replicas are unhealthy + +This is a total disaster scenario where all DTR replicas were lost, causing +the data volumes for all DTR replicas to get corrupted or lost. + +![Failure scenario 3](../../images/dr-overview-3.svg) + +In a disaster scenario like this, you'll have to restore DTR from an existing +backup. Restoring from a backup should be only used as a last resort, since +doing an emergency repair might prevent some data loss. + +[Learn how to restore from a backup](restore-from-backup.md). \ No newline at end of file diff --git a/datacenter/dtr/2.6/guides/admin/disaster-recovery/repair-a-cluster.md b/datacenter/dtr/2.6/guides/admin/disaster-recovery/repair-a-cluster.md new file mode 100644 index 0000000000..e6bdf42edb --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/disaster-recovery/repair-a-cluster.md @@ -0,0 +1,90 @@ +--- +title: Repair a cluster +description: Learn how to repair DTR when the majority of replicas are unhealthy. +keywords: dtr, disaster recovery +redirect_from: + - /datacenter/dtr/2.5/guides/admin/disaster-recovery/repair-a-cluster/ +--- + +For a DTR cluster to be healthy, a majority of its replicas (n/2 + 1) need to +be healthy and be able to communicate with the other replicas. This is known +as maintaining quorum. + +In a scenario where quorum is lost, but at least one replica is still +accessible, you can use that replica to repair the cluster. That replica doesn't +need to be completely healthy. The cluster can still be repaired as the DTR +data volumes are persisted and accessible. + +![Unhealthy cluster](../../images/repair-cluster-1.svg) + +Repairing the cluster from an existing replica minimizes the amount of data lost. +If this procedure doesn't work, you'll have to +[restore from an existing backup](restore-from-backup.md). + +## Diagnose an unhealthy cluster + +When a majority of replicas are unhealthy, causing the overall DTR cluster to +become unhealthy, operations like `docker login`, `docker pull`, and `docker push` +present `internal server error`. + +Accessing the `/_ping` endpoint of any replica also returns the same error. +It's also possible that the DTR web UI is partially or fully unresponsive. + +## Perform an emergency repair + +Use the `docker/dtr emergency-repair` command to try to repair an unhealthy +DTR cluster, from an existing replica. 
+ +This command checks the data volumes for the DTR + +This command checks the data volumes for the DTR replica are uncorrupted, +redeploys all internal DTR components and reconfigured them to use the existing +volumes. + +It also reconfigures DTR removing all other nodes from the cluster, leaving DTR +as a single-replica cluster with the replica you chose. + +Start by finding the ID of the DTR replica that you want to repair from. +You can find the list of replicas by navigating to **Shared Resources > Stacks** or **Swarm > Volumes** (when using [swarm mode](/engine/swarm/)) on the UCP web interface, or by using +a UCP client bundle to run: + +{% raw %} +```bash +docker ps --format "{{.Names}}" | grep dtr + +# The list of DTR containers with /-, e.g. +# node-1/dtr-api-a1640e1c15b6 +``` +{% endraw %} + +Another way to determine the replica ID is to SSH into a DTR node and run the following: + +{% raw %} +```bash +REPLICA_ID=$(docker inspect -f '{{.Name}}' $(docker ps -q -f name=dtr-rethink) | cut -f 3 -d '-') +&& echo $REPLICA_ID +``` +{% endraw %} + +Then, use your UCP client bundle to run the emergency repair command: + +```bash +docker run -it --rm {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} emergency-repair \ + --ucp-insecure-tls \ + --existing-replica-id +``` + +If the emergency repair procedure is successful, your DTR cluster now has a +single replica. You should now +[join more replicas for high availability](../configure/set-up-high-availability.md). + +![Healthy cluster](../../images/repair-cluster-2.svg) + +If the emergency repair command fails, try running it again using a different +replica ID. As a last resort, you can restore your cluster from an existing +backup. + +## Where to go next + +- [Create a backup](create-a-backup.md) +- [Restore from an existing backup](restore-from-backup.md) diff --git a/datacenter/dtr/2.6/guides/admin/disaster-recovery/repair-a-single-replica.md b/datacenter/dtr/2.6/guides/admin/disaster-recovery/repair-a-single-replica.md new file mode 100644 index 0000000000..f61c21c13b --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/disaster-recovery/repair-a-single-replica.md @@ -0,0 +1,114 @@ +--- +title: Repair a single replica +description: Learn how to repair a single DTR replica when it is unhealthy. +keywords: dtr, disaster recovery +--- + +When one or more DTR replicas are unhealthy but the overall majority +(n/2 + 1) is healthy and able to communicate with one another, your DTR +cluster is still functional and healthy. + +![Cluster with two nodes unhealthy](../../images/repair-replica-1.svg) + +Given that the DTR cluster is healthy, there's no need to execute any disaster +recovery procedures like restoring from a backup. + +Instead, you should: + +1. Remove the unhealthy replicas from the DTR cluster. +2. Join new replicas to make DTR highly available. + +Since a DTR cluster requires a majority of replicas to be healthy at all times, +the order of these operations is important. If you join more replicas before +removing the ones that are unhealthy, your DTR cluster might become unhealthy. + +## Split-brain scenario + +To understand why you should remove unhealthy replicas before joining new ones, +imagine you have a five-replica DTR deployment, and something goes wrong with +the overlay network connection the replicas, causing them to be separated in +two groups. 
+ +![Cluster with network problem](../../images/repair-replica-2.svg) + +Because the cluster originally had five replicas, it can work as long as +three replicas are still healthy and able to communicate (5 / 2 + 1 = 3). +Even though the network separated the replicas in two groups, DTR is still +healthy. + +If at this point you join a new replica instead of fixing the network problem +or removing the two replicas that got isolated from the rest, it's possible +that the new replica ends up in the side of the network partition that has +less replicas. + +![cluster with split brain](../../images/repair-replica-3.svg) + +When this happens, both groups now have the minimum amount of replicas needed +to establish a cluster. This is also known as a split-brain scenario, because +both groups can now accept writes and their histories start diverging, making +the two groups effectively two different clusters. + +## Remove replicas + +To remove unhealthy replicas, you'll first have to find the replica ID +of one of the replicas you want to keep, and the replica IDs of the unhealthy +replicas you want to remove. + +You can find the list of replicas by navigating to **Shared Resources > Stacks** or **Swarm > Volumes** (when using [swarm mode](/engine/swarm/)) on the UCP web interface, or by using the UCP +client bundle to run: + +{% raw %} +```bash +docker ps --format "{{.Names}}" | grep dtr + +# The list of DTR containers with /-, e.g. +# node-1/dtr-api-a1640e1c15b6 +``` +{% endraw %} + +Another way to determine the replica ID is to SSH into a DTR node and run the following: + +{% raw %} +```bash +REPLICA_ID=$(docker inspect -f '{{.Name}}' $(docker ps -q -f name=dtr-rethink) | cut -f 3 -d '-') +&& echo $REPLICA_ID +``` +{% endraw %} + +Then use the UCP client bundle to remove the unhealthy replicas: + +```bash +docker run -it --rm {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} remove \ + --existing-replica-id \ + --replica-ids \ + --ucp-insecure-tls \ + --ucp-url \ + --ucp-username \ + --ucp-password +``` + +You can remove more than one replica at the same time, by specifying multiple +IDs with a comma. + +![Healthy cluster](../../images/repair-replica-4.svg) + +## Join replicas + +Once you've removed the unhealthy nodes from the cluster, you should join new +ones to make sure your cluster is highly available. + +Use your UCP client bundle to run the following command which prompts you for +the necessary parameters: + +```bash +docker run -it --rm \ + {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} join \ + --ucp-node \ + --ucp-insecure-tls +``` + +[Learn more about high availability](../configure/set-up-high-availability.md). + +## Where to go next + +- [Disaster recovery overview](index.md) diff --git a/datacenter/dtr/2.6/guides/admin/disaster-recovery/restore-from-backup.md b/datacenter/dtr/2.6/guides/admin/disaster-recovery/restore-from-backup.md new file mode 100644 index 0000000000..2986726e80 --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/disaster-recovery/restore-from-backup.md @@ -0,0 +1,110 @@ +--- +title: Restore from a backup +description: Learn how to restore a DTR cluster from an existing backup +keywords: dtr, disaster recovery +--- + +{% assign metadata_backup_file = "dtr-metadata-backup.tar" %} +{% assign image_backup_file = "dtr-image-backup.tar" %} + +## Restore DTR data + +If your DTR has a majority of unhealthy replicas, the one way to restore it to +a working state is by restoring from an existing backup. + +To restore DTR, you need to: + +1. 
Stop any DTR containers that might be running
+2. Restore the images from a backup
+3. Restore DTR metadata from a backup
+4. Re-fetch the vulnerability database
+
+You need to restore DTR on the same UCP cluster where you've created the
+backup. If you restore on a different UCP cluster, all DTR resources will be
+owned by users that don't exist, so you won't be able to manage the resources,
+even though they're stored in the DTR data store.
+
+When restoring, you need to use the same version of the `docker/dtr` image
+that you used when creating the backup. Other versions are not guaranteed
+to work.
+
+### Remove DTR containers
+
+Start by removing any DTR container that is still running:
+
+```none
+docker run -it --rm \
+  {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} destroy \
+  --ucp-insecure-tls
+```
+
+### Restore images
+
+If you had DTR configured to store images on the local filesystem, you can
+extract your backup:
+
+```none
+sudo tar -xf {{ image_backup_file }} -C /var/lib/docker/volumes
+```
+
+If you're using a different storage backend, follow the best practices
+recommended for that system.
+
+### Restore DTR metadata
+
+You can restore the DTR metadata with the `docker/dtr restore` command. This
+performs a fresh installation of DTR, and reconfigures it with
+the configuration created during a backup.
+
+Load your UCP client bundle, and run the following command, replacing the
+placeholders for the real values:
+
+```bash
+read -sp 'ucp password: ' UCP_PASSWORD;
+```
+
+This prompts you for the UCP password. Next, run the following to restore DTR from your backup. You can learn more about the supported flags in [docker/dtr restore](/reference/dtr/2.6/cli/restore).
+
+```bash
+docker run -i --rm \
+  --env UCP_PASSWORD=$UCP_PASSWORD \
+  {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} restore \
+  --ucp-url <ucp-url> \
+  --ucp-insecure-tls \
+  --ucp-username <ucp-username> \
+  --ucp-node <hostname> \
+  --replica-id <replica-id> \
+  --dtr-external-url <dtr-external-url> < {{ metadata_backup_file }}
+```
+
+Where:
+
+* `<ucp-url>` is the url you use to access UCP
+* `<ucp-username>` is the username of a UCP administrator
+* `<hostname>` is the hostname of the node where you've restored the images
+* `<replica-id>` is the id of the replica you backed up
+* `<dtr-external-url>` is the url that clients use to access DTR
+
+#### DTR 2.5 and below
+
+If you're using NFS as a storage backend, also include `--nfs-storage-url` as
+part of your restore command, otherwise DTR is restored but starts using a
+local volume to persist your Docker images.
+
+#### DTR 2.5 (with experimental online garbage collection) and DTR 2.6.0-2.6.3
+
+> When running DTR 2.5 (with experimental online garbage collection) and 2.6.0 to 2.6.3, there is an issue with [reconfiguring and restoring DTR with `--nfs-storage-url`](/ee/dtr/release-notes#version-26) which leads to erased tags. Make sure to [back up your DTR metadata](/ee/dtr/admin/disaster-recovery/create-a-backup/#back-up-dtr-metadata) before you proceed. To work around the `--nfs-storage-url` flag issue, manually create a storage volume on each DTR node. To [restore DTR](/reference/dtr/2.6/cli/restore/) from an existing backup, use `docker/dtr restore` with `--dtr-storage-volume` and the new volume.
+>
+> See [Restore to a Local NFS Volume](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#restoretoalocalnfsvolume) for Docker's recommended recovery strategy.
+{: .warning}
+
+### Re-fetch the vulnerability database
+
+If you're scanning images, you now need to download the vulnerability database.
+ +After you successfully restore DTR, you can join new replicas the same way you +would after a fresh installation. [Learn more](../configure/set-up-vulnerability-scans.md). + +## Where to go next + +- [docker/dtr restore](/reference/dtr/2.6/cli/restore/) diff --git a/datacenter/dtr/2.6/guides/admin/install/index.md b/datacenter/dtr/2.6/guides/admin/install/index.md new file mode 100644 index 0000000000..779a009003 --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/install/index.md @@ -0,0 +1,146 @@ +--- +title: Install Docker Trusted Registry +description: Learn how to install Docker Trusted Registry for production. +keywords: dtr, registry, install +--- + +Docker Trusted Registry (DTR) is a containerized application that runs on a +swarm managed by the Universal Control Plane (UCP). It can be installed +on-premises or on a cloud infrastructure. + +## Step 1. Validate the system requirements + +Before installing DTR, make sure your +infrastructure meets the [system requirements](./system-requirements) that DTR needs to run. + +## Step 2. Install UCP + +Since DTR requires Docker Universal Control Plane (UCP) +to run, you need to [install UCP](/ee/ucp/admin/install/) on all the nodes where you plan to install DTR. + +DTR needs to be installed on a worker node that is being managed by UCP. +You cannot install DTR on a standalone Docker Engine. + +![](../../images/install-dtr-1.svg) + + +## Step 3. Install DTR + +Once UCP is installed, navigate to the **UCP web UI**. In the **Admin Settings**, +choose **Docker Trusted Registry**. + +![](../../images/install-dtr-2.png){: .with-border} + +After you configure all the options, you'll have a snippet that you can use +to deploy DTR. It should look like this: + +```bash +# Pull the latest version of DTR +$ docker pull {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} + +# Install DTR +$ docker run -it --rm \ + {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} install \ + --ucp-node \ + --ucp-insecure-tls +``` + +You can run that snippet on any node where Docker is installed. As an example +you can SSH into a UCP node and run the DTR installer from there. By default +the installer runs in interactive mode and prompts you for any additional +information that is necessary. +[Learn more about the installer](/reference/dtr/2.6/cli/install/). + +By default DTR is deployed with self-signed certificates, so your UCP deployment +might not be able to pull images from DTR. +Use the `--dtr-external-url :` optional flag while deploying +DTR, so that UCP is automatically reconfigured to trust DTR. Since [HSTS (HTTP Strict-Transport-Security) +header](https://en.wikipedia.org/wiki/HTTP_Strict_Transport_Security) is included in all API responses, +make sure to specify the FQDN (Fully Qualified Domain Name) of your DTR, or your browser may refuse +to load the web interface. + +## Step 4. Check that DTR is running + +In your browser, navigate to the Docker **Universal Control Plane** +web interface, and navigate to **Shared Resources > Stacks**. DTR should be listed +as an application. + + +![](../../images/install-dtr-3.png){: .with-border} + +You can also access the **DTR web interface**, to make sure it is working. In your +browser, navigate to the address where you installed DTR. + +![](../../images/create-repository-1.png){: .with-border} + + +## Step 5. Configure DTR + +After installing DTR, you should configure: + + * The certificates used for TLS communication. [Learn more](../configure/use-your-own-tls-certificates.md). 
+ * The storage backend to store the Docker images. [Lean more](../configure/external-storage/index.md). + + To perform these configurations, navigate to the **Settings** page of DTR. + + ![](../../images/auto-delete-job-logs-1.png){: .with-border} + + +## Step 6. Test pushing and pulling + +Now that you have a working installation of DTR, you should test that you can +push and pull images to it: + +* [Configure your local Docker Engine](../../user/access-dtr/index.md) +* [Create a repository](../../user/manage-images/index.md) +* [Push and pull images](../../user/manage-images/pull-and-push-images.md) + +## Step 7. Join replicas to the cluster + +This step is optional. + +To set up DTR for high availability, +you can add more replicas to your DTR cluster. Adding more replicas allows you +to load-balance requests across all replicas, and keep DTR working if a +replica fails. + +For high-availability you should set 3, 5, or 7 DTR replicas. The nodes where +you're going to install these replicas also need to be managed by UCP. + +To add replicas to a DTR cluster, use the `docker/dtr join` command: + +1. Load your [UCP user bundle](/ee/ucp/user-access/cli/#use-client-certificates). + +2. Run the join command. + + When you join a replica to a DTR cluster, you need to specify the + ID of a replica that is already part of the cluster. You can find an + existing replica ID by going to the **Shared Resources > Stacks** page on UCP. + + Then run: + + ```bash + docker run -it --rm \ + {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} join \ + --ucp-node \ + --ucp-insecure-tls + ``` + + > --ucp-node + > + > The following the --ucp-node flag is the target node to + > install the DTR replica. This is NOT the UCP Manager URL. + {: .important} + +3. Check that all replicas are running. + + In your browser, navigate to the Docker **Universal Control Plane** + web interface, and navigate to **Shared Resources > Stacks**. All replicas should + be displayed. + + ![](../../images/install-dtr-6.png){: .with-border} + +## Where to go next + +- [Install DTR offline](install-offline.md) +- [Upgrade DTR](../upgrade.md) diff --git a/datacenter/dtr/2.6/guides/admin/install/install-offline.md b/datacenter/dtr/2.6/guides/admin/install/install-offline.md new file mode 100644 index 0000000000..5c22de8cb6 --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/install/install-offline.md @@ -0,0 +1,70 @@ +--- +title: Install Docker Trusted Registry offline +description: Learn how to install Docker Trusted Registry on a machine with no internet + access. +keywords: registry, install, offline +--- + +The procedure to install Docker Trusted Registry on a host is the same, +whether that host has access to the internet or not. + +The only difference when installing on an offline host, +is that instead of pulling the UCP images from Docker Hub, you use a +computer that is connected to the internet to download a single package with +all the images. Then you copy that package to the host where you’ll install DTR. + +## Versions available + +{% include components/ddc_url_list_2.html product="dtr" version="2.6" %} + +## Download the offline package + +Use a computer with internet access to download a package with all DTR images: + +```bash +$ wget -O dtr.tar.gz +``` + +Now that you have the package in your local machine, you can transfer it to +the machines where you want to install DTR. + +For each machine where you want to install DTR: + +1. Copy the DTR package to that machine. 
+ + ```bash + $ scp dtr.tar.gz @ + ``` + +2. Use ssh to log into the hosts where you transferred the package. + +3. Load the DTR images. + + Once the package is transferred to the hosts, you can use the + `docker load` command to load the Docker images from the tar archive: + + ```bash + $ docker load -i dtr.tar.gz + ``` + +## Install DTR + +Now that the offline hosts have all the images needed to install DTR, +you can [install DTR on that host](index.md). + +### Preventing outgoing connections + +DTR makes outgoing connections to: + +* report analytics, +* check for new versions, +* check online licenses, +* update the vulnerability scanning database + +All of these uses of online connections are optional. You can choose to +disable or not use any or all of these features on the admin settings page. + +## Where to go next + +- [DTR architecture](../../architecture.md) +- [Install DTR](index.md) diff --git a/datacenter/dtr/2.6/guides/admin/install/system-requirements.md b/datacenter/dtr/2.6/guides/admin/install/system-requirements.md new file mode 100644 index 0000000000..e4f88f17fa --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/install/system-requirements.md @@ -0,0 +1,58 @@ +--- +title: Docker Trusted Registry system requirements +description: Learn about the system requirements for installing Docker Trusted Registry. +keywords: DTR, architecture, requirements +--- + +Docker Trusted Registry can be installed on-premises or on the cloud. +Before installing, be sure your infrastructure has these requirements. + +## Hardware and Software requirements + +You can install DTR on-premises or on a cloud provider. To install DTR, +all nodes must: +* Be a worker node managed by UCP (Universal Control Plane). See [Compatibility Matrix](https://success.docker.com/article/compatibility-matrix) for version compatibility. +* Have a fixed hostname. + +### Minimum requirements + +* 16GB of RAM for nodes running DTR +* 2 vCPUs for nodes running DTR +* 10GB of free disk space + +### Recommended production requirements + + * 16GB of RAM for nodes running DTR + * 4 vCPUs for nodes running DTR + * 25-100GB of free disk space + +Note that Windows container images are typically larger than Linux ones and for +this reason, you should consider provisioning more local storage for Windows +nodes and for DTR setups that will store Windows container images. + +## Ports used + +When installing DTR on a node, make sure the following ports are open on that +node: + +| Direction | Port | Purpose | +|:---------:|:--------|:--------------------------------------| +| in | 80/tcp | Web app and API client access to DTR. | +| in | 443/tcp | Web app and API client access to DTR. | + +These ports are configurable when installing DTR. + +## Compatibility and maintenance lifecycle + +Docker Enterprise Edition is a software subscription that includes three products: + +* Docker Enterprise Engine +* Docker Trusted Registry +* Docker Universal Control Plane + +[Learn more about the maintenance lifecycle for these products](https://success.docker.com/article/Compatibility_Matrix). 
+ +## Where to go next + +- [DTR architecture](../../architecture.md) +- [Install DTR](index.md) diff --git a/datacenter/dtr/2.6/guides/admin/install/uninstall.md b/datacenter/dtr/2.6/guides/admin/install/uninstall.md new file mode 100644 index 0000000000..0e8dda9ed5 --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/install/uninstall.md @@ -0,0 +1,25 @@ +--- +title: Uninstall Docker Trusted Registry +description: Learn how to uninstall your Docker Trusted Registry installation. +keywords: dtr, install, uninstall +--- + +Uninstalling DTR can be done by simply removing all data associated with each +replica. To do that, you just run the destroy command once per replica: + +```bash +docker run -it --rm \ + docker/dtr:{{ page.dtr_version }} destroy \ + --ucp-insecure-tls +``` + +You will be prompted for the UCP URL, UCP credentials, and which replica to +destroy. + +To see what options are available in the destroy command, check the +[destroy command reference documentation](/reference/dtr/2.6/cli/destroy.md). + +## Where to go next + +- [Scale your deployment](../configure/set-up-high-availability.md) +- [Install DTR](index.md) diff --git a/datacenter/dtr/2.6/guides/admin/manage-jobs/audit-jobs-via-api.md b/datacenter/dtr/2.6/guides/admin/manage-jobs/audit-jobs-via-api.md new file mode 100644 index 0000000000..5757d8e595 --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/manage-jobs/audit-jobs-via-api.md @@ -0,0 +1,177 @@ +--- +title: Audit Jobs via the API +description: Learn how Docker Trusted Registry runs batch jobs for job-related troubleshooting. +keywords: dtr, troubleshoot, audit, job logs, jobs, api +--- + + +## Overview + +This covers troubleshooting batch jobs via the API and was introduced in DTR 2.2. Starting in DTR 2.6, admins have the ability to [audit jobs](audit-jobs-via-ui.md) using the web interface. + +## Prerequisite + * [Job Queue](job-queue.md) + +### Job capacity + +Each job runner has a limited capacity and will not claim jobs that require a +higher capacity. You can see the capacity of a job runner via the +`GET /api/v0/workers` endpoint: + +```json +{ + "workers": [ + { + "id": "000000000000", + "status": "running", + "capacityMap": { + "scan": 1, + "scanCheck": 1 + }, + "heartbeatExpiration": "2017-02-18T00:51:02Z" + } + ] +} +``` + +This means that the worker with replica ID `000000000000` has a capacity of 1 +`scan` and 1 `scanCheck`. Next, review the list of available jobs: + +```json +{ + "jobs": [ + { + "id": "0", + "workerID": "", + "status": "waiting", + "capacityMap": { + "scan": 1 + } + }, + { + "id": "1", + "workerID": "", + "status": "waiting", + "capacityMap": { + "scan": 1 + } + }, + { + "id": "2", + "workerID": "", + "status": "waiting", + "capacityMap": { + "scanCheck": 1 + } + } + ] +} +``` + +If worker `000000000000` notices the jobs +in `waiting` state above, then it will be able to pick up jobs `0` and `2` since it has the capacity +for both. Job `1` will have to wait until the previous scan job, `0`, is completed. The job queue will then look like: + +```json +{ + "jobs": [ + { + "id": "0", + "workerID": "000000000000", + "status": "running", + "capacityMap": { + "scan": 1 + } + }, + { + "id": "1", + "workerID": "", + "status": "waiting", + "capacityMap": { + "scan": 1 + } + }, + { + "id": "2", + "workerID": "000000000000", + "status": "running", + "capacityMap": { + "scanCheck": 1 + } + } + ] +} +``` +You can get a list of jobs via the `GET /api/v0/jobs/` endpoint. 
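+
+For example, you can query that endpoint with curl, authenticating with
+administrator credentials the same way as other DTR API requests. A sketch,
+with placeholder credentials and hostname:
+
+```bash
+# List the jobs known to DTR; replace the placeholders with your own values.
+curl -ksL -u <user>:<password> "https://<dtr-url>/api/v0/jobs/"
+```
+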
Each job +looks like: + +```json +{ + "id": "1fcf4c0f-ff3b-471a-8839-5dcb631b2f7b", + "retryFromID": "1fcf4c0f-ff3b-471a-8839-5dcb631b2f7b", + "workerID": "000000000000", + "status": "done", + "scheduledAt": "2017-02-17T01:09:47.771Z", + "lastUpdated": "2017-02-17T01:10:14.117Z", + "action": "scan_check_single", + "retriesLeft": 0, + "retriesTotal": 0, + "capacityMap": { + "scan": 1 + }, + "parameters": { + "SHA256SUM": "1bacd3c8ccb1f15609a10bd4a403831d0ec0b354438ddbf644c95c5d54f8eb13" + }, + "deadline": "", + "stopTimeout": "" +} +``` +The JSON fields of interest here are: + +* `id`: The ID of the job +* `workerID`: The ID of the worker in a DTR replica that is running this job +* `status`: The current state of the job +* `action`: The type of job the worker will actually perform +* `capacityMap`: The available capacity a worker needs for this job to run + + +### Cron jobs + +Several of the jobs performed by DTR are run in a recurrent schedule. You can +see those jobs using the `GET /api/v0/crons` endpoint: + + +```json +{ + "crons": [ + { + "id": "48875b1b-5006-48f5-9f3c-af9fbdd82255", + "action": "license_update", + "schedule": "57 54 3 * * *", + "retries": 2, + "capacityMap": null, + "parameters": null, + "deadline": "", + "stopTimeout": "", + "nextRun": "2017-02-22T03:54:57Z" + }, + { + "id": "b1c1e61e-1e74-4677-8e4a-2a7dacefffdc", + "action": "update_db", + "schedule": "0 0 3 * * *", + "retries": 0, + "capacityMap": null, + "parameters": null, + "deadline": "", + "stopTimeout": "", + "nextRun": "2017-02-22T03:00:00Z" + } + ] +} +``` + +The `schedule` field uses a cron expression following the `(seconds) (minutes) (hours) (day of month) (month) (day of week)` format. For example, `57 54 3 * * *` with cron ID `48875b1b-5006-48f5-9f3c-af9fbdd82255` will be run at `03:54:57` on any day of the week or the month, which is `2017-02-22T03:54:57Z` in the example JSON response above. + +## Where to go next + +- [Enable auto-deletion of job logs](./auto-delete-job-logs.md) diff --git a/datacenter/dtr/2.6/guides/admin/manage-jobs/audit-jobs-via-ui.md b/datacenter/dtr/2.6/guides/admin/manage-jobs/audit-jobs-via-ui.md new file mode 100644 index 0000000000..7d5f2b261b --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/manage-jobs/audit-jobs-via-ui.md @@ -0,0 +1,66 @@ +--- +title: Audit Jobs via the Web Interface +description: View a list of jobs happening within DTR and review the detailed logs for each job. +keywords: dtr, troubleshoot, audit, job logs, jobs, ui +--- + +As of DTR 2.2, admins were able to [view and audit jobs within DTR](audit-jobs-via-api) using the API. DTR 2.6 enhances those capabilities by adding a **Job Logs** tab under **System** settings on the user interface. The tab displays a sortable and paginated list of jobs along with links to associated job logs. + +## Prerequisite + * [Job Queue](job-queue.md) + +## View Jobs List + +To view the list of jobs within DTR, do the following: + +1. Navigate to `https://`and log in with your UCP credentials. + +2. Select **System** from the left navigation pane, and then click **Job Logs**. You should see a paginated list of past, running, and queued jobs. By default, **Job Logs** shows the latest `10` jobs on the first page. + + ![](../../images/view-job-logs-1.png){: .img-fluid .with-border} + + +3. Specify a filtering option. **Job Logs** lets you filter by: + + * Action: See [Audit Jobs via the API: Job Types](job-queue/#job-types) for an explanation on the different actions or job types. 
+ + * Worker ID: The ID of the worker in a DTR replica that is responsible for running the job. + + ![](../../images/view-job-logs-2.png){: .img-fluid .with-border} + + +4. Optional: Click **Edit Settings** on the right of the filtering options to update your **Job Logs** settings. See [Enable auto-deletion of job logs](auto-delete-job-logs) for more details. + +### Job Details + +The following is an explanation of the job-related fields displayed in **Job Logs** and uses the filtered `online_gc` action from above. + +| Job Detail | Description | Example | +|:----------------|:-------------------------------------------------|:--------| +| Action | The type of action or job being performed. See [Job Types](./job-queue/#job-types) for a full list of job types. | `onlinegc` +| ID | The ID of the job. | `ccc05646-569a-4ac4-b8e1-113111f63fb9` | +| Worker | The ID of the worker node responsible for running the job. | `8f553c8b697c`| +| Status | Current status of the action or job. See [Job Status](./job-queue/#job-status) for more details. | `done` | +| Start Time | Time when the job started. | `9/23/2018 7:04 PM` | +| Last Updated | Time when the job was last updated. | `9/23/2018 7:04 PM` | +| View Logs | Links to the full logs for the job. | `[View Logs]` | + +## View Job-specific Logs + +To view the log details for a specific job, do the following: + +1. Click **View Logs** next to the job's **Last Updated** value. You will be redirected to the log detail page of your selected job. + + ![](../../images/view-job-logs-3.png){: .img-fluid .with-border} + + + Notice how the job `ID` is reflected in the URL while the `Action` and the abbreviated form of the job `ID` are reflected in the heading. Also, the JSON lines displayed are job-specific [DTR container logs](https://success.docker.com/article/how-to-check-the-docker-trusted-registry-dtr-logs). See [DTR Internal Components](../../architecture/#dtr-internal-components) for more details. + +2. Enter or select a different line count to truncate the number of lines displayed. Lines are cut off from the end of the logs. + + ![](../../images/view-job-logs-4.png){: .img-fluid .with-border} + + +## Where to go next + +- [Enable auto-deletion of job logs](./auto-delete-job-logs.md) diff --git a/datacenter/dtr/2.6/guides/admin/manage-jobs/auto-delete-job-logs.md b/datacenter/dtr/2.6/guides/admin/manage-jobs/auto-delete-job-logs.md new file mode 100644 index 0000000000..d337217b11 --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/manage-jobs/auto-delete-job-logs.md @@ -0,0 +1,50 @@ +--- +title: Enable Auto-Deletion of Job Logs +description: Enable auto-deletion of old or unnecessary job logs for maintenance. +keywords: dtr, jobs, log, job logs, system +--- + +## Overview + +Docker Trusted Registry has a global setting for auto-deletion of job logs which allows them to be removed as part of [garbage collection](../configure/garbage-collection.md). DTR admins can enable auto-deletion of repository events in DTR 2.6 based on specified conditions which are covered below. + +## Steps + +1. In your browser, navigate to `https://` and log in with your UCP credentials. + +2. Select **System** on the left navigation pane which will display the **Settings** page by default. + +3. Scroll down to **Job Logs** and turn on **Auto-Deletion**. + + ![](../../images/auto-delete-job-logs-1.png){: .img-fluid .with-border} + +4. Specify the conditions with which a job log auto-deletion will be triggered. 
+ + DTR allows you to set your auto-deletion conditions based on the following optional job log attributes: + + | Name | Description | Example | + |:----------------|:---------------------------------------------------| :----------------| + | Age | Lets you remove job logs which are older than your specified number of hours, days, weeks or months| `2 months` | + | Max number of events | Lets you specify the maximum number of job logs allowed within DTR. | `100` | + + ![](../../images/auto-delete-job-logs-2.png){: .img-fluid .with-border} + + + If you check and specify both, job logs will be removed from DTR during garbage collection if either condition is met. You should see a confirmation message right away. + +5. Click **Start Deletion** if you're ready. Read more about [garbage collection](../configure/garbage-collection/#under-the-hood) if you're unsure about this operation. + +6. Navigate to **System > Job Logs** to confirm that [**onlinegc_joblogs**](job-queue/#job-types) has started. For a detailed breakdown of individual job logs, see [View Job-specific Logs](audit-jobs-via-ui/#view-job-specific-logs) in "Audit Jobs via the Web Interface." + + +![](../../images/auto-delete-job-logs-3.png){: .img-fluid .with-border} + + +> Job Log Deletion +> +> When you enable auto-deletion of job logs, the logs will be permanently deleted during garbage collection. See [Configure logging drivers](../../../../config/containers/logging/configure/) for a list of supported logging drivers and plugins. + +## Where to go next + +- [Monitor Docker Trusted Registry](/ee/dtr/admin/monitor-and-troubleshoot/) + diff --git a/datacenter/dtr/2.6/guides/admin/manage-jobs/job-queue.md b/datacenter/dtr/2.6/guides/admin/manage-jobs/job-queue.md new file mode 100644 index 0000000000..da078bb90b --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/manage-jobs/job-queue.md @@ -0,0 +1,80 @@ +--- +title: Job Queue +description: Learn how Docker Trusted Registry runs batch jobs for troubleshooting job-related issues. +keywords: dtr, job queue, job management +--- + +Docker Trusted Registry (DTR) uses a job queue to schedule batch jobs. Jobs are added to a cluster-wide job queue, and then consumed and executed by a job runner within DTR. + +![batch jobs diagram](../../images/troubleshoot-batch-jobs-1.svg) + +All DTR replicas have access to the job queue, and have a job runner component +that can get and execute work. + +## How it works + +When a job is created, it is added to a cluster-wide job queue and enters the `waiting` state. +When one of the DTR replicas is ready to claim the job, it waits a random time of up +to `3` seconds to give every replica the opportunity to claim the task. + +A replica claims a job by adding its replica ID to the job. That way, other +replicas will know the job has been claimed. Once a replica claims a job, it adds +that job to an internal queue, which in turn sorts the jobs by their `scheduledAt` time. +Once that happens, the replica updates the job status to `running`, and +starts executing it. + +The job runner component of each DTR replica keeps a `heartbeatExpiration` +entry on the database that is shared by all replicas. If a replica becomes +unhealthy, other replicas notice the change and update the status of the failing worker to `dead`. +Also, all the jobs that were claimed by the unhealthy replica enter the `worker_dead` state, +so that other replicas can claim the job. + +## Job Types + +DTR runs periodic and long-running jobs. 
The following is a complete list of jobs you can filter for via [the user interface](../manage-jobs/audit-jobs-via-ui.md) or [the API](../manage-jobs/audit-jobs-via-api.md). + +| Job | Description | +|:------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| gc | A garbage collection job that deletes layers associated with deleted images. | +| onlinegc | A garbage collection job that deletes layers associated with deleted images without putting the registry in read-only mode. | +| onlinegc_metadata | A garbage collection job that deletes metadata associated with deleted images. | +| onlinegc_joblogs | A garbage collection job that deletes job logs based on a configured job history setting. | +| metadatastoremigration | A necessary migration that enables the `onlinegc` feature. | +| sleep | Used for testing the correctness of the jobrunner. It sleeps for 60 seconds. | +| false | Used for testing the correctness of the jobrunner. It runs the `false` command and immediately fails. | +| tagmigration | Used for synchronizing tag and manifest information between the DTR database and the storage backend. | +| bloblinkmigration | A DTR 2.1 to 2.2 upgrade process that adds references for blobs to repositories in the database. | +| license_update | Checks for license expiration extensions if online license updates are enabled. | +| scan_check | An image security scanning job. This job does not perform the actual scanning, rather it spawns `scan_check_single` jobs (one for each layer in the image). Once all of the `scan_check_single` jobs are complete, this job will terminate. | +| scan_check_single | A security scanning job for a particular layer given by the `parameter: SHA256SUM`. This job breaks up the layer into components and checks each component for vulnerabilities. | +| scan_check_all | A security scanning job that updates all of the currently scanned images to display the latest vulnerabilities. | +| update_vuln_db | A job that is created to update DTR's vulnerability database. It uses an Internet connection to check for database updates through `https://dss-cve-updates.docker.com/` and updates the `dtr-scanningstore` container if there is a new update available. | +| scannedlayermigration | A DTR 2.4 to 2.5 upgrade process that restructures scanned image data. | +| push_mirror_tag | A job that pushes a tag to another registry after a push mirror policy has been evaluated. | +| poll_mirror | A global cron that evaluates poll mirroring policies. | +| webhook | A job that is used to dispatch a webhook payload to a single endpoint. | +| nautilus_update_db | The old name for the `update_vuln_db` job. This may be visible on old log files. | +| ro_registry | A user-initiated job for manually switching DTR into read-only mode. | +| tag_pruning | A job for cleaning up unnecessary or unwanted repository tags which can be configured by repository admins. For configuration options, see [Tag Pruning](../../user/tag-pruning). | + +## Job Status + +Jobs can have one of the following status values: + +| Status | Description | +|:----------------|:------------------------------------------------------------------------------------------------------------------------------------------| +| waiting | Unclaimed job waiting to be picked up by a worker. 
| +| running | The job is currently being run by the specified `workerID`. | +| done | The job has successfully completed. | +| error | The job has completed with errors. | +| cancel_request | The status of a job is monitored by the worker in the database. If the job status changes to `cancel_request`, the job is canceled by the worker. | +| cancel | The job has been canceled and was not fully executed. | +| deleted | The job and its logs have been removed. | +| worker_dead | The worker for this job has been declared `dead` and the job will not continue. | +| worker_shutdown | The worker that was running this job has been gracefully stopped. | +| worker_resurrection | The worker for this job has reconnected to the database and will cancel this job. | + +## Where to go next + +- [Audit Jobs via Web Interface](audit-jobs-via-ui) +- [Audit Jobs via API](audit-jobs-via-api) diff --git a/datacenter/dtr/2.6/guides/admin/manage-users/create-and-manage-orgs.md b/datacenter/dtr/2.6/guides/admin/manage-users/create-and-manage-orgs.md new file mode 100644 index 0000000000..ccabf3a7a5 --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/manage-users/create-and-manage-orgs.md @@ -0,0 +1,47 @@ +--- +title: Create and manage organizations +description: Learn how to set up organizations to enforce security in Docker Trusted + Registry. +keywords: registry, security, permissions, organizations +--- + +When a user creates a repository, only that user has permissions to make changes +to the repository. + +For team workflows, where multiple users have permissions to manage a set of +common repositories, create an organization. By default, DTR has one +organization called 'docker-datacenter', that is shared between DTR and UCP. + +To create a new organization, navigate to the **DTR web UI**, and go to the +**Organizations** page. + +![](../../images/create-and-manage-orgs-1.png){: .with-border} + +Click the **New organization** button, and choose a meaningful name for the +organization. + +![](../../images/create-and-manage-orgs-2.png){: .with-border} + +Repositories owned by this organization will contain the organization name, so +to pull an image from that repository, you'll use: + +```bash +docker pull //: +``` + +Click **Save** to create the organization, and then **click the organization** +to define which users are allowed to manage this +organization. These users will be able to edit the organization settings, edit +all repositories owned by the organization, and define the user permissions for +this organization. + +For this, click the **Add user** button, **select the users** that you want to +grant permissions to manage the organization, and click +**Save**. Then change their permissions from 'Member' to **Org Owner**. + +![](../../images/create-and-manage-orgs-3.png){: .with-border} + +## Where to go next + +- [Create and manage users](create-and-manage-users.md) +- [Create and manage teams](create-and-manage-teams.md) diff --git a/datacenter/dtr/2.6/guides/admin/manage-users/create-and-manage-teams.md b/datacenter/dtr/2.6/guides/admin/manage-users/create-and-manage-teams.md new file mode 100644 index 0000000000..3ffaddc48d --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/manage-users/create-and-manage-teams.md @@ -0,0 +1,62 @@ +--- +title: Create and manage teams in DTR +description: Learn how to manage teams to enforce fine-grain access control in Docker + Trusted Registry. 
+keywords: registry, security, permissions, teams +--- + +You can extend a user's default permissions by granting them individual +permissions in other image repositories, by adding the user to a team. A team +defines the permissions a set of users have for a set of repositories. + +To create a new team, go to the **DTR web UI**, and navigate to the +**Organizations** page. +Then **click the organization** where you want to create the team. In this +example, we'll create the 'billing' team under the 'whale' organization. + +![organization](../../images/create-and-manage-teams-1.png){: .with-border} + +Click '**+**' to create a new team, and give it a name. + +![create team](../../images/create-and-manage-teams-2.png){: .with-border} + +## Add users to a team + +Once you have created a team, **click the team** name, to manage its settings. +The first thing we need to do is add users to the team. Click the **Add user** +button and add users to the team. + +![add users](../../images/create-and-manage-teams-3.png){: .with-border} + +## Manage team permissions + +The next step is to define the permissions this team has for a set of +repositories. Navigate to the **Repositories** tab, and click the +**Add repository** button. + +![manage permissions](../../images/create-and-manage-teams-4.png){: .with-border} + +Choose the repositories this team has access to, and what permission levels the +team members have. + +![](../../images/create-and-manage-teams-5.png){: .with-border} + +There are three permission levels available: + +| Permission level | Description | +|:-----------------|:-----------------------------------------------------------------| +| Read only | View repository and pull images. | +| Read & Write | View repository, pull and push images. | +| Admin | Manage repository and change its settings, pull and push images. | + +## Delete a team + +If you're an organization owner, you can delete a team in that organization. +Navigate to the **Team**, choose the **Settings** tab, and click **Delete**. + +![delete team](../../images/create-and-manage-teams-6.png){: .with-border} + +## Where to go next + +- [Create and manage users](create-and-manage-users.md) +- [Permission levels](permission-levels.md) diff --git a/datacenter/dtr/2.6/guides/admin/manage-users/create-and-manage-users.md b/datacenter/dtr/2.6/guides/admin/manage-users/create-and-manage-users.md new file mode 100644 index 0000000000..e037ed87d2 --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/manage-users/create-and-manage-users.md @@ -0,0 +1,29 @@ +--- +title: Create and manage users in DTR +description: Learn how to manage user permissions in Docker Trusted Registry. +keywords: registry, security, permissions, users +--- + +When using the built-in authentication, you can create users +to grant them fine-grained permissions. + +Users are shared across UCP and DTR. When you create a new user in +Docker Universal Control Plane, that user becomes available in DTR and vice +versa. + +To create a new user, go to the **DTR web UI**, and navigate to the **Users** +page. + +![](../../images/create-manage-users-1.png){: .with-border} + +Click the **New user** button, and fill-in the user information. + +![](../../images/create-manage-users-2.png){: .with-border} + +Check the **Trusted Registry admin** option, if you want to grant permissions +for the user to be a UCP and DTR administrator. 
+ +## Where to go next + +- [Authentication and authorization](index.md) +- [Create and manage teams](create-and-manage-teams.md) diff --git a/datacenter/dtr/2.6/guides/admin/manage-users/index.md b/datacenter/dtr/2.6/guides/admin/manage-users/index.md new file mode 100644 index 0000000000..306ffb8bb2 --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/manage-users/index.md @@ -0,0 +1,56 @@ +--- +title: Authentication and authorization in DTR +description: Learn about the permission levels available on Docker Trusted Registry. +keywords: registry, security, permissions, users +--- + +With DTR you get to control which users have access to your image repositories. + +By default, anonymous users can only pull images from public repositories. +They can't create new repositories or push to existing ones. +You can then grant permissions to enforce fine-grained access control to image +repositories. For that: + +* Start by creating a user. + + Users are shared across UCP and DTR. When you create a new user in + Docker Universal Control Plane, that user becomes available in DTR and vice + versa. Registered users can create and manage their own repositories. + + You can also integrate with an LDAP service to manage users from a single + place. + +* Extend the permissions by adding the user to a team. + + To extend a user's permission and manage their permissions over repositories, + you add the user to a team. + A team defines the permissions users have for a set of repositories. + + +## Organizations and teams + +When a user creates a repository, only that user can make changes to the +repository settings, and push new images to it. + +Organizations take permission management one step further, since they allow +multiple users to own and manage a common set of repositories. This +is useful when implementing team workflows. With organizations you can +delegate the management of a set of repositories and user permissions to the +organization administrators. + +An organization owns a set of repositories, and defines a set of teams. With +teams you can define fine-grain permissions that a team of +user has for a set of repositories. + +![](../../images/authentication-authorization-1.svg) + +In this example, the 'Whale' organization has three repositories and two teams: + +* Members of the blog team can only see and pull images from the whale/java +repository, +* Members of the billing team can manage the whale/golang repository, and push +and pull images from the whale/java repository. + +## Where to go next + +- [Create and manage users](create-and-manage-users.md) diff --git a/datacenter/dtr/2.6/guides/admin/manage-users/permission-levels.md b/datacenter/dtr/2.6/guides/admin/manage-users/permission-levels.md new file mode 100644 index 0000000000..50e60d7929 --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/manage-users/permission-levels.md @@ -0,0 +1,52 @@ +--- +title: Permission levels in DTR +description: Learn about the permission levels available in Docker Trusted Registry. +keywords: registry, security, permissions +--- + +Docker Trusted Registry allows you to define fine-grain permissions over image +repositories. + +## Administrators + +Users are shared across UCP and DTR. When you create a new user in Docker +Universal Control Plane, that user becomes available in DTR and vice versa. 
+When you create a trusted admin in DTR, the admin has permissions to manage: + +* Users across UCP and DTR +* DTR repositories and settings +* UCP resources and settings + +## Team permission levels + +Teams allow you to define the permissions a set of user has for a set of +repositories. Three permission levels are available: + +| Repository operation | read | read-write | admin | +|:----------------------|:----:|:----------:|:-----:| +| View/ browse | x | x | x | +| Pull | x | x | x | +| Push | | x | x | +| Start a scan | | x | x | +| Delete tags | | x | x | +| Edit description | | | x | +| Set public or private | | | x | +| Manage user access | | | x | +| Delete repository | | | x | + +Team permissions are additive. When a user is a member of multiple teams, they +have the highest permission level defined by those teams. + +## Overall permissions + +Here's an overview of the permission levels available in DTR: + +* Anonymous or unauthenticated Users: Can search and pull public repositories. +* Authenticated Users: Can search and pull public repos, and create and manage their own repositories. +* Team Member: Everything a user can do, plus the permissions granted by the team the user is a member of. +* Organization Owner: Can manage repositories and teams for the organization. +* Admin: Can manage anything across UCP and DTR. + +## Where to go next + +- [Authentication and authorization](index.md) diff --git a/datacenter/dtr/2.6/guides/admin/manage-webhooks/index.md b/datacenter/dtr/2.6/guides/admin/manage-webhooks/index.md new file mode 100644 index 0000000000..94256f6dea --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/manage-webhooks/index.md @@ -0,0 +1,33 @@ +--- +title: Manage webhooks +description: Learn how to create, configure, and test webhooks in Docker Trusted Registry. +keywords: registry, webhooks +--- + +You can configure DTR to automatically post event notifications to a webhook URL of your choosing. This lets you build complex CI and CD pipelines with your Docker images. The following is a complete list of event types you can trigger webhook notifications for via the [web interface](use-the-web-ui) or the [API](use-the-API). 
+ +## Webhook types + +| Event Type | Scope | Access Level | Availability | +| --------------------------------------- | ----------------------- | ---------------- | ------------ | +| Tag pushed to repository (`TAG_PUSH`) | Individual repositories | Repository admin | Web UI & API | +| Tag pulled from repository (`TAG_PULL`) | Individual repositories | Repository admin | Web UI & API | +| Tag deleted from repository (`TAG_DELETE`) | Individual repositories | Repository admin | Web UI & API | +| Manifest pushed to repository (`MANIFEST_PUSH`) | Individual repositories | Repository admin | Web UI & API | +| Manifest pulled from repository (`MANIFEST_PULL`) | Individual repositories | Repository admin | Web UI & API | +| Manifest deleted from repository (`MANIFEST_DELETE`) | Individual repositories | Repository admin | Web UI & API | +| Security scan completed (`SCAN_COMPLETED`) | Individual repositories | Repository admin | Web UI & API | +| Security scan failed (`SCAN_FAILED`) | Individual repositories | Repository admin | Web UI & API | +| Image promoted from repository (`PROMOTION`) | Individual repositories | Repository admin | Web UI & API | +| Image mirrored from repository (`PUSH_MIRRORING`) | Individual repositories | Repository admin | Web UI & API | +| Image mirrored from remote repository (`POLL_MIRRORING`) | Individual repositories | Repository admin | Web UI & API | +| Repository created, updated, or deleted (`REPO_CREATED`, `REPO_UPDATED`, and `REPO_DELETED`) | Namespaces / Organizations | Namespace / Org owners | API Only | +| Security scanner update completed (`SCANNER_UPDATE_COMPLETED`) | Global | DTR admin | API only | + +You must have admin privileges to a repository or namespace in order to +subscribe to its webhook events. For example, a user must be an admin of repository "foo/bar" to subscribe to its tag push events. A DTR admin can subscribe to any event. + +## Where to go next + +- [Manage webhooks via the web interface](use-the-web-ui) +- [Manage webhooks via the the API](use-the-api) diff --git a/datacenter/dtr/2.6/guides/admin/manage-webhooks/use-the-api.md b/datacenter/dtr/2.6/guides/admin/manage-webhooks/use-the-api.md new file mode 100644 index 0000000000..f710fc1c07 --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/manage-webhooks/use-the-api.md @@ -0,0 +1,311 @@ +--- +title: Manage webhooks via the API +description: Learn how to create, configure, and test webhooks for DTR using the API. +keywords: dtr, webhooks, api, registry +--- + +## Prerequisite + +See [Webhook types](/ee/dtr/admin/manage-webhooks/index.md/#webhook-types) for a list of events you can trigger notifications for via the API. + +## API Base URL + +Your DTR hostname serves as the base URL for your API requests. + +## Swagger API explorer + +From the DTR web interface, click **API** on the bottom left navigation pane to explore the API resources and endpoints. Click **Execute** to send your API request. + +## API requests via curl + +You can use [curl](https://curl.haxx.se/docs/manpage.html) to send HTTP or HTTPS API requests. Note that you will have to specify `skipTLSVerification: true` on your request in order to test the webhook endpoint over HTTP. 
+ +### Example curl request + +```bash +curl -u test-user:$TOKEN -X POST "https://dtr-example.com/api/v0/webhooks" -H "accept: application/json" -H "content-type: application/json" -d "{ \"endpoint\": \"https://webhook.site/441b1584-949d-4608-a7f3-f240bdd31019\", \"key\": \"maria-testorg/lab-words\", \"skipTLSVerification\": true, \"type\": \"TAG_PULL\"}" +``` + +### Example JSON response + +```json +{ + "id": "b7bf702c31601efb4796da59900ddc1b7c72eb8ca80fdfb1b9fecdbad5418155", + "type": "TAG_PULL", + "key": "maria-testorg/lab-words", + "endpoint": "https://webhook.site/441b1584-949d-4608-a7f3-f240bdd31019", + "authorID": "194efd8e-9ee6-4d43-a34b-eefd9ce39087", + "createdAt": "2019-05-22T01:55:20.471286995Z", + "lastSuccessfulAt": "0001-01-01T00:00:00Z", + "inactive": false, + "tlsCert": "", + "skipTLSVerification": true +} +``` + +## Subscribe to events + +To subscribe to events, send a `POST` request to +`/api/v0/webhooks` with the following JSON payload: + +### Example usage + +``` +{ + "type": "TAG_PUSH", + "key": "foo/bar", + "endpoint": "https://example.com" +} +``` + +The keys in the payload are: + +- `type`: The event type to subcribe to. +- `key`: The namespace/organization or repo to subscribe to. For example, "foo/bar" to subscribe to +pushes to the "bar" repository within the namespace/organization "foo". +- `endpoint`: The URL to send the JSON payload to. + +Normal users **must** supply a "key" to scope a particular webhook event to +a repository or a namespace/organization. DTR admins can choose to omit this, +meaning a POST event notification of your specified type will be sent for all DTR repositories and namespaces. + +### Receive a payload + +Whenever your specified event type occurs, DTR will send a POST request to the given +endpoint with a JSON-encoded payload. The payload will always have the +following wrapper: + +``` +{ + "type": "...", + "createdAt": "2012-04-23T18:25:43.511Z", + "contents": {...} +} +``` + +- `type` refers to the event type received at the specified subscription endpoint. +- `contents` refers to the payload of the event itself. Each event is different, therefore the +structure of the JSON object in `contents` will change depending on the event +type. See [Content structure](#content-structure) for more details. + +### Test payload subscriptions + +Before subscribing to an event, you can view and test your endpoints using +fake data. To send a test payload, send `POST` request to +`/api/v0/webhooks/test` with the following payload: + +``` +{ + "type": "...", + "endpoint": "https://www.example.com/" +} +``` + +Change `type` to the event type that you want to receive. DTR will then send +an example payload to your specified endpoint. The example +payload sent is always the same. + +## Content structure + +Comments after (`//`) are for informational purposes only, and the example payloads have been clipped for brevity. + +### Repository event content structure + +**Tag push** + +``` +{ + "namespace": "", // (string) namespace/organization for the repository + "repository": "", // (string) repository name + "tag": "", // (string) the name of the tag just pushed + "digest": "", // (string) sha256 digest of the manifest the tag points to (eg. "sha256:0afb...") + "imageName": "", // (string) the fully-qualified image name including DTR host used to pull the image (eg. 
10.10.10.1/foo/bar:tag) + "os": "", // (string) the OS for the tag's manifest + "architecture": "", // (string) the architecture for the tag's manifest + "author": "", // (string) the username of the person who pushed the tag + "pushedAt": "", // (string) JSON-encoded timestamp of when the push occurred + ... +} +``` + +**Tag delete** + +``` +{ + "namespace": "", // (string) namespace/organization for the repository + "repository": "", // (string) repository name + "tag": "", // (string) the name of the tag just deleted + "digest": "", // (string) sha256 digest of the manifest the tag points to (eg. "sha256:0afb...") + "imageName": "", // (string) the fully-qualified image name including DTR host used to pull the image (eg. 10.10.10.1/foo/bar:tag) + "os": "", // (string) the OS for the tag's manifest + "architecture": "", // (string) the architecture for the tag's manifest + "author": "", // (string) the username of the person who deleted the tag + "deletedAt": "", // (string) JSON-encoded timestamp of when the delete occurred + ... +} +``` +**Manifest push** + +``` +{ + "namespace": "", // (string) namespace/organization for the repository + "repository": "", // (string) repository name + "digest": "", // (string) sha256 digest of the manifest (eg. "sha256:0afb...") + "imageName": "", // (string) the fully-qualified image name including DTR host used to pull the image (eg. 10.10.10.1/foo/bar@sha256:0afb...) + "os": "", // (string) the OS for the manifest + "architecture": "", // (string) the architecture for the manifest + "author": "", // (string) the username of the person who pushed the manifest + ... +} +``` + +**Manifest delete** + +``` +{ + "namespace": "", // (string) namespace/organization for the repository + "repository": "", // (string) repository name + "digest": "", // (string) sha256 digest of the manifest (eg. "sha256:0afb...") + "imageName": "", // (string) the fully-qualified image name including DTR host used to pull the image (eg. 10.10.10.1/foo/bar@sha256:0afb...) + "os": "", // (string) the OS for the manifest + "architecture": "", // (string) the architecture for the manifest + "author": "", // (string) the username of the person who deleted the manifest + "deletedAt": "", // (string) JSON-encoded timestamp of when the delete occurred + ... +} +``` + +**Security scan completed** + +``` +{ + "namespace": "", // (string) namespace/organization for the repository + "repository": "", // (string) repository name + "tag": "", // (string) the name of the tag scanned + "imageName": "", // (string) the fully-qualified image name including DTR host used to pull the image (eg. 10.10.10.1/foo/bar:tag) + "scanSummary": { + "namespace": "", // (string) repository's namespace/organization name + "repository": "", // (string) repository name + "tag": "", // (string) the name of the tag just pushed + "critical": 0, // (int) number of critical issues, where CVSS >= 7.0 + "major": 0, // (int) number of major issues, where CVSS >= 4.0 && CVSS < 7 + "minor": 0, // (int) number of minor issues, where CVSS > 0 && CVSS < 4.0 + "last_scan_status": 0, // (int) enum; see scan status section + "check_completed_at": "", // (string) JSON-encoded timestamp of when the scan completed + ... 
+ } +} +``` + +**Security scan failed** + +``` +{ + "namespace": "", // (string) namespace/organization for the repository + "repository": "", // (string) repository name + "tag": "", // (string) the name of the tag scanned + "imageName": "", // (string) the fully-qualified image name including DTR host used to pull the image (eg. 10.10.10.1/foo/bar@sha256:0afb...) + "error": "", // (string) the error that occurred while scanning + ... +} +``` + +### Namespace-specific event structure + +**Repository event (created/updated/deleted)** + +``` +{ + "namespace": "", // (string) repository's namespace/organization name + "repository": "", // (string) repository name + "event": "", // (string) enum: "REPO_CREATED", "REPO_DELETED" or "REPO_UPDATED" + "author": "", // (string) the name of the user responsible for the event + "data": {} // (object) when updating or creating a repo this follows the same format as an API response from /api/v0/repositories/{namespace}/{repository} +} +``` + +### Global event structure + +**Security scanner update complete** + +``` +{ + "scanner_version": "", + "scanner_updated_at": "", // (string) JSON-encoded timestamp of when the scanner updated + "db_version": 0, // (int) newly updated database version + "db_updated_at": "", // (string) JSON-encoded timestamp of when the database updated + "success": // (bool) whether the update was successful + "replicas": { // (object) a map keyed by replica ID containing update information for each replica + "replica_id": { + "db_updated_at": "", // (string) JSON-encoded time of when the replica updated + "version": "", // (string) version updated to + "replica_id": "" // (string) replica ID + }, + ... + } +} +``` + +### Security scan status codes + + +- 0: **Failed**. An error occurred checking an image's layer +- 1: **Unscanned**. The image has not yet been scanned +- 2: **Scanning**. Scanning in progress +- 3: **Pending**: The image will be scanned when a worker is available +- 4: **Scanned**: The image has been scanned but vulnerabilities have not yet been checked +- 5: **Checking**: The image is being checked for vulnerabilities +- 6: **Completed**: The image has been fully security scanned + + +## View and manage existing subscriptions + +### View all subscriptions + +To view existing subscriptions, send a `GET` request to `/api/v0/webhooks`. As +a normal user (i.e. not a DTR admin), this will show all of your +current subscriptions across every namespace/organization and repository. As a DTR +admin, this will show **every** webhook configured for your DTR. + +The API response will be in the following format: + +``` +[ + { + "id": "", // (string): UUID of the webhook subscription + "type": "", // (string): webhook event type + "key": "", // (string): the individual resource this subscription is scoped to + "endpoint": "", // (string): the endpoint to send POST event notifications to + "authorID": "", // (string): the user ID resposible for creating the subscription + "createdAt": "", // (string): JSON-encoded datetime when the subscription was created + }, + ... +] +``` + +For more information, [view the API documentation](/reference/dtr/{{site.dtr_version}}/api/). + +### View subscriptions for a particular resource + +You can also view subscriptions for a given resource that you are an +admin of. For example, if you have admin rights to the repository +"foo/bar", you can view all subscriptions (even other people's) from a +particular API endpoint. 
These endpoints are: + +- `GET /api/v0/repositories/{namespace}/{repository}/webhooks`: View all +webhook subscriptions for a repository +- `GET /api/v0/repositories/{namespace}/webhooks`: View all webhook subscriptions for a +namespace/organization + +### Delete a subscription + +To delete a webhook subscription, send a `DELETE` request to +`/api/v0/webhooks/{id}`, replacing `{id}` with the webhook subscription ID +which you would like to delete. + +Only a DTR admin or an admin for the resource with the event subscription can delete a subscription. As a normal user, you can only +delete subscriptions for repositories which you manage. + +## Where to go next + +- [Manage jobs](/ee/dtr/admin/manage-jobs/job-queue/) diff --git a/datacenter/dtr/2.6/guides/admin/manage-webhooks/use-the-web-ui.md b/datacenter/dtr/2.6/guides/admin/manage-webhooks/use-the-web-ui.md new file mode 100644 index 0000000000..b3c8dfe26f --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/manage-webhooks/use-the-web-ui.md @@ -0,0 +1,54 @@ +--- +title: Manage repository webhooks via the web interface +description: Learn how to create, configure, and test repository webhooks for DTR using the web interface. +keywords: dtr, webhooks, ui, web interface, registry +--- + +## Prerequisites + +- You must have admin privileges to the repository in order to create a webhook. +- See [Webhook types](/ee/dtr/admin/manage-webhooks/index.md/#webhook-types) for a list of events you can trigger notifications for using the web interface. + +## Create a webhook for your repository + +1. In your browser, navigate to `https://` and log in with your credentials. + +2. Select **Repositories** from the left navigation pane, and then click on the name of the repository that you want to view. Note that you will have to click on the repository name following the `/` after the specific namespace for your repository. + +3. Select the **Webhooks** tab, and click **New Webhook**. + + ![](/ee/dtr/images/manage-webhooks-1.png){: .with-border} + +4. From the drop-down list, select the event that will trigger the webhook. +5. Set the URL which will receive the JSON payload. Click **Test** next to the **Webhook URL** field, so that you can validate that the integration is working. At your specified URL, you should receive a JSON payload for your chosen event type notification. + + ```json + { + "type": "TAG_PUSH", + "createdAt": "2019-05-15T19:39:40.607337713Z", + "contents": { + "namespace": "foo", + "repository": "bar", + "tag": "latest", + "digest": "sha256:b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c", + "imageName": "foo/bar:latest", + "os": "linux", + "architecture": "amd64", + "author": "", + "pushedAt": "2015-01-02T15:04:05Z" + }, + "location": "/repositories/foo/bar/tags/latest" + } + ``` + +6. Expand "Show advanced settings" to paste the TLS certificate associated with your webhook URL. For testing purposes, you can test over HTTP instead of HTTPS. + +7. Click **Create** to save. Once saved, your webhook is active and starts sending POST notifications whenever your chosen event type is triggered. + + ![](/ee/dtr/images/manage-webhooks-2.png){: .with-border} + +As a repository admin, you can add or delete a webhook at any point. Additionally, you can create, view, and delete webhooks for your organization or trusted registry [using the API](use-the-api). 
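+
+For instance, namespace and organization events such as `REPO_CREATED` are
+only available through the API. A sketch of subscribing to them, with
+placeholder credentials, hostname, namespace, and notification endpoint:
+
+```bash
+# Create an organization-scoped webhook; replace the placeholders with your own values.
+curl -u <user>:<token> -X POST "https://<dtr-url>/api/v0/webhooks" \
+  -H "accept: application/json" \
+  -H "content-type: application/json" \
+  -d '{"type": "REPO_CREATED", "key": "<namespace>", "endpoint": "https://example.com/dtr-events"}'
+```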
+ +## Where to go next + +- [Manage webhooks via the API](use-the-api) diff --git a/datacenter/dtr/2.6/guides/admin/monitor-and-troubleshoot/index.md b/datacenter/dtr/2.6/guides/admin/monitor-and-troubleshoot/index.md new file mode 100644 index 0000000000..4e58b544fc --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/monitor-and-troubleshoot/index.md @@ -0,0 +1,75 @@ +--- +title: Monitor Docker Trusted Registry +description: Learn how to monitor your DTR installation. +keywords: registry, monitor, troubleshoot +--- + +Docker Trusted Registry is a Dockerized application. To monitor it, you can +use the same tools and techniques you're already using to monitor other +containerized applications running on your cluster. One way to monitor +DTR is using the monitoring capabilities of Docker Universal Control Plane. + +In your browser, log in to **Docker Universal Control Plane** (UCP), and +navigate to the **Stacks** page. +If you have DTR set up for high-availability, then all the DTR replicas are +displayed. + +![](../../images/monitor-1.png){: .with-border} + +To check the containers for the DTR replica, **click the replica** you want +to inspect, click **Inspect Resource**, and choose **Containers**. + +![](../../images/monitor-2.png){: .with-border} + +Now you can drill into each DTR container to see its logs and find the root +cause of the problem. + +![](../../images/monitor-3.png){: .with-border} + +## Health check endpoints + +DTR also exposes several endpoints you can use to assess if a DTR replica +is healthy or not: + +* `/_ping`: Checks if the DTR replica is healthy, and +returns a simple json response. This is useful for load balancing or other +automated health check tasks. +* `/nginx_status`: Returns the number of connections being handled by the +NGINX front-end used by DTR. +* `/api/v0/meta/cluster_status`: Returns extensive information about all DTR +replicas. + +## Cluster status + +The `/api/v0/meta/cluster_status` [endpoint](/reference/dtr/{{ site.dtr_version +}}/api/) requires administrator credentials, and returns a JSON object for the +entire cluster as observed by the replica being queried. You can authenticate +your requests using HTTP basic auth. + +```bash +curl -ksL -u : https:///api/v0/meta/cluster_status +``` + +```json +{ + "current_issues": [ + { + "critical": false, + "description": "... some replicas are not ready. The following servers are + not reachable: dtr_rethinkdb_f2277ad178f7", + }], + "replica_health": { + "f2277ad178f7": "OK", + "f3712d9c419a": "OK", + "f58cf364e3df": "OK" + }, +} +``` + +You can find health status on the `current_issues` and `replica_health` arrays. +If this endpoint doesn't provide meaningful information when trying to +troubleshoot, try [troubleshooting using logs](troubleshoot-with-logs.md). + +## Where to go next + +- [Troubleshoot with logs](troubleshoot-with-logs.md) diff --git a/datacenter/dtr/2.6/guides/admin/monitor-and-troubleshoot/notary-audit-logs.md b/datacenter/dtr/2.6/guides/admin/monitor-and-troubleshoot/notary-audit-logs.md new file mode 100644 index 0000000000..2465734963 --- /dev/null +++ b/datacenter/dtr/2.6/guides/admin/monitor-and-troubleshoot/notary-audit-logs.md @@ -0,0 +1,267 @@ +--- +title: Check Notary audit logs +description: When you push signed images, Docker Trusted Registry keeps audit + logs for the changes made to the image metadata. Learn how to view these logs. +keywords: registry, monitor, troubleshoot +--- + +Docker Content Trust (DCT) keeps audit logs of changes made to trusted repositories. 
+Every time you push a signed image to a repository, or delete trust data for a +repository, DCT logs that information. + +These logs are only available from the DTR API. + +## Get an authentication token + +To access the audit logs you need to authenticate your requests using an +authentication token. You can get an authentication token for all repositories, +or one that is specific to a single repository. + + +
+To get an authentication token that is valid for all repositories, run:
+ +```bash +curl --insecure --silent \ +--user : \ +"https:///auth/token?realm=dtr&service=dtr&scope=registry:catalog:*" +``` + +
+To get an authentication token that is scoped to a single repository, run:
+ +```bash +curl --insecure --silent \ +--user : \ +"https:///auth/token?realm=dtr&service=dtr&scope=repository:/:pull" +``` + +
+
+ +DTR returns a JSON file with a token, even when the user doesn't have access +to the repository to which they requested the authentication token. This token +doesn't grant access to DTR repositories. + +The JSON file returned has the following structure: + + +```json +{ + "token": "", + "access_token": "", + "expires_in": "", + "issued_at": "