Merge branch 'master' into orchestration-kuberbac-665
_config.yml
@@ -13,7 +13,7 @@ safe: false
 lsi: false
 url: https://docs.docker.com
 # This needs to have all the directories you expect to be in the archives (delivered by docs-base in the Dockerfile)
-keep_files: ["v1.4", "v1.5", "v1.6", "v1.7", "v1.8", "v1.9", "v1.10", "v1.11", "v1.12", "v1.13", "v17.03", "v17.06", "v17.09", "v17.12"]
+keep_files: ["v1.4", "v1.5", "v1.6", "v1.7", "v1.8", "v1.9", "v1.10", "v1.11", "v1.12", "v1.13", "v17.03", "v17.06", "v17.09", "v17.12", "v18.03"]
 exclude: ["_scripts", "apidocs/layouts", "Gemfile", "hooks"]

 # Component versions -- address like site.docker_ce_stable_version
@@ -21,18 +21,20 @@ exclude: ["_scripts", "apidocs/layouts", "Gemfile", "hooks"]

 latest_stable_docker_engine_api_version: "1.37"
 docker_ce_stable_version: "18.03"
-docker_ce_edge_version: "18.05"
-docker_ee_version: "17.06"
+docker_ce_edge_version: "18.08"
+docker_ee_version: "2.1"
 compose_version: "1.22.0"
 machine_version: "0.14.0"
 distribution_version: "2.6"
-dtr_version: "2.5"
-ucp_version: "3.0"
+dtr_version: "2.6"
+ucp_version: "3.1"

 ucp_versions:
-  - version: "3.0"
+  - version: "3.1"
     path: /ee/ucp/
     latest: true
+  - version: "3.0"
+    path: /datacenter/ucp/3.0/guides/
   - version: "2.2"
     path: /datacenter/ucp/2.2/guides/
   - version: "2.1"
@@ -43,9 +45,11 @@ ucp_versions:
     path: /datacenter/ucp/1.1/overview/

 dtr_versions:
-  - version: "2.5"
+  - version: "2.6"
     path: /ee/dtr/
     latest: true
+  - version: "2.5"
+    path: /datacenter/dtr/2.5/guides/
   - version: "2.4"
     path: /datacenter/dtr/2.4/guides/
   - version: "2.3"
@@ -80,8 +84,7 @@ plugins:
   - jekyll-sitemap

 defaults:
-  -
-    scope:
+  - scope:
       path: ""
       type: "pages"
     values:
@@ -102,7 +105,14 @@ defaults:
     values:
       dtr_org: "docker"
       dtr_repo: "dtr"
-      dtr_version: "2.5.3"
+      dtr_version: "2.6.0"
+  - scope:
+      path: "datacenter/dtr/2.5"
+    values:
+      hide_from_sitemap: true
+      dtr_org: "docker"
+      dtr_repo: "dtr"
+      dtr_version: "2.5.0"
   - scope:
       path: "datacenter/dtr/2.4"
     values:
@@ -138,7 +148,7 @@ defaults:
     values:
      ucp_org: "docker"
      ucp_repo: "ucp"
-     ucp_version: "3.0.4"
+     ucp_version: "3.1.0"
   - scope: # This is a bit of a hack for the get-support.md topic.
       path: "ee"
     values:
@@ -148,6 +158,13 @@ defaults:
       ucp_version: "3.0.4"
       dtr_version: "2.5.0"
       dtr_latest_image: "docker/dtr:2.5.3"
+  - scope:
+      path: "datacenter/ucp/3.0"
+    values:
+      hide_from_sitemap: true
+      ucp_org: "docker"
+      ucp_repo: "ucp"
+      ucp_version: "3.0"
   - scope:
       path: "datacenter/ucp/2.2"
     values:

@@ -1,5 +1,5 @@
 - archive:
-  name: v18.03
+  name: v18.09
   image: docs/docker.github.io:latest
   current: true
 # When you make a new stable archive version, move the edge one to be second in
@@ -7,6 +7,9 @@
 - archive:
   name: edge
   image: docs/docker.github.io:latest
+- archive:
+  name: v18.03
+  image: docs/docker.github.io:v18.03
 - archive:
   name: v17.12
   image: docs/docker.github.io:v17.12

@@ -189,6 +189,8 @@ guides:
     title: Best practices for writing Dockerfiles
   - path: /develop/develop-images/baseimages/
     title: Create a base image
+  - path: /develop/develop-images/build_enhancements/
+    title: Docker build enhancements for 18.09
   - path: /develop/develop-images/multistage-build/
     title: Use multi-stage builds
   - path: /engine/reference/builder/
@@ -1560,8 +1562,14 @@ manuals:
     title: Add labels to cluster nodes
   - path: /ee/ucp/admin/configure/add-sans-to-cluster/
     title: Add SANs to cluster certificates
+  - path: /ee/ucp/admin/configure/collect-cluster-metrics/
+    title: Collect UCP cluster metrics with Prometheus
+  - path: /ee/ucp/authorization/configure-rbac-kube/
+    title: Configure native Kubernetes role-based access control
+  - path: /ee/ucp/admin/configure/create-audit-logs/
+    title: Create UCP audit logs
   - path: /ee/ucp/admin/configure/enable-saml-authentication/
     title: Enable SAML authentication
   - path: /ee/ucp/admin/configure/external-auth/
     title: Integrate with LDAP
   - path: /ee/ucp/admin/configure/external-auth/enable-ldap-config-file/
@@ -2406,6 +2414,8 @@ manuals:
     title: Create and manage webhooks
   - title: Manage access tokens
     path: /ee/dtr/user/access-tokens/
+  - title: Tag pruning
+    path: /ee/dtr/user/tag-pruning/
   - title: API reference
     path: /reference/dtr/2.5/api/
     nosync: true

@@ -0,0 +1,157 @@
---
title: Build Enhancements for Docker
description: Learn the new features of Docker Build
keywords: build, security, engine, secret, buildkit
---

> Beta disclaimer
>
> This is beta content. It is not yet complete and should be considered a work in progress. This content is subject to change without notice.

Docker Build is one of the most used features of the Docker Engine: developers, build teams, and release teams all use Docker Build.

The Docker Build enhancements in the 18.09 release introduce a much-needed overhaul of the build architecture. By integrating BuildKit, users should see improvements in performance, storage management, feature functionality, and security.

* Docker images created with BuildKit can be pushed to Docker Hub and DTR just like Docker images created with the legacy builder
* The Dockerfile format that works with the legacy builder also works with BuildKit builds
* The new `--secret` command line option allows the user to pass secret information for building new images with a specified Dockerfile

For more information on build options, see the reference guide on the [command line build options](../../engine/reference/commandline/build/).

## Requirements

* System requirements are docker-ce x86_64, ppc64le, s390x, aarch64, or armhf; or docker-ee x86_64 only
* A network connection is required for downloading images of custom frontends

## Limitations

* BuildKit mode is incompatible with UCP and Swarm Classic
* Only supported on Linux

## To enable BuildKit builds

The easiest way, from a fresh install of Docker, is to set the `DOCKER_BUILDKIT=1` environment variable when invoking the `docker build` command, such as:

```
$ DOCKER_BUILDKIT=1 docker build .
```

To enable BuildKit by default, set the `buildkit` feature to `true` in the daemon configuration file `/etc/docker/daemon.json` and restart the daemon:

```
{ "features": { "buildkit": true } }
```
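
After changing `daemon.json`, the daemon needs a restart to pick up the setting. A minimal sketch, assuming a systemd-based host:

```
$ sudo systemctl restart docker
```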

## New Docker Build command line build output

New docker build BuildKit TTY output (default):
```
$ docker build .
[+] Building 70.9s (34/59)
 => [runc 1/4] COPY hack/dockerfile/install/install.sh ./install.sh 14.0s
 => [frozen-images 3/4] RUN /download-frozen-image-v2.sh /build buildpa 24.9s
 => [containerd 4/5] RUN PREFIX=/build/ ./install.sh containerd 37.1s
 => [tini 2/5] COPY hack/dockerfile/install/install.sh ./install.sh 4.9s
 => [vndr 2/4] COPY hack/dockerfile/install/vndr.installer ./ 1.6s
 => [dockercli 2/4] COPY hack/dockerfile/install/dockercli.installer ./ 5.9s
 => [proxy 2/4] COPY hack/dockerfile/install/proxy.installer ./ 15.7s
 => [tomlv 2/4] COPY hack/dockerfile/install/tomlv.installer ./ 12.4s
 => [gometalinter 2/4] COPY hack/dockerfile/install/gometalinter.install 25.5s
 => [vndr 3/4] RUN PREFIX=/build/ ./install.sh vndr 33.2s
 => [tini 3/5] COPY hack/dockerfile/install/tini.installer ./ 6.1s
 => [dockercli 3/4] RUN PREFIX=/build/ ./install.sh dockercli 18.0s
 => [runc 2/4] COPY hack/dockerfile/install/runc.installer ./ 2.4s
 => [tini 4/5] RUN PREFIX=/build/ ./install.sh tini 11.6s
 => [runc 3/4] RUN PREFIX=/build/ ./install.sh runc 23.4s
 => [tomlv 3/4] RUN PREFIX=/build/ ./install.sh tomlv 9.7s
 => [proxy 3/4] RUN PREFIX=/build/ ./install.sh proxy 14.6s
 => [dev 2/23] RUN useradd --create-home --gid docker unprivilegeduser 5.1s
 => [gometalinter 3/4] RUN PREFIX=/build/ ./install.sh gometalinter 9.4s
 => [dev 3/23] RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.ba 4.3s
 => [dev 4/23] RUN echo source /usr/share/bash-completion/bash_completion 2.5s
 => [dev 5/23] RUN ln -s /usr/local/completion/bash/docker /etc/bash_comp 2.1s
```

New docker build BuildKit plain output:
```
$ docker build --progress=plain .

#1 [internal] load .dockerignore
#1 digest: sha256:d0b5f1b2d994bfdacee98198b07119b61cf2442e548a41cf4cd6d0471a627414
#1 name: "[internal] load .dockerignore"
#1 started: 2018-08-31 19:07:09.246319297 +0000 UTC
#1 completed: 2018-08-31 19:07:09.246386115 +0000 UTC
#1 duration: 66.818µs
#1 started: 2018-08-31 19:07:09.246547272 +0000 UTC
#1 completed: 2018-08-31 19:07:09.260979324 +0000 UTC
#1 duration: 14.432052ms
#1 transferring context: 142B done


#2 [internal] load Dockerfile
#2 digest: sha256:2f10ef7338b6eebaf1b072752d0d936c3d38c4383476a3985824ff70398569fa
#2 name: "[internal] load Dockerfile"
#2 started: 2018-08-31 19:07:09.246331352 +0000 UTC
#2 completed: 2018-08-31 19:07:09.246386021 +0000 UTC
#2 duration: 54.669µs
#2 started: 2018-08-31 19:07:09.246720773 +0000 UTC
#2 completed: 2018-08-31 19:07:09.270231987 +0000 UTC
#2 duration: 23.511214ms
#2 transferring dockerfile: 9.26kB done
```

## Overriding default frontends

To override the default frontend, set the first line of the Dockerfile as a comment with a specific frontend image:
```
# syntax = <frontend image>, e.g. # syntax = tonistiigi/dockerfile:secrets20180808
```

## New Docker Build secret information

The new `--secret` flag for docker build allows the user to pass secret information to be used in the Dockerfile for building docker images in a safe way that does not end up stored in the final image.

`id` is the identifier to pass into the `docker build --secret`. This identifier is associated with the `RUN --mount` identifier to use in the Dockerfile. Docker does not use the filename of where the secret is kept outside of the Dockerfile, since this may be sensitive information.

`dst` renames the secret file to a specific file in the Dockerfile `RUN` command to use.

For example, with a secret piece of information stored in a text file:

```
$ echo 'WARMACHINEROX' > mysecret.txt
```

And with a Dockerfile that specifies use of a BuildKit frontend `dockerfile:secrets20180828`, the secret can be accessed.

For example:
```
# syntax = dockerfile:secrets20180828
FROM alpine
RUN --mount=type=secret,id=mysecret cat /run/secrets/mysecret # shows secret from default secret location
RUN --mount=type=secret,id=mysecret,dst=/foobar cat /foobar # shows secret from custom secret location
```

This Dockerfile only demonstrates that the secret can be accessed. As you can see, the secret is printed in the build output. The final image does not contain the secret file:

```
$ docker build --no-cache --progress=plain --secret id=mysecret,src=mysecret.txt .
...
#8 [2/3] RUN --mount=type=secret,id=mysecret cat /run/secrets/mysecret
#8 digest: sha256:5d8cbaeb66183993700828632bfbde246cae8feded11aad40e524f54ce7438d6
#8 name: "[2/3] RUN --mount=type=secret,id=mysecret cat /run/secrets/mysecret"
#8 started: 2018-08-31 21:03:30.703550864 +0000 UTC
#8 1.081 WARMACHINEROX
#8 completed: 2018-08-31 21:03:32.051053831 +0000 UTC
#8 duration: 1.347502967s


#9 [3/3] RUN --mount=type=secret,id=mysecret,dst=/foobar cat /foobar
#9 digest: sha256:6c7ebda4599ec6acb40358017e51ccb4c5471dc434573b9b7188143757459efa
#9 name: "[3/3] RUN --mount=type=secret,id=mysecret,dst=/foobar cat /foobar"
#9 started: 2018-08-31 21:03:32.052880985 +0000 UTC
#9 1.216 WARMACHINEROX
#9 completed: 2018-08-31 21:03:33.523282118 +0000 UTC
#9 duration: 1.470401133s
...
```
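
As a quick sanity check (a sketch; the `secret-demo` tag name is an assumption, not part of the example above), the final image can be inspected to confirm the secret file is gone:

```
$ DOCKER_BUILDKIT=1 docker build -t secret-demo --secret id=mysecret,src=mysecret.txt .
$ docker run --rm secret-demo cat /run/secrets/mysecret
cat: can't open '/run/secrets/mysecret': No such file or directory
```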

@@ -2,41 +2,36 @@
 title: Garbage collection
 description: Save disk space by configuring the garbage collection settings in
   Docker Trusted Registry
-keywords: registry, garbage collection, gc, space, disk space
+keywords: registry, online garbage collection, gc, space, disk space
 ---

-You can configure Docker Trusted Registry to automatically delete unused image
+> BETA DISCLAIMER
+>
+> This is beta content. It is not yet complete and should be considered a work in progress. This content is subject to change without notice.
+
+You can configure the Docker Trusted Registry (DTR) to automatically delete unused image
 layers, thus saving you disk space. This process is also known as garbage collection.

 ## How DTR deletes unused layers

 First you configure DTR to run a garbage collection job on a fixed schedule. At
-the scheduled time DTR:
+the scheduled time, DTR:

-2. Identifies and marks unused image layers.
-3. Deletes the marked image layers.
+1. Identifies and marks unused image layers.
+2. Deletes the marked image layers.

 By default, when the garbage collection job starts DTR is put in read-only mode.
+Starting in DTR 2.5, we introduced an experimental feature which lets you run garbage collection jobs
+without putting DTR in read-only mode. As of v2.6, online garbage collection is no longer in
+experimental mode. This means that the registry no longer has to be in read-only mode (or offline)
+during garbage collection.

-Starting in DTR 2.5, you can configure DTR to run garbage collection jobs
-without putting DTR in read-only. This feature is still experimental.
-
-To enable this, navigate to the **DTR web UI**, go to **Settings** and
-choose **Garbage collection**.
-
-{: .with-border}
-
-Once enabled this setting can't be changed back. The upgrade process might
-take a while depending on the amount of Docker images that you have stored.
-
-During this upgrade users can still push and pull images from DTR, but
-the garbage collection job will be temporarily disabled.

 ## Schedule garbage collection

-Navigate to the **Settings** page, and choose **Garbage collection**.
+In your browser, navigate to `https://<dtr-url>` and log in with your credentials. Select **System** on the left navigation pane, and then click
+the **Garbage collection** tab to schedule garbage collection.

-{: .with-border}
+{: .with-border}

 Select for how long the garbage collection job should run:
 * Until done: Run the job until all unused image layers are deleted.
@@ -44,53 +39,70 @@ Select for how long the garbage collection job should run:
   at a time.
 * Never: Never delete unused image layers.

-Once you select for how long to run the garbage collection job, you can
-configure its schedule (in UTC time) using the cron format.
+If you select *Until done* or *For x minutes*, you can specify a recurring schedule in UTC (Coordinated Universal Time) with the following options:
+* Custom cron schedule - (Hour, Day of Month, Month, Weekday)
+* Daily at midnight UTC
+* Every Saturday at 1am UTC
+* Every Sunday at 1am UTC
+* Do not repeat
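
For reference, assuming the usual cron field order (minute, hour, day of month, month, weekday), the "Every Saturday at 1am UTC" preset corresponds to a cron expression like:

```
0 1 * * 6
```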

-{: .with-border}
+{: .with-border}

-Once everything is configured you can chose to **Save & start** to immediately
-run the garbage collection job, or just **Save** to run the job on the next
+Once everything is configured you can choose to **Save & Start** to
+run the garbage collection job immediately, or just **Save** to run the job on the next
 scheduled interval.

-## Stop the garbage collection job
+## Review the garbage collection job log

-Once the garbage collection job starts running, a banner is displayed on the
-web UI explaining that users can't push images. If you're an administrator, you can click the banner to stop the garbage
-collection job.
+In v2.5, you were notified with a banner under main navigation that no one can push images while a garbage collection job is running. With v2.6, this is no longer the case since garbage collection now happens while DTR is online and writable.

-{: .with-border}
+If you clicked **Save & Start** previously, verify that the garbage collection routine started by navigating to **Job Logs**.

+{: .with-border}

 ## Under the hood

 Each image stored in DTR is made up of multiple files:

-* A list of image layers that represent the image filesystem.
+* A list of image layers that are unioned to represent the image filesystem
 * A configuration file that contains the architecture of the image and other
-  metadata.
+  metadata
 * A manifest file containing the list of all layers and configuration file for
-  an image.
+  an image

-All these files are stored in a content-addressable way in which the name of
-the file is the result of hashing the file's content. This means that if two
-image tags have exactly the same content, DTR only stores the image content
-once, even if the tag name is different.
+All these files are tracked in DTR's metadata store in RethinkDB. These files
+are tracked in a content-addressable way such that a file corresponds to
+a cryptographic hash of the file's content. This means that if two image tags hold exactly the same content,
+DTR only stores the image content once, even if the tag names differ; the
+cryptographic hashes also make collisions practically impossible.

 As an example, if `wordpress:4.8` and `wordpress:latest` have the same content,
-they will only be stored once. If you delete one of these tags, the other won't
+the content will only be stored once. If you delete one of these tags, the other won't
 be deleted.
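
You can observe the same content-addressing on any Docker host (a sketch using the tags from the example above; output depends on what the tags currently point to):

```
$ docker pull wordpress:4.8
$ docker pull wordpress:latest
$ docker images --digests wordpress
# Tags that hold identical content report the same DIGEST value.
```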

-This means that when users delete an image tag, DTR can't delete the underlying
-files of that image tag since it's possible that there are other tags that
-also use the same files.
+This means that when you delete an image tag, DTR cannot delete the underlying
+files of that image tag since other tags may also use the same files.

-To delete unused image layers, DTR:
-1. Becomes read-only to make sure that no one is able to push an image, thus
-   changing the underlying files in the filesystem.
-2. Checks all the manifest files and keeps a record of the files that are
-   referenced.
-3. If a file is never referenced, that means that no image tag uses it, so it
-   can be safely deleted.
+To facilitate online garbage collection, DTR makes a couple of changes to how it uses the storage
+backend:
+1. Layer links – the references within repository directories to
+   their associated blobs – are no longer in the storage backend. That is because DTR stores these references in RethinkDB instead to enumerate through
+   them during the marking phase of garbage collection.
+
+2. Any layers created after an upgrade to 2.6 are no longer content-addressed in
+   the storage backend. Many cloud provider backends do not give the sequential
+   consistency guarantees required to deal with the simultaneous deleting and
+   re-pushing of a layer in a predictable manner. To account for this, DTR assigns
+   each newly pushed layer a unique ID and performs the translation from content hash
+   to ID in RethinkDB.
+
+To delete unused files, DTR does the following:
+1. Establishes a cutoff time
+2. Marks each referenced manifest file with a timestamp. When manifest files
+   are pushed to DTR, they are also marked with a timestamp
+3. Sweeps each manifest file that does not have a timestamp after the cutoff time
+4. If a file is never referenced – which means no image tag uses it – deletes the file
+5. Repeats the process for blob links and blob descriptors.

 ## Where to go next

[Binary image diffs: three PNG screenshots updated, six PNG screenshots added]

@@ -0,0 +1,74 @@
---
title: Tag Pruning
description: Skip the management headache of deciding which tags to delete or preserve by configuring a tag pruning policy or enforcing a tag limit per repository in the Docker Trusted Registry
keywords: registry, tag pruning, tag limit, repo management
---

> BETA DISCLAIMER
>
> This is beta content. It is not yet complete and should be considered a work in progress. This content is subject to change without notice.

## Overview

Tag pruning is the process of cleaning up unnecessary or unwanted repository tags. As of v2.6, you can configure the Docker Trusted Registry (DTR) to automatically perform tag pruning on repositories that you manage by:

* specifying a tag pruning policy, or alternatively
* setting a tag limit

> Tag Pruning
>
> When run, tag pruning only deletes a tag and does not carry out any actual blob deletion. For actual blob deletions, see [Garbage Collection](../../admin/configure/garbage-collection.md).

In the following section, we cover how to specify a tag pruning policy and set a tag limit on repositories that you manage. It does not cover modifying or deleting a tag pruning policy.

## Specify a tag pruning policy

As a repository administrator, you can now add tag pruning policies on each repository that you manage. To get started, navigate to `https://<dtr-url>` and log in with your credentials.

Select **Repositories** on the left navigation pane, and then click on the name of the repository
that you want to update. Note that you will have to click on the repository name following
the `/` after the specific namespace for your repository.

{: .with-border}

Select the **Pruning** tab, and click **New pruning policy** to specify your tag pruning criteria:

{: .with-border}

DTR allows you to set your pruning triggers based on the following image attributes:

| Name | Description | Example |
|:----------------|:----------------------------------------------------|:----------------|
| Tag name | Whether the tag name equals, starts with, ends with, contains, is one of, or is not one of your specified string values | Tag name = `test` |
| Component name | Whether the image has a given component and the component name equals, starts with, ends with, contains, is one of, or is not one of your specified string values | Component name starts with `b` |
| Vulnerabilities | Whether the image has vulnerabilities – critical, major, minor, or all – and your selected vulnerability filter is greater than or equals, greater than, equals, not equals, less than or equals, or less than your specified number | Critical vulnerabilities = `3` |
| License | Whether the image uses an intellectual property license and is one of or not one of your specified words | License name = `docker` |
| Last updated at | Whether the last image update was before your specified number of hours, days, weeks, or months. For details on valid time units, see [Go's ParseDuration function](https://golang.org/pkg/time/#ParseDuration). | Last updated at: Hours = `12` |

Specify one or more image attributes to add to your pruning criteria, then choose **Prune future tags** to apply your selection to future tags or **Prune all tags** to evaluate existing tags on your repository. Upon selection, you will see a confirmation message and will be redirected to your newly updated **Pruning** tab.

{: .with-border}

If you have specified multiple pruning policies on the repository, the **Pruning** tab displays a list of your prune triggers and details on when the last tag pruning was performed based on the trigger, a toggle for deactivating or reactivating the trigger, and a **View** link for modifying or deleting your selected trigger.

{: .with-border}

All tag pruning policies on your account are evaluated every 15 minutes. Any qualifying tags are then deleted from the metadata store. If a tag pruning policy is modified or created, then the tag pruning policy for the *affected* repository is evaluated.

## Set a tag limit

In addition to pruning policies, you can also set tag limits on repositories that you manage to restrict the number of tags on a given repository. Repository tag limits are processed in a first in first out (FIFO) manner. For example, if you set a tag limit of 2, adding a third tag would push out the first.
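
A sketch of that FIFO behavior from the command line (the repository name is hypothetical and the tag limit is assumed to be 2):

```
$ docker push dtr.example.com/engineering/api:v1
$ docker push dtr.example.com/engineering/api:v2
$ docker push dtr.example.com/engineering/api:v3   # v1, the oldest tag, is pruned
```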

{: .with-border}

To set a tag limit, select the repository that you want to update and click the **Settings** tab. Specify a number in the **Pruning** section and click **Save**. The **Pruning** tab will now display your tag limit above the prune triggers list along with a link to modify this setting.

{: .with-border}

## Where to go next

- [Garbage collection](../../admin/configure/garbage-collection.md)

@@ -0,0 +1,79 @@
---
title: Create UCP audit logs
description: Learn how to create audit logs of all activity in UCP
keywords: logs, ucp, swarm, kubernetes, audits
---

> Beta disclaimer
>
> This is beta content. It is not yet complete and should be considered a work in progress. This content is subject to change without notice.

Audit logs are focused on external user/agent actions and security rather than understanding state or events of the system itself. They are a chronological record of security-relevant activities by individual users, administrators or software components that have affected the system.

Audit logs capture all HTTP actions (GET, PUT, POST, PATCH, DELETE) to all UCP API, Swarm API and Kubernetes API endpoints that are invoked (except for the ignored list) and sent to Docker Engine via stdout. Creating audit logs is CLI driven and is a UCP component that integrates with Swarm, K8s, and UCP APIs.

## Logging levels

To give administrators more control over audit logging, three audit logging levels are provided:

- None: audit logging is disabled
- Metadata: includes the following:
  - Method and API endpoint for the request
  - UCP user who made the request
  - Response status (success or failure)
  - Timestamp of the call
  - Object ID of any created or updated resource (for create or update API calls). We do not include names of created or updated resources
  - License key
  - Remote address
- Request: includes all fields from the Metadata level as well as the request payload

## Benefits

You can use audit logs to help with the following use cases:

- **Historical troubleshooting** - Audit logs are helpful in determining a sequence of past events that explain why an issue occurred.
- **Security analysis and auditing** - Security is one of the primary uses for audit logs. A full record of all user interactions with the container infrastructure gives your security team full visibility to questionable or attempted unauthorized accesses.
- **Chargeback** - You can use audit logs and information about the resources to generate chargeback information.
- **Alerting** - If there is a watch on an event stream or a notification created by the event, alerting features can be built on top of event tools that generate alerts for ops teams (PagerDuty, OpsGenie, Slack, or custom solutions).

## Procedure

1. Download the UCP client bundle. See [Download client bundle from the command line](https://success.docker.com/article/download-client-bundle-from-the-cli).

2. Retrieve the JSON for the current audit log configuration:
```
export DOCKER_CERT_PATH=~/ucp-bundle-dir/
curl --cert ${DOCKER_CERT_PATH}/cert.pem --key ${DOCKER_CERT_PATH}/key.pem --cacert ${DOCKER_CERT_PATH}/ca.pem -k -X GET https://ucp-domain/api/ucp/config/logging > auditlog.json
```
3. Modify the `auditLevel` field to `metadata` or `request`:
```
vi auditlog.json

{"logLevel":"INFO","auditLevel":"metadata","supportDumpIncludeAuditLogs":false}
```
4. Send the JSON request for the audit log configuration with the same API path but with the `PUT` method:
```
curl --cert ${DOCKER_CERT_PATH}/cert.pem --key ${DOCKER_CERT_PATH}/key.pem --cacert ${DOCKER_CERT_PATH}/ca.pem -k -H "Content-Type: application/json" -X PUT --data "$(cat auditlog.json)" https://ucp-domain/api/ucp/config/logging
```

5. Create any workload or RBAC grants in Kubernetes and generate a support dump to check the contents of the `ucp-controller.log` file for audit log entries.

6. Optionally, deploy an ELK stack, configure the Docker Engine log driver to feed Logstash, and collect and query audit logs within ELK. See [Elasticsearch, Logstash, Kibana logging](https://success.docker.com/article/elasticsearch-logstash-kibana-logging).
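
A minimal sketch of the Engine side of that last step, assuming a Logstash GELF input listening at `logstash.example.com:12201` (`gelf` is a built-in Engine log driver; restart the daemon after editing `/etc/docker/daemon.json`):

```
{
  "log-driver": "gelf",
  "log-opts": { "gelf-address": "udp://logstash.example.com:12201" }
}
```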

## API endpoints ignored

The following API endpoints are ignored since they are not considered security events and may create a large number of log entries:

- /_ping
- /ca
- /auth
- /trustedregistryca
- /kubeauth
- /metrics
- /info
- /version*
- /debug
- /openid_keys
- /apidocs
- /kubernetesdocs
- /manage

@@ -8,18 +8,16 @@ keywords: SAML, ucp, authentication, SSO, Okta, ADFS
 >
 > This is beta content. It is not yet complete and should be considered a work in progress. This content is subject to change without notice.

-Ping Identity integration requires these values:
-
-SAML is commonly supported by enterprise authentication systems. SAML-based single sign-on (SSO) gives you access to UCP through a SAML 2.0-compliant identity provider.
+SAML-based single sign-on (SSO) gives you access to UCP through a SAML 2.0-compliant identity provider. UCP supports SAML for authentication as a service provider integrated with your identity provider.

-For more information about SAML, see the [SAML XML website] (http://saml.xml.org/).
+For more information about SAML, see the [SAML XML website](http://saml.xml.org/).

 UCP supports these identity providers:

-- (Okta) [https://www.okta.com/]
-- (ADFS) [https://docs.microsoft.com/en-us/windows-server/identity/active-directory-federation-services]
+- [Okta](https://www.okta.com/)
+- [ADFS](https://docs.microsoft.com/en-us/windows-server/identity/active-directory-federation-services)

 ## Configure identity provider integration

@@ -29,8 +27,8 @@ There are values your identity provider needs for successful integration with UC

 Okta integration requires these values:

-- URL for single signon (SSO). This value is the URL for UCP, qualified with `/enzi/v0/saml/acs`. For example, `https://<^>111.111.111.111<^^>/enzi/v0/saml/acs`.
-- Service provider audience URI. This value is the URL for UCP, qualified with `/enzi/v0/saml/metadata`. For example, `https://<^>111.111.111.111<^^>/enzi/v0/saml/metadata`.
+- URL for single signon (SSO). This value is the URL for UCP, qualified with `/enzi/v0/saml/acs`. For example, `https://111.111.111.111/enzi/v0/saml/acs`.
+- Service provider audience URI. This value is the URL for UCP, qualified with `/enzi/v0/saml/metadata`. For example, `https://111.111.111.111/enzi/v0/saml/metadata`.
 - NameID format. Select Unspecified.
 - Application username: Email. For example, a custom `${f:substringBefore(user.email, "@")}` specifies the username portion of the email address.
 - Attribute Statements:

@@ -44,7 +42,7 @@ Name: `is-admin`, Filter: (user defined) for identifying if the user is an admin

 ADFS integration requires these values:

-- Service provider metadata URI. This value is the URL for UCP, qualified with `/enzi/v0/saml/metadata`. For example, `https://<^>111.111.111.111<^^>/enzi/v0/saml/metadata`.
+- Service provider metadata URI. This value is the URL for UCP, qualified with `/enzi/v0/saml/metadata`. For example, `https://111.111.111.111/enzi/v0/saml/metadata`.
 - Attribute Store: Active Directory.
 - Add LDAP Attribute = Email Address; Outgoing Claim Type: Email Address
 - Add LDAP Attribute = Display-Name; Outgoing Claim Type: Common Name

@@ -58,15 +56,15 @@ ADFS integration requires these values:

 To enable SAML authentication:

-1 Go to the UCP web UI.
+1. Go to the UCP web interface.
 2. Navigate to the **Admin Settings**.
 3. Select **Authentication & Authorization**.

-
+

 4. In the **SAML Enabled** section, select **Yes** to display the required settings.

-
+

 5. In **IdP Metadata URL** enter the URL for the identity provider's metadata.
 6. In **UCP Host** enter the URL that includes the IP address of your UCP console.

@@ -18,6 +18,20 @@ upgrade your installation to the latest release.
 * [Version 3.0](#version-30)
 * [Version 2.2](#version-22)

+# Version 3.1
+
+## Beta 1 (2018-09-11)
+
+**New Features**
+* Default address pool for Swarm is now user configurable
+* UCP now supports Kubernetes Network Encryption using IPSec
+* UCP now supports Kubernetes v1.11
+* UCP now supports Kubernetes native role-based access control
+* UCP now provides service metrics for all API calls, using Prometheus deployed as a Kubernetes DaemonSet
+* UCP now supports use of an external Prometheus instance to scrape metrics from UCP endpoints
+* UCP supports SAML authentication
+* DTR vulnerability scan data is now available through the UCP UI
+
 # Version 3.0

 ## 3.0.4 (2018-08-09)

@@ -25,8 +39,8 @@ upgrade your installation to the latest release.
 **Bug fixes**

 * Security
-  * Fixed a critical security issue where the LDAP bind username and password
-    were stored in clear text on UCP hosts. Please refer to [this KB article](https://success.docker.com/article/upgrading-to-ucp-2-2-12-ucp-3-0-4/) for proper implementation of this fix.
+  * Fixed a critical security issue where the LDAP bind username and password
+    were stored in cleartext on UCP hosts. Please refer to [this KB article](https://success.docker.com/article/upgrading-to-ucp-2-2-12-ucp-3-0-4/) for proper implementation of this fix.

 **Known Issue**

@@ -280,9 +294,9 @@ deprecated. Deploy your applications as Swarm services or Kubernetes workloads.
 **Bug fixes**

 * Security
-  * Fixed a critical security issue where the LDAP bind username and password
-    were stored in clear text on UCP hosts. Please refer to the following KB article
-    https://success.docker.com/article/upgrading-to-ucp-2-2-12-ucp-3-0-4/
+  * Fixed a critical security issue where the LDAP bind username and password
+    were stored in cleartext on UCP hosts. Please refer to the following KB article
+    https://success.docker.com/article/upgrading-to-ucp-2-2-12-ucp-3-0-4/
 for proper implementation of this fix.

 ## Version 2.2.11 (2018-07-26)

@@ -69,7 +69,7 @@ on a node depend on whether the node is a manager or a worker.
 Internally, UCP uses the following components:

 * Calico 3.0.1
-* Kubernetes 1.8.11
+* Kubernetes 1.11

 ### UCP components in manager nodes

@@ -46,6 +46,7 @@ swarm to maintain a consistent view of the swarm and all services running on it.
 swarm.
 * creates an overlay network named `ingress` for publishing service ports
   external to the swarm.
+* assigns a default IP address pool and subnet mask for your overlay networks

 The output for `docker swarm init` provides the connection command to use when
 you join new worker nodes to the swarm:

@@ -62,6 +63,38 @@ To add a worker to this swarm, run the following command:

 To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.
 ```
+### Configuring default address pools
+
+By default, Docker Swarm uses the address pool `10.0.0.0/8` for global-scope (overlay) networks. Every network that does not have a subnet specified has a subnet sequentially allocated from this pool. In some circumstances it may be desirable to use a different default IP address pool for networks. For example, if the default `10.0.0.0/8` range conflicts with address space already allocated in your network, you can ensure that networks use a different range without requiring Swarm users to specify each subnet with the `--subnet` flag.
+
+To configure custom default address pools, you must define pools at Swarm initialization using the `--default-addr-pool` flag. To create the custom address pool for Swarm, you must define at least one default address pool, and an optional default address pool subnet mask. The default address pool uses CIDR notation.
+
+Docker allocates subnet addresses from the address ranges specified by the `--default-addr-pool` options. For example, the option `--default-addr-pool 10.10.0.0/16` indicates that Docker allocates subnets from that `/16` address range. If `--default-addr-pool-mask-length` were unspecified or set explicitly to 24, this would result in 256 `/24` networks of the form `10.10.X.0/24`.
+
+The subnet range comes from the `--default-addr-pool` value (such as `10.10.0.0/16`); its prefix length determines how many subnets can be carved out of that pool. The `--default-addr-pool` option may occur multiple times, with each option providing additional addresses for Docker to use for overlay subnets.
+
+The format of the command is:
+```
+$ docker swarm init --default-addr-pool <IP range in CIDR> [--default-addr-pool <IP range in CIDR> --default-addr-pool-mask-length <CIDR value>]
+```
+For example, to create a default IP address pool with a `/16` (Class B) range for the `10.20.0.0` network:
+
+```
+$ docker swarm init --default-addr-pool 10.20.0.0/16
+```
+
+To create a default IP address pool with a `/16` (Class B) range for the `10.20.0.0` and `10.30.0.0` networks, with a subnet mask of `/26` for each network:
+
+```
+$ docker swarm init --default-addr-pool 10.20.0.0/16 --default-addr-pool 10.30.0.0/16 --default-addr-pool-mask-length 26
+```
+
+In this example, `docker network create -d overlay net1` will result in `10.20.0.0/26` as the allocated subnet for `net1`, and `docker network create -d overlay net2` will result in `10.20.0.64/26` as the allocated subnet for `net2`. This continues until all the subnets are exhausted.
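
A sketch of those commands in sequence (continuing the example above):

```
$ docker network create -d overlay net1   # allocated subnet: 10.20.0.0/26
$ docker network create -d overlay net2   # allocated subnet: 10.20.0.64/26
```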

+Refer to the following pages for more information:
+- [Swarm networking](./networking.md) for more information about the default address pool usage
+- [UCP Installation Planning](../../ee/ucp/admin/install/plan-installation.md) for more information about planning the network design before installation
+- `docker swarm init` [CLI reference](../reference/commandline/swarm_init.md) for more detail on the `--default-addr-pool` flag.

 ### Configure the advertise address

@@ -1,124 +0,0 @@
---
description: High level discussion of garbage collection
keywords: registry, garbage, images, tags, repository, distribution
title: Garbage collection
---

As of v2.4.0 a garbage collector command is included within the registry binary.
This document describes what this command does and how and why it should be used.

## About garbage collection

In the context of the Docker registry, garbage collection is the process of
removing blobs from the filesystem when they are no longer referenced by a
manifest. Blobs can include both layers and manifests.

Registry data can occupy considerable amounts of disk space. In addition,
garbage collection can be a security consideration, when it is desirable to ensure
that certain layers no longer exist on the filesystem.

## Garbage collection in practice

Filesystem layers are stored by their content address in the Registry. This
has many advantages, one of which is that data is stored once and referred to by manifests.
See [here](compatibility.md#content-addressable-storage-cas) for more details.

Layers are therefore shared amongst manifests; each manifest maintains a reference
to the layer. As long as a layer is referenced by one manifest, it cannot be garbage
collected.

Manifests and layers can be `deleted` with the registry API (refer to the API
documentation [here](spec/api.md#deleting-a-layer) and
[here](spec/api.md#deleting-an-image) for details). This API removes references
to the target and makes them eligible for garbage collection. It also makes them
unable to be read via the API.

If a layer is deleted, it is removed from the filesystem when garbage collection
is run. If a manifest is deleted the layers to which it refers are removed from
the filesystem if no other manifests refers to them.


### Example

In this example manifest A references two layers: `a` and `b`. Manifest `B` references
layers `a` and `c`. In this state, nothing is eligible for garbage collection:

```
A -----> a <----- B
    \--> b     |
         c <--/
```

Manifest B is deleted via the API:

```
A -----> a     B
    \--> b
         c
```

In this state layer `c` no longer has a reference and is eligible for garbage
collection. Layer `a` had one reference removed but not garbage
collected as it is still referenced by manifest `A`. The blob representing
manifest `B` is eligible for garbage collection.

After garbage collection has been run, manifest `A` and its blobs remain.

```
A -----> a
    \--> b
```


### More details about garbage collection

Garbage collection runs in two phases. First, in the 'mark' phase, the process
scans all the manifests in the registry. From these manifests, it constructs a
set of content address digests. This set is the 'mark set' and denotes the set
of blobs to *not* delete. Secondly, in the 'sweep' phase, the process scans all
the blobs and if a blob's content address digest is not in the mark set, the
process deletes it.


> **Note**: You should ensure that the registry is in read-only mode or not running at
> all. If you were to upload an image while garbage collection is running, there is the
> risk that the image's layers are mistakenly deleted leading to a corrupted image.

This type of garbage collection is known as stop-the-world garbage collection.

## Run garbage collection

Garbage collection can be run as follows

`bin/registry garbage-collect [--dry-run] /path/to/config.yml`

The garbage-collect command accepts a `--dry-run` parameter, which prints the progress
of the mark and sweep phases without removing any data. Running with a log level of `info`
gives a clear indication of items eligible for deletion.

The config.yml file should be in the following format:

```
version: 0.1
storage:
  filesystem:
    rootdirectory: /registry/data
```

_Sample output from a dry run garbage collection with registry log level set to `info`_

```
hello-world
hello-world: marking manifest sha256:fea8895f450959fa676bcc1df0611ea93823a735a01205fd8622846041d0c7cf
hello-world: marking blob sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb
hello-world: marking blob sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4
hello-world: marking configuration sha256:690ed74de00f99a7d00a98a5ad855ac4febd66412be132438f9b8dbd300a937d
ubuntu

4 blobs marked, 5 blobs eligible for deletion
blob eligible for deletion: sha256:28e09fddaacbfc8a13f82871d9d66141a6ed9ca526cb9ed295ef545ab4559b81
blob eligible for deletion: sha256:7e15ce58ccb2181a8fced7709e9893206f0937cc9543bc0c8178ea1cf4d7e7b5
blob eligible for deletion: sha256:87192bdbe00f8f2a62527f36bb4c7c7f4eaf9307e4b87e8334fb6abec1765bcb
blob eligible for deletion: sha256:b549a9959a664038fc35c155a95742cf12297672ca0ae35735ec027d55bf4e97
blob eligible for deletion: sha256:f251d679a7c61455f06d793e43c06786d7766c88b8c24edf242b2c08e3c3f599
```

@@ -13,12 +13,17 @@ Disallow: /v1.12/
 Disallow: /v1.13/
 Disallow: /v17.03/
 Disallow: /v17.06/
+Disallow: /v18.03/

 # Docker Datacenter archives
-Disallow: /datacenter/dtr/2.2
-Disallow: /datacenter/dtr/2.1
 Disallow: /datacenter/dtr/2.0
+Disallow: /datacenter/dtr/2.1
+Disallow: /datacenter/dtr/2.2
+Disallow: /datacenter/dtr/2.3
+Disallow: /datacenter/dtr/2.4
+Disallow: /datacenter/dtr/2.5

+Disallow: /datacenter/ucp/3.0
 Disallow: /datacenter/ucp/2.1
 Disallow: /datacenter/ucp/2.0
 Disallow: /datacenter/ucp/1.1