Merge branch 'main' into chrisward/progs-reorg

This commit is contained in:
Chris Chinchilla 2023-04-19 13:32:06 +02:00 committed by GitHub
commit 2f024f7124
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
62 changed files with 858 additions and 325 deletions

View File

@ -25,3 +25,6 @@ Swarm Mode
[Mm]oby
dockerd
dockerignore
Docker Hub Vulnerability Scanning
Docker Vulnerability Scanning
Basic vulnerability scanning

View File

@ -35,7 +35,7 @@ jobs:
fail-fast: false
matrix:
target:
- htmlproofer
- htmltest
- mdl
steps:
-
@ -70,7 +70,7 @@ jobs:
// construct annotations by parsing output
switch ("${{ matrix.target }}") {
case "htmlproofer":
case "htmltest":
const re = /^- (.+)\n \* (.+) \(line (\d+)\)\n(.+)$/gm;
while (true) {
const result = re.exec(results);
@ -79,7 +79,7 @@ jobs:
}
core.error(`${result[2]}\n${result[4]}`, {
title: 'Link check failed',
title: 'HTML test failed',
// file: result[1],
// startLine: result[3],
});

1
.gitignore vendored
View File

@ -10,3 +10,4 @@ CNAME
_kbase/**
/vendor
/lint
tmp/.htmltest/**

17
.htmltest.yml Normal file
View File

@ -0,0 +1,17 @@
DirectoryPath: "_site"
EnforceHTTPS: false
CheckDoctype: false
CheckExternal: false
IgnoreAltMissing: true
IgnoreAltEmpty: true
IgnoreEmptyHref: true
IgnoreDirectoryMissingTrailingSlash: true
IgnoreURLs:
- "^/docker-hub/api/latest/.*$"
- "^/engine/api/v.+/#.*$"
- "^/glossary/.*$"
IgnoreDirs:
- "engine/api"
- "registry/configuration"
- "compose/compose-file" # temporarily ignore until upstream is fixed
CacheExpires: "6h"

View File

@ -63,28 +63,27 @@ RUN --mount=type=bind,target=.,rw \
bundle exec jekyll build --profile -d ${TARGET} --config ${CONFIG_FILES}
EOT
# htmlproofer checks for broken links
FROM gem AS htmlproofer-base
RUN --mount=type=bind,from=generate,source=/out,target=_site <<EOF
htmlproofer ./_site \
--disable-external \
--internal-domains="docs.docker.com,docs-stage.docker.com,localhost:4000" \
--file-ignore="/^./_site/engine/api/.*$/,./_site/registry/configuration/index.html" \
--url-ignore="/^/docker-hub/api/latest/.*$/,/^/engine/api/v.+/#.*$/,/^/glossary/.*$/" > /results 2>&1
# htmltest checks for broken links
FROM wjdp/htmltest:v0.17.0 as htmltest-base
RUN --mount=type=bind,from=generate,source=/out,target=_site \
--mount=type=bind,source=.htmltest.yml,target=.htmltest.yml \
<<EOF
htmltest > /results 2>&1
rc=$?
if [[ $rc -eq 0 ]]; then
echo -n > /results
fi
EOF
FROM htmlproofer-base as htmlproofer
FROM base as htmltest
COPY --from=htmltest-base /results /results
RUN <<EOF
cat /results
[ ! -s /results ] || exit 1
EOF
FROM scratch as htmlproofer-output
COPY --from=htmlproofer-base /results /results
FROM scratch as htmltest-output
COPY --from=htmltest-base /results /results
# mdl is a lint tool for markdown files
FROM gem AS mdl-base

View File

@ -17,7 +17,6 @@ gem 'rouge', '3.27.0'
gem 'front_matter_parser', '1.0.1'
gem 'git', '1.13.0'
gem 'html-proofer', '3.19.4'
gem 'mdl', '0.11.0'
gem 'octopress-hooks', '2.6.2'
gem 'rake', '13.0.6'

View File

@ -10,8 +10,6 @@ GEM
em-websocket (0.5.3)
eventmachine (>= 0.12.9)
http_parser.rb (~> 0)
ethon (0.15.0)
ffi (>= 1.15.0)
eventmachine (1.2.7)
ffi (1.15.5)
forwardable-extended (2.6.0)
@ -19,14 +17,6 @@ GEM
git (1.13.0)
addressable (~> 2.8)
rchardet (~> 1.8)
html-proofer (3.19.4)
addressable (~> 2.3)
mercenary (~> 0.3)
nokogiri (~> 1.13)
parallel (~> 1.10)
rainbow (~> 3.0)
typhoeus (~> 1.3)
yell (~> 2.0)
http_parser.rb (0.8.0)
i18n (1.12.0)
concurrent-ruby (~> 1.0)
@ -75,20 +65,11 @@ GEM
tomlrb
mixlib-shellout (3.2.7)
chef-utils
nokogiri (1.14.3-aarch64-linux)
racc (~> 1.4)
nokogiri (1.14.3-arm-linux)
racc (~> 1.4)
nokogiri (1.14.3-x86_64-linux)
racc (~> 1.4)
octopress-hooks (2.6.2)
jekyll (>= 2.0)
parallel (1.22.1)
pathutil (0.16.2)
forwardable-extended (~> 2.6)
public_suffix (5.0.1)
racc (1.6.2)
rainbow (3.1.1)
rake (13.0.6)
rb-fsevent (0.11.2)
rb-inotify (0.10.1)
@ -102,10 +83,7 @@ GEM
terminal-table (2.0.0)
unicode-display_width (~> 1.1, >= 1.1.1)
tomlrb (2.0.3)
typhoeus (1.4.0)
ethon (>= 0.9.0)
unicode-display_width (1.8.0)
yell (2.2.2)
PLATFORMS
aarch64-linux
@ -115,7 +93,6 @@ PLATFORMS
DEPENDENCIES
front_matter_parser (= 1.0.1)
git (= 1.13.0)
html-proofer (= 3.19.4)
jekyll (= 4.2.2)
jekyll-redirect-from
jekyll-relative-links

View File

@ -256,6 +256,9 @@ fetch-remote:
- dest: "compose/compose-file/12-interpolation.md"
src:
- "12-interpolation.md"
- dest: "compose/compose-file/13-merge.md"
src:
- "13-merge.md"
- dest: "compose/compose-file/build.md"
src:
- "build.md"

View File

@ -910,7 +910,7 @@ examples: |-
#### Prerequisites
The example on this page is using experimental mode in Docker 19.03.
The example on this page is using experimental mode in Docker 23.0.3.
Experimental mode can be enabled by using the `--experimental` flag when starting
the Docker daemon or setting `experimental: true` in the `daemon.json` configuration
@ -922,21 +922,21 @@ examples: |-
```console
Client: Docker Engine - Community
Version: 19.03.8
API version: 1.40
Go version: go1.12.17
Git commit: afacb8b
Built: Wed Mar 11 01:21:11 2020
Version: 23.0.3
API version: 1.42
Go version: go1.19.7
Git commit: 3e7cbfd
Built: Tue Apr 4 22:05:41 2023
OS/Arch: darwin/amd64
Experimental: false
Context: default
Server: Docker Engine - Community
Engine:
Version: 19.03.8
API version: 1.40 (minimum version 1.12)
Go version: go1.12.17
Git commit: afacb8b
Built: Wed Mar 11 01:29:16 2020
Version: 23.0.3
API version: 1.42 (minimum version 1.12)
Go version: go1.19.7
Git commit: 59118bf
Built: Tue Apr 4 22:05:41 2023
OS/Arch: linux/amd64
Experimental: true
[...]

View File

@ -60,14 +60,11 @@ examples: |-
Debug Mode: false
Plugins:
buildx: Docker Buildx (Docker Inc.)
Version: v0.8.2
Version: v0.10.4
Path: /usr/libexec/docker/cli-plugins/docker-buildx
compose: Docker Compose (Docker Inc.)
Version: v2.6.0
Version: v2.17.2
Path: /usr/libexec/docker/cli-plugins/docker-compose
scan: Docker Scan (Docker Inc.)
Version: v0.17.0
Path: /usr/libexec/docker/cli-plugins/docker-scan
Server:
Containers: 14
@ -75,7 +72,7 @@ examples: |-
Paused: 1
Stopped: 10
Images: 52
Server Version: 22.06.0
Server Version: 23.0.3
Storage Driver: overlay2
Backing Filesystem: extfs
Supports d_type: true
@ -90,11 +87,11 @@ examples: |-
Network: bridge host ipvlan macvlan null overlay
Log: awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog
Swarm: inactive
Runtimes: io.containerd.runc.v2 io.containerd.runtime.v1.linux runc
Runtimes: io.containerd.runc.v2 runc
Default Runtime: runc
Init Binary: docker-init
containerd version: 212e8b6fa2f44b9c21b2798135fc6fb7c53efc16
runc version: v1.1.1-0-g52de29d
containerd version: 2806fc1057397dbaeefbea0e4e17bddfbd388f38
runc version: v1.1.5-0-gf19387a
init version: de40ad0
Security Options:
apparmor
@ -114,7 +111,7 @@ examples: |-
Username: gordontheturtle
Registry: https://index.docker.io/v1/
Experimental: false
Insecure registries:
Insecure Registries:
myinsecurehost:5000
127.0.0.0/8
Live Restore Enabled: false
@ -127,7 +124,7 @@ examples: |-
```console
$ docker info --format '{{json .}}'
{"ID":"I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S","Containers":14, ...}
{"ID":"4cee4408-10d2-4e17-891c-a41736ac4536","Containers":14, ...}
```
### Run `docker info` on Windows
@ -141,9 +138,12 @@ examples: |-
Context: default
Debug Mode: false
Plugins:
buildx: Docker Buildx (Docker Inc., v0.8.2-docker)
compose: Docker Compose (Docker Inc., v2.6.0)
scan: Docker Scan (Docker Inc., v0.17.0)
buildx: Docker Buildx (Docker Inc.)
Version: v0.10.4
Path: C:\Program Files\Docker\cli-plugins\docker-buildx.exe
compose: Docker Compose (Docker Inc.)
Version: v2.17.2
Path: C:\Program Files\Docker\cli-plugins\docker-compose.exe
Server:
Containers: 1
@ -151,7 +151,7 @@ examples: |-
Paused: 0
Stopped: 1
Images: 17
Server Version: 20.10.16
Server Version: 23.0.3
Storage Driver: windowsfilter
Logging Driver: json-file
Plugins:

View File

@ -143,10 +143,10 @@ examples: |-
$ docker node ls --filter node.label=region
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION
yg550ettvsjn6g6t840iaiwgb * swarm-test-01 Ready Active Leader 20.10.2
2lm9w9kbepgvkzkkeyku40e65 swarm-test-02 Ready Active Reachable 20.10.2
hc0pu7ntc7s4uvj4pv7z7pz15 swarm-test-03 Ready Active Reachable 20.10.2
n41b2cijmhifxxvz56vwrs12q swarm-test-04 Ready Active 20.10.2
yg550ettvsjn6g6t840iaiwgb * swarm-test-01 Ready Active Leader 23.0.3
2lm9w9kbepgvkzkkeyku40e65 swarm-test-02 Ready Active Reachable 23.0.3
hc0pu7ntc7s4uvj4pv7z7pz15 swarm-test-03 Ready Active Reachable 23.0.3
n41b2cijmhifxxvz56vwrs12q swarm-test-04 Ready Active 23.0.3
```
Show all nodes that have a `region` node label, with value `region-a`:
@ -155,8 +155,8 @@ examples: |-
$ docker node ls --filter node.label=region=region-a
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION
yg550ettvsjn6g6t840iaiwgb * swarm-test-01 Ready Active Leader 20.10.2
2lm9w9kbepgvkzkkeyku40e65 swarm-test-02 Ready Active Reachable 20.10.2
yg550ettvsjn6g6t840iaiwgb * swarm-test-01 Ready Active Leader 23.0.3
2lm9w9kbepgvkzkkeyku40e65 swarm-test-02 Ready Active Reachable 23.0.3
```
#### membership
@ -236,7 +236,7 @@ examples: |-
To list all nodes in JSON format, use the `json` directive:
```console
$ docker node ls --format json
{"Availability":"Active","EngineVersion":"20.10.5","Hostname":"docker-desktop","ID":"k8f4w7qtzpj5sqzclcqafw35g","ManagerStatus":"Leader","Self":true,"Status":"Ready","TLSStatus":"Ready"}
{"Availability":"Active","EngineVersion":"23.0.3","Hostname":"docker-desktop","ID":"k8f4w7qtzpj5sqzclcqafw35g","ManagerStatus":"Leader","Self":true,"Status":"Ready","TLSStatus":"Ready"}
```
deprecated: false
min_api_version: "1.24"

View File

@ -15,10 +15,8 @@ long: |-
If you are behind an HTTP proxy server, for example in corporate settings,
before opening a connection to the registry, you may need to configure the Docker
daemon's proxy settings, using the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY`
environment variables. To set these environment variables on a host using
`systemd`, refer to the [control and configure Docker with systemd](/config/daemon/systemd/#httphttps-proxy)
for variables configuration.
daemon's proxy settings, refer to the [dockerd command-line reference](dockerd.md#proxy-configuration)
for details.
### Concurrent downloads

View File

@ -136,16 +136,16 @@ examples: |-
Running `docker stats` with customized format on all (Running and Stopped) containers.
```console
$ docker stats --all --format "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}" fervent_panini 5acfcb1b4fd1 drunk_visvesvaraya big_heisenberg
$ docker stats --all --format "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}" fervent_panini 5acfcb1b4fd1 humble_visvesvaraya big_heisenberg
CONTAINER CPU % MEM USAGE / LIMIT
fervent_panini 0.00% 56KiB / 15.57GiB
5acfcb1b4fd1 0.07% 32.86MiB / 15.57GiB
drunk_visvesvaraya 0.00% 0B / 0B
humble_visvesvaraya 0.00% 0B / 0B
big_heisenberg 0.00% 0B / 0B
```
`drunk_visvesvaraya` and `big_heisenberg` are stopped containers in the above example.
`humble_visvesvaraya` and `big_heisenberg` are stopped containers in the above example.
Running `docker stats` on all running containers against a Windows daemon.

View File

@ -24,30 +24,30 @@ long: |-
```console
$ docker version
Client:
Version: 20.10.16
API version: 1.41
Go version: go1.17.10
Git commit: aa7e414
Built: Thu May 12 09:17:28 2022
Client: Docker Engine - Community
Version: 23.0.3
API version: 1.42
Go version: go1.19.7
Git commit: 3e7cbfd
Built: Tue Apr 4 22:05:41 2023
OS/Arch: darwin/amd64
Context: default
Server: Docker Desktop 4.8.2 (77141)
Server: Docker Desktop 4.19.0 (12345)
Engine:
Version: 20.10.16
API version: 1.41 (minimum version 1.12)
Go version: go1.17.10
Git commit: f756502
Built: Thu May 12 09:15:33 2022
Version: 23.0.3
API version: 1.42 (minimum version 1.12)
Go version: go1.19.7
Git commit: 59118bf
Built: Tue Apr 4 22:05:41 2023
OS/Arch: linux/amd64
Experimental: false
containerd:
Version: 1.6.4
GitCommit: 212e8b6fa2f44b9c21b2798135fc6fb7c53efc16
Version: 1.6.20
GitCommit: 2806fc1057397dbaeefbea0e4e17bddfbd388f38
runc:
Version: 1.1.1
GitCommit: v1.1.1-0-g52de29d
Version: 1.1.5
GitCommit: v1.1.5-0-gf19387a
docker-init:
Version: 0.19.0
GitCommit: de40ad0
@ -69,12 +69,12 @@ long: |-
$ docker version
Client:
Version: 20.10.16
API version: 1.40 (downgraded from 1.41)
Go version: go1.17.10
Git commit: aa7e414
Built: Thu May 12 09:17:28 2022
Client: Docker Engine - Community
Version: 23.0.3
API version: 1.40 (downgraded from 1.42)
Go version: go1.19.7
Git commit: 3e7cbfd
Built: Tue Apr 4 22:05:41 2023
OS/Arch: darwin/amd64
Context: remote-test-server
@ -129,7 +129,7 @@ long: |-
$ unset DOCKER_API_VERSION
$ docker version --format '{{.Client.APIVersion}}'
1.41
1.42
```
usage: docker version [OPTIONS]
pname: docker
@ -170,7 +170,7 @@ examples: |-
```console
$ docker version --format '{{.Server.Version}}'
20.10.16
23.0.3
```
### Get the client API version
@ -180,7 +180,7 @@ examples: |-
```console
$ docker version --format '{{.Client.APIVersion}}'
1.41
1.42
```
The version shown is the API version that is negotiated between the client
@ -192,7 +192,7 @@ examples: |-
```console
$ docker version --format '{{json .}}'
{"Client":{"Platform":{"Name":"Docker Engine - Community"},"Version":"19.03.8","ApiVersion":"1.40","DefaultAPIVersion":"1.40","GitCommit":"afacb8b","GoVersion":"go1.12.17","Os":"darwin","Arch":"amd64","BuildTime":"Wed Mar 11 01:21:11 2020","Experimental":true},"Server":{"Platform":{"Name":"Docker Engine - Community"},"Components":[{"Name":"Engine","Version":"19.03.8","Details":{"ApiVersion":"1.40","Arch":"amd64","BuildTime":"Wed Mar 11 01:29:16 2020","Experimental":"true","GitCommit":"afacb8b","GoVersion":"go1.12.17","KernelVersion":"4.19.76-linuxkit","MinAPIVersion":"1.12","Os":"linux"}},{"Name":"containerd","Version":"v1.2.13","Details":{"GitCommit":"7ad184331fa3e55e52b890ea95e65ba581ae3429"}},{"Name":"runc","Version":"1.0.0-rc10","Details":{"GitCommit":"dc9208a3303feef5b3839f4323d9beb36df0a9dd"}},{"Name":"docker-init","Version":"0.18.0","Details":{"GitCommit":"fec3683"}}],"Version":"19.03.8","ApiVersion":"1.40","MinAPIVersion":"1.12","GitCommit":"afacb8b","GoVersion":"go1.12.17","Os":"linux","Arch":"amd64","KernelVersion":"4.19.76-linuxkit","Experimental":true,"BuildTime":"2020-03-11T01:29:16.000000000+00:00"}}
{"Client":"Version":"23.0.3","ApiVersion":"1.42", ...}
```
deprecated: false
experimental: false

View File

@ -932,6 +932,8 @@ reference:
title: Extensions
- path: /compose/compose-file/12-interpolation/
title: Interpolation
- path: /compose/compose-file/13-merge/
title: Merge
- path: /compose/compose-file/build/
title: Compose file build
- path: /compose/compose-file/deploy/
@ -1685,12 +1687,16 @@ manuals:
title: SLSA definitions
- path: /build/attestations/attestation-storage/
title: Attestation storage
- sectiontitle: Dockerfile
section:
- path: /build/dockerfile/frontend/
title: Custom Dockerfile syntax
- path: /build/dockerfile/release-notes/
title: Release notes
- sectiontitle: BuildKit
section:
- path: /build/buildkit/
title: Overview
- path: /build/buildkit/dockerfile-frontend/
title: Custom Dockerfile syntax
- path: /build/buildkit/configure/
title: Configure
- path: /build/buildkit/toml-configuration/
@ -1772,6 +1778,8 @@ manuals:
title: Extend services in Compose
- path: /compose/networking/
title: Networking in Compose
- path: /compose/file-watch/
title: Automatically update services with file watch (Experimental)
- path: /compose/production/
title: Using Compose in production
- path: /compose/startup-order/
@ -1861,8 +1869,6 @@ manuals:
title: Team
- path: /docker-hub/onboard-business/
title: Business
- path: /docker-hub/onboarding-faqs/
title: FAQs
- sectiontitle: Set up your company
section:
- path: /docker-hub/creating-companies/
@ -1879,8 +1885,6 @@ manuals:
title: Create and manage a team
- path: /docker-hub/members/
title: Manage members
- path: /docker-hub/configure-sign-in/
title: Enforce sign-in
- sectiontitle: Single Sign-on
section:
- path: /single-sign-on/
@ -1911,6 +1915,8 @@ manuals:
title: Recover your Docker Hub account
- path: /docker-hub/2fa/new-recovery-code/
title: Generate a new recovery code
- path: /docker-hub/configure-sign-in/
title: Enforce sign-in for Desktop
- path: /docker-hub/audit-log/
title: Audit logs
- path: /docker-hub/domain-audit/
@ -1919,6 +1925,8 @@ manuals:
title: Image Access Management
- path: /docker-hub/deactivate-account/
title: Deactivate an account or organization
- path: /docker-hub/onboarding-faqs/
title: FAQs
- sectiontitle: Billing
section:

View File

@ -2,5 +2,5 @@
>
> From the end of June 2023 Compose V1 won't be supported anymore and will be removed from all Docker Desktop versions.
>
> Make sure you switch to [Compose V2](/compose/compose-file/) with the `docker compose` CLI plugin or by activating the **Use Docker Compose V2** setting in Docker Desktop. For more information, see the [Evolution of Compose](/compose/compose-v2/)
> Make sure you switch to [Compose V2](/compose/compose-file/) with the `docker compose` CLI plugin or by activating the **Use Docker Compose V2** setting in Docker Desktop. For more information, see the [Evolution of Compose](/compose/compose-v2/).
{: .important}

View File

@ -8,9 +8,9 @@
> [Mac with Apple chip](https://desktop.docker.com/mac/main/arm64{{ include.build_path }}Docker.dmg) ([checksum](https://desktop.docker.com/mac/main/arm64{{ include.build_path }}checksums.txt){: target="_blank" rel="noopener" class="_"}) {% if include.all or include.linux %} | {% endif %}
{% endif -%}
{% if include.all or include.linux -%}
> [Debian](https://desktop.docker.com/linux/main/amd64{{ include.build_path }}docker-desktop-4.17.0-amd64.deb) -
> [RPM](https://desktop.docker.com/linux/main/amd64{{ include.build_path }}docker-desktop-4.17.0-x86_64.rpm) -
> [Arch package](https://desktop.docker.com/linux/main/amd64{{ include.build_path }}docker-desktop-4.17.0-x86_64.pkg.tar.zst) ([checksum](https://desktop.docker.com/linux/main/amd64{{ include.build_path }}checksums.txt){: target="_blank" rel="noopener" class="_"})
> [Debian](https://desktop.docker.com/linux/main/amd64{{ include.build_path }}docker-desktop-4.18.0-amd64.deb) -
> [RPM](https://desktop.docker.com/linux/main/amd64{{ include.build_path }}docker-desktop-4.18.0-x86_64.rpm) -
> [Arch package](https://desktop.docker.com/linux/main/amd64{{ include.build_path }}docker-desktop-4.18.0-x86_64.pkg.tar.zst) ([checksum](https://desktop.docker.com/linux/main/amd64{{ include.build_path }}checksums.txt){: target="_blank" rel="noopener" class="_"})
{% endif -%}
{% if include.build_path == "/" -%}
{: .tip}

View File

@ -0,0 +1,5 @@
> Experimental
>
> The "labs" channel provides early access to Dockerfile features that are not
> yet available in the stable channel.
{: .experimental }

8
_includes/root-errors.md Normal file
View File

@ -0,0 +1,8 @@
> **Tip**
>
> Receiving errors when trying to run without root?
>
> The `docker` user group exists but contains no users, which is why you're required
> to use `sudo` to run Docker commands. Continue to [Linux postinstall](/engine/install/linux-postinstall)
> to allow non-privileged users to run Docker commands and for other optional configuration steps.
{: .tip}

View File

@ -98,8 +98,8 @@ CMD flask run --host 0.0.0.0 --port 8000
The first line to add to a Dockerfile is a [`# syntax` parser directive](../../engine/reference/builder.md#syntax).
While optional, this directive instructs the Docker builder what syntax to use
when parsing the Dockerfile, and allows older Docker versions with [BuildKit enabled](../buildkit/index.md#getting-started)
to use a specific [Dockerfile frontend](../buildkit/dockerfile-frontend.md)
before starting the build. [Parser directives](../../engine/reference/builder.md/#parser-directives)
to use a specific [Dockerfile frontend](../dockerfile/frontend.md) before
starting the build. [Parser directives](../../engine/reference/builder.md/#parser-directives)
must appear before any other comment, whitespace, or Dockerfile instruction in
your Dockerfile, and should be the first line in Dockerfiles.

View File

@ -19,7 +19,7 @@ It also introduces support for handling more complex scenarios:
[build context](../building/context.md) between builds
- Detect and skip transferring unused files in your
[build context](../building/context.md)
- Use [Dockerfile frontend](dockerfile-frontend.md) implementations with many
- Use [Dockerfile frontend](../dockerfile/frontend.md) implementations with many
new features
- Avoid side effects with rest of the API (intermediate images and containers)
- Prioritize your build cache for automatic pruning
@ -70,7 +70,7 @@ work for the features used by their definition.
For example, to build a [Dockerfile](../../engine/reference/builder.md) with
BuildKit, you would
[use an external Dockerfile frontend](dockerfile-frontend.md).
[use an external Dockerfile frontend](../dockerfile/frontend.md).
## Getting started

View File

@ -18,7 +18,7 @@ want to consider the [registry](./registry.md) cache.
```console
$ docker buildx build --push -t <registry>/<image> \
--cache-to type=inline \
--cache-from type=registry,ref=<registry>/image .
--cache-from type=registry,ref=<registry>/<image> .
```
No additional parameters are supported for the `inline` cache.

View File

@ -1,6 +1,8 @@
---
title: Custom Dockerfile syntax
keywords: build, buildkit, dockerfile, frontend
redirect_from:
- /build/buildkit/dockerfile-frontend/
---
## Dockerfile frontend

View File

@ -0,0 +1,254 @@
---
title: Dockerfile release notes
description: Release notes for Dockerfile frontend
keywords: build, dockerfile, frontend, release notes
toc_max: 2
---
This page contains information about the new features, improvements, known
issues, and bug fixes in [Dockerfile reference](../../engine/reference/builder.md).
For usage, see the [Dockerfile frontend syntax](frontend.md) page.
## 1.5.2
{% include release-date.html date="2023-02-14" %}
### Bug fixes and enhancements
* Fix building from Git reference that is missing branch name but contains a
subdir
* 386 platform image is now included in the release
## 1.5.1
{% include release-date.html date="2023-01-18" %}
### Bug fixes and enhancements
* Fix possible panic when warning conditions appear in multi-platform builds
## 1.5.0 (labs)
{% include release-date.html date="2023-01-10" %}
{% include dockerfile-labs-channel.md %}
### New
* `ADD` command now supports [`--checksum` flag](../../engine/reference/builder.md#verifying-a-remote-file-checksum-add---checksumchecksum-http-src-dest)
to validate the contents of the remote URL contents
## 1.5.0
{% include release-date.html date="2023-01-10" %}
### New
* `ADD` command can now [import files directly from Git URLs](../../engine/reference/builder.md#adding-a-git-repository-add-git-ref-dir)
### Bug fixes and enhancements
* Named contexts now support `oci-layout://` protocol for including images from
local OCI layout structure
* Dockerfile now supports secondary requests for listing all build targets or
printing outline of accepted parameters for a specific build target
* Dockerfile `#syntax` directive that redirects to an external frontend image
now allows the directive to be also set with `//` comments or JSON. The file
may also contain a shebang header
* Named context can now be initialized with an empty scratch image
* Named contexts can now be initialized with an SSH Git URL
* Fix handling of `ONBUILD` when importing Schema1 images
## 1.4.3
{% include release-date.html date="2022-08-23" %}
### Bug fixes and enhancements
* Fix creation timestamp not getting reset when building image from
`docker-image://` named context
* Fix passing `--platform` flag of `FROM` command when loading
`docker-image://` named context
## 1.4.2
{% include release-date.html date="2022-05-06" %}
### Bug fixes and enhancements
* Fix loading certain environment variables from an image passed with built
context
## 1.4.1
{% include release-date.html date="2022-04-08" %}
### Bug fixes and enhancements
* Fix named context resolution for cross-compilation cases from input when input
is built for a different platform
## 1.4.0
{% include release-date.html date="2022-03-09" %}
### New
* [`COPY --link` and `ADD --link`](../../engine/reference/builder.md#copy---link)
allow copying files with increased cache efficiency and rebase images without
requiring them to be rebuilt. `--link` copies files to a separate layer and
then uses new LLB MergeOp implementation to chain independent layers together
* [Heredocs](../../engine/reference/builder.md#here-documents) support have
been promoted from labs channel to stable. This feature allows writing
multiline inline scripts and files
* Additional [named build contexts](../../engine/reference/commandline/buildx_build.md#build-context)
can be passed to build to add or overwrite a stage or an image inside the
build. A source for the context can be a local source, image, Git, or HTTP URL
* [`BUILDKIT_SANDBOX_HOSTNAME` build-arg](../../engine/reference/builder.md#buildkit-built-in-build-args)
can be used to set the default hostname for the `RUN` steps
### Bug fixes and enhancements
* When using a cross-compilation stage, the target platform for a step is now
seen on progress output
* Fix some cases where Heredocs incorrectly removed quotes from content
## 1.3.1
{% include release-date.html date="2021-10-04" %}
### Bug fixes and enhancements
* Fix parsing "required" mount key without a value
## 1.3.0 (labs)
{% include release-date.html date="2021-07-16" %}
{% include dockerfile-labs-channel.md %}
### New
* `RUN` and `COPY` commands now support [Here-document syntax](../../engine/reference/builder.md#here-documents)
allowing writing multiline inline scripts and files
## 1.3.0
{% include release-date.html date="2021-07-16" %}
### New
* `RUN` command allows [`--network` flag](../../engine/reference/builder.md#run---network)
for requesting a specific type of network conditions. `--network=host`
requires allowing `network.host` entitlement. This feature was previously
only available on labs channel
### Bug fixes and enhancements
* `ADD` command with a remote URL input now correctly handles the `--chmod` flag
* Values for [`RUN --mount` flag](../../engine/reference/builder.md#run---mount)
now support variable expansion, except for the `from` field
* Allow [`BUILDKIT_MULTI_PLATFORM` build arg](../../engine/reference/builder.md#buildkit-built-in-build-args)
to force always creating multi-platform image, even if only contains single
platform
## 1.2.1 (labs)
{% include release-date.html date="2020-12-12" %}
{% include dockerfile-labs-channel.md %}
### Bug fixes and enhancements
* `RUN` command allows [`--network` flag](../../engine/reference/builder.md#run---network)
for requesting a specific type of network conditions. `--network=host`
requires allowing `network.host` entitlement
## 1.2.1
{% include release-date.html date="2020-12-12" %}
### Bug fixes and enhancements
* Revert "Ensure ENTRYPOINT command has at least one argument"
* Optimize processing `COPY` calls on multi-platform cross-compilation builds
## 1.2.0 (labs)
{% include release-date.html date="2020-12-03" %}
{% include dockerfile-labs-channel.md %}
### Bug fixes and enhancements
* Experimental channel has been renamed to *labs*
## 1.2.0
{% include release-date.html date="2020-12-03" %}
### New
* [`RUN --mount` syntax](../../engine/reference/builder.md#run---mount) for
creating secret, ssh, bind, and cache mounts have been moved to mainline
channel
* [`ARG` command](../../engine/reference/builder.md#arg) now supports defining
multiple build args on the same line similarly to `ENV`
### Bug fixes and enhancements
* Metadata load errors are now handled as fatal to avoid incorrect build results
* Allow lowercase Dockerfile name
* `--chown` flag in `ADD` now allows parameter expansion
* `ENTRYPOINT` requires at least one argument to avoid creating broken images
## 1.1.7
{% include release-date.html date="2020-04-18" %}
### Bug fixes and enhancements
* Forward `FrontendInputs` to the gateway
## 1.1.2 (experimental)
{% include release-date.html date="2019-07-31" %}
{% include dockerfile-labs-channel.md %}
### Bug fixes and enhancements
* Allow setting security mode for a process with `RUN --security=sandbox|insecure`
* Allow setting uid/gid for [cache mounts](../../engine/reference/builder.md#run---mounttypecache)
* Avoid requesting internally linked paths to be pulled to build context
* Ensure missing cache IDs default to target paths
* Allow setting namespace for cache mounts with [`BUILDKIT_CACHE_MOUNT_NS` build arg](../../engine/reference/builder.md#buildkit-built-in-build-args)
## 1.1.2
{% include release-date.html date="2019-07-31" %}
### Bug fixes and enhancements
* Fix workdir creation with correct user and don't reset custom ownership
* Fix handling empty build args also used as `ENV`
* Detect circular dependencies
## 1.1.0
{% include release-date.html date="2019-04-27" %}
### New
* `ADD/COPY` commands now support implementation based on `llb.FileOp` and do
not require helper image if builtin file operations support is available
* `--chown` flag for `COPY` command now supports variable expansion
### Bug fixes and enhancements
* To find the files ignored from the build context Dockerfile frontend will
first look for a file `<path/to/Dockerfile>.dockerignore` and if it is not
found `.dockerignore` file will be looked up from the root of the build
context. This allows projects with multiple Dockerfiles to use different
`.dockerignore` definitions

View File

@ -153,11 +153,11 @@ advanced scenarios.
<div class="col-xs-12 col-sm-12 col-md-12 col-lg-4 block">
<div class="component">
<div class="component-icon">
<a href="/build/buildkit/dockerfile-frontend/">
<a href="/build/dockerfile/frontend/">
<img src="/assets/images/build-frontends.svg" alt="Pen writing on a document" width="70px" height="70px">
</a>
</div>
<h2><a href="/build/buildkit/dockerfile-frontend/">Dockerfile frontend</a></h2>
<h2><a href="/build/dockerfile/frontend/">Dockerfile frontend</a></h2>
<p>
Learn about the Dockerfile frontend for BuildKit.
</p>

View File

@ -0,0 +1,7 @@
---
title: Merge and override
keywords: compose, compose specification
fetch_remote:
line_start: 2
line_end: -1
---

View File

@ -9,8 +9,7 @@ toc_min: 1
## Reference and guidelines
These topics describe version 3 of the Compose file format. This is the newest
version.
These topics describe version 3 of the Compose file format.
## Compose and Docker compatibility matrix

View File

@ -4,6 +4,17 @@ keywords: fig, composition, compose, versions, upgrading, docker
title: Compose file versions and upgrading
---
>**Warning**
>
>This page contains information on the legacy versions of Compose, also collectively referred to as Compose V1.
>From the end of June 2023 Compose V1 won't be supported anymore.
>
>The latest and recommended version of Compose is the [Compose Specification](index.md).
>Make sure you switch to [Compose V2](/compose/compose-file/) with the `docker compose` CLI plugin or by activating the **Use Docker Compose V2** setting in Docker Desktop.
>
> For more information, see the [Evolution of Compose](/compose/compose-v2/).
{: .warning}
The Compose file is a [YAML](https://yaml.org) file defining services,
networks, and volumes for a Docker application.
@ -11,7 +22,6 @@ The Compose file formats are now described in these references, specific to each
| **Reference file** | **What changed in this version** |
|:------------------------------------------------------|:---------------------------------|
| [Compose Specification](index.md) (most current, and recommended) | [Versioning](compose-versioning.md#versioning) |
| [Version 3](compose-file-v3.md) | [Version 3 updates](#version-3) |
| [Version 2](compose-file-v2.md) | [Version 2 updates](#version-2) |
| Version 1 (Deprecated) | [Version 1 updates](#version-1-deprecated) |
@ -21,8 +31,6 @@ compatibility, and [how to upgrade](#upgrading).
## Compatibility matrix
There are several versions of the Compose file format — 1, 2, 2.x, and 3.x
{% include content/compose-matrix.md %}
> Looking for more detail on Docker and Compose compatibility?
@ -79,36 +87,7 @@ Several things differ depending on which version you use:
These differences are explained below.
### Version 1 (Deprecated)
Compose files that do not declare a version are considered "version 1". In those
files, all the [services](compose-file-v3.md#service-configuration-reference) are
declared at the root of the document.
Version 1 is supported by **Compose up to 1.6.x**. It will be deprecated in a
future Compose release.
Version 1 files cannot declare named
[volumes](compose-file-v3.md#volume-configuration-reference), [networks](compose-file-v3.md#network-configuration-reference) or
[build arguments](compose-file-v3.md#args).
Compose does not take advantage of [networking](../networking.md) when you
use version 1: every container is placed on the default `bridge` network and is
reachable from every other container at its IP address. You need to use
`links` to enable discovery between containers.
Example:
web:
build: .
ports:
- "8000:5000"
volumes:
- .:/code
links:
- redis
redis:
image: redis
### Version 2
@ -394,6 +373,38 @@ Introduces the following additional parameters:
configurations. This option is only supported when deploying swarm services
using `docker stack deploy`.
### Version 1 (Deprecated)
Compose versions below 1.6.x are
Compose files that do not declare a version are considered "version 1". In those
files, all the [services](compose-file-v3.md#service-configuration-reference) are
declared at the root of the document.
Version 1 is supported by **Compose up to 1.6.x** and has been deprecated.
Version 1 files cannot declare named
[volumes](compose-file-v3.md#volume-configuration-reference), [networks](compose-file-v3.md#network-configuration-reference) or
[build arguments](compose-file-v3.md#args).
Compose does not take advantage of [networking](../networking.md) when you
use version 1: every container is placed on the default `bridge` network and is
reachable from every other container at its IP address. You need to use
`links` to enable discovery between containers.
Example:
web:
build: .
ports:
- "8000:5000"
volumes:
- .:/code
links:
- redis
redis:
image: redis
## Upgrading
### Version 2.x to 3.x
@ -433,6 +444,28 @@ Compose files. (For more information, see [Extending services](../extends.md#ext
- `link_local_ips` in `networks`: This option has not been introduced in
`version: "3.x"` Compose files.
#### Compatibility mode
`docker-compose` 1.20.0 introduces a new `--compatibility` flag designed to
help developers transition to version 3 more easily. When enabled,
`docker-compose` reads the `deploy` section of each service's definition and
attempts to translate it into the equivalent version 2 parameter. Currently,
the following deploy keys are translated:
- [resources](compose-file-v3.md#resources) limits and memory reservations
- [replicas](compose-file-v3.md#replicas)
- [restart_policy](compose-file-v3.md#restart_policy) `condition` and `max_attempts`
All other keys are ignored and produce a warning if present. You can review
the configuration that will be used to deploy by using the `--compatibility`
flag with the `config` command.
> Do not use this in production
>
> We recommend against using `--compatibility` mode in production. The
> resulting configuration is only an approximation using non-Swarm mode
> properties, and it may produce unexpected results.
### Version 1 to 2.x
In the majority of cases, moving from version 1 to 2 is a very simple process:
@ -516,29 +549,4 @@ It's more complicated if you're using particular configuration features:
data:
external: true
## Compatibility mode
`docker-compose` 1.20.0 introduces a new `--compatibility` flag designed to
help developers transition to version 3 more easily. When enabled,
`docker-compose` reads the `deploy` section of each service's definition and
attempts to translate it into the equivalent version 2 parameter. Currently,
the following deploy keys are translated:
- [resources](compose-file-v3.md#resources) limits and memory reservations
- [replicas](compose-file-v3.md#replicas)
- [restart_policy](compose-file-v3.md#restart_policy) `condition` and `max_attempts`
All other keys are ignored and produce a warning if present. You can review
the configuration that will be used to deploy by using the `--compatibility`
flag with the `config` command.
> Do not use this in production!
>
> We recommend against using `--compatibility` mode in production. Because the
> resulting configuration is only an approximation using non-Swarm mode
> properties, it may produce unexpected results.
## Compose file format references
- [Compose Specification](index.md)
- [Compose file version 3](compose-file-v3.md)
- [Compose file version 2](compose-file-v2.md)

View File

@ -19,7 +19,7 @@ Between 2014 and 2017 two other noticeable versions of Compose, which introduced
These three key file format versions and releases prior to v1.29.2 are collectively referred to as Compose V1.
In mid-2020 Compose V2 was released. It merged Compose file format V2 and V3 and was written in Go. The file format is defined by the [Compose specification](https://github.com/compose-spec/compose-spec){:target="_blank" rel="noopener" class="_"}. Compose V2 is the latest and recommended version of Compose. It provides improved integration with other Docker command-line features, and simplified installation on macOS, Windows, and Linux.
In mid-2020 Compose V2 was released. It merged Compose file format V2 and V3 and was written in Go. The file format is defined by the [Compose specification](https://github.com/compose-spec/compose-spec){:target="_blank" rel="noopener" class="_"}. Compose V2 is the latest and recommended version of Compose and is compatible with Docker Engine version 19.03.0 and later. It provides improved integration with other Docker command-line features, and simplified installation on macOS, Windows, and Linux.
It makes a clean distinction between the Compose YAML file model and the `docker-compose`
implementation. Making this change has enabled a number of enhancements, including

139
compose/file-watch.md Normal file
View File

@ -0,0 +1,139 @@
---
description: File watch automatically updates running services as you work
keywords: compose, file watch, experimental
title: Automatically update services with file watch
---
{% include compose-eol.md %}
> **Note**
>
> The Compose file watch feature is currently [Experimental](../release-lifecycle.md).
Use `watch` to automatically update your running Compose services as you edit and save your code.
For many projects, this enables a hands-off development workflow once Compose is running: services automatically update themselves as you save your work.
You do not need to enable `watch` for all services in a Compose project. In some instances, only part of the project (e.g. JavaScript frontend) might be suitable for automatic updates.
`watch` adheres to the following file path rules:
* All paths are relative to the build context
* Directories are watched recursively
* Glob patterns are not supported
* Rules from `.dockerignore` apply
* Use `include` / `exclude` to override
* Temporary/backup files for common IDEs (Vim, Emacs, JetBrains, & more) are ignored automatically
* `.git` directories are ignored automatically
## Configuration
The `watch` attribute defines a list of rules that control automatic service updates based on local file changes.
Each rule requires a `path` pattern and an `action` to take when a modification is detected. There are two possible actions for `watch`, and depending on
the `action`, additional fields might be accepted or required.
### `action`
#### Sync
If `action` is set to `sync`, Compose makes sure any changes made to files on your host automatically match with the corresponding files within the service container.
Sync is ideal for frameworks that support "Hot Reload" or equivalent functionality.
More generally, sync rules can be used in place of bind mounts for many development use cases.
##### Comparison to bind mounts
Compose also supports sharing a host directory inside service containers. Watch mode does not replace this functionality but exists as a companion specifically suited to developing in containers.
Most importantly, watch mode allows for greater granularity than is practical with a bind mount. Watch rules allow ignoring specific files or entire directories within the watched tree.
For example, in a JavaScript project, ignoring the `node_modules/` directory has a couple of benefits:
* Performance: file trees with many small files can cause high I/O load in some configurations
* Multi-platform: compiled artifacts cannot be shared if the host OS (e.g. Windows, macOS) or architecture (e.g. arm64) is different than the container
For example, in a Node.js project, it's not recommended to sync the `node_modules/` directory. Even though JavaScript is interpreted, npm packages can contain native code that is not portable across platforms.
#### Rebuild
If `action` is set to `rebuild`, Compose automatically builds a new image with BuildKit and replaces the running service container.
The behavior is the same as running `docker compose up --build <svc>`.
Rebuild is ideal for compiled languages or as fallbacks for modifications to particular files that require a full
image rebuild (e.g. `package.json`).
>**Tip**
>
> Optimize your `Dockerfile` for speedy
incremental rebuilds with [image layer caching](/build/cache)
and [multi-stage builds](/build/building/multi-stage/).
{: .tip}
### `path` and `target`
The `target` field controls how the path is mapped into the container.
For `path: ./app/html` and a change to `./app/html/index.html`:
* `target: /app/html` -> `/app/html/index.html`
* `target: /app/static` -> `/app/static/index.html`
* `target: /assets` -> `/assets/index.html`
## Example
Watch mode can be used with many different languages and frameworks.
The specific paths and rules will vary project to project, but the concepts remain the same.
This minimal example targets a Node.js application with the following structure:
```text
myproject/
├── web/
│ ├── App.jsx
│ └── index.js
├── Dockerfile
├── compose.yaml
└── package.json
```
```yaml
services:
web:
build: .
command: npm start
x-develop:
watch:
- action: sync
path: ./web
target: /src/web
- action: rebuild
path: package.json
```
In this example, when running `docker compose up --build --wait`, a container for the `web` service is launched using an image built from the `Dockerfile` in the project root.
The `web` service runs `npm start` for its command, which then launches a development version of the application with Hot Module Reload enabled in the bundler (Webpack, Vite, Turbopack, etc.).
After the service is up, running `docker compose alpha watch` starts watch mode.
Then, whenever a source file in the `web/` directory is changed, Compose syncs the file to the corresponding location under `/src/web` inside the container.
For example, `./web/App.jsx` is copied to `/src/web/App.jsx`.
Once copied, the bundler updates the running application without a restart.
Unlike source code files, adding a new dependency can't be done on-the-fly, so whenever `package.json` is changed, Compose
rebuilds the image and recreates the `web` service container.
This pattern can be followed for many languages and frameworks, such as Python with Flask: Python source files can be synced while a change to `requirements.txt` should trigger a rebuild.
## Use `watch`
1. Add `watch` sections to one or more services in `compose.yaml`.
2. Launch a Compose project with `docker compose up --build --wait`.
3. Run `docker compose alpha watch` to start the file watch mode.
4. Edit service source files using your preferred IDE or editor.
>**Tip**
>
> Looking for a sample project to test things out? Check
out [`dockersamples/avatars`](https://github.com/dockersamples/avatars) for a demonstration of Compose `watch`.
{: .tip}
## Feedback
We are actively looking for feedback on this feature. Give feedback or report any bugs you may find in the [Compose Specification repository](https://github.com/compose-spec/compose-spec/pull/253).

View File

@ -20,7 +20,7 @@ To install the Compose plugin on Linux, you can either:
> These instructions assume you already have Docker Engine and Docker CLI installed and now want to install the Compose plugin.
For Compose standalone, see [Install Compose Standalone](other.md).
### Install using the repository
## Install using the repository
1. Set up the repository. Find distro-specific instructions in:
@ -55,7 +55,7 @@ For Compose standalone, see [Install Compose Standalone](other.md).
Where `vN.N.N` is placeholder text standing in for the latest version.
#### Update Compose
### Update Compose
To update the Compose plugin, run the following commands:
@ -72,7 +72,7 @@ To update the Compose plugin, run the following commands:
$ sudo yum install docker-compose-plugin
```
### Install the plugin manually
## Install the plugin manually
> **Note**
>

View File

@ -34,6 +34,11 @@ redirect_from:
## 2.17.0
{% include release-date.html date="2023-03-23" %}
### Upgrade notes
- Project name validation is more strictly enforced. Project names can only include letters, numbers, `_`, `-` and must be lowercase and start with a letter or number.
- Boolean fields in YAML must be either `true` or `false`. Deprecated YAML 1.1 values such as "on" or "no" are not supported.
- Duplicate YAML merge keys (`<<`) are rejected.
### Update
- Dependencies upgrade: bump buildkit to v0.11.4
- Dependencies upgrade: bump buildx to v0.10.4
@ -53,8 +58,6 @@ redirect_from:
target="_blank" rel="noopener" class="_"}
* Progress writer now uses `dockercli.Err` stream. Fixed [compose#10366](https://github.com/docker/compose/issues/10366){:
target="_blank" rel="noopener" class="_"}
* Introduced `dockerfile_inline`. Fixed [compose#8077](https://github.com/docker/compose/issues/8077){:
target="_blank" rel="noopener" class="_"}
* Added support for `additional_contexts` in the `build` service configuration. Fixed [compose#9461](https://github.com/docker/compose/issues/9461){:
target="_blank" rel="noopener" class="_"} [compose#9961](https://github.com/docker/compose/issues/9961){:
target="_blank" rel="noopener" class="_"}

View File

@ -9,7 +9,7 @@ title: Overview
>
> The Dev Environments feature is currently in [Beta](../../release-lifecycle.md#beta). We recommend that you do not use this in production environments.
Dev Environments lets you create a configurable developer environment with all the code and tools you need to quickly get up and running.
Dev Environments let you create a configurable developer environment with all the code and tools you need to quickly get up and running.
It uses tools built into code editors that allows Docker to access code mounted into a container rather than on your local host. This isolates the tools, files and running services on your machine allowing multiple versions of them to exist side by side.

View File

@ -59,8 +59,6 @@ Your Mac must meet the following requirements to install Docker Desktop successf
$ softwareupdate --install-rosetta
```
For more information, see [Docker Desktop for Apple silicon](../install/mac-install.md).
</div>
</div>
@ -111,11 +109,10 @@ The `install` command accepts the following flags:
## Where to go next
- [Docker Desktop for Apple silicon](../install/mac-install.md) for detailed information about Docker Desktop for Apple silicon.
- [Troubleshooting](../troubleshoot/overview.md) describes common problems, workarounds, how
to run and submit diagnostics, and submit issues.
- [FAQs](../faqs/general.md) provide answers to frequently asked questions.
- [Release notes](../release-notes.md) lists component updates, new features, and improvements associated with Docker Desktop releases.
- [Get started with Docker](../../get-started/index.md) provides a general Docker tutorial.
* [Back up and restore data](../backup-and-restore.md) provides instructions
- [Back up and restore data](../backup-and-restore.md) provides instructions
on backing up and restoring data related to Docker.

View File

@ -2,7 +2,7 @@
description: Troubleshooting topics
keywords: Linux, Mac, Windows, troubleshooting, topics, Docker Desktop
title: Troubleshoot topics
toc_max: 3
toc_max: 4
---
## Topics for all platforms

View File

@ -17,6 +17,7 @@ Additionally, with WSL 2, the time required to start a Docker daemon after a col
Before you turn on the Docker Desktop WSL 2, ensure you have:
- WSL version 1.1.3.0 or above.
- Windows 10, version 1903 or higher, or Windows 11.
- Enabled WSL 2 feature on Windows. For detailed instructions, refer to the [Microsoft documentation](https://docs.microsoft.com/en-us/windows/wsl/install-win10){:target="_blank" rel="noopener" class="_"}.
- Downloaded and installed the [Linux kernel update package](https://docs.microsoft.com/windows/wsl/wsl2-kernel){:target="_blank" rel="noopener" class="_"}.
@ -158,6 +159,10 @@ GPU Device 0: "GeForce RTX 2060 with Max-Q Design" with compute capability 7.5
= 2724.379 single-precision GFLOP/s at 20 flops per interaction
```
> **Note**
>
> GPU support is only available in Docker Desktop for Windows with the WSL2 backend.
>
## Feedback
Your feedback is very important to us. Let us know your feedback by creating an issue in the [Docker Desktop for Windows GitHub](https://github.com/docker/for-win/issues){:target="_blank" rel="noopener" class="_"} repository and adding the **WSL 2** label.

View File

@ -36,18 +36,18 @@ target "vendor" {
}
group "validate" {
targets = ["htmlproofer", "mdl"]
targets = ["htmltest", "mdl"]
}
target "htmlproofer" {
target "htmltest" {
inherits = ["_common"]
target = "htmlproofer"
target = "htmltest"
output = ["type=cacheonly"]
}
target "htmlproofer-output" {
target "htmltest-output" {
inherits = ["_common"]
target = "htmlproofer-output"
target = "htmltest-output"
output = ["./lint"]
}

View File

@ -7,7 +7,7 @@ title: Manage company owners
> **Note**
>
> The company layer is in [early access](../release-lifecycle.md#early-access-ea)
> and requires a Docker Team or Business subscription.
> and requires a Docker Business subscription.
As a company owner, you can configure [Single Sign-on (SSO)](../single-sign-on/configure/index.md) and [System for Cross-domain Identity Management (SCIM)](../docker-hub/scim.md) for all organizations under the company. This is only visible if your organization has a Docker Business subscription. If you want to upgrade your subscription to include the organization under the company, see [upgrade your subscription](../subscription/upgrade.md).

View File

@ -7,7 +7,7 @@ title: Overview
> **Note**
>
> The company layer is in [early access](../release-lifecycle.md#early-access-ea)
> and requires a Docker Team or Business subscription.
> and requires a Docker Business subscription.
A company provides a single point of visibility across multiple organizations. Docker introduced this new view to simplify the management of Docker organizations and settings. It's available to Docker Business subscribers.

View File

@ -15,10 +15,38 @@ Once you enable group mappings in your connection, users assigned to that group
>Use the same names for the Docker teams as your group names in the IdP to prevent further configuration. When you sync groups, a group is created if it doesn't already exist.
{: .tip}
To take advantage of group mapping, make sure you have [enabled SCIM](scim.md) and then follow the instructions provided by your IdP:
## How group mapping works
IdPs share with Docker the main attributes of every authorized user through SSO, such as email address, name, surname, and groups. These attributes are used by Just-In-Time (JIT) Provisioning to create or update the user's Docker profile and their associations with organizations and teams on Docker Hub.
Docker uses the email address of the user to identify them on the platform. Every Docker account must have a unique email address at all times.
After every successful SSO sign-in authentication, the JIT provisioner performs the following actions:
1. Checks if there's an existing Docker account with the email address of the user that just authenticated.
a) If no account is found with the same email address, it creates a new Docker account using basic user attributes (email, name, and surname). The JIT provisioner generates a new username for this new account by using the email, name, and random numbers to make sure that all account usernames are unique in the platform.
b) If an account exists for this email address, it uses this account and updates the full name of the user's profile if needed.
2. Checks if the IdP shared group mappings while authenticating the user.
a) If the IdP provided group mappings for the user, the user gets added to the organizations and teams indicated by the group mappings.
b) If the IdP didn't provide group mappings, it checks if the user is already a member of the organization, or if the SSO connection is for multiple organizations (only at company level) and if the user is a member of any of those organizations. If the user is not a member, it adds the user to the default team and organization configured in the SSO connection.
![JIT provisioning](images/jit.PNG)
## Use group mapping
To take advantage of group mapping, follow the instructions provided by your IdP:
- [Okta](https://help.okta.com/en-us/Content/Topics/users-groups-profiles/usgp-enable-group-push.htm){: target="_blank" rel="noopener" class="_" }
- [Azure AD](https://learn.microsoft.com/en-us/azure/active-directory/app-provisioning/customize-application-attributes){: target="_blank" rel="noopener" class="_" }
- [OneLogin](https://developers.onelogin.com/scim/create-app){: target="_blank" rel="noopener" class="_" }
Once complete, a user who signs in to Docker through SSO is automatically added to the organizations and teams mapped in the IdP.
>**Tip**
>
> [Enable SCIM](scim.md) to take advantage of automatic user provisioning and de-provisioning. If you don't enable SCIM, users are only automatically provisioned. You have to de-provision them manually.
{: .tip}

BIN
docker-hub/images/jit.PNG Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 36 KiB

View File

@ -53,17 +53,21 @@ behavior.
![The insights and analytics tab on the Docker Hub website](./images/insights-and-analytics-tab.png)
Select the time span you want to view analytics data, and export the data in
either a summary or raw format. The summary format shows you image pulls per
tag, and the raw format lists information about every image pull for the
selected time span. Data points include tag, type of pull, user geolocation,
client tool (user agent), and more.
You can use the view to select the time span for which you want to view analytics data, and to export the data in
either a summary or raw format.
The summary format shows image pulls per tag, and the raw format lists information about every image pull for the
selected time span. Data points include tag, type of pull, user geolocation, client tool (user agent), and more.
## Vulnerability scanning
Automatic vulnerability scanning for images published to Docker Hub.
Scanning images ensures that the published content is secure, and underlines to
developers that they can trust it. Scanning can be enabled on a per-repository
[Docker Scout](/scout/){:
target="blank" rel="noopener" class=""} provides automatic vulnerability scanning
for DVP images published to Docker Hub.
Scanning images ensures that the published content is secure, and proves to
developers that they can trust the image.
You can enable scanning on a per-repository
basis, refer to [vulnerability scanning](/docker-hub/vulnerability-scanning/){:
target="blank" rel="noopener" class=""} for more information about how to use
it.

View File

@ -4,29 +4,29 @@ description: Provides usage statistics of your images on Docker Hub.
keywords: docker hub, hub, insights, analytics, api, verified publisher
---
Insights and analytics provides usage analytics for your Docker Verified
Publisher (DVP) images on Docker Hub. With this tool, you have self-serve access
Insights and analytics provides usage analytics for Docker Verified
Publisher (DVP) images on Docker Hub, giving you self-serve access
to metrics as both raw data and summary data for a desired time span. You can
view number of image pulls by tag or by digest, and get breakdowns by
geolocation, cloud provider, client, and more. Head to the
geolocation, cloud provider, client, and more.
Head to the
[Docker Verified Publisher Program page](https://www.docker.com/partners/programs/){: target="blank" rel="noopener" class="_" }
to learn more about the benefits of becoming a verified publisher.
## View the analytics data
Analytics data for your repositories is available on the **Insights and
You can find analytics data for your repositories on the **Insights and
analytics** dashboard at the following URL:
`https://hub.docker.com/orgs/{namespace}/insights`. The dashboard contains a
chart visualization of the usage data, as well as a table where you can download
visualization of the usage data and a table where you can download
the data as CSV files.
To view data in the chart:
- Select the data granularity: weekly or monthly
- Select the time interval: 3, 6, or 12 months
- Select one or more repositories in the list.
You can filter the list by repository name.
- Select one or more repositories in the list
![Insights and analytics chart visualization](./images/chart.png)
@ -37,18 +37,17 @@ To view data in the chart:
> for points in time.
{: .tip }
### Share
### Share analytics data
You can share the visualization chart with others using the share icon located
just above the chart:
You can share the visualization with others using the share icon above the chart.
This is a convenient way to share statistics with others in your organization.
![Chart share icon](./images/chart-share-icon.png)
Selecting the icon generates a link that gets copied to your clipboard. The link
preserves the display selections you've made. When someone uses the link, the
Selecting the icon generates a link that's copied to your clipboard. The link
preserves the display selections you made. When someone follows the link, the
**Insights and analytics** page opens and displays the chart with the same
configuration as you had set up when creating the link. This is a convenient way
to quickly share statistics with others in your organization.
configuration as you had set up when creating the link.
## Exporting analytics data
@ -61,10 +60,9 @@ Sunday) or monthly format. Monthly data is available from the first day of the
following calendar month. You can import this data into your own systems, or you
can analyze it manually as a spreadsheet.
### Export data using the website
### Export data
Here's how to export usage data for your organization's images using the Docker
Hub website.
Export usage data for your organization's images using the Docker Hub website by following these steps:
1. Sign in to [Docker Hub](https://hub.docker.com/){: target="_blank"
rel="noopener" class="_"} and select **Organizations**.
@ -103,7 +101,7 @@ represents an image pull.
| Data point | Description | Date added |
| ----------------------------- | ------------------------------------------------------------------------------------------------------------ | ----------------- |
| Action | Request type, see [Action classification rules][1]. One of `pull_by_tag`, `pull_by_digest`, `version_check`. | January 1, 2022 |
| Action day | The date part of the timestamp: `YYYY-MM-DD` | January 1, 2022 |
| Action day | The date part of the timestamp: `YYYY-MM-DD`. | January 1, 2022 |
| Country | Request origin country. | January 1, 2022 |
| Digest | Image digest. | January 1, 2022 |
| HTTP method | HTTP method used in the request, see [registry API documentation][2] for details. | January 1, 2022 |
@ -112,8 +110,8 @@ represents an image pull.
| Reference | Image digest or tag used in the request. | January 1, 2022 |
| Repository | Docker [repository][4] (image name). | January 1, 2022 |
| Tag (included when available) | Tag name that's only available if the request referred to a tag. | January 1, 2022 |
| Timestamp | Date and time of the request: `YYYY-MM-DD 00:00:00` | January 1, 2022 |
| Type | The industry from which the event originates. One of `business`, `isp`, `hosting`, `education`, `null` | January 1, 2022 |
| Timestamp | Date and time of the request: `YYYY-MM-DD 00:00:00`. | January 1, 2022 |
| Type | The industry from which the event originates. One of `business`, `isp`, `hosting`, `education`, `null`. | January 1, 2022 |
| User agent tool | The application a user used to pull an image (for example, `docker` or `containerd`). | January 1, 2022 |
| User agent version | The version of the application used to pull an image. | January 1, 2022 |
| Domain | Request origin domain, see [Privacy](#privacy). | October 11, 2022 |
@ -164,16 +162,16 @@ target="_blank" rel="noopener" class="_"}.
| Starting event | Reference | Followed by | Resulting action | Use case(s) | Notes |
| :------------- | :-------- | :-------------------------------------------------------------- | :--------------- | :------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| HEAD | tag | N/A | Version check | User already has all layers existing on local machine | This is similar to the use case of a pull by tag when the user already has all the image layers existing locally, however, we are able to differentiate the user intent and classify accordingly. |
| HEAD | tag | N/A | Version check | User already has all layers existing on local machine | This is similar to the use case of a pull by tag when the user already has all the image layers existing locally, however, it differentiates the user intent and classifies accordingly. |
| GET | tag | N/A | Pull by tag | User already has all layers existing on local machine and/or the image is single-arch |
| GET | tag | Get by different digest | Pull by tag | Image is multi-arch | Second GET by digests must be different from the first |
| HEAD | tag | GET by same digest | Pull by tag | Image is multi-arch but some or all image layers already exist on the local machine. | The HEAD by tag will send the most current digest, the following GET must be by that same digest. There may occur an additional GET, if the image is multi-arch (see the next row in this table). If the user doesn't want the most recent digest, then the user would perform HEAD by digest. |
| HEAD | tag | GET by the same digest, then a second GET by a different digest | Pull by tag | Image is multi-arch | The HEAD by tag will send the most recent digest, the following GET must be by that same digest. Since the image is multi-arch, there is a second GET by a different digest. If the user doesn't want the most recent digest, then the user would perform HEAD by digest. |
| HEAD | tag | GET by same digest, then a second GET by different digest | Pull by tag | Image is multi-arch | The HEAD by tag will send the most current digest, the following GET must be by that same digest. Since the image is multi-arch, there is a second GET by a different digest. If the user doesn't want the most recent digest, then the user would perform HEAD by digest. |
| GET | tag | Get by different digest | Pull by tag | Image is multi-arch | Second GET by digest must be different from the first. |
| HEAD | tag | GET by same digest | Pull by tag | Image is multi-arch but some or all image layers already exist on the local machine | The HEAD by tag sends the most current digest, the following GET must be by that same digest. There may occur an additional GET, if the image is multi-arch (see the next row in this table). If the user doesn't want the most recent digest, then the user performs HEAD by digest. |
| HEAD | tag | GET by the same digest, then a second GET by a different digest | Pull by tag | Image is multi-arch | The HEAD by tag sends the most recent digest, the following GET must be by that same digest. Since the image is multi-arch, there is a second GET by a different digest. If the user doesn't want the most recent digest, then the user performs HEAD by digest. |
| HEAD | tag | GET by same digest, then a second GET by different digest | Pull by tag | Image is multi-arch | The HEAD by tag sends the most current digest, the following GET must be by that same digest. Since the image is multi-arch, there is a second GET by a different digest. If the user doesn't want the most recent digest, then the user performs HEAD by digest. |
| GET | digest | N/A | Pull by digest | User already has all layers existing on local machine and/or the image is single-arch |
| HEAD | digest | N/A | Pull by digest | User already has all layers existing on their local machine. |
| GET | digest | GET by different digest | Pull by digest | Image is multi-arch | The second GET by digest must be different from the first |
| HEAD | digest | GET by same digest | Pull by digest | Image is single arch and/or image is multi-arch but some part of the image already exists on the local machine |
| HEAD | digest | N/A | Pull by digest | User already has all layers existing on their local machine |
| GET | digest | GET by different digest | Pull by digest | Image is multi-arch | The second GET by digest must be different from the first. |
| HEAD | digest | GET by same digest | Pull by digest | Image is single-arch and/or image is multi-arch but some part of the image already exists on the local machine |
| HEAD | digest | GET by same digest, then a second GET by different digest | Pull by digest | Image is multi-arch |
## Changes in data over time
@ -200,11 +198,11 @@ consumers of content on Docker Hub remain completely anonymous.
> analytics data.
{: .important }
The summary dataset includes Unique IP address count. This data point only
The summary dataset includes unique IP address count. This data point only
includes the number of distinct unique IP addresses that request an image.
Individual IP addresses are never shared.
The raw dataset includes user IP domains as a data point. That's the domain name
The raw dataset includes user IP domains as a data point. This is the domain name
associated with the IP address used to pull an image. If the IP type is
`business`, the domain represents the company or organization associated with
that IP address (for example, `docker.com`). For any other IP type that's not

View File

@ -168,7 +168,9 @@ $ sudo yum-config-manager \
This command downloads a test image and runs it in a container. When the
container runs, it prints a confirmation message and exits.
You have now successfully installed and started Docker Engine. The docker user group exists but contains no users, which is why youre required to use sudo to run Docker commands. Continue to [Linux postinstall](linux-postinstall.md) to allow non-privileged users to run Docker commands and for other optional configuration steps.
You have now successfully installed and started Docker Engine.
{% include root-errors.md %}
#### Upgrade Docker Engine

View File

@ -173,11 +173,9 @@ Raspbian.
This command downloads a test image and runs it in a container. When the
container runs, it prints a confirmation message and exits.
You have now successfully installed and started Docker Engine. The `docker` user
group exists but contains no users, which is why you're required to use `sudo`
to run Docker commands. Continue to [Linux post-install](linux-postinstall.md)
to allow non-privileged users to run Docker commands and for other optional
configuration steps.
You have now successfully installed and started Docker Engine.
{% include root-errors.md %}
#### Upgrade Docker Engine

View File

@ -191,10 +191,9 @@ a new file each time you want to upgrade Docker Engine.
This command downloads a test image and runs it in a container. When the
container runs, it prints a message and exits.
This installs and runs Docker Engine. Use `sudo` to run Docker commands.
Continue to [Post-installation steps for Linux](linux-postinstall.md) to allow
non-privileged users to run Docker commands and for other optional configuration
steps.
This installs and runs Docker Engine.
{% include root-errors.md %}
#### Upgrade Docker Engine

View File

@ -155,10 +155,9 @@ $ sudo yum-config-manager \
This command downloads a test image and runs it in a container. When the
container runs, it prints a message and exits.
This installs and runs Docker Engine. Use `sudo` to run Docker
commands. Continue to [Linux postinstall](linux-postinstall.md) to allow
non-privileged users to run Docker commands and for other optional configuration
steps.
This installs and runs Docker Engine.
{% include root-errors.md %}
#### Upgrade Docker Engine

View File

@ -173,10 +173,9 @@ $ sudo zypper addrepo {{ download-url-base }}/docker-ce.repo
This command downloads a test image and runs it in a container. When the
container runs, it prints a message and exits.
This installs and runs Docker Engine. Use `sudo` to run Docker
commands. Continue to [Linux postinstall](linux-postinstall.md) to allow
non-privileged users to run Docker commands and for other optional configuration
steps.
This installs and runs Docker Engine.
{% include root-errors.md %}
#### Upgrade Docker Engine

View File

@ -169,11 +169,13 @@ Docker from the repository.
This command downloads a test image and runs it in a container. When the
container runs, it prints a confirmation message and exits.
You have now successfully installed and started Docker Engine. The `docker` user
group exists but contains no users, which is why you're required to use `sudo`
to run Docker commands. Continue to [Linux post-install](linux-postinstall.md)
to allow non-privileged users to run Docker commands and for other optional
configuration steps.
You have now successfully installed and started Docker Engine.
> Receiving errors when trying to run without root?
>
> The `docker` user group exists but contains no users, which is why you're required
> to use `sudo` to run Docker commands. Continue to [Linux post-install](linux-postinstall.md)
> to allow non-privileged users to run Docker commands and for other optional configuration steps.
#### Upgrade Docker Engine

View File

@ -1,6 +1,6 @@
---
title: Dockerfile reference
description: "Dockerfiles use a simple DSL which allows you to automate the steps you would normally manually take to create an image."
description: Find all the available commands you can use in a Dockerfile and learn how to use them, including COPY, ARG, ENTRYPOINT, and more.
keywords: dockerfile, docker file, docker copy, dockerfile exec, docker entrypoint, dockerfile entrypoint, dockerfile arg, docker args, entrypoint, shell dockerfile
toc_max: 3
redirect_from:

View File

@ -2,6 +2,7 @@
datafolder: engine-cli
datafile: docker_run
title: docker run
description: Learn all there is to know about the docker run command and how to use it in the Docker CLI.
redirect_from:
- /reference/run/
- /edge/engine/reference/commandline/run/

View File

@ -41,6 +41,33 @@ Changing the version format is a stepping-stone towards Go module compatibility,
but the repository doesn't yet use Go modules, and still requires using a "+incompatible" version.
Work continues towards Go module compatibility in a future release.
## 23.0.4
{% include release-date.html date="2023-04-17" %}
For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones:
- [docker/cli, 23.0.4 milestone](https://github.com/docker/cli/milestone/77?closed=1)
- [moby/moby, 23.0.4 milestone](https://github.com/moby/moby/milestone/117?closed=1)
### Bug fixes and enhancements
- Fix a performance regression in Docker CLI 23.0.0 [docker/cli#4141](https://github.com/docker/cli/pull/4141).
- Fix progress indicator on `docker cp` not functioning as intended [docker/cli#4157](https://github.com/docker/cli/pull/4157).
- Fix shell completion for `docker compose --file` [docker/cli#4177](https://github.com/docker/cli/pull/4177).
- Fix an error caused by incorrect handling of "default-address-pools" in `daemon.json` [moby/moby#45246](https://github.com/moby/moby/pull/45246).
### Packaging updates
- Fix missing packages for CentOS 9 Stream.
- Upgrade Go to `1.19.8`. [docker/docker-ce-packaging#878](https://github.com/docker/docker-ce-packaging/pull/878),
[docker/cli#4164](https://github.com/docker/cli/pull/4164), [moby/moby#45277](https://github.com/moby/moby/pull/45277),
which contains fixes for [CVE-2023-24537](https://github.com/advisories/GHSA-fp86-2355-v99r),
[CVE-2023-24538](https://github.com/advisories/GHSA-v4m2-x4rp-hv22),
[CVE-2023-24534](https://github.com/advisories/GHSA-8v5j-pwr7-w5f8),
and [CVE-2023-24536](https://github.com/advisories/GHSA-9f7g-gqwh-jpf5)
## 23.0.3
{% include release-date.html date="2023-04-04" %}

View File

@ -1,21 +1,21 @@
---
title: "Persist the DB"
keywords: get started, setup, orientation, quickstart, intro, concepts, containers, docker desktop
description: Making our DB persistent in our application
description: Making your DB persistent in your application
---
In case you didn't notice, our todo list is being wiped clean every single time
we launch the container. Why is this? Let's dive into how the container is working.
In case you didn't notice, your todo list is empty every single time
you launch the container. Why is this? In this part, you'll dive into how the container is working.
## The container's filesystem
When a container runs, it uses the various layers from an image for its filesystem.
Each container also gets its own "scratch space" to create/update/remove files. Any
changes won't be seen in another container, _even if_ they are using the same image.
changes won't be seen in another container, even if they're using the same image.
### See this in practice
To see this in action, we're going to start two containers and create a file in each.
To see this in action, you're going to start two containers and create a file in each.
What you'll see is that the files created in one container aren't available in another.
1. Start an `ubuntu` container that will create a file named `/data.txt` with a random number
@ -25,11 +25,31 @@ What you'll see is that the files created in one container aren't available in a
$ docker run -d ubuntu bash -c "shuf -i 1-10000 -n 1 -o /data.txt && tail -f /dev/null"
```
In case you're curious about the command, we're starting a bash shell and invoking two
commands (why we have the `&&`). The first portion picks a single random number and writes
In case you're curious about the command, you're starting a bash shell and invoking two
commands (which is why the command includes `&&`). The first portion picks a single random number and writes
it to `/data.txt`. The second command is simply watching a file to keep the container running.
2. Validate that you can see the output by accessing the terminal in the container. To do so, go to **Containers** in Docker Desktop, hover over the container running the **ubuntu** image, and select the **Show container actions** menu. From the dropdown menu, select **Open in terminal**.
2. Validate that you can see the output by accessing the terminal in the container. To do so, you can use the CLI or Docker Desktop's graphical interface.
<ul class="nav nav-tabs">
<li class="active"><a data-toggle="tab" data-target="#cli">CLI</a></li>
<li><a data-toggle="tab" data-target="#gui">Docker Desktop</a></li>
</ul>
<div class="tab-content">
<div id="cli" class="tab-pane fade in active" markdown="1">
On the command line, use the `docker exec` command to access the container. You need to get the
container's ID (use `docker ps` to get it). In your Mac or Linux terminal, or in Windows Command Prompt or PowerShell, get the content with the following command.
```console
$ docker exec <container-id> cat /data.txt
```
<hr>
</div>
<div id="gui" class="tab-pane fade" markdown="1">
In Docker Desktop, go to **Containers**, hover over the container running the **ubuntu** image, and select the **Show container actions** menu. From the dropdown menu, select **Open in terminal**.
You will see a terminal that is running a shell in the Ubuntu container. Run the following command to see the content of the `/data.txt` file. Close the terminal when you're done.
@ -37,17 +57,13 @@ What you'll see is that the files created in one container aren't available in a
$ cat /data.txt
```
If you prefer the command line you can use the `docker exec` command to do the same. You need to get the
container's ID (use `docker ps` to get it) and get the content with the following command.
<hr>
</div>
</div>
```console
$ docker exec <container-id> cat /data.txt
```
You should see a random number.
You should see a random number.
3. Now, let's start another `ubuntu` container (the same image) and we'll see we don't have the same
file.
3. Now, start another `ubuntu` container (the same image) and you'll see you don't have the same file. In your Mac or Linux terminal, or in Windows Command Prompt or PowerShell, get the content with the following command.
```console
$ docker run -it ubuntu ls /
@ -61,31 +77,30 @@ What you'll see is that the files created in one container aren't available in a
## Container volumes
With the previous experiment, we saw that each container starts from the image definition each time it starts.
While containers can create, update, and delete files, those changes are lost when the container is removed
and all changes are isolated to that container. With volumes, we can change all of this.
With the previous experiment, you saw that each container starts from the image definition each time it starts.
While containers can create, update, and delete files, those changes are lost when you remove the container
and Docker isolates all changes to that container. With volumes, you can change all of this.
[Volumes](../storage/volumes.md) provide the ability to connect specific filesystem paths of
the container back to the host machine. If a directory in the container is mounted, changes in that
directory are also seen on the host machine. If we mount that same directory across container restarts, we'd see
the container back to the host machine. If you mount a directory in the container, changes in that
directory are also seen on the host machine. If you mount that same directory across container restarts, you'd see
the same files.
There are two main types of volumes. We will eventually use both, but we will start with volume mounts.
There are two main types of volumes. You'll eventually use both, but you'll start with volume mounts.
## Persist the todo data
By default, the todo app stores its data in a SQLite database at
`/etc/todos/todo.db` in the container's filesystem. If you're not familiar with SQLite, no worries! It's simply a relational database in
which all of the data is stored in a single file. While this isn't the best for large-scale applications,
it works for small demos. We'll talk about switching this to a different database engine later.
`/etc/todos/todo.db` in the container's filesystem. If you're not familiar with SQLite, no worries! It's simply a relational database that stores all the data in a single file. While this isn't the best for large-scale applications,
it works for small demos. You'll learn how to switch this to a different database engine later.
With the database being a single file, if we can persist that file on the host and make it available to the
With the database being a single file, if you can persist that file on the host and make it available to the
next container, it should be able to pick up where the last one left off. By creating a volume and attaching
(often called "mounting") it to the directory the data is stored in, we can persist the data. As our container
writes to the `todo.db` file, it will be persisted to the host in the volume.
(often called "mounting") it to the directory where you stored the data, you can persist the data. As your container
writes to the `todo.db` file, it will persist the data to the host in the volume.
As mentioned, we are going to use a volume mount. Think of a volume mount as an opaque bucket of data.
Docker fully manages the volume, including where it is stored on disk. You only need to remember the
As mentioned, you're going to use a volume mount. Think of a volume mount as an opaque bucket of data.
Docker fully manages the volume, including the storage location on disk. You only need to remember the
name of the volume.
1. Create a volume by using the `docker volume create` command.
@ -96,8 +111,8 @@ name of the volume.
2. Stop and remove the todo app container once again in the Dashboard (or with `docker rm -f <id>`), as it is still running without using the persistent volume.
3. Start the todo app container, but add the `--mount` option to specify a volume mount. We will give the volume a name, and mount
it to `/etc/todos` in the container, which will capture all files created at the path.
3. Start the todo app container, but add the `--mount` option to specify a volume mount. Give the volume a name, and mount
it to `/etc/todos` in the container, which captures all files created at the path.
```console
$ docker run -dp 3000:3000 --mount type=volume,src=todo-db,target=/etc/todos getting-started
@ -112,11 +127,11 @@ name of the volume.
6. Start a new container using the same command from above.
7. Open the app. You should see your items still in your list!
7. Open the app. You should see your items still in your list.
8. Go ahead and remove the container when you're done checking out your list.
Hooray! You've now learned how to persist data!
You've now learned how to persist data.
## Dive into the volume
@ -138,20 +153,20 @@ $ docker volume inspect todo-db
]
```
The `Mountpoint` is the actual location on the disk where the data is stored. Note that on most machines, you will
need to have root access to access this directory from the host. But, that's where it is!
The `Mountpoint` is the actual location of the data on the disk. Note that on most machines, you
need root access to view this directory from the host. But that's where it is.
> **Accessing volume data directly on Docker Desktop**
>
> While running in Docker Desktop, the Docker commands are actually running inside a small VM on your machine.
> If you wanted to look at the actual contents of the Mount point directory, you would need to look inside of
> If you wanted to look at the actual contents of the mount point directory, you would need to look inside of
> that VM.
## Next steps
At this point, you have a functioning application that can survive restarts! You can show it off to your investors and hope they can catch your vision!
At this point, you have a functioning application that can survive restarts.
However, you saw earlier that rebuilding images for every change takes quite a bit of time. There's got to be a better
way to make changes, right? With bind mounts (which was hinted at earlier), there is a better way!
way to make changes, right? With bind mounts, there is a better way.
[Use bind mounts](06_bind_mounts.md){: .button .primary-btn}

View File

@ -33,8 +33,7 @@ $ git clone https://github.com/docker/welcome-to-docker
If you don't have git, download the source and extract it.
[Download the source](https://github.com/docker/
welcome-to-docker/archive/refs/heads/main.zip){: .button .primary-btn}
[Download the source](https://github.com/docker/welcome-to-docker/archive/refs/heads/main.zip){: .button .primary-btn}
<hr>
</div>
@ -122,7 +121,7 @@ EXPOSE 3000
CMD [ "serve", "-s", "build" ]
```
## Step 3: Build your first image
## Step 4: Build your first image
An image is like a static version of a container. You always need an image to run a container. Once you have a Dockerfile in your repository, run the following `docker build` command in the project folder to create an image.
@ -132,13 +131,13 @@ $ docker build -t welcome-to-docker .
Building the image may take some time. After your image is built, you can view your image in the **Images** tab in Docker Desktop.
## Step 4: Run your container
## Step 5: Run your container
To run your image as a container, go to the **Images** tab, and then select **Run** in the **Actions** column of your image. When the **Optional settings** appear, specify the **Host port** number `8089` and then select **Run**.
![Running an image in Docker Desktop](images/getting-started-run-image.gif){:width="500px"}
## Step 5: Verify that your container is running
## Step 6: Verify that your container is running
You can use Docker Desktop to view and access running containers. Go to the **Containers** tab to view your container and select the link in the **Port(s)** column or go to [http://localhost:8089](http://localhost:8089){:target="_blank" rel="noopener" class="_"} to verify that the application is running.

View File

@ -522,7 +522,7 @@ In this module, we met our example application and built a container image for it.
In the next module, we'll take a look at how to:
[Run your image as a container](run-containers.md){: .button .outline-btn}
[Run your image as a container](run-containers.md){: .button .primary-btn}
## Feedback

View File

@ -20,7 +20,7 @@ You can also consider deploying your application to a public Cloud provider, suc
In the next module, we'll look into some options for doing so:
[Deploy your app](deploy.md){: .button .outline-btn}
[Deploy your app](deploy.md){: .button .primary-btn}
## Feedback

View File

@ -740,7 +740,7 @@ In this module, we set up a containerised development environment with our appli
In the next module, we'll take a look at one possible approach to running functional tests in Docker. See:
[Run your tests](run-tests.md){: .button .outline-btn}
[Run your tests](run-tests.md){: .button .primary-btn}
## Feedback

View File

@ -44,4 +44,4 @@ The aim of this guide is to provide enough examples and instructions for you to
Let's get started!
[Build your Go image](build-images.md){: .button .outline-btn}
[Build your Go image](build-images.md){: .button .primary-btn}

View File

@ -198,7 +198,7 @@ Now, we can easily identify our container based on the name.
In this module, we learned how to run containers and publish ports. We also learned to manage the lifecycle of containers. We then discussed the importance of naming our containers so that they are more easily identifiable. In the next module, we'll learn how to run a database in a container and connect it to our application. See:
[How to develop your application](develop.md){: .button .outline-btn}
[How to develop your application](develop.md){: .button .primary-btn}
## Feedback

View File

@ -102,7 +102,7 @@ In this module, we've seen an example of using Docker for isolated functional te
In the next module, we'll take a look at how to set up a CI/CD pipeline using GitHub Actions. See:
[Configure CI/CD](configure-ci-cd.md){: .button .outline-btn}
[Configure CI/CD](configure-ci-cd.md){: .button .primary-btn}
## Feedback

View File

@ -160,6 +160,27 @@ $ docker run \
docker/artifactory-agent:v1
```
#### Analyzing pre-existing data
By default, the agent detects and analyzes images as they're created and
updated. If you want to use the agent to analyze pre-existing images, you
can use backfill mode. Use the `--backfill-from=TIME` command line option,
where `TIME` is an ISO 8601 formatted time, to run the agent in backfill mode.
If you use this option, the agent analyzes all images pushed between that
time and the current time when the agent starts, then exits.
For example:
```console
$ docker run \
--mount type=bind,src=/var/opt/artifactory-agent,target=/opt/artifactory-agent/data \
docker/artifactory-agent:v1 --backfill-from=2022-04-10T10:00:00Z
```
When running a backfill multiple times, the agent won't analyze images that
it's already analyzed. To force re-analysis, provide the `--force` command
line flag.
### View analysis results
You can view the image analysis results in the Docker Scout web UI.

View File

@ -49,6 +49,10 @@ Follow the steps on this page to configure SSO for your organization or company.
- SAML: **Entity ID**, **ACS URL**
- Azure AD (OIDC): **Redirect URL**
![SAML](../../docker-hub/images/saml-create-connection.png){: width="500px" }
![Azure AD](../../docker-hub/images/azure-create-connection.png){: width="500px" }
4. From your IdP, copy and paste the following values into the Docker **Settings** fields:
- SAML: **SAML Sign-on URL**, **x509 Certificate**
@ -68,16 +72,21 @@ The SSO connection is now created. You can continue to set up [SSO Group Mapping
## Optional step three: Test your SSO configuration
After youve completed the SSO configuration process in Docker Hub, you can test the configuration when you sign in to Docker Hub using an incognito browser. Log in to Docker Hub using your domain email address. You are then redirected to your IdP's login page to authenticate.
After you've completed the SSO configuration process in Docker Hub, you can test the configuration when you sign in to Docker Hub using an incognito browser. Sign in to Docker Hub using your domain email address. You are then redirected to your IdP's login page to authenticate.
1. Authenticate through email instead of using your Docker ID, and test the login process.
2. To authenticate through the CLI, your users must have a PAT before you enforce SSO for CLI users.
## Optional step four: Enforce SSO log-in in Docker Hub
## Optional step four: Enforce SSO
1. In the **Single Sign-On Connections** table, select the **Action** icon and then **Enforce Single Sign-on**.
When SSO is enforced, your users are unable to modify their email address and password, convert a user account to an organization, or set up 2FA through Docker Hub. You must enable 2FA through your IdP.
2. Continue with the on-screen instructions and verify that you've completed the tasks.
3. Select **Turn on enforcement** to complete.
To enforce SSO sign-in for Docker Desktop, see [Enforce sign-in](../../docker-hub/configure-sign-in.md).
Your users must now sign in to Docker with SSO.
>**Important**
>
>If SSO isn't enforced, users can choose to sign in with either their Docker ID or SSO.
{: .important}