diff --git a/.github/vale/Docker/ComplexWords.yml b/.github/vale/Docker/ComplexWords.yml index 7c4fb90cd3..03af01a343 100644 --- a/.github/vale/Docker/ComplexWords.yml +++ b/.github/vale/Docker/ComplexWords.yml @@ -63,6 +63,7 @@ swap: facilitate: ease females: women finalize: complete|finish + illustrate: show itemized: listed jeopardize: risk liaise: work with|partner with diff --git a/.github/vale/Docker/Substitute.yml b/.github/vale/Docker/Substitute.yml index d44f28011b..9acd2d28a4 100644 --- a/.github/vale/Docker/Substitute.yml +++ b/.github/vale/Docker/Substitute.yml @@ -3,6 +3,8 @@ message: "Consider using '%s' instead of '%s'" link: https://docs.docker.com/contribute/style/recommended-words/ ignorecase: true level: suggestion +action: + name: replace swap: '\b(?:eg|e\.g\.)[\s,]': for example '\b(?:ie|i\.e\.)[\s,]': that is diff --git a/.github/vale/Vocab/Docker/accept.txt b/.github/vale/Vocab/Docker/accept.txt index 55de96b6d7..15788d2708 100644 --- a/.github/vale/Vocab/Docker/accept.txt +++ b/.github/vale/Vocab/Docker/accept.txt @@ -1,4 +1,5 @@ (Certified|Verified) Publisher( Program)? +Docker-Sponsored Open Source Autotest BuildKit Docker diff --git a/README.md b/README.md index 0a860be5a3..9645991f8f 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,7 @@ an existing issue. If possible, we recommend that you suggest a fix to the issue by creating a pull request. You can ask general questions and get community support through the [Docker -Community Slack](http://dockr.ly/slack). Personalized support is available +Community Slack](https://dockr.ly/comm-slack). Personalized support is available through the Docker Pro, Team, and Business subscriptions. See [Docker Pricing](https://www.docker.com/pricing) for details. @@ -49,4 +49,4 @@ contribute, see our [Contribute section](https://docs.docker.com/contribute/over ## Copyright and license -Copyright 2013-2023 Docker, inc, released under the Apache 2.0 license. 
+Copyright 2013-2023 Docker, inc, released under the Apache 2.0 license . diff --git a/_config.yml b/_config.yml index d8fa1a732e..cbbd0472bc 100644 --- a/_config.yml +++ b/_config.yml @@ -45,7 +45,7 @@ exclude: latest_engine_api_version: "1.42" docker_ce_version: "23.0.0" compose_v1_version: "1.29.2" -compose_version: "v2.16.0" +compose_version: "v2.17.2" compose_file_v3: "3.9" compose_file_v2: "2.4" machine_version: "0.16.0" @@ -215,3 +215,50 @@ fetch-remote: - dest: "build/attestations/attestation-storage.md" src: - "docs/attestations/attestation-storage.md" + + - repo: "https://github.com/compose-spec/compose-spec" + default_branch: "master" + ref: "master" + paths: + - dest: "compose/compose-file/01-status.md" + src: + - "01-status.md" + - dest: "compose/compose-file/02-model.md" + src: + - "02-model.md" + - dest: "compose/compose-file/03-compose-file.md" + src: + - "03-compose-file.md" + - dest: "compose/compose-file/04-version-and-name.md" + src: + - "04-version-and-name.md" + - dest: "compose/compose-file/05-services.md" + src: + - "05-services.md" + - dest: "compose/compose-file/06-networks.md" + src: + - "06-networks.md" + - dest: "compose/compose-file/07-volumes.md" + src: + - "07-volumes.md" + - dest: "compose/compose-file/08-configs.md" + src: + - "08-configs.md" + - dest: "compose/compose-file/09-secrets.md" + src: + - "09-secrets.md" + - dest: "compose/compose-file/10-fragments.md" + src: + - "10-fragments.md" + - dest: "compose/compose-file/11-extension.md" + src: + - "11-extension.md" + - dest: "compose/compose-file/12-interpolation.md" + src: + - "12-interpolation.md" + - dest: "compose/compose-file/build.md" + src: + - "build.md" + - dest: "compose/compose-file/deploy.md" + src: + - "deploy.md" diff --git a/_data/compose-cli/docker_compose.yaml b/_data/compose-cli/docker_compose.yaml index c3ee6f33c3..16b3d3fd9d 100644 --- a/_data/compose-cli/docker_compose.yaml +++ b/_data/compose-cli/docker_compose.yaml @@ -63,12 +63,19 @@ long: |- ### Use `-p` 
to specify a project name - Each configuration has a project name. If you supply a `-p` flag, you can specify a project name. If you don’t - specify the flag, Compose uses the current directory name. - Project name can also be set by `COMPOSE_PROJECT_NAME` environment variable. - - Many Compose subcommands can be run without a Compose file by passing - the project name. + Each configuration has a project name. Compose sets the project name using + the following mechanisms, in order of precedence: + - The `-p` command line flag + - The `COMPOSE_PROJECT_NAME` environment variable + - The top level `name:` variable from the config file (or the last `name:` + from a series of config files specified using `-f`) + - The `basename` of the project directory containing the config file (or + containing the first config file specified using `-f`) + - The `basename` of the current directory if no config file is specified + Project names must contain only lowercase letters, decimal digits, dashes, + and underscores, and must begin with a lowercase letter or decimal digit. If + the `basename` of the project directory or current directory violates this + constraint, you must use one of the other mechanisms. ```console $ docker compose -p my_project ps -a @@ -198,7 +205,8 @@ options: kubernetes: false swarm: false - option: env-file - value_type: string + value_type: stringArray + default_value: '[]' description: Specify an alternate environment file. deprecated: false hidden: false diff --git a/_data/compose-cli/docker_compose_convert.yaml b/_data/compose-cli/docker_compose_convert.yaml new file mode 100644 index 0000000000..d191322196 --- /dev/null +++ b/_data/compose-cli/docker_compose_convert.yaml @@ -0,0 +1,140 @@ +command: docker compose convert +aliases: docker compose convert, docker compose config +short: Converts the compose file to platform's canonical format +long: |- + `docker compose convert` renders the actual data model to be applied on the target platform. 
When used with the Docker engine, + it merges the Compose files set by `-f` flags, resolves variables in the Compose file, and expands short-notation into + the canonical format. + + To allow smooth migration from docker-compose, this subcommand declares alias `docker compose config` +usage: docker compose convert [OPTIONS] [SERVICE...] +pname: docker compose +plink: docker_compose.yaml +options: + - option: format + value_type: string + default_value: yaml + description: 'Format the output. Values: [yaml | json]' + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: hash + value_type: string + description: Print the service config hash, one per line. + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: images + value_type: bool + default_value: "false" + description: Print the image names, one per line. + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: no-consistency + value_type: bool + default_value: "false" + description: | + Don't check model consistency - warning: may produce invalid Compose output + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: no-interpolate + value_type: bool + default_value: "false" + description: Don't interpolate environment variables. + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: no-normalize + value_type: bool + default_value: "false" + description: Don't normalize compose model. 
+ deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: output + shorthand: o + value_type: string + description: Save to file (default to stdout) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: profiles + value_type: bool + default_value: "false" + description: Print the profile names, one per line. + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: quiet + shorthand: q + value_type: bool + default_value: "false" + description: Only validate the configuration, don't print anything. + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: resolve-image-digests + value_type: bool + default_value: "false" + description: Pin image tags to digests. + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: services + value_type: bool + default_value: "false" + description: Print the service names, one per line. + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: volumes + value_type: bool + default_value: "false" + description: Print the volume names, one per line. + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_data/compose-cli/docker_compose_restart.yaml b/_data/compose-cli/docker_compose_restart.yaml index ea6c2628b8..91ba742e84 100644 --- a/_data/compose-cli/docker_compose_restart.yaml +++ b/_data/compose-cli/docker_compose_restart.yaml @@ -15,6 +15,16 @@ usage: docker compose restart [OPTIONS] [SERVICE...] 
pname: docker compose plink: docker_compose.yaml options: + - option: no-deps + value_type: bool + default_value: "false" + description: Don't restart dependent services. + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false - option: timeout shorthand: t value_type: int diff --git a/_data/compose-cli/docker_compose_up.yaml b/_data/compose-cli/docker_compose_up.yaml index f8ab468e7b..fb3dcb98cc 100644 --- a/_data/compose-cli/docker_compose_up.yaml +++ b/_data/compose-cli/docker_compose_up.yaml @@ -231,18 +231,6 @@ options: experimentalcli: false kubernetes: false swarm: false - - option: timeout - shorthand: t - value_type: int - default_value: "10" - description: | - Use this timeout in seconds for container shutdown when attached or when containers are already running. - deprecated: false - hidden: false - experimental: false - experimentalcli: false - kubernetes: false - swarm: false - option: timestamps value_type: bool default_value: "false" @@ -263,6 +251,28 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: wait-timeout + value_type: int + default_value: "0" + description: timeout waiting for application to be running|healthy. + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: waitTimeout + shorthand: t + value_type: int + default_value: "10" + description: | + Use this waitTimeout in seconds for container shutdown when attached or when containers are already running. 
+ deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false deprecated: false experimental: false experimentalcli: false diff --git a/_data/init-cli/docker_init.yaml b/_data/init-cli/docker_init.yaml new file mode 100644 index 0000000000..cecb378cb5 --- /dev/null +++ b/_data/init-cli/docker_init.yaml @@ -0,0 +1,150 @@ +command: docker init +short: Creates Docker-related starter files for your project +long: |- + Initialize a project with the files necessary to run the project in a container. + + Docker Desktop 4.18 and later provides the Docker Init plugin with the `docker init` CLI command. Run `docker init` in your project directory to be walked through the creation of the following files with sensible defaults for your project: + + * .dockerignore + * Dockerfile + * docker-compose.yaml + + If any of the files already exist, a prompt appears and provides a warning as well as giving you the option to overwrite all the files. + + > **Warning** + > + > You can't recover overwritten files. + > To back up an existing file before selecting to overwrite it, rename the file or copy it to another directory. + {: .warning} + + After running `docker init`, you can choose one of the following templates: + + * Go: Suitable for a Go server application. + * Other: General purpose starting point for containerizing your application. + + After `docker init` has completed, you must modify the created files and tailor them to your project. 
Visit the following topics to learn more about the files: + + * [.dockerignore](../../../engine/reference/builder.md#dockerignore-file) + * [Dockerfile](../../../engine/reference/builder.md) + * [docker-compose.yaml](../../../compose/compose-file/03-compose-file.md) + +usage: docker init [OPTIONS] +pname: docker +plink: docker.yaml +options: + - option: version + value_type: bool + default_value: "false" + description: Display version of the init plugin + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false +examples: |- + ### Example of selecting Other + + The following example runs `docker init` and walks through the options after selecting `Other`. + + ```console + $ docker init + + Welcome to the Docker Init CLI! + + This utility will walk you through creating the following files with sensible defaults for your project: + - .dockerignore + - Dockerfile + - docker-compose.yaml + + Let's get started! + + ? What application platform does your project use? [Use arrows to move, type to filter] + > Go - suitable for a Go server application + Other - general purpose starting point for containerizing your application + Don't see something you need? Let us know! + Quit + ``` + + The following appears after selecting `Other`. + + ```console + CREATED: .dockerignore + CREATED: Dockerfile + CREATED: docker-compose.yaml + + ✔ Your Docker files are ready! + + Take a moment to review them and tailor them to your application. + + When you're ready, start your application by running: docker compose up --build + ``` + + ### Example of selecting Go + + The following example runs `docker init` and walks through the options after selecting `Go`. + + ```console + $ docker init + + Welcome to the Docker Init CLI! 
+ + This utility will walk you through creating the following files with sensible defaults for your project: + - .dockerignore + - Dockerfile + - docker-compose.yaml + + Let's get started! + + ? What application platform does your project use? [Use arrows to move, type to filter] + > Go - (detected) suitable for a Go server application + Other - general purpose starting point for containerizing your application + Don't see something you need? Let us know! + Quit + ``` + + The following appears after selecting `Go`. + + ```console + ? What application platform does your project use? Go + ? What version of Go do you want to use? (1.20) + ``` + + The following appears after selecting the default `1.20`. + + ```console + ? What version of Go do you want to use? 1.20 + ? What's the relative directory (with a leading .) of your main package? (.) + ``` + + The following appears after selecting the default `.`. + + ```console + ? What's the relative directory (with a leading .) of your main package? . + ? What port does your server listen on? (3333) + ``` + + The following appears after selecting the default `3333`. + + ```console + ? What port does your server listen on? 3333 + + CREATED: .dockerignore + CREATED: Dockerfile + CREATED: docker-compose.yaml + + ✔ Your Docker files are ready! + + Take a moment to review them and tailor them to your application. 
+ + When you're ready, start your application by running: docker compose up --build -d + + Your application will be available at http://localhost:3333 + + To stop your application, run: docker compose down + ``` diff --git a/_data/scout-cli/docker_scout.yaml b/_data/scout-cli/docker_scout.yaml index bf884ac855..9e06255a14 100644 --- a/_data/scout-cli/docker_scout.yaml +++ b/_data/scout-cli/docker_scout.yaml @@ -5,10 +5,16 @@ usage: docker scout [command] pname: docker plink: docker.yaml cname: + - docker scout compare - docker scout cves + - docker scout quickview + - docker scout recommendations - docker scout version clink: + - docker_scout_compare.yaml - docker_scout_cves.yaml + - docker_scout_quickview.yaml + - docker_scout_recommendations.yaml - docker_scout_version.yaml deprecated: false experimental: false diff --git a/_data/scout-cli/docker_scout_compare.yaml b/_data/scout-cli/docker_scout_compare.yaml new file mode 100644 index 0000000000..cf01b4d5bf --- /dev/null +++ b/_data/scout-cli/docker_scout_compare.yaml @@ -0,0 +1,212 @@ +command: docker scout compare +short: '[early preview] Compare two images and display differences' +long: |- + The `docker scout compare` command analyzes two images and displays a comparison of both. + + > This command is in **early preview** and its behaviour might change in the future + + The main usage is to compare two versions of the same image. + For instance when a new image is built and compared to the version running in production. + + The following artifact types are supported: + + - Images + - OCI layout directories + - Tarball archives, as created by `docker save` + + The tool analyzes the provided software artifact, and generates a vulnerability report. 
+ + By default, the tool expects an image reference, such as: + + - `redis` + - `curlimages/curl:7.87.0` + - `mcr.microsoft.com/dotnet/runtime:7.0` + + If the artifact you want to analyze is an OCI directory or a tarball archive, you must use the `--type` or `--to-type` flag. +usage: docker scout compare --to IMAGE|DIRECTORY|ARCHIVE IMAGE|DIRECTORY|ARCHIVE +pname: docker scout +plink: docker_scout.yaml +options: + - option: debug + value_type: bool + default_value: "false" + description: Debug messages + deprecated: false + hidden: true + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: format + value_type: string + default_value: text + description: |- + Output format of the generated vulnerability report: + - text: default output, plain text with or without colors depending on the terminal + - markdown: Markdown output + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: ignore-base + value_type: bool + default_value: "false" + description: Filter out CVEs introduced from base image + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: only-fixed + value_type: bool + default_value: "false" + description: Filter to fixable CVEs + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: only-package-type + value_type: stringSlice + default_value: '[]' + description: | + Comma separated list of package types (like apk, deb, rpm, npm, pypi, golang, etc) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: only-severity + value_type: stringSlice + default_value: '[]' + description: | + Comma separated list of severities (critical, high, medium, low, unspecified) to filter CVEs by + deprecated: false + hidden: false + experimental: 
false + experimentalcli: false + kubernetes: false + swarm: false + - option: only-unfixed + value_type: bool + default_value: "false" + description: Filter to unfixed CVEs + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: output + shorthand: o + value_type: string + description: Write the report to a file. + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: platform + value_type: string + description: Platform of image to analyze + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: ref + value_type: string + description: |- + Reference to use if the provided tarball contains multiple references. + Can only be used with --type archive. + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: to + value_type: string + description: image, directory or archive to compare to + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: to-ref + value_type: string + description: |- + Reference to use if the provided tarball contains multiple references. + Can only be used with --type archive. + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: to-type + value_type: string + default_value: image + description: |- + Type of the image to analyze. Can be one of: + - image + - oci-dir + - archive (docker save tarball) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: type + value_type: string + default_value: image + description: |- + Type of the image to analyze. 
Can be one of: + - image + - oci-dir + - archive (docker save tarball) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +examples: |- + ### Compare an image to the latest tag + + ```console + $ docker scout compare --to namespace/repo:latest namespace/repo:v1.2.3-pre + ``` + + ### Ignore base images + + ```console + $ docker scout compare --ignore-base --to namespace/repo:latest namespace/repo:v1.2.3-pre + ``` + + ### Generate a markdown output + + ```console + $ docker scout compare --format markdown --to namespace/repo:latest namespace/repo:v1.2.3-pre + ``` + + ### Only compare maven packages and only display critical vulnerabilities for maven packages + + ```console + $ docker scout compare --only-package-type maven --only-severity critical --to namespace/repo:latest namespace/repo:v1.2.3-pre + ``` +deprecated: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_data/scout-cli/docker_scout_cves.yaml b/_data/scout-cli/docker_scout_cves.yaml index c852b528a6..bb5bf4f596 100644 --- a/_data/scout-cli/docker_scout_cves.yaml +++ b/_data/scout-cli/docker_scout_cves.yaml @@ -54,7 +54,7 @@ options: swarm: false - option: format value_type: string - default_value: txt-by-packages + default_value: packages description: |- Output format of the generated vulnerability report: - packages: default output, plain text with vulnerabilities grouped by packages @@ -111,7 +111,7 @@ options: value_type: stringSlice default_value: '[]' description: | - Comma separated list of severities (critical, high, medium, low) to filter CVEs by + Comma separated list of severities (critical, high, medium, low, unspecified) to filter CVEs by deprecated: false hidden: false experimental: false @@ -140,7 +140,7 @@ options: swarm: false - option: platform value_type: string - description: Platform of image for which to list CVEs for + description: Platform of image to analyze 
deprecated: false hidden: false experimental: false diff --git a/_data/scout-cli/docker_scout_quickview.yaml b/_data/scout-cli/docker_scout_quickview.yaml new file mode 100644 index 0000000000..9c8c2346bb --- /dev/null +++ b/_data/scout-cli/docker_scout_quickview.yaml @@ -0,0 +1,91 @@ +command: docker scout quickview +short: Quick overview of an image +long: |- + The `docker scout quickview` command displays a quick overview of an image. + It displays a summary of the vulnerabilities in the image and the vulnerabilities from the base image. + If available it also displays base image refresh and update recommendations. +usage: docker scout quickview IMAGE|DIRECTORY|ARCHIVE +pname: docker scout +plink: docker_scout.yaml +options: + - option: debug + value_type: bool + default_value: "false" + description: Debug messages + deprecated: false + hidden: true + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: output + shorthand: o + value_type: string + description: Write the report to a file. + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: platform + value_type: string + description: Platform of image to analyze + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: ref + value_type: string + description: |- + Reference to use if the provided tarball contains multiple references. + Can only be used with --type archive. + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: type + value_type: string + default_value: image + description: |- + Type of the image to analyze. 
Can be one of: + - image + - oci-dir + - archive (docker save tarball) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +examples: |- + ### Quick overview of an image + + ```console + $ docker scout quickview golang:1.19.4 + ...Pulling + ✓ Pulled + ✓ SBOM of image already cached, 278 packages indexed + + Your image golang:1.19.4 │ 5C 3H 6M 63L + Base image buildpack-deps:bullseye-scm │ 5C 1H 3M 48L 6? + Refreshed base image buildpack-deps:bullseye-scm │ 0C 0H 0M 42L + │ -5 -1 -3 -6 -6 + Updated base image buildpack-deps:sid-scm │ 0C 0H 1M 29L + │ -5 -1 -2 -19 -6 + + │ Know more about vulnerabilities: + │ docker scout cves golang:1.19.4 + │ Know more about base image update recommendations: + │ docker scout recommendations golang:1.19.4 + ``` +deprecated: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_data/scout-cli/docker_scout_recommendations.yaml b/_data/scout-cli/docker_scout_recommendations.yaml new file mode 100644 index 0000000000..5200ca30ea --- /dev/null +++ b/_data/scout-cli/docker_scout_recommendations.yaml @@ -0,0 +1,133 @@ +command: docker scout recommendations +short: Display available base image updates and remediation recommendations +long: |- + The `docker scout recommendations` command display recommendations for base images updates. + It analyzes the image and display recommendations to refresh or update the base image. + For each recommendation it shows a list of benefits like less vulnerabilities, smaller image, etc. + + The following artifact types are supported: + + - Images + - OCI layout directories + - Tarball archives, as created by `docker save` + + The tool analyzes the provided software artifact, and generates base image updates and remediation recommendations. 
+ + By default, the tool expects an image reference, such as: + + - `redis` + - `curlimages/curl:7.87.0` + - `mcr.microsoft.com/dotnet/runtime:7.0` + + If the artifact you want to analyze is an OCI directory or a tarball archive, you must use the `--type` flag. +usage: docker scout recommendations IMAGE|DIRECTORY|ARCHIVE +pname: docker scout +plink: docker_scout.yaml +options: + - option: debug + value_type: bool + default_value: "false" + description: Debug messages + deprecated: false + hidden: true + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: only-refresh + value_type: bool + default_value: "false" + description: Only display base image refresh recommendations + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: only-update + value_type: bool + default_value: "false" + description: Only display base image update recommendations + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: output + shorthand: o + value_type: string + description: Write the report to a file. + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: platform + value_type: string + description: Platform of image to analyze + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: ref + value_type: string + description: |- + Reference to use if the provided tarball contains multiple references. + Can only be used with --type archive. 
+ deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: tag + value_type: string + description: Specify tag + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: type + value_type: string + default_value: image + description: |- + Type of the image to analyze. Can be one of: + - image + - oci-dir + - archive (docker save tarball) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +examples: |- + ### Display base image update recommendations + + ```console + $ docker scout recommendations golang:1.19.4 + ``` + + ### Display base image refresh only recommendations + + ```console + $ docker scout recommendations --only-refresh golang:1.19.4 + ``` + + ### Display base image update only recommendations + + ```console + $ docker scout recommendations --only-update golang:1.19.4 + ``` +deprecated: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_data/scout-cli/docker_scout_sbom.yaml b/_data/scout-cli/docker_scout_sbom.yaml new file mode 100644 index 0000000000..7a1d3e99e1 --- /dev/null +++ b/_data/scout-cli/docker_scout_sbom.yaml @@ -0,0 +1,135 @@ +command: docker scout sbom +short: Generate or display SBOM of an image +long: |- + The `docker scout sbom` command analyzes a software artifact to generate the corresponding Software Bill Of Materials (SBOM). + + The SBOM can be used to list all packages, or the ones from a specific type (as dep, maven, etc). + + The following artifact types are supported: + + - Images + - OCI layout directories + - Tarball archives, as created by `docker save` + + The tool analyzes the provided software artifact, and generates a vulnerability report. 
+ + By default, the tool expects an image reference, such as: + + - `redis` + - `curlimages/curl:7.87.0` + - `mcr.microsoft.com/dotnet/runtime:7.0` + + If the artifact you want to analyze is an OCI directory or a tarball archive, you must use the `--type` flag. +usage: docker scout sbom IMAGE|DIRECTORY|ARCHIVE +pname: docker scout +plink: docker_scout.yaml +options: + - option: debug + value_type: bool + default_value: "false" + description: Debug messages + deprecated: false + hidden: true + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: format + value_type: string + default_value: json + description: |- + Output format: + - list: list of packages of the image + - json: json representation of the SBOM + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: only-package-type + value_type: stringSlice + default_value: '[]' + description: |- + Comma separated list of package types (like apk, deb, rpm, npm, pypi, golang, etc) + Can only be used with --format list + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: output + shorthand: o + value_type: string + description: Write the report to a file. + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: platform + value_type: string + description: Platform of image to analyze + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: ref + value_type: string + description: |- + Reference to use if the provided tarball contains multiple references. + Can only be used with --type archive. 
+ deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: type + value_type: string + default_value: image + description: |- + Type of the image to analyze. Can be one of: + - image + - oci-dir + - archive (docker save tarball) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +examples: |- + ### Display the list of packages + + ```console + $ docker scout sbom alpine + ``` + + ### Only display packages of a specific type + + ```console + $ docker scout sbom --only-package-type apk alpine + ``` + + ### Display the full SBOM as json + + ```console + $ docker scout sbom --format json alpine + ``` + + ### Write SBOM to a file + + ```console + $ docker scout sbom --format json --output alpine.sbom alpine + ``` +deprecated: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_data/toc.yaml b/_data/toc.yaml index c15ef3d043..a845b0aa19 100644 --- a/_data/toc.yaml +++ b/_data/toc.yaml @@ -49,6 +49,18 @@ guides: path: /get-started/09_image_best/ - title: "Part 10: What next?" path: /get-started/11_what_next/ +- sectiontitle: Docker Desktop hands-on guides + section: + - path: /get-started/hands-on-overview/ + title: Overview + - path: /get-started/what-is-a-container/ + title: What is a container? + - path: /get-started/run-your-own-container/ + title: How do I run a container? 
+ - path: /get-started/run-docker-hub-images/ + title: Run Docker Hub images + - path: /get-started/publish-your-own-image/ + title: Publish your images - sectiontitle: Language-specific guides section: - path: /language/ @@ -499,6 +511,8 @@ reference: title: docker import - path: /engine/reference/commandline/info/ title: docker info + - path: /engine/reference/commandline/init/ + title: docker init (Beta) - path: /engine/reference/commandline/inspect/ title: docker inspect - path: /engine/reference/commandline/kill/ @@ -609,8 +623,14 @@ reference: section: - path: /engine/reference/commandline/scout/ title: docker scout + - path: /engine/reference/commandline/scout_compare/ + title: docker scout compare - path: /engine/reference/commandline/scout_cves/ title: docker scout cves + - path: /engine/reference/commandline/scout_quickview/ + title: docker scout quickview + - path: /engine/reference/commandline/scout_recommendations/ + title: docker scout recommendations - path: /engine/reference/commandline/scout_version/ title: docker scout version - path: /engine/reference/commandline/search/ @@ -884,8 +904,34 @@ reference: path: /engine/reference/builder/ - sectiontitle: Compose file reference section: - - path: /compose/compose-file/ - title: Compose Specification + - sectiontitle: Compose specification + section: + - path: /compose/compose-file/ + title: Overview + - path: /compose/compose-file/01-status/ + title: Status of the specification + - path: /compose/compose-file/02-model/ + title: Compose application model + - path: /compose/compose-file/03-compose-file/ + title: The Compose file + - path: /compose/compose-file/04-version-and-name/ + title: Version and name top-level element + - path: /compose/compose-file/05-services/ + title: Services top-level element + - path: /compose/compose-file/06-networks/ + title: Network top-level element + - path: /compose/compose-file/07-volumes/ + title: Volumes top-level element + - path: /compose/compose-file/08-configs/ 
+ title: Configs top-level element + - path: /compose/compose-file/09-secrets/ + title: Secrets top-level element + - path: /compose/compose-file/10-fragments/ + title: Fragments + - path: /compose/compose-file/11-extension/ + title: Extensions + - path: /compose/compose-file/12-interpolation/ + title: Interpolation - path: /compose/compose-file/build/ title: Compose file build - path: /compose/compose-file/deploy/ @@ -1581,6 +1627,16 @@ manuals: title: Kubernetes driver - path: /build/drivers/remote/ title: Remote driver + - sectiontitle: Exporters + section: + - path: /build/exporters/ + title: Overview + - path: /build/exporters/image-registry/ + title: Image and registry exporters + - path: /build/exporters/local-tar/ + title: Local and tar exporters + - path: /build/exporters/oci-docker/ + title: OCI and Docker exporters - sectiontitle: Cache section: - path: /build/cache/ @@ -1603,50 +1659,6 @@ manuals: title: Azure Blob Storage - path: /build/cache/backends/s3/ title: Amazon S3 - - sectiontitle: Exporters - section: - - path: /build/exporters/ - title: Overview - - path: /build/exporters/image-registry/ - title: Image and registry exporters - - path: /build/exporters/local-tar/ - title: Local and tar exporters - - path: /build/exporters/oci-docker/ - title: OCI and Docker exporters - - sectiontitle: Continuous integration - section: - - path: /build/ci/ - title: CI with Docker - - sectiontitle: GitHub Actions - section: - - path: /build/ci/github-actions/ - title: Introduction - - path: /build/ci/github-actions/configure-builder/ - title: Configuring your builder - - path: /build/ci/github-actions/multi-platform/ - title: Multi-platform image - - path: /build/ci/github-actions/secrets/ - title: Secrets - - path: /build/ci/github-actions/push-multi-registries/ - title: Push to multi-registries - - path: /build/ci/github-actions/manage-tags-labels/ - title: Manage tags and labels - - path: /build/ci/github-actions/cache/ - title: Cache management - - path: 
/build/ci/github-actions/export-docker/ - title: Export to Docker - - path: /build/ci/github-actions/test-before-push/ - title: Test before push - - path: /build/ci/github-actions/local-registry/ - title: Local registry - - path: /build/ci/github-actions/share-image-jobs/ - title: Share built image between jobs - - path: /build/ci/github-actions/named-contexts/ - title: Named contexts - - path: /build/ci/github-actions/copy-image-registries/ - title: Copy image between registries - - path: /build/ci/github-actions/update-dockerhub-desc/ - title: Update Docker Hub repo description - sectiontitle: Bake section: - path: /build/bake/ @@ -1683,6 +1695,40 @@ manuals: title: Configure - path: /build/buildkit/toml-configuration/ title: TOML configuration + - sectiontitle: Continuous integration + section: + - path: /build/ci/ + title: CI with Docker + - sectiontitle: GitHub Actions + section: + - path: /build/ci/github-actions/ + title: Introduction + - path: /build/ci/github-actions/configure-builder/ + title: Configuring your builder + - path: /build/ci/github-actions/multi-platform/ + title: Multi-platform image + - path: /build/ci/github-actions/secrets/ + title: Secrets + - path: /build/ci/github-actions/push-multi-registries/ + title: Push to multi-registries + - path: /build/ci/github-actions/manage-tags-labels/ + title: Manage tags and labels + - path: /build/ci/github-actions/cache/ + title: Cache management + - path: /build/ci/github-actions/export-docker/ + title: Export to Docker + - path: /build/ci/github-actions/test-before-push/ + title: Test before push + - path: /build/ci/github-actions/local-registry/ + title: Local registry + - path: /build/ci/github-actions/share-image-jobs/ + title: Share built image between jobs + - path: /build/ci/github-actions/named-contexts/ + title: Named contexts + - path: /build/ci/github-actions/copy-image-registries/ + title: Copy image between registries + - path: /build/ci/github-actions/update-dockerhub-desc/ + title: 
Update Docker Hub repo description - path: /build/release-notes/ title: Release notes - sectiontitle: Docker Compose @@ -1741,68 +1787,54 @@ manuals: section: - path: /docker-hub/ title: Overview + - path: /docker-id/ + title: Create an account - path: /docker-hub/quickstart/ title: Quickstart - - path: /docker-id/ - title: Docker ID accounts - sectiontitle: Repositories section: - - path: /docker-hub/repos/ - title: Manage + - path: /docker-hub/repos/create/ + title: Create - path: /docker-hub/repos/access/ title: Access - - path: /docker-hub/repos/configure/ - title: Configure - - path: /docker-hub/service-accounts/ - title: Service accounts + - path: /docker-hub/repos/ + title: Manage - path: /docker-hub/official_images/ title: Docker Official images - - sectiontitle: Automated builds - section: - - path: /docker-hub/builds/ - title: Set up automated builds - - path: /docker-hub/builds/automated-testing/ - title: Testing in automated builds - - path: /docker-hub/builds/advanced/ - title: Advanced automated builds - - path: /docker-hub/builds/link-source/ - title: Link to GitHub and BitBucket - - path: /docker-hub/webhooks/ - title: Webhooks - - path: /docker-hub/vulnerability-scanning/ - title: Vulnerability scanning - - path: /docker-hub/audit-log/ - title: Audit logs - - sectiontitle: Security and authentication - section: - - path: /docker-hub/access-tokens/ - title: Manage access tokens - - sectiontitle: Two-factor authentication - section: - - path: /docker-hub/2fa/ - title: Enable two-factor authentication - - path: /docker-hub/2fa/disable-2fa/ - title: Disable two-factor authentication - - path: /docker-hub/2fa/recover-hub-account/ - title: Recover your Docker Hub account - - path: /docker-hub/2fa/new-recovery-code/ - title: Generate a new recovery code - path: /docker-hub/download-rate-limit/ title: Download rate limit - - sectiontitle: Administration + - path: /docker-hub/webhooks/ + title: Webhooks + - path: /docker-hub/service-accounts/ + title: 
Service accounts + - sectiontitle: Automated builds section: - - path: /docker-hub/image-management/ - title: Advanced Image Management dashboard - - path: /docker-hub/convert-account/ - title: Convert an account into an organization - - path: /docker-hub/deactivate-account/ - title: Deactivate an account or an organization + - path: /docker-hub/builds/how-builds-work/ + title: How Automated builds work + - path: /docker-hub/builds/ + title: Set up Automated builds + - path: /docker-hub/builds/manage-builds/ + title: Manage your builds + - path: /docker-hub/builds/troubleshoot/ + title: Troubleshoot your builds + - path: /docker-hub/builds/automated-testing/ + title: Testing in Automated builds + - path: /docker-hub/builds/advanced/ + title: Advanced options for builds + - path: /docker-hub/builds/link-source/ + title: Link to GitHub and BitBucket + - path: /docker-hub/vulnerability-scanning/ + title: Vulnerability scanning + - path: /docker-hub/image-management/ + title: Advanced Image Management - sectiontitle: Docker Verified Publisher section: - path: /docker-hub/publish/ title: Overview - path: /docker-hub/publish/insights-analytics/ title: Insights and analytics + - path: /docker-hub/dsos-program/ + title: Docker-Sponsored Open Source Program - path: /docker-hub/oci-artifacts/ title: OCI artifacts - path: /docker-hub/release-notes/ @@ -1819,8 +1851,10 @@ manuals: - path: /scout/artifactory/ title: Artifactory integration -- sectiontitle: Administration +- sectiontitle: Administration and security section: + - path: /docker-hub/admin-overview/ + title: Overview - sectiontitle: Onboarding section: - path: /docker-hub/onboard-team/ @@ -1839,8 +1873,10 @@ manuals: title: FAQs - path: /docker-hub/orgs/ title: Create your organization + - path: /docker-hub/convert-account/ + title: Convert an account into an organization - path: /docker-hub/manage-a-team/ - title: Manage a team + title: Create and manage a team - path: /docker-hub/members/ title: Manage members - 
path: /docker-hub/configure-sign-in/ @@ -1859,10 +1895,30 @@ manuals: title: FAQs - path: /docker-hub/scim/ title: SCIM + - path: /docker-hub/group-mapping/ + title: Group mapping + - sectiontitle: Security and authentication + section: + - path: /docker-hub/access-tokens/ + title: Create and manage access tokens + - sectiontitle: Two-factor authentication + section: + - path: /docker-hub/2fa/ + title: Enable two-factor authentication + - path: /docker-hub/2fa/disable-2fa/ + title: Disable two-factor authentication + - path: /docker-hub/2fa/recover-hub-account/ + title: Recover your Docker Hub account + - path: /docker-hub/2fa/new-recovery-code/ + title: Generate a new recovery code + - path: /docker-hub/audit-log/ + title: Audit logs - path: /docker-hub/domain-audit/ title: Domain audit - path: /docker-hub/image-access-management/ title: Image Access Management + - path: /docker-hub/deactivate-account/ + title: Deactivate an account or organization - sectiontitle: Billing section: diff --git a/_includes/desktop-install.html b/_includes/desktop-install.html index 9924565939..7a8cf2e560 100644 --- a/_includes/desktop-install.html +++ b/_includes/desktop-install.html @@ -13,7 +13,7 @@ Download file

- Checksum: SHA-256 69ea659b0ca0e160a1de9bd63dc5697f5eb89fff1d33484fb8ef9793e43d0d45 + Checksum: SHA-256 7b17e26d7c2d0245ba9f2526e20349e113819cfb47d1f3e8dbd3cc8ea8ccf6b7 @@ -31,7 +31,7 @@ Download file

- Checksum: SHA-256 eb0531122a62859ce7b029e943fdad365603a916e6c15c107514c1e4a818d7ef + Checksum: SHA-256 2e099af08e17666228282b970992160fa423ce8f5fa9e36b79495a1960803091 @@ -49,7 +49,7 @@ Download file

- Checksum: SHA-256 5e01465d93dfe18d7678a96705e7c26bb654b6766f06373b5cffbf77c641bccc + Checksum: SHA-256 2ae4b2ec556c107f969e51b72ad1920fefa38dbd0d8e3db64815c26b9f2b126d @@ -64,10 +64,10 @@
- Download file + Download file

- Checksum: SHA-256 6828d35ae02763255790de6690909935a1f7c951373179ac0efd6c6b578b5219 + Checksum: SHA-256 d579f653b5223a3c24234992203283c5a6da28146702f8899964e08b8ba45198
@@ -82,10 +82,10 @@
- Download file + Download file

- Checksum: SHA-256 7973c5bf41bdc78ca39ba64f93c6e4a33263d8dbfc604651bf1562bfeeea26f7 + Checksum: SHA-256 4d64ea1c9e9da66ca0a37820135926eb4bbcdf658cafdbe87497aae384bf65c7
@@ -100,10 +100,10 @@
- Download file + Download file

- Checksum: SHA-256 c783ce942c84f899d1f576d01d34fd4de3cefa0a1d577eda2bc5c4ceaec6cfdb + Checksum: SHA-256 0d6f8c54457f89cfaba3222ab4754caa8df547a0050aa8749d679609bad9456d
diff --git a/_layouts/landing.html b/_layouts/landing.html index e4edbe2902..3c08846942 100644 --- a/_layouts/landing.html +++ b/_layouts/landing.html @@ -144,19 +144,18 @@

Multi-stage builds

-

Get support

+

Get support

Docker community Slack

-

Contact Docker -

-

+

Contact Docker

+
+ @@ -367,11 +366,11 @@

Develop with Docker

Learn how to develop language-specific apps using Docker.

-

Containerize a Node.js app using Docker

-

Containerize a Python app using Docker

-

Containerize a Java app using Docker

-

Containerize a Go app using Docker

-

View more languages and frameworks in Docker samples

+

Containerize a Node.js app using Docker

+

Containerize a Python app using Docker

+

Containerize a Java app using Docker

+

Containerize a Go app using Docker

+

View more languages and frameworks in Docker samples

@@ -386,7 +385,7 @@
- +
Tutorial

Self-paced tutorials to increase your Docker knowledge.

diff --git a/assets/images/arrow-up.svg b/assets/images/arrow-up.svg new file mode 100644 index 0000000000..080c6e5367 --- /dev/null +++ b/assets/images/arrow-up.svg @@ -0,0 +1,38 @@ + + + + + + + diff --git a/assets/images/desktop.svg b/assets/images/desktop.svg new file mode 100644 index 0000000000..9b860ba6f1 --- /dev/null +++ b/assets/images/desktop.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/assets/images/sign-on.svg b/assets/images/sign-on.svg new file mode 100644 index 0000000000..ae97ce20a9 --- /dev/null +++ b/assets/images/sign-on.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/assets/images/sso.svg b/assets/images/sso.svg new file mode 100644 index 0000000000..74c98cd475 --- /dev/null +++ b/assets/images/sso.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/assets/js/anchorlinks.js b/assets/js/anchorlinks.js index 088a0a1e49..750f964703 100644 --- a/assets/js/anchorlinks.js +++ b/assets/js/anchorlinks.js @@ -1,7 +1,7 @@ (function (d) { "use strict"; for (const h of d.querySelectorAll("H1, H2, H3")) { - if (h.id != null && h.id.length > 0) { + if (h.id != null && h.id.length > 0 && !h.parentElement.classList.contains("component")) { h.insertAdjacentHTML('beforeend', `🔗`) } } diff --git a/billing/faqs.md b/billing/faqs.md index 4b86156ffd..78210decea 100644 --- a/billing/faqs.md +++ b/billing/faqs.md @@ -48,3 +48,15 @@ Contact the [Docker Sales Team](https://www.docker.com/company/contact){:target= ### Do I need to do anything at the end of my subscription term? No. All monthly and annual subscriptions are automatically renewed at the end of the term using the original form of payment. + +### How do I redeem a coupon? + +You can redeem a coupon for any paid Docker subscription. + +A coupon can be used when you: +- Sign up to a new paid subscription from a free subscription +- Upgrade an existing paid subscription + +You are asked to enter your coupon code when you confirm or enter your payment method. 
+ +If you use a coupon to pay for a subscription, when the coupon expires, your payment method is charged the full cost of your subscription. If you do not have a saved payment method, your account is downgraded to a free subscription. diff --git a/build/attestations/sbom.md b/build/attestations/sbom.md index 187eab0469..f28d3fa8fc 100644 --- a/build/attestations/sbom.md +++ b/build/attestations/sbom.md @@ -214,7 +214,7 @@ an open source tool for generating an SBOM. You can select a different plugin to use with the `generator` option, specifying an image that implements the -[BuildKit SBOM scanner protocol](https://github.com/moby/buildkit/blob/master/docs/sbom-protocol.md){: target="blank" rel="noopener" }. +[BuildKit SBOM scanner protocol](https://github.com/moby/buildkit/blob/master/docs/attestations/sbom-protocol.md){: target="blank" rel="noopener" }. ```console $ docker buildx build --attest type=sbom,generator= . diff --git a/build/bake/compose-file.md b/build/bake/compose-file.md index b220d8e943..ef4867ca97 100644 --- a/build/bake/compose-file.md +++ b/build/bake/compose-file.md @@ -98,7 +98,7 @@ Unlike the [HCL format](file-definition.md#hcl-definition), there are some limitations with the compose format: * Specifying variables or global scope attributes is not yet supported -* `inherits` service field is not supported, but you can use [YAML anchors](https://docs.docker.com/compose/compose-file/#fragments){:target="blank" rel="noopener" class=""} +* `inherits` service field is not supported, but you can use [YAML anchors](../../compose/compose-file/10-fragments.md){:target="blank" rel="noopener" class=""} to reference other services like the example above ## `.env` file @@ -155,7 +155,7 @@ $ docker buildx bake --print ## Extension field with `x-bake` Even if some fields are not (yet) available in the compose specification, you -can use the [special extension](../../compose/compose-file/index.md#extension) +can use the [special 
extension](../../compose/compose-file/11-extension.md) field `x-bake` in your compose file to evaluate extra fields: ```yaml diff --git a/build/building/env-vars.md b/build/building/env-vars.md index 78cbc7ff54..35d750866c 100644 --- a/build/building/env-vars.md +++ b/build/building/env-vars.md @@ -183,7 +183,7 @@ attestations. Usage: ```console -$ export BUILDX_NO_DEFAULT_ATTESTATION=1 +$ export BUILDX_NO_DEFAULT_ATTESTATIONS=1 ``` ## BUILDX_NO_DEFAULT_LOAD diff --git a/build/index.md b/build/index.md index f1a132648b..47f4fc4a13 100644 --- a/build/index.md +++ b/build/index.md @@ -96,34 +96,6 @@ advanced scenarios.

-
-
-
- - Two arrows rotating in a circle - -
-

Build caching

-

- Avoid unnecessary repetitions of costly operations, such as package installs. -

-
-
-
-
-
- - Infinity loop - -
-

Continuous integration

-

- Learn how to use Docker in your continuous integration pipelines. -

-
-
- -
@@ -137,6 +109,21 @@ advanced scenarios.

+
+
+
+ + Two arrows rotating in a circle + +
+

Build caching

+

+ Avoid unnecessary repetitions of costly operations, such as package installs. +

+
+
+
+
@@ -150,6 +137,19 @@ advanced scenarios.

+
+
+
+ + Infinity loop + +
+

Continuous integration

+

+ Learn how to use Docker in your continuous integration pipelines. +

+
+
diff --git a/build/install-buildx.md b/build/install-buildx.md index c5c34e240d..492ae58ac7 100644 --- a/build/install-buildx.md +++ b/build/install-buildx.md @@ -6,6 +6,10 @@ redirect_from: - /build/buildx/install/ --- +This page describes how to install Buildx, the CLI plugin for managing Docker builds. + +Buildx requires Docker Engine version 19.03 or later. + ## Docker Desktop Docker Buildx is included by default in Docker Desktop. diff --git a/compose/compose-file/01-status.md b/compose/compose-file/01-status.md new file mode 100644 index 0000000000..1c8047d6bc --- /dev/null +++ b/compose/compose-file/01-status.md @@ -0,0 +1,7 @@ +--- +title: Status of the specification +keywords: compose, compose specification +fetch_remote: + line_start: 2 + line_end: -1 +--- diff --git a/compose/compose-file/02-model.md b/compose/compose-file/02-model.md new file mode 100644 index 0000000000..e4c353003d --- /dev/null +++ b/compose/compose-file/02-model.md @@ -0,0 +1,7 @@ +--- +title: Compose application model +keywords: compose, compose specification +fetch_remote: + line_start: 2 + line_end: -1 +--- diff --git a/compose/compose-file/03-compose-file.md b/compose/compose-file/03-compose-file.md new file mode 100644 index 0000000000..efe23a554a --- /dev/null +++ b/compose/compose-file/03-compose-file.md @@ -0,0 +1,7 @@ +--- +title: The Compose file +keywords: compose, compose specification +fetch_remote: + line_start: 2 + line_end: -1 +--- diff --git a/compose/compose-file/04-version-and-name.md b/compose/compose-file/04-version-and-name.md new file mode 100644 index 0000000000..6dad195811 --- /dev/null +++ b/compose/compose-file/04-version-and-name.md @@ -0,0 +1,7 @@ +--- +title: Version and name top-level element +keywords: compose, compose specification +fetch_remote: + line_start: 2 + line_end: -1 +--- diff --git a/compose/compose-file/05-services.md b/compose/compose-file/05-services.md new file mode 100644 index 0000000000..04587a59ea --- /dev/null +++ 
b/compose/compose-file/05-services.md @@ -0,0 +1,7 @@ +--- +title: Services top-level element +keywords: compose, compose specification +fetch_remote: + line_start: 2 + line_end: -1 +--- diff --git a/compose/compose-file/06-networks.md b/compose/compose-file/06-networks.md new file mode 100644 index 0000000000..8117c862ed --- /dev/null +++ b/compose/compose-file/06-networks.md @@ -0,0 +1,7 @@ +--- +title: Networks top-level element +keywords: compose, compose specification +fetch_remote: + line_start: 2 + line_end: -1 +--- diff --git a/compose/compose-file/07-volumes.md b/compose/compose-file/07-volumes.md new file mode 100644 index 0000000000..200f6a1c10 --- /dev/null +++ b/compose/compose-file/07-volumes.md @@ -0,0 +1,7 @@ +--- +title: Volumes top-level element +keywords: compose, compose specification +fetch_remote: + line_start: 2 + line_end: -1 +--- diff --git a/compose/compose-file/08-configs.md b/compose/compose-file/08-configs.md new file mode 100644 index 0000000000..0d7db3f06b --- /dev/null +++ b/compose/compose-file/08-configs.md @@ -0,0 +1,7 @@ +--- +title: Configs top-level element +keywords: compose, compose specification +fetch_remote: + line_start: 2 + line_end: -1 +--- diff --git a/compose/compose-file/09-secrets.md b/compose/compose-file/09-secrets.md new file mode 100644 index 0000000000..ccc154efe5 --- /dev/null +++ b/compose/compose-file/09-secrets.md @@ -0,0 +1,7 @@ +--- +title: Secrets top-level element +keywords: compose, compose specification +fetch_remote: + line_start: 2 + line_end: -1 +--- diff --git a/compose/compose-file/10-fragments.md b/compose/compose-file/10-fragments.md new file mode 100644 index 0000000000..0c5d1a6c39 --- /dev/null +++ b/compose/compose-file/10-fragments.md @@ -0,0 +1,7 @@ +--- +title: Fragments +keywords: compose, compose specification +fetch_remote: + line_start: 2 + line_end: -1 +--- diff --git a/compose/compose-file/11-extension.md b/compose/compose-file/11-extension.md new file mode 100644 index 
0000000000..31cccd4366 --- /dev/null +++ b/compose/compose-file/11-extension.md @@ -0,0 +1,7 @@ +--- +title: Extensions +keywords: compose, compose specification +fetch_remote: + line_start: 2 + line_end: -1 +--- diff --git a/compose/compose-file/12-interpolation.md b/compose/compose-file/12-interpolation.md new file mode 100644 index 0000000000..2c02bb7752 --- /dev/null +++ b/compose/compose-file/12-interpolation.md @@ -0,0 +1,7 @@ +--- +title: Interpolation +keywords: compose, compose specification +fetch_remote: + line_start: 2 + line_end: -1 +--- diff --git a/compose/compose-file/build.md b/compose/compose-file/build.md index 5cf923318e..108ba58932 100644 --- a/compose/compose-file/build.md +++ b/compose/compose-file/build.md @@ -1,435 +1,7 @@ --- -description: Compose file build reference -keywords: fig, composition, compose, docker title: Compose file build reference -toc_max: 4 -toc_min: 2 +keywords: fig, composition, compose, docker +fetch_remote: + line_start: 8 + line_end: -1 --- - -Compose specification is a platform-neutral way to define multi-container applications. A Compose implementation -focusing on development use-case to run application on local machine will obviously also support (re)building -application from sources. The Compose Build specification allows to define the build process within a Compose file -in a portable way. - -## Definitions - -Compose Specification is extended to support an OPTIONAL `build` subsection on services. This section define the -build requirements for service container image. Only a subset of Compose file services MAY define such a Build -subsection, others being created based on `Image` attribute. When a Build subsection is present for a service, it -is *valid* for a Compose file to miss an `Image` attribute for corresponding service, as Compose implementation -can build image from source. - -Build can be either specified as a single string defining a context path, or as a detailed build definition. 
- -In the former case, the whole path is used as a Docker context to execute a docker build, looking for a canonical -`Dockerfile` at context root. Context path can be absolute or relative, and if so relative path MUST be resolved -from Compose file parent folder. As an absolute path prevent the Compose file to be portable, Compose implementation -SHOULD warn user accordingly. - -In the later case, build arguments can be specified, including an alternate `Dockerfile` location. This one can be -absolute or relative path. If Dockerfile path is relative, it MUST be resolved from context path. As an absolute -path prevent the Compose file to be portable, Compose implementation SHOULD warn user if an absolute alternate -Dockerfile path is used. - -## Consistency with Image - -When service definition do include both `Image` attribute and a `Build` section, Compose implementation can't -guarantee a pulled image is strictly equivalent to building the same image from sources. Without any explicit -user directives, Compose implementation with Build support MUST first try to pull Image, then build from source -if image was not found on registry. Compose implementation MAY offer options to customize this behaviour by user -request. - -## Publishing built images - -Compose implementation with Build support SHOULD offer an option to push built images to a registry. Doing so, it -MUST NOT try to push service images without an `Image` attribute. Compose implementation SHOULD warn user about -missing `Image` attribute which prevent image being pushed. - -Compose implementation MAY offer a mechanism to compute an `Image` attribute for service when not explicitly -declared in yaml file. In such a case, the resulting Compose configuration is considered to have a valid `Image` -attribute, whenever the actual raw yaml file doesn't explicitly declare one. - -## Illustrative sample - -The following sample illustrates Compose specification concepts with a concrete sample application. 
The sample is non-normative. - -```yaml -services: - frontend: - image: awesome/webapp - build: ./webapp - - backend: - image: awesome/database - build: - context: backend - dockerfile: ../backend.Dockerfile - - custom: - build: ~/custom -``` - -When used to build service images from source, such a Compose file will create three docker images: - -* `awesome/webapp` docker image is built using `webapp` sub-directory within Compose file parent folder as docker build context. Lack of a `Dockerfile` within this folder will throw an error. -* `awesome/database` docker image is built using `backend` sub-directory within Compose file parent folder. `backend.Dockerfile` file is used to define build steps, this file is searched relative to context path, which means for this sample `..` will resolve to Compose file parent folder, so `backend.Dockerfile` is a sibling file. -* a docker image is built using `custom` directory within user's HOME as docker context. Compose implementation warn user about non-portable path used to build image. - -On push, both `awesome/webapp` and `awesome/database` docker images are pushed to (default) registry. `custom` service image is skipped as no `Image` attribute is set and user is warned about this missing attribute. - -## Build definition - -The `build` element define configuration options that are applied by Compose implementations to build Docker image from source. -`build` can be specified either as a string containing a path to the build context or a detailed structure: - -```yml -services: - webapp: - build: ./dir -``` - -Using this string syntax, only the build context can be configured as a relative path to the Compose file's parent folder. -This path MUST be a directory and contain a `Dockerfile`. - -Alternatively `build` can be an object with fields defined as follow - -### context (REQUIRED) - -`context` defines either a path to a directory containing a Dockerfile, or a url to a git repository. 
- -When the value supplied is a relative path, it MUST be interpreted as relative to the location of the Compose file. -Compose implementations MUST warn user about absolute path used to define build context as those prevent Compose file -from being portable. - -```yml -build: - context: ./dir -``` - -See [Build context](../../build/building/context.md) page for more information. - -### dockerfile - -`dockerfile` allows to set an alternate Dockerfile. A relative path MUST be resolved from the build context. -Compose implementations MUST warn user about absolute path used to define Dockerfile as those prevent Compose file -from being portable. - -```yml -build: - context: . - dockerfile: webapp.Dockerfile -``` - -### args - -`args` define build arguments, i.e. Dockerfile `ARG` values. - -Using following Dockerfile: - -```Dockerfile -ARG GIT_COMMIT -RUN echo "Based on commit: $GIT_COMMIT" -``` - -`args` can be set in Compose file under the `build` key to define `GIT_COMMIT`. `args` can be set a mapping or a list: - -```yml -build: - context: . - args: - GIT_COMMIT: cdc3b19 -``` - -```yml -build: - context: . - args: - - GIT_COMMIT=cdc3b19 -``` - -Value can be omitted when specifying a build argument, in which case its value at build time MUST be obtained by user interaction, -otherwise build arg won't be set when building the Docker image. - -```yml -args: - - GIT_COMMIT -``` - -### ssh - -`ssh` defines SSH authentications that the image builder SHOULD use during image build (e.g., cloning private repository) - -`ssh` property syntax can be either: -* `default` - let the builder connect to the ssh-agent. -* `ID=path` - a key/value definition of an ID and the associated path. Can be either a [PEM](https://en.wikipedia.org/wiki/Privacy-Enhanced_Mail) file, or path to ssh-agent socket - -Simple `default` sample -```yaml -build: - context: . - ssh: - - default # mount the default ssh agent -``` -or -```yaml -build: - context: . 
- ssh: ["default"] # mount the default ssh agent -``` - -Using a custom id `myproject` with path to a local SSH key: -```yaml -build: - context: . - ssh: - - myproject=~/.ssh/myproject.pem -``` -Image builder can then rely on this to mount SSH key during build. -For illustration, [BuildKit extended syntax](https://github.com/compose-spec/compose-spec/pull/234/%5Bmoby/buildkit@master/frontend/dockerfile/docs/syntax.md#run---mounttypessh%5D(https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#run---mounttypessh)) can be used to mount ssh key set by ID and access a secured resource: - -`RUN --mount=type=ssh,id=myproject git clone ...` - -### cache_from - -`cache_from` defines a list of sources the Image builder SHOULD use for cache resolution. - -Cache location syntax MUST follow the global format `[NAME|type=TYPE[,KEY=VALUE]]`. Simple `NAME` is actually a shortcut notation for `type=registry,ref=NAME`. - -Compose Builder implementations MAY support custom types, the Compose Specification defines canonical types which MUST be supported: - -- `registry` to retrieve build cache from an OCI image set by key `ref` - - -```yml -build: - context: . - cache_from: - - alpine:latest - - type=local,src=path/to/cache - - type=gha -``` - -Unsupported caches MUST be ignored and not prevent user from building image. - -### cache_to - -`cache_to` defines a list of export locations to be used to share build cache with future builds. - -```yml -build: - context: . - cache_to: - - user/app:cache - - type=local,dest=path/to/cache -``` - -Cache target is defined using the same `type=TYPE[,KEY=VALUE]` syntax defined by [`cache_from`](#cache_from). - -Unsupported cache target MUST be ignored and not prevent user from building image. - -### extra_hosts - -`extra_hosts` adds hostname mappings at build-time. Use the same syntax as [extra_hosts](index.md#extra_hosts). 
- -```yml -extra_hosts: - - "somehost:162.242.195.82" - - "otherhost:50.31.209.229" -``` - -Compose implementations MUST create matching entry with the IP address and hostname in the container's network -configuration, which means for Linux `/etc/hosts` will get extra lines: - -``` -162.242.195.82 somehost -50.31.209.229 otherhost -``` - -### isolation - -`isolation` specifies a build’s container isolation technology. Like [isolation](index.md#isolation) supported values -are platform-specific. - -### labels - -`labels` add metadata to the resulting image. `labels` can be set either as an array or a map. - -reverse-DNS notation SHOULD be used to prevent labels from conflicting with those used by other software. - -```yml -build: - context: . - labels: - com.example.description: "Accounting webapp" - com.example.department: "Finance" - com.example.label-with-empty-value: "" -``` - -```yml -build: - context: . - labels: - - "com.example.description=Accounting webapp" - - "com.example.department=Finance" - - "com.example.label-with-empty-value" -``` - -### no_cache - -`no_cache` disables image builder cache and enforces a full rebuild from source for all image layers. This only -applies to layers declared in the Dockerfile, referenced images COULD be retrieved from local image store whenever tag -has been updated on registry (see [pull](#pull)). - -### pull - -`pull` requires the image builder to pull referenced images (`FROM` Dockerfile directive), even if those are already -available in the local image store. - -### shm_size - -`shm_size` set the size of the shared memory (`/dev/shm` partition on Linux) allocated for building Docker image. Specify -as an integer value representing the number of bytes or as a string expressing a [byte value](index.md#specifying-byte-values). - -```yml -build: - context: . - shm_size: '2gb' -``` - -```yaml -build: - context: . 
- shm_size: 10000000 -``` - -### target - -`target` defines the stage to build as defined inside a multi-stage `Dockerfile`. - -```yml -build: - context: . - target: prod -``` - -### secrets -`secrets` grants access to sensitive data defined by [secrets](index.md#secrets) on a per-service build basis. Two -different syntax variants are supported: the short syntax and the long syntax. - -Compose implementations MUST report an error if the secret isn't defined in the -[`secrets`](index.md#secrets-top-level-element) section of the Compose file. - -#### Short syntax - -The short syntax variant only specifies the secret name. This grants the -container access to the secret and mounts it as read-only to `/run/secrets/` -within the container. The source name and destination mountpoint are both set -to the secret name. - -The following example uses the short syntax to grant the build of the `frontend` service -access to the `server-certificate` secret. The value of `server-certificate` is set -to the contents of the file `./server.cert`. - -```yml -services: - frontend: - build: - context: . - secrets: - - server-certificate -secrets: - server-certificate: - file: ./server.cert -``` - -#### Long syntax - -The long syntax provides more granularity in how the secret is created within -the service's containers. - -- `source`: The name of the secret as it exists on the platform. -- `target`: The name of the file to be mounted in `/run/secrets/` in the - service's task containers. Defaults to `source` if not specified. -- `uid` and `gid`: The numeric UID or GID that owns the file within - `/run/secrets/` in the service's task containers. Default value is USER running container. -- `mode`: The [permissions](https://chmod-calculator.com/) for the file to be mounted in `/run/secrets/` - in the service's task containers, in octal notation. - Default value is world-readable permissions (mode `0444`). - The writable bit MUST be ignored if set. The executable bit MAY be set. 
- -The following example sets the name of the `server-certificate` secret file to `server.crt` -within the container, sets the mode to `0440` (group-readable), and sets the user and group -to `103`. The value of `server-certificate` secret is provided by the platform through a lookup and -the secret lifecycle is not directly managed by the Compose implementation. - -```yml -services: - frontend: - build: - context: . - secrets: - - source: server-certificate - target: server.cert - uid: "103" - gid: "103" - mode: 0440 -secrets: - server-certificate: - external: true -``` - -Service builds MAY be granted access to multiple secrets. Long and short syntax for secrets MAY be used in the -same Compose file. Defining a secret in the top-level `secrets` MUST NOT imply granting any service build access to it. -Such grant must be explicit within the service specification as a [secrets](index.md#secrets) service element. - -### tags - -`tags` defines a list of tag mappings that MUST be associated to the build image. This list comes in addition of -the `image` [property defined in the service section](index.md#image) - -```yml -tags: - - "myimage:mytag" - - "registry/username/myrepos:my-other-tag" -``` - -### platforms - -`platforms` defines a list of target [platforms](index.md#platform). - -```yml -build: - context: "." - platforms: - - "linux/amd64" - - "linux/arm64" -``` - -When the `platforms` attribute is omitted, Compose implementations MUST include the service's platform -in the list of the default build target platforms. - -Compose implementations SHOULD report an error in the following cases: -* when the list contains multiple platforms but the implementation is incapable of storing multi-platform images -* when the list contains an unsupported platform -```yml -build: - context: "." 
- platforms: - - "linux/amd64" - - "unsupported/unsupported" -``` -* when the list is non-empty and does not contain the service's platform -```yml -services: - frontend: - platform: "linux/amd64" - build: - context: "." - platforms: - - "linux/arm64" -``` - -## Implementations - -* [docker-compose](../../compose/index.md) -* [buildx bake](../../build/bake/index.md) diff --git a/compose/compose-file/deploy.md b/compose/compose-file/deploy.md index dafb6e81a9..ec0c84c274 100644 --- a/compose/compose-file/deploy.md +++ b/compose/compose-file/deploy.md @@ -1,298 +1,7 @@ --- -description: Compose file deploy reference -keywords: fig, composition, compose, docker title: Compose file deploy reference -toc_max: 4 -toc_min: 2 +keywords: fig, composition, compose, docker +fetch_remote: + line_start: 8 + line_end: -1 --- - -Compose specification is a platform-neutral way to define multi-container applications. A Compose implementation supporting -deployment of application model MAY require some additional metadata as the Compose application model is way too abstract -to reflect actual infrastructure needs per service, or lifecycle constraints. - -Compose Specification Deployment allows users to declare additional metadata on services so Compose implementations get -relevant data to allocate adequate resources on platform and configure them to match user's needs. - -## Definitions - -Compose Specification is extended to support an OPTIONAL `deploy` subsection on services. This section define runtime requirements -for a service. - -### endpoint_mode - -`endpoint_mode` specifies a service discovery method for external clients connecting to a service. Default and available values -are platform specific, anyway the Compose specification define two canonical values: - -* `endpoint_mode: vip`: Assigns the service a virtual IP (VIP) that acts as the front end for clients to reach the service - on a network. 
Platform routes requests between the client and nodes running the service, without client knowledge of how - many nodes are participating in the service or their IP addresses or ports. - -* `endpoint_mode: dnsrr`: Platform sets up DNS entries for the service such that a DNS query for the service name returns a - list of IP addresses (DNS round-robin), and the client connects directly to one of these. - -```yml -services: - frontend: - image: awesome/webapp - ports: - - "8080:80" - deploy: - mode: replicated - replicas: 2 - endpoint_mode: vip -``` - -### labels - -`labels` specifies metadata for the service. These labels MUST *only* be set on the service and *not* on any containers for the service. -This assumes the platform has some native concept of "service" that can match Compose application model. - -```yml -services: - frontend: - image: awesome/webapp - deploy: - labels: - com.example.description: "This label will appear on the web service" -``` - -### mode - -`mode` define the replication model used to run the service on platform. Either `global` (exactly one container per physical node) or `replicated` (a specified number of containers). The default is `replicated`. - -```yml -services: - frontend: - image: awesome/webapp - deploy: - mode: global -``` - -### placement - -`placement` specifies constraints and preferences for platform to select a physical node to run service containers. - -#### constraints - -`constraints` defines a REQUIRED property the platform's node MUST fulfill to run service container. Can be set either -by a list or a map with string values. - -```yml -deploy: - placement: - constraints: - - disktype=ssd -``` - -```yml -deploy: - placement: - constraints: - disktype: ssd -``` - -#### preferences - -`preferences` defines a property the platform's node SHOULD fulfill to run service container. Can be set either -by a list or a map with string values. 
- -```yml -deploy: - placement: - preferences: - - datacenter=us-east -``` - -```yml -deploy: - placement: - preferences: - datacenter: us-east -``` - -### replicas - -If the service is `replicated` (which is the default), `replicas` specifies the number of containers that SHOULD be -running at any given time. - -```yml -services: - frontend: - image: awesome/webapp - deploy: - mode: replicated - replicas: 6 -``` - -### resources - -`resources` configures physical resource constraints for container to run on platform. Those constraints can be configured -as a: - -- `limits`: The platform MUST prevent container to allocate more -- `reservations`: The platform MUST guarantee container can allocate at least the configured amount - -```yml -services: - frontend: - image: awesome/webapp - deploy: - resources: - limits: - cpus: '0.50' - memory: 50M - pids: 1 - reservations: - cpus: '0.25' - memory: 20M -``` - -#### cpus - -`cpus` configures a limit or reservation for how much of the available CPU resources (as number of cores) a container can use. - -#### memory - -`memory` configures a limit or reservation on the amount of memory a container -can allocate, set as a string expressing a -[byte value](index.md#specifying-byte-values). - -#### pids - -`pids` tunes a container’s PIDs limit, set as an integer. - -#### devices - -`devices` configures reservations of the devices a container can use. It contains a list of reservations, each set as an object with the following parameters: `capabilities`, `driver`, `count`, `device_ids` and `options`. - -Devices are reserved using a list of capabilities, making `capabilities` the only required field. A device MUST satisfy all the requested capabilities for a successful reservation. - -##### capabilities - -`capabilities` are set as a list of strings, expressing both generic and driver specific capabilities. 
-The following generic capabilities are recognized today: - -- `gpu`: Graphics accelerator -- `tpu`: AI accelerator - -To avoid name clashes, driver specific capabilities MUST be prefixed with the driver name. -For example, reserving an nVidia CUDA-enabled accelerator might look like this: - -```yml -deploy: - resources: - reservations: - devices: - - capabilities: ["nvidia-compute"] -``` - -##### driver - -A different driver for the reserved device(s) can be requested using `driver` field. The value is specified as a string. - -```yml -deploy: - resources: - reservations: - devices: - - capabilities: ["nvidia-compute"] - driver: nvidia -``` - -##### count - -If `count` is set to `all` or not specified, Compose implementations MUST reserve all devices that satisfy the requested capabilities. Otherwise, Compose implementations MUST reserve at least the number of devices specified. The value is specified as an integer. - -```yml -deploy: - resources: - reservations: - devices: - - capabilities: ["tpu"] - count: 2 -``` - -`count` and `device_ids` fields are exclusive. Compose implementations MUST return an error if both are specified. - -##### device_ids - -If `device_ids` is set, Compose implementations MUST reserve devices with the specified IDs providing they satisfy the requested capabilities. The value is specified as a list of strings. - -```yml -deploy: - resources: - reservations: - devices: - - capabilities: ["gpu"] - device_ids: ["GPU-f123d1c9-26bb-df9b-1c23-4a731f61d8c7"] -``` - -`count` and `device_ids` fields are exclusive. Compose implementations MUST return an error if both are specified. - -##### options - -Driver specific options can be set with `options` as key-value pairs. - -```yml -deploy: - resources: - reservations: - devices: - - capabilities: ["gpu"] - driver: gpuvendor - options: - virtualization: false -``` - -### restart_policy - -`restart_policy` configures if and how to restart containers when they exit. 
If `restart_policy` is not set, Compose implementations MUST consider `restart` field set by service configuration. - -- `condition`: One of `none`, `on-failure` or `any` (default: `any`). -- `delay`: How long to wait between restart attempts, specified as a [duration](index.md#specifying-durations) (default: 0). -- `max_attempts`: How many times to attempt to restart a container before giving up (default: never give up). If the restart does not - succeed within the configured `window`, this attempt doesn't count toward the configured `max_attempts` value. - For example, if `max_attempts` is set to '2', and the restart fails on the first attempt, more than two restarts MUST be attempted. -- `window`: How long to wait before deciding if a restart has succeeded, specified as a [duration](index.md#specifying-durations) (default: - decide immediately). - -```yml -deploy: - restart_policy: - condition: on-failure - delay: 5s - max_attempts: 3 - window: 120s -``` - -### rollback_config - -`rollback_config` configures how the service should be rollbacked in case of a failing update. - -- `parallelism`: The number of containers to rollback at a time. If set to 0, all containers rollback simultaneously. -- `delay`: The time to wait between each container group's rollback (default 0s). -- `failure_action`: What to do if a rollback fails. One of `continue` or `pause` (default `pause`) -- `monitor`: Duration after each task update to monitor for failure `(ns|us|ms|s|m|h)` (default 0s). -- `max_failure_ratio`: Failure rate to tolerate during a rollback (default 0). -- `order`: Order of operations during rollbacks. One of `stop-first` (old task is stopped before starting new one), - or `start-first` (new task is started first, and the running tasks briefly overlap) (default `stop-first`). - -### update_config - -`update_config` configures how the service should be updated. Useful for configuring rolling updates. - -- `parallelism`: The number of containers to update at a time. 
-- `delay`: The time to wait between updating a group of containers. -- `failure_action`: What to do if an update fails. One of `continue`, `rollback`, or `pause` (default: `pause`). -- `monitor`: Duration after each task update to monitor for failure `(ns|us|ms|s|m|h)` (default 0s). -- `max_failure_ratio`: Failure rate to tolerate during an update. -- `order`: Order of operations during updates. One of `stop-first` (old task is stopped before starting new one), - or `start-first` (new task is started first, and the running tasks briefly overlap) (default `stop-first`). - -```yml -deploy: - update_config: - parallelism: 2 - delay: 10s - order: stop-first -``` diff --git a/compose/compose-file/index.md b/compose/compose-file/index.md index 4ed8698d8d..17618e8f22 100644 --- a/compose/compose-file/index.md +++ b/compose/compose-file/index.md @@ -4,7 +4,7 @@ keywords: docker compose file, docker compose yml, docker compose reference, doc redirect_from: - /compose/yaml/ - /compose/compose-file/compose-file-v1/ -title: Compose file specification +title: Overview toc_max: 4 toc_min: 1 --- @@ -17,2512 +17,97 @@ Specification](https://github.com/compose-spec/compose-spec/blob/master/spec.md) target="_blank" rel="noopener" class="_"}. The Compose spec merges the legacy 2.x and 3.x versions, aggregating properties across these formats and is implemented by **Compose 1.27.0+**. -## Status of this document - -This document specifies the Compose file format used to define multi-containers applications. Distribution of this document is unlimited. - -The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in [RFC 2119](https://tools.ietf.org/html/rfc2119){: target="_blank" rel="noopener" class="_"}. 
- -### Requirements and optional attributes - -The Compose specification includes properties designed to target a local [OCI](https://opencontainers.org/){: target="_blank" rel="noopener" class="_"} container runtime, -exposing Linux kernel specific configuration options, but also some Windows container specific properties, as well as cloud platform features related to resource placement on a cluster, replicated application distribution and scalability. - -We acknowledge that no Compose implementation is expected to support **all** attributes, and that support for some properties -is Platform dependent and can only be confirmed at runtime. The definition of a versioned schema to control the supported -properties in a Compose file, established by the [docker-compose](https://github.com/docker/compose){: target="_blank" rel="noopener" class="_"} tool where the Compose -file format was designed, doesn't offer any guarantee to the end-user attributes will be actually implemented. - -The specification defines the expected configuration syntax and behavior, but - until noted - supporting any of those is OPTIONAL. - -A Compose implementation to parse a Compose file using unsupported attributes SHOULD warn user. We recommend implementors -to support those running modes: - -* default: warn user about unsupported attributes, but ignore them -* strict: warn user about unsupported attributes and reject the compose file -* loose: ignore unsupported attributes AND unknown attributes (that were not defined by the spec by the time implementation was created) - -## The Compose application model - -The Compose specification allows one to define a platform-agnostic container based application. Such an application is designed as a set of containers which have to both run together with adequate shared resources and communication channels. - -Computing components of an application are defined as [Services](#services-top-level-element). 
A Service is an abstract concept implemented on platforms by running the same container image (and configuration) one or more times. - -Services communicate with each other through [Networks](#networks-top-level-element). In this specification, a Network is a platform capability abstraction to establish an IP route between containers within services connected together. Low-level, platform-specific networking options are grouped into the Network definition and MAY be partially implemented on some platforms. - -Services store and share persistent data into [Volumes](#volumes-top-level-element). The specification describes such a persistent data as a high-level filesystem mount with global options. Actual platform-specific implementation details are grouped into the Volumes definition and MAY be partially implemented on some platforms. - -Some services require configuration data that is dependent on the runtime or platform. For this, the specification defines a dedicated concept: [Configs](#configs-top-level-element). From a Service container point of view, Configs are comparable to Volumes, in that they are files mounted into the container. But the actual definition involves distinct platform resources and services, which are abstracted by this type. - -A [Secret](#secrets-top-level-element) is a specific flavor of configuration data for sensitive data that SHOULD NOT be exposed without security considerations. Secrets are made available to services as files mounted into their containers, but the platform-specific resources to provide sensitive data are specific enough to deserve a distinct concept and definition within the Compose specification. - -Distinction within Volumes, Configs and Secret allows implementations to offer a comparable abstraction at service level, but cover the specific configuration of adequate platform resources for well identified data usages. - -A **Project** is an individual deployment of an application specification on a platform. 
A project's name is used to group -resources together and isolate them from other applications or other installation of the same Compose specified application with distinct parameters. A Compose implementation creating resources on a platform MUST prefix resource names by project and -set the label `com.docker.compose.project`. - -Project name can be set explicitly by top-level `name` attribute. Compose implementation MUST offer a way for user to set a custom project name and override this name, so that the same `compose.yaml` file can be deployed twice on the same infrastructure, without changes, by just passing a distinct name. - -### Illustrative example - -The following example illustrates Compose specification concepts with a concrete example application. The example is non-normative. - -Consider an application split into a frontend web application and a backend service. - -The frontend is configured at runtime with an HTTP configuration file managed by infrastructure, providing an external domain name, and an HTTPS server certificate injected by the platform's secured secret store. - -The backend stores data in a persistent volume. - -Both services communicate with each other on an isolated back-tier network, while frontend is also connected to a front-tier network and exposes port 443 for external usage. - -``` -(External user) --> 443 [frontend network] - | - +--------------------+ - | frontend service |...ro... - | "webapp" |...ro... 
#secured - +--------------------+ - | - [backend network] - | - +--------------------+ - | backend service | r+w ___________________ - | "database" |=======( persistent volume ) - +--------------------+ \_________________/ -``` - -The example application is composed of the following parts: - -- 2 services, backed by Docker images: `webapp` and `database` -- 1 secret (HTTPS certificate), injected into the frontend -- 1 configuration (HTTP), injected into the frontend -- 1 persistent volume, attached to the backend -- 2 networks - -```yml -services: - frontend: - image: awesome/webapp - ports: - - "443:8043" - networks: - - front-tier - - back-tier - configs: - - httpd-config - secrets: - - server-certificate - - backend: - image: awesome/database - volumes: - - db-data:/etc/data - networks: - - back-tier - -volumes: - db-data: - driver: flocker - driver_opts: - size: "10GiB" - -configs: - httpd-config: - external: true - -secrets: - server-certificate: - external: true - -networks: - # The presence of these objects is sufficient to define them - front-tier: {} - back-tier: {} -``` - -This example illustrates the distinction between volumes, configs and secrets. While all of them are all exposed -to service containers as mounted files or directories, only a volume can be configured for read+write access. -Secrets and configs are read-only. The volume configuration allows you to select a volume driver and pass driver options -to tweak volume management according to the actual infrastructure. Configs and Secrets rely on platform services, -and are declared `external` as they are not managed as part of the application lifecycle: the Compose implementation -will use a platform-specific lookup mechanism to retrieve runtime values. 
- -## Compose file - -The Compose file is a [YAML](http://yaml.org/) file defining -[version](#version-top-level-element) (DEPRECATED), -[services](#services-top-level-element) (REQUIRED), -[networks](#networks-top-level-element), -[volumes](#volumes-top-level-element), -[configs](#configs-top-level-element) and -[secrets](#secrets-top-level-element). -The default path for a Compose file is `compose.yaml` (preferred) or `compose.yml` in working directory. -Compose implementations SHOULD also support `docker-compose.yaml` and `docker-compose.yml` for backward compatibility. -If both files exist, Compose implementations MUST prefer canonical `compose.yaml` one. - -Multiple Compose files can be combined together to define the application model. The combination of YAML files -MUST be implemented by appending/overriding YAML elements based on Compose file order set by the user. Simple -attributes and maps get overridden by the highest order Compose file, lists get merged by appending. Relative -paths MUST be resolved based on the **first** Compose file's parent folder, whenever complimentary files being -merged are hosted in other folders. - -As some Compose file elements can both be expressed as single strings or complex objects, merges MUST apply to -the expanded form. - -### Profiles - -Profiles allow to adjust the Compose application model for various usages and environments. A Compose -implementation SHOULD allow the user to define a set of active profiles. The exact mechanism is implementation -specific and MAY include command line flags, environment variables, etc. - -The Services top-level element supports a `profiles` attribute to define a list of named profiles. Services without -a `profiles` attribute set MUST always be enabled. A service MUST be ignored by the Compose -implementation when none of the listed `profiles` match the active ones, unless the service is -explicitly targeted by a command. 
In that case its `profiles` MUST be added to the set of active profiles. -All other top-level elements are not affected by `profiles` and are always active. - -References to other services (by `links`, `extends` or shared resource syntax `service:xxx`) MUST not -automatically enable a component that would otherwise have been ignored by active profiles. Instead the -Compose implementation MUST return an error. - -#### Illustrative example - -```yaml -services: - foo: - image: foo - bar: - image: bar - profiles: - - test - baz: - image: baz - depends_on: - - bar - profiles: - - test - zot: - image: zot - depends_on: - - bar - profiles: - - debug -``` - -- Compose application model parsed with no profile enabled only contains the `foo` service. -- If profile `test` is enabled, model contains the services `bar` and `baz` which are enabled by the - `test` profile and service `foo` which is always enabled. -- If profile `debug` is enabled, model contains both `foo` and `zot` services, but not `bar` and `baz` - and as such the model is invalid regarding the `depends_on` constraint of `zot`. -- If profiles `debug` and `test` are enabled, model contains all services: `foo`, `bar`, `baz` and `zot`. -- If Compose implementation is executed with `bar` as explicit service to run, it and the `test` profile - will be active even if `test` profile is not enabled _by the user_. -- If Compose implementation is executed with `baz` as explicit service to run, the service `baz` and the - profile `test` will be active and `bar` will be pulled in by the `depends_on` constraint. -- If Compose implementation is executed with `zot` as explicit service to run, again the model will be - invalid regarding the `depends_on` constraint of `zot` since `zot` and `bar` have no common `profiles` - listed. 
-- If Compose implementation is executed with `zot` as explicit service to run and profile `test` enabled, - profile `debug` is automatically enabled and service `bar` is pulled in as a dependency starting both - services `zot` and `bar`. - -## Version top-level element - -Top-level `version` property is defined by the specification for backward compatibility but is only informative. - -A Compose implementation SHOULD NOT use this version to select an exact schema to validate the Compose file, but -prefer the most recent schema at the time it has been designed. - -Compose implementations SHOULD validate whether they can fully parse the Compose file. If some fields are unknown, typically -because the Compose file was written with fields defined by a newer version of the specification, Compose implementations -SHOULD warn the user. Compose implementations MAY offer options to ignore unknown fields (as defined by ["loose"](#requirements-and-optional-attributes) mode). - -## Name top-level element - -Top-level `name` property is defined by the specification as project name to be used if user doesn't set one explicitly. -Compose implementations MUST offer a way for user to override this name, and SHOULD define a mechanism to compute a -default project name, to be used if the top-level `name` element is not set. - -Whenever project name is defined by top-level `name` or by some custom mechanism, it MUST be exposed for -[interpolation](#interpolation) and environment variable resolution as `COMPOSE_PROJECT_NAME` - -```yml -services: - foo: - image: busybox - environment: - - COMPOSE_PROJECT_NAME - command: echo "I'm running ${COMPOSE_PROJECT_NAME}" -``` - -## Services top-level element - -A Service is an abstract definition of a computing resource within an application which can be scaled/replaced -independently from other components. Services are backed by a set of containers, run by the platform -according to replication requirements and placement constraints. 
Being backed by containers, Services are defined -by a Docker image and set of runtime arguments. All containers within a service are identically created with these -arguments. - -A Compose file MUST declare a `services` root element as a map whose keys are string representations of service names, -and whose values are service definitions. A service definition contains the configuration that is applied to each -container started for that service. - -Each service MAY also include a Build section, which defines how to create the Docker image for the service. -Compose implementations MAY support building docker images using this service definition. If not implemented -the Build section SHOULD be ignored and the Compose file MUST still be considered valid. - -Build support is an OPTIONAL aspect of the Compose specification, and is -described in detail in the [Build support](build.md) documentation. - -Each Service defines runtime constraints and requirements to run its containers. The `deploy` section groups -these constraints and allows the platform to adjust the deployment strategy to best match containers' needs with -available resources. - -Deploy support is an OPTIONAL aspect of the Compose specification, and is -described in detail in the [Deployment support](deploy.md) documentation. -If not implemented the Deploy section SHOULD be ignored and the Compose file MUST still be considered valid. - -### build - -`build` specifies the build configuration for creating container image from source, as defined in the [Build support](build.md) documentation. - - -### blkio_config - -`blkio_config` defines a set of configuration options to set block IO limits for this service. 
- -```yml -services: - foo: - image: busybox - blkio_config: - weight: 300 - weight_device: - - path: /dev/sda - weight: 400 - device_read_bps: - - path: /dev/sdb - rate: '12mb' - device_read_iops: - - path: /dev/sdb - rate: 120 - device_write_bps: - - path: /dev/sdb - rate: '1024k' - device_write_iops: - - path: /dev/sdb - rate: 30 -``` - -#### device_read_bps, device_write_bps - -Set a limit in bytes per second for read / write operations on a given device. -Each item in the list MUST have two keys: - -- `path`: defining the symbolic path to the affected device. -- `rate`: either as an integer value representing the number of bytes or as a string expressing a byte value. - -#### device_read_iops, device_write_iops - -Set a limit in operations per second for read / write operations on a given device. -Each item in the list MUST have two keys: - -- `path`: defining the symbolic path to the affected device. -- `rate`: as an integer value representing the permitted number of operations per second. - -#### weight - -Modify the proportion of bandwidth allocated to this service relative to other services. -Takes an integer value between 10 and 1000, with 500 being the default. - -#### weight_device - -Fine-tune bandwidth allocation by device. Each item in the list must have two keys: - -- `path`: defining the symbolic path to the affected device. -- `weight`: an integer value between 10 and 1000. - -### cpu_count - -`cpu_count` defines the number of usable CPUs for service container. - -### cpu_percent - -`cpu_percent` defines the usable percentage of the available CPUs. - -### cpu_shares - -`cpu_shares` defines (as integer value) service container relative CPU weight versus other containers. - -### cpu_period - -`cpu_period` allow Compose implementations to configure CPU CFS (Completely Fair Scheduler) period when platform is based -on Linux kernel. 
- -### cpu_quota - -`cpu_quota` allow Compose implementations to configure CPU CFS (Completely Fair Scheduler) quota when platform is based -on Linux kernel. - -### cpu_rt_runtime - -`cpu_rt_runtime` configures CPU allocation parameters for platform with support for realtime scheduler. Can be either -an integer value using microseconds as unit or a [duration](#specifying-durations). - -```yml - cpu_rt_runtime: '400ms' - cpu_rt_runtime: 95000` -``` - -### cpu_rt_period - -`cpu_rt_period` configures CPU allocation parameters for platform with support for realtime scheduler. Can be either -an integer value using microseconds as unit or a [duration](#specifying-durations). - -```yml - cpu_rt_period: '1400us' - cpu_rt_period: 11000` -``` - -### cpus - -_DEPRECATED: use [deploy.reservations.cpus](deploy.md#cpus)_ - -`cpus` define the number of (potentially virtual) CPUs to allocate to service containers. This is a fractional number. -`0.000` means no limit. - -### cpuset - -`cpuset` defines the explicit CPUs in which to allow execution. Can be a range `0-3` or a list `0,1` - - -### cap_add - -`cap_add` specifies additional container [capabilities](http://man7.org/linux/man-pages/man7/capabilities.7.html) -as strings. - -``` -cap_add: - - ALL -``` - -### cap_drop - -`cap_drop` specifies container [capabilities](http://man7.org/linux/man-pages/man7/capabilities.7.html) to drop -as strings. - -``` -cap_drop: - - NET_ADMIN - - SYS_ADMIN -``` - -### cgroup_parent - -`cgroup_parent` specifies an OPTIONAL parent [cgroup](http://man7.org/linux/man-pages/man7/cgroups.7.html) for the container. - -``` -cgroup_parent: m-executor-abcd -``` - -### command - -`command` overrides the default command declared by the container image (i.e. by Dockerfile's `CMD`). 
- -``` -command: bundle exec thin -p 3000 -``` - -The command can also be a list, in a manner similar to [Dockerfile](../../engine/reference/builder.md#cmd): - -``` -command: [ "bundle", "exec", "thin", "-p", "3000" ] -``` - -### configs - -`configs` grant access to configs on a per-service basis using the per-service `configs` -configuration. Two different syntax variants are supported. - -Compose implementations MUST report an error if config doesn't exist on platform or isn't defined in the -[`configs`](#configs-top-level-element) section of this Compose file. - -There are two syntaxes defined for configs. To remain compliant to this specification, an implementation -MUST support both syntaxes. Implementations MUST allow use of both short and long syntaxes within the same document. - -#### Short syntax - -The short syntax variant only specifies the config name. This grants the -container access to the config and mounts it at `/` -within the container. The source name and destination mount point are both set -to the config name. - -The following example uses the short syntax to grant the `redis` service -access to the `my_config` and `my_other_config` configs. The value of -`my_config` is set to the contents of the file `./my_config.txt`, and -`my_other_config` is defined as an external resource, which means that it has -already been defined in the platform. If the external config does not exist, -the deployment MUST fail. - -```yml -services: - redis: - image: redis:latest - configs: - - my_config -configs: - my_config: - file: ./my_config.txt - my_other_config: - external: true -``` - -#### Long syntax - -The long syntax provides more granularity in how the config is created within the service's task containers. - -- `source`: The name of the config as it exists in the platform. -- `target`: The path and name of the file to be mounted in the service's - task containers. Defaults to `/` if not specified. 
-- `uid` and `gid`: The numeric UID or GID that owns the mounted config file - within the service's task containers. Default value when not specified is USER running container. -- `mode`: The [permissions](https://web.archive.org/web/20220310140126/http://permissions-calculator.org/) for the file that is mounted within the service's - task containers, in octal notation. Default value is world-readable (`0444`). - Writable bit MUST be ignored. The executable bit can be set. - -The following example sets the name of `my_config` to `redis_config` within the -container, sets the mode to `0440` (group-readable) and sets the user and group -to `103`. The `redis` service does not have access to the `my_other_config` -config. - -```yml -services: - redis: - image: redis:latest - configs: - - source: my_config - target: /redis_config - uid: "103" - gid: "103" - mode: 0440 -configs: - my_config: - external: true - my_other_config: - external: true -``` - -You can grant a service access to multiple configs, and you can mix long and short syntax. - -### container_name - -`container_name` is a string that specifies a custom container name, rather than a generated default name. - -```yml -container_name: my-web-container -``` - -Compose implementation MUST NOT scale a service beyond one container if the Compose file specifies a -`container_name`. Attempting to do so MUST result in an error. - -If present, `container_name` SHOULD follow the regex format of `[a-zA-Z0-9][a-zA-Z0-9_.-]+` - -### credential_spec - -`credential_spec` configures the credential spec for a managed service account. - -Compose implementations that support services using Windows containers MUST support `file:` and -`registry:` protocols for credential_spec. Compose implementations MAY also support additional -protocols for custom use-cases. - -The `credential_spec` must be in the format `file://` or `registry://`. 
- -```yml -credential_spec: - file: my-credential-spec.json -``` - -When using `registry:`, the credential spec is read from the Windows registry on -the daemon's host. A registry value with the given name must be located in: - - HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs - -The following example loads the credential spec from a value named `my-credential-spec` -in the registry: - -```yml -credential_spec: - registry: my-credential-spec -``` - -#### Example gMSA configuration - -When configuring a gMSA credential spec for a service, you only need -to specify a credential spec with `config`, as shown in the following example: - -```yml -services: - myservice: - image: myimage:latest - credential_spec: - config: my_credential_spec - -configs: - my_credentials_spec: - file: ./my-credential-spec.json| -``` - -### depends_on - -`depends_on` expresses startup and shutdown dependencies between services. - -#### Short syntax - -The short syntax variant only specifies service names of the dependencies. -Service dependencies cause the following behaviors: - -- Compose implementations MUST create services in dependency order. In the following - example, `db` and `redis` are created before `web`. - -- Compose implementations MUST remove services in dependency order. In the following - example, `web` is removed before `db` and `redis`. - -Simple example: - -```yml -services: - web: - build: . - depends_on: - - db - - redis - redis: - image: redis - db: - image: postgres -``` - -Compose implementations MUST guarantee dependency services have been started before -starting a dependent service. -Compose implementations MAY wait for dependency services to be "ready" before -starting a dependent service. - -#### Long syntax - -The long form syntax enables the configuration of additional fields that can't be -expressed in the short form. 
- -- `condition`: condition under which dependency is considered satisfied - - `service_started`: is an equivalent of the short syntax described above - - `service_healthy`: specifies that a dependency is expected to be "healthy" - (as indicated by [healthcheck](#healthcheck)) before starting a dependent - service. - - `service_completed_successfully`: specifies that a dependency is expected to run - to successful completion before starting a dependent service. - -Service dependencies cause the following behaviors: - -- Compose implementations MUST create services in dependency order. In the following - example, `db` and `redis` are created before `web`. - -- Compose implementations MUST wait for healthchecks to pass on dependencies - marked with `service_healthy`. In the following example, `db` is expected to - be "healthy" before `web` is created. - -- Compose implementations MUST remove services in dependency order. In the following - example, `web` is removed before `db` and `redis`. - -Simple example: - -```yml -services: - web: - build: . - depends_on: - db: - condition: service_healthy - redis: - condition: service_started - redis: - image: redis - db: - image: postgres -``` - -Compose implementations MUST guarantee dependency services have been started before -starting a dependent service. -Compose implementations MUST guarantee dependency services marked with -`service_healthy` are "healthy" before starting a dependent service. - - -### deploy - -`deploy` specifies the configuration for the deployment and lifecycle of services, as defined [here](deploy.md). - - -### device_cgroup_rules - -`device_cgroup_rules` defines a list of device cgroup rules for this container. -The format is the same format the Linux kernel specifies in the [Control Groups -Device Whitelist Controller](https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v1/devices.html){: target="_blank" rel="noopener" class="_"}. 
- -```yml -device_cgroup_rules: - - 'c 1:3 mr' - - 'a 7:* rmw' -``` - -### devices - -`devices` defines a list of device mappings for created containers in the form of -`HOST_PATH:CONTAINER_PATH[:CGROUP_PERMISSIONS]`. - -```yml -devices: - - "/dev/ttyUSB0:/dev/ttyUSB0" - - "/dev/sda:/dev/xvda:rwm" -``` - -### dns - -`dns` defines custom DNS servers to set on the container network interface configuration. Can be a single value or a list. - -```yml -dns: 8.8.8.8 -``` - -```yml -dns: - - 8.8.8.8 - - 9.9.9.9 -``` - -### dns_opt - -`dns_opt` list custom DNS options to be passed to the container’s DNS resolver (`/etc/resolv.conf` file on Linux). - -```yml -dns_opt: - - use-vc - - no-tld-query -``` - -### dns_search - -`dns` defines custom DNS search domains to set on container network interface configuration. Can be a single value or a list. - -```yml -dns_search: example.com -``` - -```yml -dns_search: - - dc1.example.com - - dc2.example.com -``` - -### domainname - -`domainname` declares a custom domain name to use for the service container. MUST be a valid RFC 1123 hostname. - -### entrypoint - -`entrypoint` overrides the default entrypoint for the Docker image (i.e. `ENTRYPOINT` set by Dockerfile). -Compose implementations MUST clear out any default command on the Docker image - both `ENTRYPOINT` and `CMD` instruction -in the Dockerfile - when `entrypoint` is configured by a Compose file. If [`command`](#command) is also set, -it is used as parameter to `entrypoint` as a replacement for Docker image's `CMD` - -```yml -entrypoint: /code/entrypoint.sh -``` - -The entrypoint can also be a list, in a manner similar to -[Dockerfile](../../engine/reference/builder.md#cmd): - -```yml -entrypoint: - - php - - -d - - zend_extension=/usr/local/lib/php/extensions/no-debug-non-zts-20100525/xdebug.so - - -d - - memory_limit=-1 - - vendor/bin/phpunit -``` - -### env_file - -`env_file` adds environment variables to the container based on file content. 
- -```yml -env_file: .env -``` - -`env_file` can also be a list. The files in the list MUST be processed from the top down. For the same variable -specified in two env files, the value from the last file in the list MUST stand. - -```yml -env_file: - - ./a.env - - ./b.env -``` - -Relative path MUST be resolved from the Compose file's parent folder. As absolute paths prevent the Compose -file from being portable, Compose implementations SHOULD warn users when such a path is used to set `env_file`. - -Environment variables declared in the [environment](#environment) section -MUST override these values – this holds true even if those values are -empty or undefined. - -#### Env_file format - -Each line in an env file MUST be in `VAR[=[VAL]]` format. Lines beginning with `#` MUST be ignored. -Blank lines MUST also be ignored. - -The value of `VAL` is used as a raw string and not modified at all. If the value is surrounded by quotes -(as is often the case for shell variables), the quotes MUST be **included** in the value passed to containers -created by the Compose implementation. - -`VAL` MAY be omitted, in such cases the variable value is empty string. -`=VAL` MAY be omitted, in such cases the variable is **unset**. - -```bash -# Set Rails/Rack environment -RACK_ENV=development -VAR="quoted" -``` - -### environment - -`environment` defines environment variables set in the container. `environment` can use either an array or a -map. Any boolean values; true, false, yes, no, SHOULD be enclosed in quotes to ensure -they are not converted to True or False by the YAML parser. - -Environment variables MAY be declared by a single key (no value to equals sign). In such a case Compose -implementations SHOULD rely on some user interaction to resolve the value. If they do not, the variable -is unset and will be removed from the service container environment. 
- -Map syntax: - -```yml -environment: - RACK_ENV: development - SHOW: "true" - USER_INPUT: -``` - -Array syntax: - -```yml -environment: - - RACK_ENV=development - - SHOW=true - - USER_INPUT -``` - -When both `env_file` and `environment` are set for a service, values set by `environment` have precedence. - -### expose - -`expose` defines the ports that Compose implementations MUST expose from container. These ports MUST be -accessible to linked services and SHOULD NOT be published to the host machine. Only the internal container -ports can be specified. - -```yml -expose: - - "3000" - - "8000" -``` - -### extends - -Extend another service, in the current file or another, optionally overriding configuration. You can use -`extends` on any service together with other configuration keys. The `extends` value MUST be a mapping -defined with a required `service` and an optional `file` key. - -```yaml -extends: - file: common.yml - service: webapp -``` - -If supported Compose implementations MUST process `extends` in the following way: - -- `service` defines the name of the service being referenced as a base, for example `web` or `database`. -- `file` is the location of a Compose configuration file defining that service. - -#### Restrictions - -The following restrictions apply to the service being referenced: - -- Services that have dependencies on other services cannot be used as a base. Therefore, any key - that introduces a dependency on another service is incompatible with `extends`. The - non-exhaustive list of such keys is: `links`, `volumes_from`, `container` mode (in `ipc`, `pid`, - `network_mode` and `net`), `service` mode (in `ipc`, `pid` and `network_mode`), `depends_on`. -- Services cannot have circular references with `extends` - -Compose implementations MUST return an error in all of these cases. - -#### Finding referenced service - -`file` value can be: - -- Not present. - This indicates that another service within the same Compose file is being referenced. 
-- File path, which can be either: - - Relative path. This path is considered as relative to the location of the main Compose - file. - - Absolute path. - -Service denoted by `service` MUST be present in the identified referenced Compose file. -Compose implementations MUST return an error if: - -- Service denoted by `service` was not found -- Compose file denoted by `file` was not found - -#### Merging service definitions - -Two service definitions (_main_ one in the current Compose file and _referenced_ one -specified by `extends`) MUST be merged in the following way: - -- Mappings: keys in mappings of _main_ service definition override keys in mappings - of _referenced_ service definition. Keys that aren't overridden are included as is. -- Sequences: items are combined together into an new sequence. Order of elements is - preserved with the _referenced_ items coming first and _main_ items after. -- Scalars: keys in _main_ service definition take precedence over keys in the - _referenced_ one. - -##### Mappings - -The following keys should be treated as mappings: `build.args`, `build.labels`, -`build.extra_hosts`, `deploy.labels`, `deploy.update_config`, `deploy.rollback_config`, -`deploy.restart_policy`, `deploy.resources.limits`, `environment`, `healthcheck`, -`labels`, `logging.options`, `sysctls`, `storage_opt`, `extra_hosts`, `ulimits`. - -One exception that applies to `healthcheck` is that _main_ mapping cannot specify -`disable: true` unless _referenced_ mapping also specifies `disable: true`. Compose -implementations MUST return an error in this case. - -For example, the input below: - -```yaml -services: - common: - image: busybox - environment: - TZ: utc - PORT: 80 - cli: - extends: - service: common - environment: - PORT: 8080 -``` - -Produces the following configuration for the `cli` service. The same output is -produced if array syntax is used. 
- -```yaml -environment: - PORT: 8080 - TZ: utc -image: busybox -``` - -Items under `blkio_config.device_read_bps`, `blkio_config.device_read_iops`, -`blkio_config.device_write_bps`, `blkio_config.device_write_iops`, `devices` and -`volumes` are also treated as mappings where key is the target path inside the -container. - -For example, the input below: - -```yaml -services: - common: - image: busybox - volumes: - - common-volume:/var/lib/backup/data:rw - cli: - extends: - service: common - volumes: - - cli-volume:/var/lib/backup/data:ro -``` - -Produces the following configuration for the `cli` service. Note that mounted path -now points to the new volume name and `ro` flag was applied. - -```yaml -image: busybox -volumes: -- cli-volume:/var/lib/backup/data:ro -``` - -If _referenced_ service definition contains `extends` mapping, the items under it -are simply copied into the new _merged_ definition. Merging process is then kicked -off again until no `extends` keys are remaining. - -For example, the input below: - -```yaml -services: - base: - image: busybox - user: root - common: - image: busybox - extends: - service: base - cli: - extends: - service: common -``` - -Produces the following configuration for the `cli` service. Here, `cli` services -gets `user` key from `common` service, which in turn gets this key from `base` -service. - -```yaml -image: busybox -user: root -``` - -##### Sequences - -The following keys should be treated as sequences: `cap_add`, `cap_drop`, `configs`, -`deploy.placement.constraints`, `deploy.placement.preferences`, -`deploy.reservations.generic_resources`, `device_cgroup_rules`, `expose`, -`external_links`, `ports`, `secrets`, `security_opt`. -Any duplicates resulting from the merge are removed so that the sequence only -contains unique elements. 
- -For example, the input below: - -```yaml -services: - common: - image: busybox - security_opt: - - label:role:ROLE - cli: - extends: - service: common - security_opt: - - label:user:USER -``` - -Produces the following configuration for the `cli` service. - -```yaml -image: busybox -security_opt: -- label:role:ROLE -- label:user:USER -``` - -In case list syntax is used, the following keys should also be treated as sequences: -`dns`, `dns_search`, `env_file`, `tmpfs`. Unlike sequence fields mentioned above, -duplicates resulting from the merge are not removed. - -##### Scalars - -Any other allowed keys in the service definition should be treated as scalars. - -### external_links - -`external_links` link service containers to services managed outside this Compose application. -`external_links` define the name of an existing service to retrieve using the platform lookup mechanism. -An alias of the form `SERVICE:ALIAS` can be specified. - -```yml -external_links: - - redis - - database:mysql - - database:postgresql -``` - -### extra_hosts - -`extra_hosts` adds hostname mappings to the container network interface configuration (`/etc/hosts` for Linux). -Values MUST set hostname and IP address for additional hosts in the form of `HOSTNAME:IP`. - -```yml -extra_hosts: - - "somehost:162.242.195.82" - - "otherhost:50.31.209.229" -``` - -Compose implementations MUST create matching entry with the IP address and hostname in the container's network -configuration, which means for Linux `/etc/hosts` will get extra lines: - -``` -162.242.195.82 somehost -50.31.209.229 otherhost -``` - -### group_add - -`group_add` specifies additional groups (by name or number) which the user inside the container MUST be a member of. - -An example of where this is useful is when multiple containers (running as different users) need to all read or write -the same file on a shared volume. That file can be owned by a group shared by all the containers, and specified in -`group_add`. 
- -```yml -services: - myservice: - image: alpine - group_add: - - mail -``` - -Running `id` inside the created container MUST show that the user belongs to the `mail` group, which would not have -been the case if `group_add` were not declared. - -### healthcheck - -`healthcheck` declares a check that's run to determine whether or not containers for this -service are "healthy". This overrides -[HEALTHCHECK Dockerfile instruction](../../engine/reference/builder.md#healthcheck) -set by the service's Docker image. - -```yml -healthcheck: - test: ["CMD", "curl", "-f", "http://localhost"] - interval: 1m30s - timeout: 10s - retries: 3 - start_period: 40s -``` - -`interval`, `timeout` and `start_period` are [specified as durations](#specifying-durations). - -`test` defines the command the Compose implementation will run to check container health. It can be -either a string or a list. If it's a list, the first item must be either `NONE`, `CMD` or `CMD-SHELL`. -If it's a string, it's equivalent to specifying `CMD-SHELL` followed by that string. - -```yml -# Hit the local web app -test: ["CMD", "curl", "-f", "http://localhost"] -``` - -Using `CMD-SHELL` will run the command configured as a string using the container's default shell -(`/bin/sh` for Linux). Both forms below are equivalent: - -```yml -test: ["CMD-SHELL", "curl -f http://localhost || exit 1"] -``` - -```yml -test: curl -f https://localhost || exit 1 -``` - -`NONE` disable the healthcheck, and is mostly useful to disable Healthcheck set by image. Alternatively -the healthcheck set by the image can be disabled by setting `disable: true`: - -```yml -healthcheck: - disable: true -``` - -### hostname - -`hostname` declares a custom host name to use for the service container. MUST be a valid RFC 1123 hostname. - -### image - -`image` specifies the image to start the container from. 
Image MUST follow the Open Container Specification -[addressable image format](https://github.com/opencontainers/org/blob/master/docs/docs/introduction/digests.md), -as `[/][/][:|@]`. - -```yml - image: redis - image: redis:5 - image: redis@sha256:0ed5d5928d4737458944eb604cc8509e245c3e19d02ad83935398bc4b991aac7 - image: library/redis - image: docker.io/library/redis - image: my_private.registry:5000/redis -``` - -If the image does not exist on the platform, Compose implementations MUST attempt to pull it based on the `pull_policy`. -Compose implementations with build support MAY offer alternative options for the end user to control precedence of -pull over building the image from source, however pulling the image MUST be the default behavior. - -`image` MAY be omitted from a Compose file as long as a `build` section is declared. Compose implementations -without build support MUST fail when `image` is missing from the Compose file. - -### init - -`init` run an init process (PID 1) inside the container that forwards signals and reaps processes. -Set this option to `true` to enable this feature for the service. - -```yml -services: - web: - image: alpine:latest - init: true -``` - -The init binary that is used is platform specific. - -### ipc - -`ipc` configures the IPC isolation mode set by service container. Available -values are platform specific, but Compose specification defines specific values -which MUST be implemented as described if supported: - -- `shareable` which gives the container own private IPC namespace, with a - possibility to share it with other containers. -- `service:{name}` which makes the container join another (`shareable`) - container's IPC namespace. - -```yml - ipc: "shareable" - ipc: "service:[service name]" -``` - -### isolation - -`isolation` specifies a container’s isolation technology. Supported values are platform-specific. - -### labels - -`labels` add metadata to containers. You can use either an array or a map. 
- -It's recommended that you use reverse-DNS notation to prevent your labels from conflicting with -those used by other software. - -```yml -labels: - com.example.description: "Accounting webapp" - com.example.department: "Finance" - com.example.label-with-empty-value: "" -``` - -```yml -labels: - - "com.example.description=Accounting webapp" - - "com.example.department=Finance" - - "com.example.label-with-empty-value" -``` - -Compose implementations MUST create containers with canonical labels: - -- `com.docker.compose.project` set on all resources created by Compose implementation to the user project name -- `com.docker.compose.service` set on service containers with service name as defined in the Compose file - -The `com.docker.compose` label prefix is reserved. Specifying labels with this prefix in the Compose file MUST -result in a runtime error. - -### links - -`links` defines a network link to containers in another service. Either specify both the service name and -a link alias (`SERVICE:ALIAS`), or just the service name. - -```yml -web: - links: - - db - - db:database - - redis -``` - -Containers for the linked service MUST be reachable at a hostname identical to the alias, or the service name -if no alias was specified. - -Links are not required to enable services to communicate - when no specific network configuration is set, -any service MUST be able to reach any other service at that service’s name on the `default` network. If services -do declare networks they are attached to, `links` SHOULD NOT override the network configuration and services not -attached to a shared network SHOULD NOT be able to communicate. Compose implementations MAY NOT warn the user -about this configuration mismatch. - -Links also express implicit dependency between services in the same way as -[depends_on](#depends_on), so they determine the order of service startup. - -### logging - -`logging` defines the logging configuration for the service. 
- -```yml -logging: - driver: syslog - options: - syslog-address: "tcp://192.168.0.42:123" -``` - -The `driver` name specifies a logging driver for the service's containers. The default and available values -are platform specific. Driver specific options can be set with `options` as key-value pairs. - -### mac_address - -`mac_address` sets a MAC address for service container. - -### mem_limit - -_DEPRECATED: use [deploy.limits.memory](deploy.md#memory)_ - -### mem_reservation - -_DEPRECATED: use [deploy.reservations.memory](deploy.md#memory)_ - -### mem_swappiness - -`mem_swappiness` defines as a percentage (a value between 0 and 100) for the host kernel to swap out -anonymous memory pages used by a container. - -- a value of 0 turns off anonymous page swapping. -- a value of 100 sets all anonymous pages as swappable. - -Default value is platform specific. - -### memswap_limit - -`memswap_limit` defines the amount of memory container is allowed to swap to disk. This is a modifier -attribute that only has meaning if `memory` is also set. Using swap allows the container to write excess -memory requirements to disk when the container has exhausted all the memory that is available to it. -There is a performance penalty for applications that swap memory to disk often. - -- If `memswap_limit` is set to a positive integer, then both `memory` and `memswap_limit` MUST be set. `memswap_limit` represents the total amount of memory and swap that can be used, and `memory` controls the amount used by non-swap memory. So if `memory`="300m" and `memswap_limit`="1g", the container can use 300m of memory and 700m (1g - 300m) swap. -- If `memswap_limit` is set to 0, the setting MUST be ignored, and the value is treated as unset. -- If `memswap_limit` is set to the same value as `memory`, and `memory` is set to a positive integer, the container does not have access to swap. See Prevent a container from using swap. 
-- If `memswap_limit` is unset, and `memory` is set, the container can use as much swap as the `memory` setting, if the host container has swap memory configured. For instance, if `memory`="300m" and `memswap_limit` is not set, the container can use 600m in total of memory and swap. -- If `memswap_limit` is explicitly set to -1, the container is allowed to use unlimited swap, up to the amount available on the host system. - -### network_mode - -`network_mode` set service containers network mode. Available values are platform specific, but Compose -specification define specific values which MUST be implemented as described if supported: - -- `none` which disable all container networking -- `host` which gives the container raw access to host's network interface -- `service:{name}` which gives the containers access to the specified service only - -```yml - network_mode: "host" - network_mode: "none" - network_mode: "service:[service name]" -``` - -### networks - -`networks` defines the networks that service containers are attached to, referencing entries under the -[top-level `networks` key](#networks-top-level-element). - -```yml -services: - some-service: - networks: - - some-network - - other-network -``` - -#### aliases - -`aliases` declares alternative hostnames for this service on the network. Other containers on the same -network can use either the service name or this alias to connect to one of the service's containers. - -Since `aliases` are network-scoped, the same service can have different aliases on different networks. - -> **Note**: A network-wide alias can be shared by multiple containers, and even by multiple services. -> If it is, then exactly which container the name resolves to is not guaranteed. 
- -The general format is shown here: - -```yml -services: - some-service: - networks: - some-network: - aliases: - - alias1 - - alias3 - other-network: - aliases: - - alias2 -``` - -In the example below, service `frontend` will be able to reach the `backend` service at -the hostname `backend` or `database` on the `back-tier` network, and service `monitoring` -will be able to reach same `backend` service at `db` or `mysql` on the `admin` network. - -```yml -services: - frontend: - image: awesome/webapp - networks: - - front-tier - - back-tier - - monitoring: - image: awesome/monitoring - networks: - - admin - - backend: - image: awesome/backend - networks: - back-tier: - aliases: - - database - admin: - aliases: - - mysql - -networks: - front-tier: - back-tier: - admin: -``` - -#### ipv4_address, ipv6_address - -Specify a static IP address for containers for this service when joining the network. - -The corresponding network configuration in the [top-level networks section](#networks) MUST have an -`ipam` block with subnet configurations covering each static address. - -```yml -services: - frontend: - image: awesome/webapp - networks: - front-tier: - ipv4_address: 172.16.238.10 - ipv6_address: 2001:3984:3989::10 - -networks: - front-tier: - ipam: - driver: default - config: - - subnet: "172.16.238.0/24" - - subnet: "2001:3984:3989::/64" -``` - -#### link_local_ips - -`link_local_ips` specifies a list of link-local IPs. Link-local IPs are special IPs which belong to a well -known subnet and are purely managed by the operator, usually dependent on the architecture where they are -deployed. Implementation is Platform specific. - -Example: - -```yaml -services: - app: - image: busybox - command: top - networks: - app_net: - link_local_ips: - - 57.123.22.11 - - 57.123.22.13 -networks: - app_net: - driver: bridge -``` - -#### priority - -`priority` indicates in which order Compose implementation SHOULD connect the service’s containers to its -networks. 
If unspecified, the default value is 0. - -In the following example, the app service connects to app_net_1 first as it has the highest priority. It then connects to app_net_3, then app_net_2, which uses the default priority value of 0. - -```yaml -services: - app: - image: busybox - command: top - networks: - app_net_1: - priority: 1000 - app_net_2: - - app_net_3: - priority: 100 -networks: - app_net_1: - app_net_2: - app_net_3: -``` - -### oom_kill_disable - -If `oom_kill_disable` is set Compose implementation MUST configure the platform so it won't kill the container in case -of memory starvation. - -### oom_score_adj - -`oom_score_adj` tunes the preference for containers to be killed by platform in case of memory starvation. Value MUST -be within [-1000,1000] range. - -### pid - -`pid` sets the PID mode for container created by the Compose implementation. -Supported values are platform specific. - -### pids_limit - -_DEPRECATED: use [deploy.reservations.pids](deploy.md#pids)_ - -`pids_limit` tunes a container’s PIDs limit. Set to -1 for unlimited PIDs. - -```yml -pids_limit: 10 -``` - -### platform - -`platform` defines the target platform containers for this service will run on, using the `os[/arch[/variant]]` syntax. -Compose implementation MUST use this attribute when declared to determine which version of the image will be pulled -and/or on which platform the service’s build will be performed. - -```yml -platform: osx -platform: windows/amd64 -platform: linux/arm64/v8 -``` - -### ports - -Exposes container ports. -Port mapping MUST NOT be used with `network_mode: host` and doing so MUST result in a runtime error. - -#### Short syntax - -The short syntax is a colon-separated string to set host IP, host port and container port -in the form: - -`[HOST:]CONTAINER[/PROTOCOL]` where: - -- `HOST` is `[IP:](port | range)` -- `CONTAINER` is `port | range` -- `PROTOCOL` to restrict port to specified protocol. 
`tcp` and `udp` values are defined by the specification, - Compose implementations MAY offer support for platform-specific protocol names. - -Host IP, if not set, MUST bind to all network interfaces. Port can be either a single -value or a range. Host and container MUST use equivalent ranges. - -Either specify both ports (`HOST:CONTAINER`), or just the container port. In the latter case, the -Compose implementation SHOULD automatically allocate any unassigned host port. - -`HOST:CONTAINER` SHOULD always be specified as a (quoted) string, to avoid conflicts -with [yaml base-60 float](https://yaml.org/type/float.html){: target="_blank" rel="noopener" class="_"}. - -Samples: - -```yml -ports: - - "3000" - - "3000-3005" - - "8000:8000" - - "9090-9091:8080-8081" - - "49100:22" - - "127.0.0.1:8001:8001" - - "127.0.0.1:5000-5010:5000-5010" - - "6060:6060/udp" -``` - -> **Note**: Host IP mapping MAY not be supported on the platform, in such case Compose implementations SHOULD reject -> the Compose file and MUST inform the user they will ignore the specified host IP. - -#### Long syntax - -The long form syntax allows the configuration of additional fields that can't be -expressed in the short form. - -- `target`: the container port -- `published`: the publicly exposed port. Can be set as a range using syntax `start-end`, then actual port SHOULD be assigned within this range based on available ports. -- `host_ip`: the Host IP mapping, unspecified means all network interfaces (`0.0.0.0`) -- `protocol`: the port protocol (`tcp` or `udp`), unspecified means any protocol -- `mode`: `host` for publishing a host port on each node, or `ingress` for a port to be load balanced. - -```yml -ports: - - target: 80 - host_ip: 127.0.0.1 - published: 8080 - protocol: tcp - mode: host - - - target: 80 - host_ip: 127.0.0.1 - published: 8000-9000 - protocol: tcp - mode: host -``` - -### privileged - -`privileged` configures the service container to run with elevated privileges. 
Support and actual impacts are platform-specific. - -### profiles - -`profiles` defines a list of named profiles for the service to be enabled under. When not set, service is always enabled. - -If present, `profiles` SHOULD follow the regex format of `[a-zA-Z0-9][a-zA-Z0-9_.-]+`. - -### pull_policy - -`pull_policy` defines the decisions Compose implementations will make when it starts to pull images. Possible values are: - -* `always`: Compose implementations SHOULD always pull the image from the registry. -* `never`: Compose implementations SHOULD NOT pull the image from a registry and SHOULD rely on the platform cached image. - If there is no cached image, a failure MUST be reported. -* `missing`: Compose implementations SHOULD pull the image only if it's not available in the platform cache. - This SHOULD be the default option for Compose implementations without build support. - `if_not_present` SHOULD be considered an alias for this value for backward compatibility -* `build`: Compose implementations SHOULD build the image. Compose implementations SHOULD rebuild the image if already present. - -If `pull_policy` and `build` both presents, Compose implementations SHOULD build the image by default. Compose implementations MAY override this behavior in the toolchain. - -### read_only - -`read_only` configures service container to be created with a read-only filesystem. - -### restart - -`restart` defines the policy that the platform will apply on container termination. - -- `no`: The default restart policy. Does not restart a container under any circumstances. -- `always`: The policy always restarts the container until its removal. -- `on-failure`: The policy restarts a container if the exit code indicates an error. -- `unless-stopped`: The policy restarts a container irrespective of the exit code but will stop - restarting when the service is stopped or removed. 
- -```yml - restart: "no" - restart: always - restart: on-failure - restart: unless-stopped -``` - -### runtime - -`runtime` specifies which runtime to use for the service’s containers. - -The value of `runtime` is specific to implementation. -For example, `runtime` can be the name of [an implementation of OCI Runtime Spec](https://github.com/opencontainers/runtime-spec/blob/master/implementations.md){: target="_blank" rel="noopener" class="_"}, such as "runc". - -```yml -web: - image: busybox:latest - command: true - runtime: runc -``` - -### scale - -_DEPRECATED: use [deploy/replicas](deploy.md#replicas)_ - -`scale` specifies the default number of containers to deploy for this service. - -### secrets - -`secrets` grants access to sensitive data defined by [secrets](#secrets) on a per-service basis. Two -different syntax variants are supported: the short syntax and the long syntax. - -Compose implementations MUST report an error if the secret doesn't exist on the platform or isn't defined in the -[`secrets`](#secrets-top-level-element) section of this Compose file. - -#### Short syntax - -The short syntax variant only specifies the secret name. This grants the -container access to the secret and mounts it as read-only to `/run/secrets/` -within the container. The source name and destination mountpoint are both set -to the secret name. - -The following example uses the short syntax to grant the `frontend` service -access to the `server-certificate` secret. The value of `server-certificate` is set -to the contents of the file `./server.cert`. - -```yml -services: - frontend: - image: awesome/webapp - secrets: - - server-certificate -secrets: - server-certificate: - file: ./server.cert -``` - -#### Long syntax - -The long syntax provides more granularity in how the secret is created within -the service's containers. - -- `source`: The name of the secret as it exists on the platform. 
-- `target`: The name of the file to be mounted in `/run/secrets/` in the - service's task containers. Defaults to `source` if not specified. -- `uid` and `gid`: The numeric UID or GID that owns the file within - `/run/secrets/` in the service's task containers. Default value is USER running container. -- `mode`: The [permissions](https://web.archive.org/web/20220310140126/http://permissions-calculator.org/) for the file to be mounted in `/run/secrets/` - in the service's task containers, in octal notation. - Default value is world-readable permissions (mode `0444`). - The writable bit MUST be ignored if set. The executable bit MAY be set. - -The following example sets the name of the `server-certificate` secret file to `server.cert` -within the container, sets the mode to `0440` (group-readable) and sets the user and group -to `103`. The value of `server-certificate` secret is provided by the platform through a lookup and -the secret lifecycle is not directly managed by the Compose implementation. - -```yml -services: - frontend: - image: awesome/webapp - secrets: - - source: server-certificate - target: server.cert - uid: "103" - gid: "103" - mode: 0440 -secrets: - server-certificate: - external: true -``` - -Services MAY be granted access to multiple secrets. Long and short syntax for secrets MAY be used in the -same Compose file. Defining a secret in the top-level `secrets` MUST NOT imply granting any service access to it. -Such grant must be explicit within service specification as [secrets](#secrets) service element. - -### security_opt - -`security_opt` overrides the default labeling scheme for each container. - -```yml -security_opt: - - label:user:USER - - label:role:ROLE -``` - -### shm_size - -`shm_size` configures the size of the shared memory (`/dev/shm` partition on Linux) allowed by the service container. -Specified as a [byte value](#specifying-byte-values). 
- -### stdin_open - -`stdin_open` configures service containers to run with an allocated stdin. - -### stop_grace_period - -`stop_grace_period` specifies how long the Compose implementation MUST wait when attempting to stop a container if it doesn't -handle SIGTERM (or whichever stop signal has been specified with -[`stop_signal`](#stop_signal)), before sending SIGKILL. Specified -as a [duration](#specifying-durations). - -```yml - stop_grace_period: 1s - stop_grace_period: 1m30s -``` - -Default value is 10 seconds for the container to exit before sending SIGKILL. - -### stop_signal - -`stop_signal` defines the signal that the Compose implementation MUST use to stop the service containers. -If unset containers are stopped by the Compose Implementation by sending `SIGTERM`. - -```yml -stop_signal: SIGUSR1 -``` - -### storage_opt - -`storage_opt` defines storage driver options for a service. - -```yml -storage_opt: - size: '1G' -``` - -### sysctls - -`sysctls` defines kernel parameters to set in the container. `sysctls` can use either an array or a map. - -```yml -sysctls: - net.core.somaxconn: 1024 - net.ipv4.tcp_syncookies: 0 -``` - -```yml -sysctls: - - net.core.somaxconn=1024 - - net.ipv4.tcp_syncookies=0 -``` - -You can only use sysctls that are namespaced in the kernel. Docker does not -support changing sysctls inside a container that also modify the host system. -For an overview of supported sysctls, refer to [configure namespaced kernel -parameters (sysctls) at runtime](/engine/reference/commandline/run/#sysctl). - -### tmpfs - -`tmpfs` mounts a temporary file system inside the container. Can be a single value or a list. - -```yml -tmpfs: /run -``` - -```yml -tmpfs: - - /run - - /tmp -``` - -### tty - -`tty` configure service container to run with a TTY. - -### ulimits - -`ulimits` overrides the default ulimits for a container. Either specifies as a single limit as an integer or -soft/hard limits as a mapping. 
- -```yml -ulimits: - nproc: 65535 - nofile: - soft: 20000 - hard: 40000 -``` - -### user - -`user` overrides the user used to run the container process. Default is that set by image (i.e. Dockerfile `USER`), -if not set, `root`. - -### userns_mode - -`userns_mode` sets the user namespace for the service. Supported values are platform specific and MAY depend -on platform configuration - -```yml -userns_mode: "host" -``` - -### volumes - -`volumes` defines mount host paths or named volumes that MUST be accessible by service containers. - -If the mount is a host path and only used by a single service, it MAY be declared as part of the service -definition instead of the top-level `volumes` key. - -To reuse a volume across multiple services, a named -volume MUST be declared in the [top-level `volumes` key](#volumes-top-level-element). - -This example shows a named volume (`db-data`) being used by the `backend` service, -and a bind mount defined for a single service - -```yml -services: - backend: - image: awesome/backend - volumes: - - type: volume - source: db-data - target: /data - volume: - nocopy: true - - type: bind - source: /var/run/postgres/postgres.sock - target: /var/run/postgres/postgres.sock - -volumes: - db-data: -``` - -#### Short syntax - -The short syntax uses a single string with colon-separated values to specify a volume mount -(`VOLUME:CONTAINER_PATH`), or an access mode (`VOLUME:CONTAINER_PATH:ACCESS_MODE`). 
- -- `VOLUME`: MAY be either a host path on the platform hosting containers (bind mount) or a volume name -- `CONTAINER_PATH`: the path in the container where the volume is mounted -- `ACCESS_MODE`: is a comma-separated `,` list of options and MAY be set to: - - `rw`: read and write access (default) - - `ro`: read-only access - - `z`: SELinux option indicates that the bind mount host content is shared among multiple containers - - `Z`: SELinux option indicates that the bind mount host content is private and unshared for other containers - -> **Note**: The SELinux re-labeling bind mount option is ignored on platforms without SELinux. - -> **Note**: Relative host paths MUST only be supported by Compose implementations that deploy to a -> local container runtime. This is because the relative path is resolved from the Compose file’s parent -> directory which is only applicable in the local case. Compose Implementations deploying to a non-local -> platform MUST reject Compose files which use relative host paths with an error. To avoid ambiguities -> with named volumes, relative paths SHOULD always begin with `.` or `..`. - -#### Long syntax - -The long form syntax allows the configuration of additional fields that can't be -expressed in the short form. - -- `type`: the mount type `volume`, `bind`, `tmpfs` or `npipe` -- `source`: the source of the mount, a path on the host for a bind mount, or the - name of a volume defined in the - [top-level `volumes` key](#volumes-top-level-element). Not applicable for a tmpfs mount. -- `target`: the path in the container where the volume is mounted -- `read_only`: flag to set the volume as read-only -- `bind`: configure additional bind options - - `propagation`: the propagation mode used for the bind - - `create_host_path`: create a directory at the source path on host if there is nothing present. - Do nothing if there is something present at the path. 
This is automatically implied by short syntax - for backward compatibility with docker-compose legacy. - - `selinux`: the SELinux re-labeling option `z` (shared) or `Z` (private) -- `volume`: configure additional volume options - - `nocopy`: flag to disable copying of data from a container when a volume is created -- `tmpfs`: configure additional tmpfs options - - `size`: the size for the tmpfs mount in bytes (either numeric or as bytes unit) - - `mode`: the filemode for the tmpfs mount as Unix permission bits as an octal number -- `consistency`: the consistency requirements of the mount. Available values are platform specific - -### volumes_from - -`volumes_from` mounts all of the volumes from another service or container, optionally specifying -read-only access (ro) or read-write (rw). If no access level is specified, then read-write MUST be used. - -String value defines another service in the Compose application model to mount volumes from. The -`container:` prefix, if supported, allows to mount volumes from a container that is not managed by the -Compose implementation. - -```yaml -volumes_from: - - service_name - - service_name:ro - - container:container_name - - container:container_name:rw -``` - -### working_dir - -`working_dir` overrides the container's working directory from that specified by image (i.e. Dockerfile `WORKDIR`). - -## Networks top-level element - -Networks are the layer that allow services to communicate with each other. The networking model exposed to a service -is limited to a simple IP connection with target services and external resources, while the Network definition allows -fine-tuning the actual implementation provided by the platform. - -Networks can be created by specifying the network name under a top-level `networks` section. 
-Services can connect to networks by specifying the network name under the service [`networks`](#networks) subsection - -In the following example, at runtime, networks `front-tier` and `back-tier` will be created and the `frontend` service -connected to the `front-tier` network and the `back-tier` network. - -```yml -services: - frontend: - image: awesome/webapp - networks: - - front-tier - - back-tier - -networks: - front-tier: - back-tier: -``` - -### driver - -`driver` specifies which driver should be used for this network. Compose implementations MUST return an error if the -driver is not available on the platform. - -```yml -driver: overlay -``` - -Default and available values are platform specific. Compose specification MUST support the following specific drivers: -`none` and `host` - -- `host` use the host's networking stack -- `none` disable networking - -#### host or none - -The syntax for using built-in networks such as `host` and `none` is different, as such networks implicitly exists outside -the scope of the Compose implementation. To use them one MUST define an external network with the name `host` or `none` and -an alias that the Compose implementation can use (`hostnet` or `nonet` in the following examples), then grant the service -access to that network using its alias. - -```yml -services: - web: - networks: - hostnet: {} - -networks: - hostnet: - external: true - name: host -``` - -```yml -services: - web: - ... - networks: - nonet: {} - -networks: - nonet: - external: true - name: none -``` - -### driver_opts - -`driver_opts` specifies a list of options as key-value pairs to pass to the driver for this network. These options are -driver-dependent - consult the driver's documentation for more information. Optional. - -```yml -driver_opts: - foo: "bar" - baz: 1 -``` - -### attachable - -If `attachable` is set to `true`, then standalone containers SHOULD be able attach to this network, in addition to services. 
-If a standalone container attaches to the network, it can communicate with services and other standalone containers -that are also attached to the network. - -```yml -networks: - mynet1: - driver: overlay - attachable: true -``` - -### enable_ipv6 - -`enable_ipv6` enable IPv6 networking on this network. - -### ipam - -`ipam` specifies a custom IPAM configuration. This is an object with several properties, each of which is optional: - -- `driver`: Custom IPAM driver, instead of the default. -- `config`: A list with zero or more configuration elements, each containing: - - `subnet`: Subnet in CIDR format that represents a network segment - - `ip_range`: Range of IPs from which to allocate container IPs - - `gateway`: IPv4 or IPv6 gateway for the master subnet - - `aux_addresses`: Auxiliary IPv4 or IPv6 addresses used by Network driver, as a mapping from hostname to IP -- `options`: Driver-specific options as a key-value mapping. - -A full example: - -```yml -ipam: - driver: default - config: - - subnet: 172.28.0.0/16 - ip_range: 172.28.5.0/24 - gateway: 172.28.5.254 - aux_addresses: - host1: 172.28.1.5 - host2: 172.28.1.6 - host3: 172.28.1.7 - options: - foo: bar - baz: "0" -``` - -### internal - -By default, Compose implementations MUST provides external connectivity to networks. `internal` when set to `true` allow to -create an externally isolated network. - -### labels - -Add metadata to containers using Labels. Can use either an array or a dictionary. - -Users SHOULD use reverse-DNS notation to prevent labels from conflicting with those used by other software. 
- -```yml -labels: - com.example.description: "Financial transaction network" - com.example.department: "Finance" - com.example.label-with-empty-value: "" -``` - -```yml -labels: - - "com.example.description=Financial transaction network" - - "com.example.department=Finance" - - "com.example.label-with-empty-value" -``` - -Compose implementations MUST set `com.docker.compose.project` and `com.docker.compose.network` labels. - -### external - -If set to `true`, `external` specifies that this network’s lifecycle is maintained outside of that of the application. -Compose Implementations SHOULD NOT attempt to create these networks, and raises an error if one doesn't exist. - -If `external` is set to `true` , then the resource is not managed by Compose. If `external` is set to `true` and the network configuration has other attributes set besides `name`, then Compose Implementations SHOULD reject the Compose file as invalid. - -In the example below, `proxy` is the gateway to the outside world. Instead of attempting to create a network, Compose -implementations SHOULD interrogate the platform for an existing network simply called `outside` and connect the -`proxy` service's containers to it. - -```yml - -services: - proxy: - image: awesome/proxy - networks: - - outside - - default - app: - image: awesome/app - networks: - - default - -networks: - outside: - external: true -``` - -### name - -`name` sets a custom name for this network. The name field can be used to reference networks which contain special characters. -The name is used as is and will **not** be scoped with the project name. 
- -```yml -networks: - network1: - name: my-app-net -``` - -It can also be used in conjunction with the `external` property to define the platform network that the Compose implementation -should retrieve, typically by using a parameter so the Compose file doesn't need to hard-code runtime specific values: - -```yml -networks: - network1: - external: true - name: "${NETWORK_ID}" -``` - -## Volumes top-level element - -Volumes are persistent data stores implemented by the platform. The Compose specification offers a neutral abstraction -for services to mount volumes, and configuration parameters to allocate them on infrastructure. - -The `volumes` section allows the configuration of named volumes that can be reused across multiple services. Here's -an example of a two-service setup where a database's data directory is shared with another service as a volume named -`db-data` so that it can be periodically backed up: - -```yml -services: - backend: - image: awesome/database - volumes: - - db-data:/etc/data - - backup: - image: backup-service - volumes: - - db-data:/var/lib/backup/data - -volumes: - db-data: -``` - -An entry under the top-level `volumes` key can be empty, in which case it uses the platform's default configuration for -creating a volume. Optionally, you can configure it with the following keys: - -### driver - -Specify which volume driver should be used for this volume. Default and available values are platform specific. If the driver is not available, the Compose implementation MUST return an error and stop application deployment. - -```yml -driver: foobar -``` - -### driver_opts - -`driver_opts` specifies a list of options as key-value pairs to pass to the driver for this volume. Those options are driver-dependent. 
- -```yml -volumes: - example: - driver_opts: - type: "nfs" - o: "addr=10.40.0.199,nolock,soft,rw" - device: ":/docker/example" -``` - -### external - -If set to `true`, `external` specifies that this volume already exist on the platform and its lifecycle is managed outside -of that of the application. Compose implementations MUST NOT attempt to create these volumes, and MUST return an error if they -do not exist. - -If `external` is set to `true` , then the resource is not managed by Compose. If `external` is set to `true` and the network configuration has other attributes set besides `name`, then Compose Implementations SHOULD reject the Compose file as invalid. - - -In the example below, instead of attempting to create a volume called -`{project_name}_db-data`, Compose looks for an existing volume simply -called `db-data` and mounts it into the `backend` service's containers. - -```yml -services: - backend: - image: awesome/database - volumes: - - db-data:/etc/data - -volumes: - db-data: - external: true -``` - -### labels - -`labels` are used to add metadata to volumes. You can use either an array or a dictionary. - -It's recommended that you use reverse-DNS notation to prevent your labels from -conflicting with those used by other software. - -```yml -labels: - com.example.description: "Database volume" - com.example.department: "IT/Ops" - com.example.label-with-empty-value: "" -``` - -```yml -labels: - - "com.example.description=Database volume" - - "com.example.department=IT/Ops" - - "com.example.label-with-empty-value" -``` - -Compose implementation MUST set `com.docker.compose.project` and `com.docker.compose.volume` labels. - -### name - -`name` set a custom name for this volume. The name field can be used to reference volumes that contain special -characters. The name is used as is and will **not** be scoped with the stack name. - -```yml -volumes: - data: - name: "my-app-data" -``` - -It can also be used in conjunction with the `external` property. 
Doing so the name of the volume used to lookup for -actual volume on platform is set separately from the name used to refer to it within the Compose file: - -```yml -volumes: - db-data: - external: - name: actual-name-of-volume -``` - -This makes it possible to make this lookup name a parameter of a Compose file, so that the model ID for volume is -hard-coded but the actual volume ID on platform is set at runtime during deployment: - -```yml -volumes: - db-data: - external: - name: ${DATABASE_VOLUME} -``` - -## Configs top-level element - -Configs allow services to adapt their behaviour without the need to rebuild a Docker image. Configs are comparable to Volumes from a service point of view as they are mounted into service's containers filesystem. The actual implementation detail to get configuration provided by the platform can be set from the Configuration definition. - -When granted access to a config, the config content is mounted as a file in the container. The location of the mount point within the container defaults to `/` in Linux containers and `C:\` in Windows containers. - -By default, the config MUST be owned by the user running the container command but can be overridden by service configuration. -By default, the config MUST have world-readable permissions (mode 0444), unless service is configured to override this. - -Services can only access configs when explicitly granted by a [`configs`](#configs) subsection. - -The top-level `configs` declaration defines or references -configuration data that can be granted to the services in this -application. The source of the config is either `file` or `external`. - -- `file`: The config is created with the contents of the file at the specified path. -- `external`: If set to true, specifies that this config has already been created. Compose implementation does not - attempt to create it, and if it does not exist, an error occurs. -- `name`: The name of config object on Platform to lookup. 
This field can be used to - reference configs that contain special characters. The name is used as is - and will **not** be scoped with the project name. - -In this example, `http_config` is created (as `_http_config`) when the application is deployed, -and `my_second_config` MUST already exist on Platform and value will be obtained by lookup. - -In this example, `server-http_config` is created as `_http_config` when the application is deployed, -by registering content of the `httpd.conf` as configuration data. - -```yml -configs: - http_config: - file: ./httpd.conf -``` - -Alternatively, `http_config` can be declared as external, doing so Compose implementation will lookup `http_config` to expose configuration data to relevant services. - -```yml -configs: - http_config: - external: true -``` - -External configs lookup can also use a distinct key by specifying a `name`. The following -example modifies the previous one to lookup for config using a parameter `HTTP_CONFIG_KEY`. Doing -so the actual lookup key will be set at deployment time by [interpolation](#interpolation) of -variables, but exposed to containers as hard-coded ID `http_config`. - -```yml -configs: - http_config: - external: true - name: "${HTTP_CONFIG_KEY}" -``` - -If `external` is set to `true` , then the resource is not managed by Compose. If `external` is set to `true` and the network configuration has other attributes set besides `name`, then Compose Implementations SHOULD reject the Compose file as invalid. - -Compose file need to explicitly grant access to the configs to relevant services in the application. - -## Secrets top-level element - -Secrets are a flavour of Configs focussing on sensitive data, with specific constraint for this usage. As the platform implementation may significantly differ from Configs, dedicated Secrets section allows to configure the related resources. 
- -The top-level `secrets` declaration defines or references sensitive data that can be granted to the services in this -application. The source of the secret is either `file` or `external`. - -- `file`: The secret is created with the contents of the file at the specified path. -- `environment`: The secret is created with the value of an environment variable. -- `external`: If set to true, specifies that this secret has already been created. Compose implementation does - not attempt to create it, and if it does not exist, an error occurs. -- `name`: The name of the secret object in Docker. This field can be used to - reference secrets that contain special characters. The name is used as is - and will **not** be scoped with the project name. - -In this example, `server-certificate` secret is created as `_server-certificate` when the application is deployed, -by registering content of the `server.cert` as a platform secret. - -```yml -secrets: - server-certificate: - file: ./server.cert -``` - -In this example, `token` secret is created as `_token` when the application is deployed, -by registering content of the `OAUTH_TOKEN` environment variable as a platform secret. - -```yml -secrets: - token: - environment: "OAUTH_TOKEN" -``` - -Alternatively, `server-certificate` can be declared as external, doing so Compose implementation will lookup `server-certificate` to expose secret to relevant services. - -```yml -secrets: - server-certificate: - external: true -``` - -External secrets lookup can also use a distinct key by specifying a `name`. The following -example modifies the previous one to look up for secret using a parameter `CERTIFICATE_KEY`. Doing -so the actual lookup key will be set at deployment time by [interpolation](#interpolation) of -variables, but exposed to containers as hard-coded ID `server-certificate`. 
- -```yml -secrets: - server-certificate: - external: true - name: "${CERTIFICATE_KEY}" -``` - -If `external` is set to `true` , then the resource is not managed by Compose. If `external` is set to `true` and the network configuration has other attributes set besides `name`, then Compose Implementations SHOULD reject the Compose file as invalid. -Compose file need to explicitly grant access to the secrets to relevant services in the application. - -## Fragments - -It is possible to re-use configuration fragments using [YAML anchors](http://www.yaml.org/spec/1.2/spec.html#id2765878). - -```yml -volumes: - db-data: &default-volume - driver: default - metrics: *default-volume -``` - -In previous sample, an _anchor_ is created as `default-volume` based on `db-data` volume specification. It is later reused by _alias_ `*default-volume` to define `metrics` volume. Same logic can apply to any element in a Compose file. Anchor resolution MUST take place -before [variables interpolation](#interpolation), so variables can't be used to set anchors or aliases. - -It is also possible to partially override values set by anchor reference using the -[YAML merge type](http://yaml.org/type/merge.html). In following example, `metrics` volume specification uses alias -to avoid repetition but override `name` attribute: - -```yml - -services: - backend: - image: awesome/database - volumes: - - db-data - - metrics -volumes: - db-data: &default-volume - driver: default - name: "data" - metrics: - <<: *default-volume - name: "metrics" -``` - -## Extension - -Special extension fields can be of any format as long as their name starts with the `x-` character sequence. They can be used -within any structure in a Compose file. This is the sole exception for Compose implementations to silently ignore unrecognized field. 
- -```yml -x-custom: - foo: - - bar - - zot - -services: - webapp: - image: awesome/webapp - x-foo: bar -``` - -The contents of such fields are unspecified by Compose specification, and can be used to enable custom features. Compose implementation to encounter an unknown extension field MUST NOT fail, but COULD warn about unknown field. - -For platform extensions, it is highly recommended to prefix extension by platform/vendor name, the same way browsers add -support for [custom CSS features](https://www.w3.org/TR/2011/REC-CSS2-20110607/syndata.html#vendor-keywords){: target="_blank" rel="noopener" class="_"}. - -```yml -service: - backend: - deploy: - placement: - x-aws-role: "arn:aws:iam::XXXXXXXXXXXX:role/foo" - x-aws-region: "eu-west-3" - x-azure-region: "france-central" -``` - -### Informative Historical Notes - -This section is informative. At the time of writing, the following prefixes are known to exist: - -| prefix | vendor/organization | -| ---------- | ------------------- | -| docker | Docker | -| kubernetes | Kubernetes | - -### Using extensions as fragments - -With the support for extension fields, Compose file can be written as follows to improve readability of reused fragments: - -```yml -x-logging: &default-logging - options: - max-size: "12m" - max-file: "5" - driver: json-file - -services: - frontend: - image: awesome/webapp - logging: *default-logging - backend: - image: awesome/database - logging: *default-logging -``` - -### specifying byte values - -Value express a byte value as a string in `{amount}{byte unit}` format: -The supported units are `b` (bytes), `k` or `kb` (kilo bytes), `m` or `mb` (mega bytes) and `g` or `gb` (giga bytes). - -``` - 2b - 1024kb - 2048k - 300m - 1gb -``` - -### specifying durations - -Value express a duration as a string in the in the form of `{value}{unit}`. -The supported units are `us` (microseconds), `ms` (milliseconds), `s` (seconds), `m` (minutes) and `h` (hours). 
-Value can can combine multiple values and using without separator. - -``` - 10ms - 40s - 1m30s - 1h5m30s20ms -``` - -## Interpolation - -Values in a Compose file can be set by variables, and interpolated at runtime. Compose files use a Bash-like -syntax `${VARIABLE}` - -Both `$VARIABLE` and `${VARIABLE}` syntax are supported. Default values can be defined inline using typical shell syntax: -latest - -- `${VARIABLE:-default}` evaluates to `default` if `VARIABLE` is unset or - empty in the environment. -- `${VARIABLE-default}` evaluates to `default` only if `VARIABLE` is unset - in the environment. - -Similarly, the following syntax allows you to specify mandatory variables: - -- `${VARIABLE:?err}` exits with an error message containing `err` if - `VARIABLE` is unset or empty in the environment. -- `${VARIABLE?err}` exits with an error message containing `err` if - `VARIABLE` is unset in the environment. - -Interpolation can also be nested: - -- `${VARIABLE:-${FOO}}` -- `${VARIABLE?$FOO}` -- `${VARIABLE:-${FOO:-default}}` - -Other extended shell-style features, such as `${VARIABLE/foo/bar}`, are not -supported by the Compose specification. - -You can use a `$$` (double-dollar sign) when your configuration needs a literal -dollar sign. This also prevents Compose from interpolating a value, so a `$$` -allows you to refer to environment variables that you don't want processed by -Compose. - -```yml -web: - build: . - command: "$$VAR_NOT_INTERPOLATED_BY_COMPOSE" -``` - -If the Compose implementation can't resolve a substituted variable and no default value is defined, it MUST warn -the user and substitute the variable with an empty string. - -As any values in a Compose file can be interpolated with variable substitution, including compact string notation -for complex elements, interpolation MUST be applied _before_ merge on a per-file-basis. 
- -## Compose documentation - -- [User guide](../index.md) -- [Installing Compose](../install/index.md) -- [Compose file versions and upgrading](compose-versioning.md) -- [Sample apps with Compose](../samples-for-compose.md) -- [Enabling GPU access with Compose](../gpu-support.md) -- [Command line reference](../reference/index.md) +Use the links below to easily navigate key sections of the Compose specification. + +
+ +
+
+
+
+ Arrow pointing downwards +
+

Status of the Specification

+

Read about the status of the specification.

+
+
+
+
+
+ Data disks +
+

The Compose application model

+

Learn about the Compose application model.

+
+
+
+
+
+ Computers on a local area network +
+

The Compose file

+

Understand the Compose file.

+
+
+
+ +
+
+
+
+ Document with a text outline +
+

Version and name top-level element

+

Understand version and name attributes for Compose.

+
+
+
+
+
+ A pair of scissors +
+

Services top-level element

+

Explore all services attributes for Compose.

+
+
+
+
+
+ Settings cogwheel with stars +
+

Networks top-level element

+

Find all networks attributes for Compose.

+
+
+
+ +
+
+
+
+ Checkered shield +
+

Volumes top-level element

+

Explore all volumes attributes for Compose.

+
+
+
+
+
+ Alarm bell with an exclamation mark +
+

Configs top-level element

+

Find out about configs in Compose.

+
+
+
+
+
+ Document with an overlaying plus sign +
+

Secrets top-level element

+

Learn about secrets in Compose.

+
+
+
+
\ No newline at end of file diff --git a/compose/environment-variables/envvars.md b/compose/environment-variables/envvars.md index cb36559344..23c22cffad 100644 --- a/compose/environment-variables/envvars.md +++ b/compose/environment-variables/envvars.md @@ -43,7 +43,20 @@ the container's name on startup. For example, if your project name is `myapp` and it includes two services `db` and `web`, then Compose starts containers named `myapp-db-1` and `myapp-web-1` respectively. -It defaults to the `basename` of the project directory. +Compose can set the project name in different ways. The level of precedence (from highest to lowest) for each method is as follows: + +1. The `-p` command line flag +2. `COMPOSE_PROJECT_NAME` +3. The top level `name:` variable from the config file (or the last `name:` from + a series of config files specified using `-f`) +4. The `basename` of the project directory containing the config file (or + containing the first config file specified using `-f`) +5. The `basename` of the current directory if no config file is specified + +Project names must contain only lowercase letters, decimal digits, dashes, and +underscores, and must begin with a lowercase letter or decimal digit. If the +`basename` of the project directory or current directory violates this +constraint, you must use one of the other mechanisms. See also the [command-line options overview](../reference/index.md#command-options-overview-and-help) and [using `-p` to specify a project name](../reference/index.md#use--p-to-specify-a-project-name). 
diff --git a/compose/environment-variables/set-environment-variables.md b/compose/environment-variables/set-environment-variables.md index d4abee8a71..2b81b3a554 100644 --- a/compose/environment-variables/set-environment-variables.md +++ b/compose/environment-variables/set-environment-variables.md @@ -30,10 +30,10 @@ services: When you run `docker compose up`, the `web` service defined in the Compose file substitues in the image `webapp:v1.5` which was set in the `.env` file. You can verify this with the -[convert command](../../engine/reference/commandline/compose_config.md), which prints your resolved application config to the terminal: +[config command](../../engine/reference/commandline/compose_config.md), which prints your resolved application config to the terminal: ```console -$ docker compose convert +$ docker compose config services: web: @@ -43,7 +43,7 @@ services: The `.env` file should be placed at the root of the project directory next to your `docker-compose.yml` file. You can use an alternative path with one of the following methods: - The [`--file` option in the CLI](../reference/index.md#use--f-to-specify-name-and-path-of-one-or-more-compose-files) - The [`--env-file` option in the CLI](#substitute-with---env-file) -- Using the [`env_file` attribute in the Compose file](../compose-file/index.md#env_file) +- Using the [`env_file` attribute in the Compose file](../compose-file/05-services.md#env_file) For more information on formatting an environment file, see [Use an environment file](env-file.md). @@ -56,7 +56,7 @@ For more information on formatting an environment file, see [Use an environment ### Use the `environment` attribute You can set environment variables in a service's containers with the -[`environment` attribute](../compose-file/index.md#environment) in your Compose file. It works in the same way as `docker run -e VARIABLE=VALUE ...` +[`environment` attribute](../compose-file/05-services.md#environment) in your Compose file. 
It works in the same way as `docker run -e VARIABLE=VALUE ...` ```yaml web: @@ -75,12 +75,12 @@ web: The value of the `DEBUG` variable in the container is taken from the value for the same variable in the shell in which Compose is run. -See [`environment` attribute](../compose-file/index.md#environment) for more information. +See [`environment` attribute](../compose-file/05-services.md#environment) for more information. ### Use the `env_file` attribute You can pass multiple environment variables from an external file through to -a service's containers with the [`env_file` option](../compose-file/index.md#env_file). This works in the same way as `docker run --env-file=FILE ...`: +a service's containers with the [`env_file` option](../compose-file/05-services.md#env_file). This works in the same way as `docker run --env-file=FILE ...`: ```yaml web: @@ -94,7 +94,7 @@ If multiple files are specified, they are evaluated in order and can override va > >With this option, environment variables declared in the file cannot then be referenced again separately in the Compose file or used to configure Compose. -See [`env_file` attribute](../compose-file/index.md#env_file) for more information. +See [`env_file` attribute](../compose-file/05-services.md#env_file) for more information. 
### Substitute from the shell @@ -146,7 +146,7 @@ services: If the `--env-file` is not used in the command line, the `.env` file is loaded by default: ```console -$ docker compose convert +$ docker compose config services: web: image: 'webapp:v1.5' @@ -155,7 +155,7 @@ services: Passing the `--env-file` argument overrides the default file path: ```console -$ docker compose --env-file ./config/.env.dev convert +$ docker compose --env-file ./config/.env.dev config services: web: image: 'webapp:v1.6' @@ -164,7 +164,7 @@ services: When an invalid file path is being passed as an `--env-file` argument, Compose returns an error: ```console -$ docker compose --env-file ./doesnotexist/.env.dev convert +$ docker compose --env-file ./doesnotexist/.env.dev config ERROR: Couldn't find env file: /home/user/./doesnotexist/.env.dev ``` diff --git a/compose/extends.md b/compose/extends.md index 7efdf0acfb..211cf7ab42 100644 --- a/compose/extends.md +++ b/compose/extends.md @@ -446,4 +446,4 @@ services: ## Reference information -[`extends`](compose-file/index.md#extends) \ No newline at end of file +[`extends`](compose-file/05-services.md#extends) \ No newline at end of file diff --git a/compose/networking.md b/compose/networking.md index f4f4e1acf3..fbcb1ae89c 100644 --- a/compose/networking.md +++ b/compose/networking.md @@ -54,7 +54,7 @@ the service is accessible outside the swarm as well. Within the `web` container, your connection string to `db` would look like `postgres://db:5432`, and from the host machine, the connection string would -look like `postgres://{DOCKER_IP}:5432`. +look like `postgres://{DOCKER_IP}:8001`, for example `postgres://localhost:8001` if your container is running locally. ## Update containers on the network @@ -82,14 +82,14 @@ services: image: postgres ``` -See the [links reference](compose-file/05-services.md#links) for more information.
## Multi-host networking When deploying a Compose application on a Docker Engine with [Swarm mode enabled](../engine/swarm/index.md), you can make use of the built-in `overlay` driver to enable multi-host communication. -Overlay networks are always created as `attachable`. You can optionally set the [`attachable`](compose-file/index.md#attachable) property to `false`. +Overlay networks are always created as `attachable`. You can optionally set the [`attachable`](compose-file/06-networks.md#attachable) property to `false`. Consult the [Swarm mode section](../engine/swarm/index.md), to see how to set up a Swarm cluster, and the [Getting started with multi-host networking](../network/network-tutorial-overlay.md) @@ -131,9 +131,9 @@ networks: bar: "2" ``` -Networks can be configured with static IP addresses by setting the [ipv4_address and/or ipv6_address](compose-file/index.md#ipv4_address-ipv6_address) for each attached network. +Networks can be configured with static IP addresses by setting the [ipv4_address and/or ipv6_address](compose-file/05-services.md#ipv4_address-ipv6_address) for each attached network. -Networks can also be given a [custom name](compose-file/index.md#name): +Networks can also be given a [custom name](compose-file/06-networks.md#name): ```yaml services: @@ -165,7 +165,7 @@ networks: ## Use a pre-existing network -If you want your containers to join a pre-existing network, use the [`external` option](compose-file/index.md#external) +If you want your containers to join a pre-existing network, use the [`external` option](compose-file/06-networks.md#external) ```yaml services: # ... 
@@ -181,5 +181,5 @@ Instead of attempting to create a network called `[projectname]_default`, Compos For full details of the network configuration options available, see the following references: -- [Top-level `networks` key](compose-file/index.md#networks-top-level-element) -- [Service-level `networks` key](compose-file/index.md#networks) +- [Top-level `networks` key](compose-file/06-networks.md) +- [Service-level `networks` key](compose-file/05-services.md#networks) diff --git a/compose/production.md b/compose/production.md index 7f4d9597c8..a4e7b46110 100644 --- a/compose/production.md +++ b/compose/production.md @@ -23,7 +23,7 @@ production. These changes might include: - Binding to different ports on the host - Setting environment variables differently, such as reducing the verbosity of logging, or to specify settings for external services such as an email server -- Specifying a restart policy like [`restart: always`](compose-file/index.md#restart){: target="_blank" rel="noopener" class="_" } to avoid downtime +- Specifying a restart policy like [`restart: always`](compose-file/05-services.md#restart){: target="_blank" rel="noopener" class="_" } to avoid downtime - Adding extra services such as a log aggregator For this reason, consider defining an additional Compose file, say diff --git a/compose/profiles.md b/compose/profiles.md index 49f9f98c39..34e82f58ff 100644 --- a/compose/profiles.md +++ b/compose/profiles.md @@ -18,7 +18,7 @@ development tasks. 
## Assigning profiles to services Services are associated with profiles through the -[`profiles` attribute](compose-file/index.md#profiles) which takes an +[`profiles` attribute](compose-file/05-services.md#profiles) which takes an array of profile names: ```yaml @@ -183,4 +183,4 @@ $ COMPOSE_PROFILES=dev docker compose up phpmyadmin ## Reference information -[`profiles`](compose-file/index.md#profiles) +[`profiles`](compose-file/05-services.md#profiles) diff --git a/compose/reference/index.md b/compose/reference/index.md index d08ee0281e..27397b2bc1 100644 --- a/compose/reference/index.md +++ b/compose/reference/index.md @@ -185,9 +185,22 @@ Status: Downloaded newer image for postgres:latest ## Use `-p` to specify a project name -Each configuration has a project name. If you supply a `-p` flag, you can -specify a project name. If you don't specify the flag, Compose uses the current -directory name. See also the [COMPOSE_PROJECT_NAME environment variable](../environment-variables/envvars.md#compose_project_name). +Each configuration has a project name which Compose can set in different ways. The level of precedence (from highest to lowest) for each method is as follows: + +1. The `-p` command line flag +2. The [COMPOSE_PROJECT_NAME environment variable][] +3. The top level `name:` variable from the config file (or the last `name:` from + a series of config files specified using `-f`) +4. The `basename` of the project directory containing the config file (or + containing the first config file specified using `-f`) +5. The `basename` of the current directory if no config file is specified + +[COMPOSE_PROJECT_NAME environment variable]: ../environment-variables/envvars.md#compose_project_name + +Project names must contain only lowercase letters, decimal digits, dashes, and +underscores, and must begin with a lowercase letter or decimal digit. 
If the +`basename` of the project directory or current directory violates this +constraint, you must use one of the other mechanisms. ## Use `--profile` to specify one or more active profiles diff --git a/compose/release-notes.md b/compose/release-notes.md index 714bff7737..eeb51acf2d 100644 --- a/compose/release-notes.md +++ b/compose/release-notes.md @@ -8,6 +8,98 @@ redirect_from: --- {% include compose-eol.md %} +## 2.17.2 +{% include release-date.html date="2023-03-26" %} +### Update +- Dependencies upgrade: bump compose-go to v1.13.2 + +### Bug fixes and enhancements +- Fixed invalid project name error for directories with uppercase characters or `.` in the name. Fixed [compose#10405](https://github.com/docker/compose/issues/10405){: + target="_blank" rel="noopener" class="_"} + +## 2.17.1 +{% include release-date.html date="2023-03-24" %} +### Update +- Dependencies upgrade: bump buildkit to v0.11.5 +- Dependencies upgrade: bump compose-go to v1.13.1 +- Dependencies upgrade: bump golang to 1.20.2 + +### Bug fixes and enhancements +- Fixed panic on `alpha watch` command. Pull Request [compose#10393](https://github.com/docker/compose/pull/10393){: + target="_blank" rel="noopener" class="_"} +- Prevented conflicts for services named `extensions`. Fixed [compose-go#247](https://github.com/compose-spec/compose-go/issues/247){: + target="_blank" rel="noopener" class="_"} +- Compose now validates project names more consistently. Fixed [compose-go#363](https://github.com/compose-spec/compose-go/issues/363){: + target="_blank" rel="noopener" class="_"} + +## 2.17.0 +{% include release-date.html date="2023-03-23" %} +### Update +- Dependencies upgrade: bump buildkit to v0.11.4 +- Dependencies upgrade: bump buildx to v0.10.4 +- Dependencies upgrade: bump containerd to 1.6.18 +- Dependencies upgrade: bump compose-go to v1.13.0 + +### Bug fixes and enhancements +* Introduced `--wait-timeout` on `up` command. 
Fixed [compose#10269](https://github.com/docker/compose/issues/10269){: + target="_blank" rel="noopener" class="_"} +* Made `compose service --hash` output sort by service name. Pull Request [compose#10278](https://github.com/docker/compose/pull/10278){: + target="_blank" rel="noopener" class="_"} +* Compose now renders a compact TUI progress report to monitor layers download. Pull Request [compose#10281](https://github.com/docker/compose/pull/10281){: + target="_blank" rel="noopener" class="_"} +* Introduced `restart` for `depends_on`. Fixed [compose#10284](https://github.com/docker/compose/issues/10284){: + target="_blank" rel="noopener" class="_"} +* Added support of `NO_COLOR` env var. Fixed [compose#10340](https://github.com/docker/compose/issues/10340){: + target="_blank" rel="noopener" class="_"} +* Progress writer now uses `dockercli.Err` stream. Fixed [compose#10366](https://github.com/docker/compose/issues/10366){: + target="_blank" rel="noopener" class="_"} +* Introduced `dockerfile_inline`. Fixed [compose#8077](https://github.com/docker/compose/issues/8077){: + target="_blank" rel="noopener" class="_"} +* Added support for `additional_contexts` in the `build` service configuration. Fixed [compose#9461](https://github.com/docker/compose/issues/9461){: + target="_blank" rel="noopener" class="_"} [compose#9961](https://github.com/docker/compose/issues/9961){: + target="_blank" rel="noopener" class="_"} +* Added file delete/rename handling in `watch` mode. Pull Request [compose#10386](https://github.com/docker/compose/pull/10386){: + target="_blank" rel="noopener" class="_"} +* Introduced an `ignore` attribute in `watch` mode. Pull Request [compose#10385](https://github.com/docker/compose/pull/10385){: + target="_blank" rel="noopener" class="_"} +* Compose now uses progress writer to show copies status. 
Pull Request [compose#10387](https://github.com/docker/compose/pull/10387){: + target="_blank" rel="noopener" class="_"} +* Updated reference documentation for `-p`/`--project-name` flag. Fixed [docs#16915](https://github.com/docker/docs/pull/16915){: + target="_blank" rel="noopener" class="_"}, [compose-spec#311](https://github.com/compose-spec/compose-spec/issues/311){: + target="_blank" rel="noopener" class="_"} +* Introduced a `replace` label to track the relationship between old and new containers of a service. Fixed [compose#9600](https://github.com/docker/compose/issues/9600){: + target="_blank" rel="noopener" class="_"} +* Fixed a bug that meant dependent services were not restarted after a service was restarted. Fixed [compose#10263](https://github.com/docker/compose/issues/10263){: + target="_blank" rel="noopener" class="_"} +* Compose now ignores services without a build section in `watch` mode. Fixed [compose#10270](https://github.com/docker/compose/issues/10270){: + target="_blank" rel="noopener" class="_"} +* Compose now applies config options for pseudo-subcommands. Fixed [compose#10286](https://github.com/docker/compose/issues/10286){: + target="_blank" rel="noopener" class="_"} +* Compose manages only containers with config_hash labels (i.e, created by compose). Fixed [compose#10317](https://github.com/docker/compose/issues/10317){: + target="_blank" rel="noopener" class="_"} +* Compose triggers an error if the project name is empty after normalization. Fixed [compose#10313](https://github.com/docker/compose/issues/10313){: + target="_blank" rel="noopener" class="_"} +* Compose restarts only needed services by checking `depends_on` relations. Fixed [compose#10337](https://github.com/docker/compose/issues/10337){: + target="_blank" rel="noopener" class="_"} +* Fixed a display issue on small terminals. 
Fixed [compose#10322](https://github.com/docker/compose/issues/10322){: + target="_blank" rel="noopener" class="_"} +* Fixed an issue with building the built images IDs collection. Pull Request [compose#10372](https://github.com/docker/compose/issues/10372){: + target="_blank" rel="noopener" class="_"} +* Use configured name separator to define oneoff container name. Fixed [compose#10354](https://github.com/docker/compose/issues/10354){: + target="_blank" rel="noopener" class="_"} +* Fixed concurrent map read/write issue when recreating containers. Fixed [compose#10319](https://github.com/docker/compose/issues/10319){: + target="_blank" rel="noopener" class="_"} +* Compose now supports Dry Run mode for `stop` and `rm` commands. Pull Request [compose#10257](https://github.com/docker/compose/issues/10257){: + target="_blank" rel="noopener" class="_"} +* Compose now supports Dry Run mode for `pull` command. Pull Request [compose#10341](https://github.com/docker/compose/issues/10341){: + target="_blank" rel="noopener" class="_"} +* Compose now supports Dry Run mode for `push` command. Pull Request [compose#10355](https://github.com/docker/compose/issues/10355){: + target="_blank" rel="noopener" class="_"} +* Compose now supports Dry Run mode for `exec` command. Pull Request [compose#10252](https://github.com/docker/compose/issues/10252){: + target="_blank" rel="noopener" class="_"} +* Compose now supports Dry Run mode for `restart` command. Pull Request [compose#10339](https://github.com/docker/compose/issues/10339){: + target="_blank" rel="noopener" class="_"} + ## 2.16.0 {% include release-date.html date="2023-02-08" %} ### Update diff --git a/compose/startup-order.md b/compose/startup-order.md index 087ff65900..99e24a6f27 100644 --- a/compose/startup-order.md +++ b/compose/startup-order.md @@ -7,7 +7,7 @@ notoc: true {% include compose-eol.md %} You can control the order of service startup and shutdown with the -[depends_on](compose-file/index.md#depends_on) option. 
Compose always starts and stops +[depends_on](compose-file/05-services.md#depends_on) option. Compose always starts and stops containers in dependency order, where dependencies are determined by `depends_on`, `links`, `volumes_from`, and `network_mode: "service:..."`. @@ -25,6 +25,6 @@ The solution for detecting the ready state of a service is to use the `conditio ## Reference information -- [`depends_on`](compose-file/index.md#depends_on) -- [`healthcheck`](compose-file/index.md#healthcheck) +- [`depends_on`](compose-file/05-services.md#depends_on) +- [`healthcheck`](compose-file/05-services.md#healthcheck) diff --git a/config/containers/logging/configure.md b/config/containers/logging/configure.md index b928f7dd8f..56b8bb5f97 100644 --- a/config/containers/logging/configure.md +++ b/config/containers/logging/configure.md @@ -135,8 +135,7 @@ Docker provides two modes for delivering messages from the container to the log driver: * (default) direct, blocking delivery from container to driver -* non-blocking delivery that stores log messages in an intermediate per-container - ring buffer for consumption by driver +* non-blocking delivery that stores log messages in an intermediate per-container buffer for consumption by driver The `non-blocking` message delivery mode prevents applications from blocking due to logging back pressure. Applications are likely to fail in unexpected ways when @@ -144,15 +143,14 @@ STDERR or STDOUT streams block. > **Warning** > -> When the buffer is full and a new message is enqueued, the oldest message in -> memory is dropped. Dropping messages is often preferred to blocking the +> When the buffer is full, new messages will not be enqueued. Dropping messages is often preferred to blocking the > log-writing process of an application. {: .warning} The `mode` log option controls whether to use the `blocking` (default) or `non-blocking` message delivery. 
-The `max-buffer-size` log option controls the size of the ring buffer used for +The `max-buffer-size` log option controls the size of the buffer used for intermediate message storage when `mode` is set to `non-blocking`. `max-buffer-size` defaults to 1 megabyte. diff --git a/config/daemon/index.md b/config/daemon/index.md index 3a45b332a7..36836af0d1 100644 --- a/config/daemon/index.md +++ b/config/daemon/index.md @@ -69,8 +69,11 @@ Here's what the configuration file might look like: } ``` -With this configuration the Docker daemon runs in debug mode, uses TLS, and -listens for traffic routed to `192.168.59.3` on port `2376`. You can learn what +In addition to Docker Desktop default values, this configuration enables garbage +collection at a 20GB threshold, and enables buildkit. + +Using this configuration file, run the Docker daemon in debug mode, using TLS, and +listen for traffic routed to `192.168.59.3` on port `2376`. You can learn what configuration options are available in the [dockerd reference docs](../../engine/reference/commandline/dockerd.md#daemon-configuration-file) diff --git a/config/daemon/prometheus.md b/config/daemon/prometheus.md index fb1b34d047..c056b63075 100644 --- a/config/daemon/prometheus.md +++ b/config/daemon/prometheus.md @@ -36,18 +36,17 @@ exist, create it. - **Linux**: `/etc/docker/daemon.json` - **Windows Server**: `C:\ProgramData\docker\config\daemon.json` - **Docker Desktop for Mac / Docker Desktop for Windows**: Click the Docker icon in the toolbar, - select **Settings**, then select **Daemon**. Click **Advanced**. + select **Settings**, then select **Docker Engine**. If the file is currently empty, paste the following: ```json { - "metrics-addr" : "127.0.0.1:9323", - "experimental" : true + "metrics-addr" : "127.0.0.1:9323" } ``` -If the file is not empty, add those two keys, making sure that the resulting +If the file is not empty, add the new key, making sure that the resulting file is valid JSON. 
Be careful that every line ends with a comma (`,`) except for the last line. diff --git a/contribute/contribute-guide.md b/contribute/contribute-guide.md index 53b47da245..3f3f7cd941 100644 --- a/contribute/contribute-guide.md +++ b/contribute/contribute-guide.md @@ -27,10 +27,10 @@ There are two ways to contribute a pull request to the docs repository: Here’s a list of some of the important files: -- `/_data/toc.yaml` defines the left-hand navigation for the docs -- `/js/docs.js` defines most of the docs-specific JS such as the table of contents (ToC) generation and menu syncing -- `/css/style.scss` defines the docs-specific style rules -- `/_layouts/docs.html` is the HTML template file, which defines the header and footer, and includes all the JS/CSS that serves the docs content +- `/_data/toc.yaml` defines the left-hand navigation for the docs. +- `/js/docs.js` defines most of the docs-specific JS such as the table of contents (ToC) generation and menu syncing. +- `/css/style.scss` defines the docs-specific style rules. +- `/_layouts/docs.html` is the HTML template file, which defines the header and footer, and includes all the JS/CSS that serves the docs content. ### Files not edited here @@ -71,7 +71,7 @@ git clone {{ site.repo }}.git cd docs ``` -Then, build and run the documentation using [Docker Compose](../compose/index.md) +Then, build and run the documentation using [Docker Compose](../compose/index.md): ```bash docker compose up -d --build diff --git a/contribute/overview.md b/contribute/overview.md index eebbaa579c..e634eeacc2 100644 --- a/contribute/overview.md +++ b/contribute/overview.md @@ -76,7 +76,7 @@ as possible for you to work in this repository. The following sections guide you We also provide: -- A section of useful components you can add to your documentation. For components and controls, we use [Bootstrap](https://getbootstrap.com) +- A section of useful components you can add to your documentation. 
For components and controls, we use [Bootstrap](https://getbootstrap.com). - Information on Docker's [tone and voice](style/voice-tone.md). - A [writing checklist](checklist.md) to help you when you're contributing to Docker's documentation. - A command-line tool called vale to check the style and [help you find errors in your writing](contribute-guide.md#test-the-docs-locally). diff --git a/contribute/style/formatting.md b/contribute/style/formatting.md index 0371d04617..0b4123f6bd 100644 --- a/contribute/style/formatting.md +++ b/contribute/style/formatting.md @@ -25,7 +25,7 @@ Page titles should be action orientated. For example: ### Best practice: -- Make sure the title of your page and the TOC entry matches +- Make sure the title of your page and the TOC entry matches. - If you want to use a ‘:’ in a page title in the table of contents (_toc.yaml), you must wrap the entire title in “” to avoid breaking the build. ## Images diff --git a/desktop/backup-and-restore.md b/desktop/backup-and-restore.md index 02a0e5fa35..db4fb464d7 100644 --- a/desktop/backup-and-restore.md +++ b/desktop/backup-and-restore.md @@ -27,7 +27,7 @@ computer, for example. 2. Use [`docker push`](../engine/reference/commandline/push.md) to push any images you have built locally and want to keep to the [Docker Hub registry](../docker-hub/index.md). - Make sure to configure the [repository's visibility as "private"](../docker-hub/repos/configure/index.md) + Make sure to configure the [repository's visibility as "private"](../docker-hub/repos/index.md) for images that should not be publicly accessible. Alternatively, use [`docker image save -o images.tar image1 [image2 ...]`](../engine/reference/commandline/save.md) diff --git a/desktop/get-started.md b/desktop/get-started.md index 298727ce79..a9054740f4 100644 --- a/desktop/get-started.md +++ b/desktop/get-started.md @@ -29,37 +29,29 @@ redirect_from: ## Quick Start Guide - Once Docker Desktop is installed, the Quick Start Guide launches. 
It includes a simple exercise to build an example Docker image, run it as a container, push and save the image to Docker Hub. +The Quick Start Guide includes a simple exercise to build an example Docker image, run it as a container, push and save the image to Docker Hub. - ![Docker Quick Start tutorial](images/docker-tutorial.png) +![Docker Quick Start tutorial](images/docker-tutorial.png) -To run the Quick Start Guide on demand, select the Docker menu ![whale menu](images/whale-x.svg){: .inline} and then choose **Quick Start Guide**. +To run the Quick Start Guide, select the Docker menu ![whale menu](images/whale-x.svg){: .inline} and then choose **Quick Start Guide**. -For a more detailed guide, see [Get started](../get-started/index.md). +> **Note** +> +> To access the Quick Start Guide, you must turn off **Access experimental features**. +> Learn how to turn off **Access experimental features** on [Mac](../settings/mac/#experimental-features), [Windows](../settings/windows/#experimental-features), or [Linux](../settings/linux/#experimental-features). + + +For a more detailed guide, see [Get started](../get-started/index.md), or the [Docker Desktop hands-on guides](../get-started/hands-on-overview.md). ## Sign in to Docker Desktop -We recommend that you authenticate using the **Sign in/Create ID** option in the top-right corner of Docker Desktop. +We recommend that you authenticate using the **Sign in** option in the top-right corner of the Docker Dashboard. Once logged in, you can access your Docker Hub repositories directly from Docker Desktop. Authenticated users get a higher pull rate limit compared to anonymous users. For example, if you are authenticated, you get 200 pulls per 6 hour period, compared to 100 pulls per 6 hour period per IP address for anonymous users. For more information, see [Download rate limit](../docker-hub/download-rate-limit.md). 
-In large enterprises where admin access is restricted, administrators can create a registry.json file and deploy it to the developers’ machines using a device management software as part of the Docker Desktop installation process. Enforcing developers to authenticate through Docker Desktop also allows administrators to set up guardrails using features such as [Image Access Management](../docker-hub/image-access-management.md) which allows team members to only have access to Trusted Content on Docker Hub, and pull only from the specified categories of images. For more information, see [Configure registry.json to enforce sign-in](../docker-hub/configure-sign-in.md). - -### Two-factor authentication - -Docker Desktop lets you to sign in to Docker Hub using two-factor authentication. Two-factor authentication provides an extra layer of security when accessing your Docker Hub account. - -You must turn on two-factor authentication in Docker Hub before signing into your Docker Hub account through Docker Desktop. For instructions, see [Enable two-factor authentication for Docker Hub](/docker-hub/2fa/). - -After two-factor authentication is turned on: - -1. Go to the Docker Desktop menu and then select **Sign in / Create Docker ID**. - -2. Enter your Docker ID and password and select **Sign in**. - -3. After you have successfully signed in, Docker Desktop prompts you to enter the authentication code. Enter the six-digit code from your phone and then select **Verify**. +In large enterprises where admin access is restricted, administrators can [Configure registry.json to enforce sign-in](../docker-hub/configure-sign-in.md). Enforcing developers to authenticate through Docker Desktop also allows administrators to improve their organization’s security posture for containerized development by taking advantage of [Hardened Desktop](hardened-desktop/index.md). 
### Credentials management for Linux users diff --git a/desktop/install/archlinux.md b/desktop/install/archlinux.md index 61af6e8178..7b6ca877da 100644 --- a/desktop/install/archlinux.md +++ b/desktop/install/archlinux.md @@ -6,9 +6,9 @@ redirect_from: - /desktop/linux/install/archlinux/ --- -This topic discusses installation of Docker Desktop from an [Arch package](https://desktop-stage.docker.com/linux/main/amd64/docker-desktop-4.17.0-x86_64.pkg.tar.zst?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64) that Docker provides in addition to the supported platforms. Docker has not tested or verified the installation. +This topic discusses installation of Docker Desktop from an [Arch package](https://desktop-stage.docker.com/linux/main/amd64/docker-desktop-4.18.0-x86_64.pkg.tar.zst?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64) that Docker provides in addition to the supported platforms. Docker has not tested or verified the installation. -[Arch package (experimental)](https://desktop.docker.com/linux/main/amd64/docker-desktop-4.17.0-x86_64.pkg.tar.zst?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64){: .button .primary-btn } +[Arch package (experimental)](https://desktop.docker.com/linux/main/amd64/docker-desktop-4.18.0-x86_64.pkg.tar.zst?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64){: .button .primary-btn } _For checksums, see [Release notes](../release-notes.md)_ diff --git a/desktop/install/debian.md b/desktop/install/debian.md index 20eaecbf86..5f39edf4a0 100644 --- a/desktop/install/debian.md +++ b/desktop/install/debian.md @@ -9,7 +9,7 @@ redirect_from: This page contains information on how to install, launch, and upgrade Docker Desktop on a Debian distribution. 
-[DEB package](https://desktop.docker.com/linux/main/amd64/docker-desktop-4.17.0-amd64.deb?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64){: .button .primary-btn } +[DEB package](https://desktop.docker.com/linux/main/amd64/docker-desktop-4.18.0-amd64.deb?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64){: .button .primary-btn } _For checksums, see [Release notes](../release-notes.md)_ @@ -52,7 +52,7 @@ Recommended approach to install Docker Desktop on Debian: 1. Set up [Docker's package repository](../../engine/install/debian.md#set-up-the-repository). -2. Download latest [DEB package](https://desktop.docker.com/linux/main/amd64/docker-desktop-4.17.0-amd64.deb?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64). +2. Download latest [DEB package](https://desktop.docker.com/linux/main/amd64/docker-desktop-4.18.0-amd64.deb?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64). 3. Install the package with apt as follows: diff --git a/desktop/install/fedora.md b/desktop/install/fedora.md index 7f82f1d042..a8f9195a10 100644 --- a/desktop/install/fedora.md +++ b/desktop/install/fedora.md @@ -9,7 +9,7 @@ redirect_from: This page contains information on how to install, launch and upgrade Docker Desktop on a Fedora distribution. -[RPM package](https://desktop.docker.com/linux/main/amd64/docker-desktop-4.17.0-x86_64.rpm?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64){: .button .primary-btn } +[RPM package](https://desktop.docker.com/linux/main/amd64/docker-desktop-4.18.0-x86_64.rpm?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64){: .button .primary-btn } ## Prerequisites @@ -32,7 +32,7 @@ To install Docker Desktop on Fedora: 1. Set up [Docker's package repository](../../engine/install/fedora.md#set-up-the-repository). -2. 
Download latest [RPM package](https://desktop.docker.com/linux/main/amd64/docker-desktop-4.17.0-x86_64.rpm?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64). +2. Download latest [RPM package](https://desktop.docker.com/linux/main/amd64/docker-desktop-4.18.0-x86_64.rpm?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64). 3. Install the package with dnf as follows: diff --git a/desktop/install/linux-install.md b/desktop/install/linux-install.md index 4ff0e84af5..0e545ac0cc 100644 --- a/desktop/install/linux-install.md +++ b/desktop/install/linux-install.md @@ -17,25 +17,6 @@ This page contains information about general system requirements, supported plat >For more information see [What is the difference between Docker Desktop for Linux and Docker Engine](../faqs/linuxfaqs.md#what-is-the-difference-between-docker-desktop-for-linux-and-docker-engine). {: .important} -## System requirements - -To install Docker Desktop successfully, your Linux host must meet the following general requirements: - -- 64-bit kernel and CPU support for virtualization. -- KVM virtualization support. Follow the [KVM virtualization support instructions](#kvm-virtualization-support) to check if the KVM kernel modules are enabled and how to provide access to the kvm device. -- **QEMU must be version 5.2 or newer**. We recommend upgrading to the latest version. -- systemd init system. -- Gnome, KDE, or MATE Desktop environment. - - For many Linux distros, the Gnome environment does not support tray icons. To add support for tray icons, you need to install a Gnome extension. For example, [AppIndicator](https://extensions.gnome.org/extension/615/appindicator-support/){:target="_blank" rel="noopener" class="_"}. -- At least 4 GB of RAM. -- Enable configuring ID mapping in user namespaces, see [File sharing](../faqs/linuxfaqs.md#how-do-i-enable-file-sharing). - -Docker Desktop for Linux runs a Virtual Machine (VM). 
For more information on why, see [Why Docker Desktop for Linux runs a VM](../faqs/linuxfaqs.md#why-does-docker-desktop-for-linux-run-a-vm). - -> **Note:** -> -> Docker does not provide support for running Docker Desktop in nested virtualization scenarios. We recommend that you run Docker Desktop for Linux natively on supported distributions. - ## Supported platforms Docker provides `.deb` and `.rpm` packages from the following Linux distributions @@ -56,6 +37,24 @@ An experimental package is available for [Arch](archlinux.md)-based distribution Docker supports Docker Desktop on the current LTS release of the aforementioned distributions and the most recent version. As new versions are made available, Docker stops supporting the oldest version and supports the newest version. +## System requirements + +To install Docker Desktop successfully, your Linux host must meet the following general requirements: + +- 64-bit kernel and CPU support for virtualization. +- KVM virtualization support. Follow the [KVM virtualization support instructions](#kvm-virtualization-support) to check if the KVM kernel modules are enabled and how to provide access to the kvm device. +- **QEMU must be version 5.2 or newer**. We recommend upgrading to the latest version. +- systemd init system. +- Gnome, KDE, or MATE Desktop environment. + - For many Linux distros, the Gnome environment does not support tray icons. To add support for tray icons, you need to install a Gnome extension. For example, [AppIndicator](https://extensions.gnome.org/extension/615/appindicator-support/){:target="_blank" rel="noopener" class="_"}. +- At least 4 GB of RAM. +- Enable configuring ID mapping in user namespaces, see [File sharing](../faqs/linuxfaqs.md#how-do-i-enable-file-sharing). + +Docker Desktop for Linux runs a Virtual Machine (VM). For more information on why, see [Why Docker Desktop for Linux runs a VM](../faqs/linuxfaqs.md#why-does-docker-desktop-for-linux-run-a-vm). 
+ +> **Note:** +> +> Docker does not provide support for running Docker Desktop in nested virtualization scenarios. We recommend that you run Docker Desktop for Linux natively on supported distributions. ### KVM virtualization support diff --git a/desktop/install/mac-install.md b/desktop/install/mac-install.md index 07f2a05b6a..1a0a47a96a 100644 --- a/desktop/install/mac-install.md +++ b/desktop/install/mac-install.md @@ -78,11 +78,15 @@ Your Mac must meet the following requirements to install Docker Desktop successf {% include desktop-license-update.md %} -4. Select **Accept** to continue. Docker Desktop starts after you accept the terms. +4. Select **Accept** to continue. Note that Docker Desktop will not run if you do not agree to the terms. You can choose to accept the terms at a later date by opening Docker Desktop. For more information, see [Docker Desktop Subscription Service Agreement](https://www.docker.com/legal/docker-subscription-service-agreement){: target="_blank" rel="noopener" class="_" }. We recommend that you also read the [FAQs](https://www.docker.com/pricing/faq){: target="_blank" rel="noopener" class="_"}. +5. From the installation window, select either: + - **Use recommended settings (Requires password)**. This let's Docker Desktop automatically set the necessary configuration settings. + - **Use advanced settings**. You can then set the location of the Docker CLI tools either in the system or user directory, enable the default Docker socket, and enable privileged port mapping. See [Settings](../settings/mac.md#advanced), for more information and how to set the location of the Docker CLI tools. +6. Select **Finish**. If you have applied any of the above configurations that require a password in step 5, you are asked to enter your password to confirm. 
### Install from the command line @@ -105,13 +109,6 @@ The `install` command accepts the following flags: - For example: `--allowed-org= --admin-settings='{"configurationFileVersion": 2, "enhancedContainerIsolation": {"value": true, "locked": false}}'` -### Runtime permission requirements - -For some functions, Docker Desktop may require elevated privileges. The user is informed and prompted for authorization whenever such -configuration must be performed. [Docker Desktop permission requirements](../mac/permission-requirements.md) provides details on each configuration -and use case. - - ## Where to go next - [Docker Desktop for Apple silicon](../install/mac-install.md) for detailed information about Docker Desktop for Apple silicon. diff --git a/desktop/install/ubuntu.md b/desktop/install/ubuntu.md index b93fb6bf58..9783c5c33a 100644 --- a/desktop/install/ubuntu.md +++ b/desktop/install/ubuntu.md @@ -9,7 +9,7 @@ redirect_from: This page contains information on how to install, launch and upgrade Docker Desktop on an Ubuntu distribution. -[DEB package](https://desktop.docker.com/linux/main/amd64/docker-desktop-4.17.0-amd64.deb?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64){: .button .primary-btn } +[DEB package](https://desktop.docker.com/linux/main/amd64/docker-desktop-4.18.0-amd64.deb?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64){: .button .primary-btn } _For checksums, see [Release notes](../release-notes.md)_ @@ -48,7 +48,7 @@ Recommended approach to install Docker Desktop on Ubuntu: 1. Set up [Docker's package repository](../../engine/install/ubuntu.md#set-up-the-repository). -2. Download latest [DEB package](https://desktop.docker.com/linux/main/amd64/docker-desktop-4.17.0-amd64.deb?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64). +2. 
Download latest [DEB package](https://desktop.docker.com/linux/main/amd64/docker-desktop-4.18.0-amd64.deb?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64).

3. Install the package with apt as follows:

diff --git a/desktop/mac/permission-requirements.md b/desktop/mac/permission-requirements.md
index 867b00fcff..84ac356b9d 100644
--- a/desktop/mac/permission-requirements.md
+++ b/desktop/mac/permission-requirements.md
@@ -13,39 +13,108 @@ It also provides clarity on running containers as `root` as opposed to having `r

## Permission requirements

-Docker Desktop for Mac is run as an unprivileged user. However, certain functionalities may be required for Docker Desktop to perform a limited set of privileged configurations such as:
- - [Installing symlinks](#installing-symlinks) in `/usr/local/bin`. This ensures the `docker` CLI is on the user’s PATH without having to reconfigure shells, log out then log back in, for example.
- - [Binding privileged ports](#binding-privileged-ports) that are less than 1024. The so-called "privileged ports" have not generally been used as a security boundary, however OSes still prevent unprivileged processes from binding them which breaks commands like `docker run -p 127.0.0.1:80:80 docker/getting-started`.
- - [Ensuring `localhost` and `kubernetes.docker.internal` are defined](#ensuring-localhost-and-kubernetesdockerinternal-are-defined) in `/etc/hosts`. Some old macOS installs did not have `localhost` in `/etc/hosts`, which caused Docker to fail. Defining the DNS name `kubernetes.docker.internal` allows us to share Kubernetes contexts with containers.
+Docker Desktop for Mac is run as an unprivileged user. However, certain functionalities are required for Docker Desktop to perform a limited set of privileged configurations such as:
+ - [Installing symlinks](#installing-symlinks) in `/usr/local/bin`.
+ - [Binding privileged ports](#binding-privileged-ports) that are less than 1024.
The so-called "privileged ports" are not generally used as a security boundary, however OSes still prevent unprivileged processes from binding them which breaks commands like `docker run -p 127.0.0.1:80:80 docker/getting-started`. + - [Ensuring `localhost` and `kubernetes.docker.internal` are defined](#ensuring-localhost-and-kubernetesdockerinternal-are-defined) in `/etc/hosts`. Some old macOS installs do not have `localhost` in `/etc/hosts`, which causes Docker to fail. Defining the DNS name `kubernetes.docker.internal` allows Docker to share Kubernetes contexts with containers. - Securely caching the Registry Access Management policy which is read-only for the developer. -Versions prior to 4.15 of Docker Desktop for Mac require `root` access to be granted on the first run. The first time that Docker Desktop is launched the user receives an admin prompt to grant permissions for a privileged helper service `com.docker.vmnetd` to be installed. For subsequent runs, no `root` privileges are required. Following the principle of least privilege, this approach allows `root` access to be used only for the operations for which it is absolutely necessary, while still being able to use Docker Desktop as an unprivileged user. +Depending on which version of Docker Desktop for Mac is used, privileged access is granted either during installation, first run, or only when it is needed. + + +
+
+ +From version 4.18 and above, Docker Desktop for Mac provides greater control over functionality that's enabled during installation. + +The first time Docker Desktop for Mac is launched, you are presented with an installation window where you can choose to either use the default settings, which work for most developers and require privileged access to be granted, or use advanced settings. + +If you work in an environment with elevated security requirements, for instance where local administrative access is prohibited, then you can use the advanced settings to remove the need for granting privileged access. You can configure: +- The location of the Docker CLI tools either in the system or user directory +- The default Docker socket +- Privileged port mapping + +Depending on which advanced settings you configure, you must enter your password to confirm. + +You can change these configurations at a later date from the **Advanced** page in **Settings**. + +
+
+
+ +Versions 4.15 to 4.17 of Docker Desktop for Mac doesn't require the privileged process to run permanently. Whenever elevated privileges are needed for a configuration, Docker Desktop prompts you with information on the task it needs to perform. Most configurations are applied once, subsequent runs don't prompt for privileged access anymore. +The only time Docker Desktop may start the privileged process is for binding privileged ports that are not allowed by default on the host OS. + +
+
+
+ +Versions prior to 4.15 of Docker Desktop for Mac require `root` access to be granted on the first run. The first time that Docker Desktop is launched you receive an admin prompt to grant permission for the installation of the `com.docker.vmnetd` privileged helper service. For subsequent runs, `root` privileges aren't required. Following the principle of least privilege, this approach allows `root` access to be used only for the operations for which it is absolutely necessary, while still being able to use Docker Desktop as an unprivileged user. All privileged operations are run using the privileged helper process `com.docker.vmnetd`. -For security reasons, version 4.15 of Docker Desktop for Mac doesn't require the user to run a permanent privileged process. Whenever elevated privileges are needed for a configuration, Docker Desktop prompts the user with information on the task it needs to perform. Most configurations are applied once, subsequent runs don't prompt for privileged access anymore. -The only time Docker Desktop may start the privileged process is for binding privileged ports that are not allowed by default on the host OS. +
+
+
### Installing symlinks -The docker binaries are installed by default in `/Applications/Docker.app/Contents/Resources/bin`. Docker Desktop ensures the `docker` CLI is on the user’s PATH without having to reconfigure shells, log out then log back in for example. As on most systems `/usr/local/bin` is in the user's PATH by default, and so Docker Desktop creates symlinks for all docker binaries in it. +The Docker binaries are installed by default in `/Applications/Docker.app/Contents/Resources/bin`. Docker Desktop ensures the Docker CLI is on your PATH without having to reconfigure shells, log out then log back in for example. As on most systems `/usr/local/bin` is on the PATH by default, and so Docker Desktop creates symlinks for all Docker binaries in it. -Installing symlinks in `/usr/local/bin` is a privileged configuration Docker Desktop performs on the first startup. Docker Desktop checks if symlinks exists and takes the following actions: + +
+
+
+With version 4.18 or later, you can choose whether to install symlinks either in `/usr/local/bin` or `$HOME/.docker/bin` during installation of Docker Desktop.
+
+If `/usr/local/bin` is chosen, and this location is not writable by unprivileged users, Docker Desktop requires authorization to confirm this choice before the symlinks to Docker binaries are created in `/usr/local/bin`. If `$HOME/.docker/bin` is chosen, authorization is not required, but you must [manually add `$HOME/.docker/bin`](../settings/mac.md#advanced) to your PATH.
+
+You are also given the option to enable the installation of the `/var/run/docker.sock` symlink. Creating this symlink ensures various Docker clients relying on the default Docker socket path to work without additional changes.
+
+As the `/var/run` is mounted as a tmpfs, its content is deleted on restart, symlink to the Docker socket included. To ensure the Docker socket exists after restart, Docker Desktop sets up a `launchd` startup task that creates the symlink by running `ln -s -f /Users//.docker/run/docker.sock /var/run/docker.sock`. This ensures the user is not prompted on each startup to create the symlink. If the user does not enable this option at installation, the symlink and the startup task is not created and the user may have to explicitly set the `DOCKER_HOST` environment variable to `/Users//.docker/run/docker.sock` in the clients it is using. The Docker CLI relies on the current context to retrieve the socket path, the current context is set to `desktop-linux` on Docker Desktop startup.
+
+
+
+
+For versions prior to 4.18, installing symlinks in `/usr/local/bin` is a privileged configuration Docker Desktop performs on the first startup. Docker Desktop checks if symlinks exists and takes the following actions:
 - Creates the symlinks without the admin prompt if `/usr/local/bin` is writable by unprivileged users.
-- Triggers an admin prompt for the user to authorize the creation of symlinks in `/usr/local/bin`. If the user authorizes this, symlinks to docker binaries are created in `/usr/local/bin`.
-If the user rejects the prompt, is not willing to run configurations requiring elevated privileges, or does not have admin rights on their machine, Docker Desktop creates the symlinks in `~/.docker/bin` and edits the user's shell profile to ensure this location is in the user's PATH. This requires all open shells to be reloaded.
-The rejection is recorded for future runs to avoid prompting the user again.
-For any failure to ensure binaries are on the user's PATH, the user may need to manually add to their PATH the `/Applications/Docker.app/Contents/Resources/bin` or use the full path to docker binaries.
+- Triggers an admin prompt for the user to authorize the creation of symlinks in `/usr/local/bin`. If you authorize this, symlinks to Docker binaries are created in `/usr/local/bin`. If you reject the prompt, are not willing to run configurations requiring elevated privileges, or don't have admin rights on your machine, Docker Desktop creates the symlinks in `~/.docker/bin` and edits your shell profile to ensure this location is in your PATH. This requires all open shells to be reloaded.
+The rejection is recorded for future runs to avoid prompting you again.
+For any failure to ensure binaries are on your PATH, you may need to manually add to your PATH the `/Applications/Docker.app/Contents/Resources/bin` or use the full path to Docker binaries.
-A particular case is the installation of the `/var/run/docker.sock` symlink.
Creating this symlink ensures various docker clients relying on the default docker socket path to work without additional changes. As the `/var/run` is mounted as a tmpfs, its content is deleted on restart, symlink to docker socket included. -To ensure the docker socket exists after restart, Docker Desktop sets up a `launchd` startup task that creates the symlink by running `ln -s -f /Users//.docker/run/docker.sock /var/run/docker.sock`. This ensures the user is not prompted on each startup to create the symlink. If the user rejects the prompt, the symlink and the startup task is not created and the user may have to explicitly set the `DOCKER_HOST` to `/Users//.docker/run/docker.sock` in the clients it is using. The docker CLI relies on the current context to retrieve the socket path, the current context is set to `desktop-linux` on Docker Desktop startup. +A particular case is the installation of the `/var/run/docker.sock` symlink. Creating this symlink ensures various Docker clients relying on the default Docker socket path to work without additional changes. As the `/var/run` is mounted as a tmpfs, its content is deleted on restart, symlink to Docker socket included. +To ensure the Docker socket exists after restart, Docker Desktop sets up a `launchd` startup task that creates the symlink by running `ln -s -f /Users//.docker/run/docker.sock /var/run/docker.sock`. This ensures that you are not prompted on each startup to create the symlink. If you reject the prompt, the symlink and the startup task is not created and you may have to explicitly set the `DOCKER_HOST` to `/Users//.docker/run/docker.sock` in the clients it is using. The Docker CLI relies on the current context to retrieve the socket path, the current context is set to `desktop-linux` on Docker Desktop startup. + +
+
+
### Binding privileged ports -When running a container that requires binding privileged ports, Docker Desktop first attempts to bind it directly as an unprivileged process. If the OS prevents this and it fails, Docker Desktop checks if the `com.docker.vmnetd` privileged helper process is running to bind the privileged port through it. + +
+
-If the privileged helper process is not running, Docker Desktop prompts the user for authorization to run it under [launchd](https://developer.apple.com/library/archive/documentation/MacOSX/Conceptual/BPSystemStartup/Chapters/CreatingLaunchdJobs.html). +With version 4.18 and later you can choose to enable privileged port mapping during installation, or from the **Advanced** page in **Settings** post-installation. Docker Desktop requires authorization to confirm this choice. +
+
+
+
+For versions below 4.18, if you run a container that requires binding privileged ports, Docker Desktop first attempts to bind it directly as an unprivileged process. If the OS prevents this and it fails, Docker Desktop checks if the `com.docker.vmnetd` privileged helper process is running to bind the privileged port through it.
+
+If the privileged helper process is not running, Docker Desktop prompts you for authorization to run it under [launchd](https://developer.apple.com/library/archive/documentation/MacOSX/Conceptual/BPSystemStartup/Chapters/CreatingLaunchdJobs.html).
This configures the privileged helper to run as in the versions of Docker Desktop prior to 4.15. However, the functionality provided by this privileged helper now only supports port binding and caching the Registry Access Management policy.
-If the user declines the launch of the privileged helper process, binding the privileged port cannot be done and the docker CLI returns an error:
+If you decline the launch of the privileged helper process, binding the privileged port cannot be done and the Docker CLI returns an error:

```console
$ docker run -p 127.0.0.1:80:80 docker/getting-started
@@ -58,20 +127,41 @@ ERRO[0003] error waiting for container: context canceled

> **Note**
>
-> The command may fail with the same error if the user takes too long to authorize the prompt to start the helper process, as it may timeout.
+> The command may fail with the same error if you take too long to authorize the prompt to start the helper process, as it may timeout.
+
+
+
### Ensuring `localhost` and `kubernetes.docker.internal` are defined -On first run, Docker Desktop checks if `localhost` is resolved to `127.0.0.1`. In case the resolution fails, it prompts the user to allow adding the mapping to `/etc/hosts`. Similarly, when the Kubernetes cluster is installed, it checks that `kubernetes.docker.internal` is resolved to `127.0.0.1` and prompts the user to do so. + +
+
+ +With version 4.18 it is your responsibility to ensure that localhost is resolved to `127.0.0.1` and if Kubernetes is used, that `kubernetes.docker.internal` is resolved to `127.0.0.1`. + +
+
+
+ +On first run, Docker Desktop checks if `localhost` is resolved to `127.0.0.1`. In case the resolution fails, it prompts you to allow adding the mapping to `/etc/hosts`. Similarly, when the Kubernetes cluster is installed, it checks that `kubernetes.docker.internal` is resolved to `127.0.0.1` and prompts you to do so. + +
+
+
## Installing from the commandline -In version 4.11 and later of Docker Desktop for Mac, privileged configurations are applied during the installation with the `--user` flag on the [install command](../install/mac-install.md#install-from-the-command-line). In this case, the user is not prompted to grant root privileges on the first run of Docker Desktop. Specifically, the `--user` flag: +In version 4.11 and later of Docker Desktop for Mac, privileged configurations are applied during the installation with the `--user` flag on the [install command](../install/mac-install.md#install-from-the-command-line). In this case, you are not prompted to grant root privileges on the first run of Docker Desktop. Specifically, the `--user` flag: - Uninstalls the previous `com.docker.vmnetd` if present -- Sets up symlinks for the user +- Sets up symlinks - Ensures that `localhost` is resolved to `127.0.0.1` -The limitation of this approach is that Docker Desktop can only be run by one user account per machine, namely the one specified in the `-–user` flag. +The limitation of this approach is that Docker Desktop can only be run by one user-account per machine, namely the one specified in the `-–user` flag. ## Privileged Helper diff --git a/desktop/networking.md b/desktop/networking.md index cc162109af..0bf3380175 100644 --- a/desktop/networking.md +++ b/desktop/networking.md @@ -31,8 +31,8 @@ When you run a container with the `-p` argument, for example: $ docker run -p 80:80 -d nginx ``` -Docker Desktop makes whatever is running on port 80 in the container (in -this case, `nginx`) available on port 80 of `localhost`. In this example, the +Docker Desktop makes whatever is running on port 80 in the container, in +this case, `nginx`, available on port 80 of `localhost`. In this example, the host and container ports are the same. 
If, for example, you already have something running on port 80 of your host
machine, you can connect the container to a different port:
@@ -58,11 +58,15 @@ Docker Desktop on Mac and Linux allows you to use the host’s SSH agent inside

1. Bind mount the SSH agent socket by adding the following parameter to your `docker run` command:

-    `--mount type=bind,src=/run/host-services/ssh-auth.sock,target=/run/host-services/ssh-auth.sock`
+    ```console
+    $ --mount type=bind,src=/run/host-services/ssh-auth.sock,target=/run/host-services/ssh-auth.sock
+    ```

2. Add the `SSH_AUTH_SOCK` environment variable in your container:

-    `-e SSH_AUTH_SOCK="/run/host-services/ssh-auth.sock"`
+    ```console
+    $ -e SSH_AUTH_SOCK="/run/host-services/ssh-auth.sock"
+    ```

To enable the SSH agent in Docker Compose, add the following flags to your service:
@@ -104,7 +108,7 @@ However if you are a Windows user, it works with Windows containers.

### I want to connect from a container to a service on the host

-The host has a changing IP address (or none if you have no network access). We recommend that you connect to the special DNS name
+The host has a changing IP address, or none if you have no network access. We recommend that you connect to the special DNS name
`host.docker.internal` which resolves to the internal IP address used by the host. This is for development purpose and does not work in a production environment outside of Docker Desktop.
@@ -129,7 +133,7 @@ If you have installed Python on your machine, use the following instructions as

#### I want to connect to a container from the host

-Port forwarding works for `localhost`. `--publish`, `-p`, or `-P` all work.
+Port forwarding works for `localhost`. `--publish`, `-p`, or `-P` all work.
Ports exposed from Linux are forwarded to the host.
Our current recommendation is to publish a port, or to connect from another diff --git a/desktop/release-notes.md b/desktop/release-notes.md index 2800935475..e65af29a91 100644 --- a/desktop/release-notes.md +++ b/desktop/release-notes.md @@ -24,13 +24,148 @@ Take a look at the [Docker Public Roadmap](https://github.com/docker/roadmap/pro For frequently asked questions about Docker Desktop releases, see [FAQs](faqs/general.md/#releases) +## 4.18.0 + +{% include release-date.html date="2023-04-03" %} + +> Download Docker Desktop +> +> {% include desktop-install.html %} + +### New + +- Initial beta release of `docker init` as per [the roadmap](https://github.com/docker/roadmap/issues/453). +- Added a new **Learning Center** tab to help users get started with Docker. +- Added an experimental file-watch command to Docker Compose that automatically updates your running Compose services as you edit and save your code. + +### Upgrades + +- [Buildx v0.10.4](https://github.com/docker/buildx/releases/tag/v0.10.4) +- [Compose 2.17.2](https://github.com/docker/compose/releases/tag/v2.17.2) +- [Containerd v1.6.18](https://github.com/containerd/containerd/releases/tag/v1.6.18), which includes fixes for [CVE-2023-25153](https://github.com/advisories/GHSA-259w-8hf6-59c2) and [CVE-2023-25173](https://github.com/advisories/GHSA-hmfx-3pcx-653p). +- [Docker Engine v20.10.24](https://docs.docker.com/engine/release-notes/20.10/#201024), which contains fixes for [CVE-2023-28841](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-28841), + [CVE-2023-28840](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-28840), and + [CVE-2023-28842](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-28842). + +### Bug fixes and enhancements + +#### For all platforms + +- [Docker Scout CLI](../scout/index.md#docker-scout-cli) can now compare two images and display packages and vulnerabilities differences. 
This command is in [Early Access](../release-lifecycle.md) and might change in the future. +- [Docker Scout CLI](../scout/index.md#docker-scout-cli) now displays base image update and remediation recommendations using `docker scout recommendations`. It also displays a short overview of an image using `docker scout quickview` commands. +- You can now search for extensions direct from the Marketplace, as well as using **Global Search**. +- Fixed a bug where `docker buildx` container builders would lose access to the network after 24hrs. +- Reduced how often users are prompted for feedback on Docker Desktop. +- Removed minimum VM swap size. +- Added support for subdomain match, CIDR match, `.` and `_.` in HTTP proxy exclude lists. +- Fixed a bug in the transparent TLS proxy when the Server Name Indication field is not set. +- Fixed a grammatical error in Docker Desktop engine status message. + +### For Windows + +- Fixed a bug where `docker run --gpus=all` hangs. Fixes [docker/for-win#13324](https://github.com/docker/for-win/issues/13324). +- Fixed a bug where Registry Access Management policy updates were not downloaded. +- Docker Desktop now allows Windows containers to work when BitLocker is enabled on `C:`. +- Docker Desktop with the WSL backend no longer requires the `com.docker.service` privileged service to run permanently. For more information see [Permission requirements for Windows](https://docs.docker.com/desktop/windows/permission-requirements/). + +### For Mac + +- Fixed a performance issue where attributes stored on the host would not be cached for VirtioFS users. +- The first time Docker Desktop for Mac is launched, the user is presented with an installation window to confirm or adjust the configuration that requires privileged access. For more information see [Permission requirements for Mac](https://docs.docker.com/desktop/mac/permission-requirements/). 
+- Added the **Advanced** tab in **Settings**, where users can adjust the settings which require privileged access. + +### For Linux + +- Fixed a bug where the VM networking crashes after 24h. [docker/for-linux#131](https://github.com/docker/desktop-linux/issues/131) + +### Security + +#### For all platforms + +- Fixed a security issue with the Artifactory Integration where it would fall back to sending registry credentials over plain HTTP if HTTPS check failed. Only users who have `Access experimental features` enabled are affected. Fixes [docker/for-win#13344](https://github.com/docker/for-win/issues/13344). + +#### For Mac + +- Removed the `com.apple.security.cs.allow-dyld-environment-variables` and `com.apple.security.cs.disable-library-validation` entitlements which allow an arbitrary dynamic library to be loaded with Docker Desktop via the `DYLD_INSERT_LIBRARIES` environment variable. + +### Known Issues + +- Uninstalling Docker Desktop on Mac from the **Troubleshoot** page might trigger an unexpected fatal error popup. + +## 4.17.1 + +{% include release-date.html date="2023-03-20" %} + +> Download Docker Desktop +> +> [Windows](https://desktop.docker.com/win/main/amd64/101757/Docker%20Desktop%20Installer.exe) + +
+
+ +
+
+
  • Windows: SHA-256 2ea284648a5f708428f3a06bb8e1eb68cbeba6689b53c53d7ca24043a8f34800
  • +
    +
    +
    +
    + +### Bug fixes and enhancements + +#### For Windows + +- Docker Desktop now allows Windows containers to work when BitLocker is enabled on C: +- Fixed a bug where `docker buildx` container builders would lose access to the network after 24hrs. +- Fixed a bug where Registry Access Management policy updates were not downloaded. +- Improved debug information to better characterise failures under WSL 2. + +### Known Issues + +- Running containers with `--gpus` on Windows with the WSL 2 backend does not work. This will be fixed in future releases. See [docker/for-win/13324](https://github.com/docker/for-win/issues/13324). + ## 4.17.0 {% include release-date.html date="2023-02-27" %} > Download Docker Desktop > -> {% include desktop-install.html %} +> [Windows](https://desktop.docker.com/win/main/amd64/99724/Docker%20Desktop%20Installer.exe) | +> [Mac with Intel chip](https://desktop.docker.com/mac/main/amd64/99724/Docker.dmg) | +> [Mac with Apple chip](https://desktop.docker.com/mac/main/arm64/99724/Docker.dmg) | +> [Debian](https://desktop.docker.com/linux/main/amd64/99724/docker-desktop-4.17.0-amd64.deb) | +> [RPM](https://desktop.docker.com/linux/main/amd64/99724/docker-desktop-4.17.0-x86_64.rpm) | +> [Arch package](https://desktop.docker.com/linux/main/amd64/99724/docker-desktop-4.17.0-x86_64.pkg.tar.zst) + +
    +
    + +
    +
    +
  • Windows: SHA-256 69ea659b0ca0e160a1de9bd63dc5697f5eb89fff1d33484fb8ef9793e43d0d45
  • +
  • Mac Intel: SHA-256 eb0531122a62859ce7b029e943fdad365603a916e6c15c107514c1e4a818d7ef
  • +
  • Mac Arm: SHA-256 5e01465d93dfe18d7678a96705e7c26bb654b6766f06373b5cffbf77c641bccc
  • +
  • Linux DEB: SHA-256 6828d35ae02763255790de6690909935a1f7c951373179ac0efd6c6b578b5219
  • +
  • Linux RPM: SHA-256 7973c5bf41bdc78ca39ba64f93c6e4a33263d8dbfc604651bf1562bfeeea26f7
  • +
  • Linux Arch: SHA-256 c783ce942c84f899d1f576d01d34fd4de3cefa0a1d577eda2bc5c4ceaec6cfdb
  • +
    +
    +
    +
    ### New @@ -115,17 +250,17 @@ For frequently asked questions about Docker Desktop releases, see [FAQs](faqs/ge > > [Windows](https://desktop.docker.com/win/main/amd64/96739/Docker%20Desktop%20Installer.exe) -
    +
    - diff --git a/desktop/use-desktop/container.md b/desktop/use-desktop/container.md index 9f3904f082..9f2b58ed1b 100644 --- a/desktop/use-desktop/container.md +++ b/desktop/use-desktop/container.md @@ -38,7 +38,7 @@ To open the integrated terminal, either: You can obtain detailed information about the container when you select a container. -The **container view** displays **Logs**, **Inspect**, **Terminal**, and **Stats** tabs and provides quick action buttons to perform various actions. +The **container view** displays **Logs**, **Inspect**, **Terminal**, **Files**, and **Stats** tabs and provides quick action buttons to perform various actions. - Select **Logs** to see logs from the container. You can also: - Use `Cmd + f`/`Ctrl + f` to open the search bar and find specific entries. Search matches are highlighted in yellow. @@ -50,4 +50,11 @@ The **container view** displays **Logs**, **Inspect**, **Terminal**, and **Stats - Select **Inspect** to view low-level information about the container. You can see the local path, version number of the image, SHA-256, port mapping, and other details. +- Select **Files** to explore the filesystem of running or stopped containers. You can also: + - See which files have been recently added, modified, or deleted + - Edit a file straight from the built-in editor + - Drag and drop files and folders between the host and the container + - Delete unnecessary files when you right-click on a file + - Download file and folders from the container straight to the host + - Select **Stats** to view information about the container resource utilization. You can see the amount of CPU, disk I/O, memory, and network I/O used by the container. diff --git a/desktop/use-desktop/index.md b/desktop/use-desktop/index.md index 4abaaf99cd..2ab54c7d48 100644 --- a/desktop/use-desktop/index.md +++ b/desktop/use-desktop/index.md @@ -12,7 +12,7 @@ When you open Docker Desktop, the Docker Dashboard displays. 
The **Containers** view provides a runtime view of all your containers and applications. It allows you to interact with containers and applications, and manage the lifecycle of your applications directly from your machine. This view also provides an intuitive interface to perform common actions to inspect, interact with, and manage your Docker objects including containers and Docker Compose-based applications. For more information, see [Explore running containers and applications](container.md). -The **Images** view displays a list of your Docker images and allows you to run an image as a container, pull the latest version of an image from Docker Hub, and inspect images. It also displays a summary of the vulnerability scanning report using Snyk. In addition, the **Images** view contains clean-up options to remove unwanted images from the disk to reclaim space. If you are logged in, you can also see the images you and your organization have shared on Docker Hub. For more information, see [Explore your images](images.md). +The **Images** view displays a list of your Docker images and allows you to run an image as a container, pull the latest version of an image from Docker Hub, and inspect images. It also displays a summary of image vulnerabilities. In addition, the **Images** view contains clean-up options to remove unwanted images from the disk to reclaim space. If you are logged in, you can also see the images you and your organization have shared on Docker Hub. For more information, see [Explore your images](images.md). The **Volumes** view displays a list of volumes and allows you to easily create and delete volumes and see which ones are being used. For more information, see [Explore volumes](volumes.md). @@ -31,7 +31,7 @@ From the Docker Dashboard you can use Quick Search, which is located in the Dash - Extensions. From here, you can learn more about the extension and install it with a single click. 
Or, if you already have an extension installed, you can open it straight from the search results. -- Any volume. From here you can view the associated container. +- Any volume. From here you can view the associated container. ## The Docker menu @@ -43,9 +43,9 @@ To display the Docker menu, right-click on the ![whale menu](../../assets/images - **Sign in/Create Docker ID** - **Settings** - **Check for updates** -- **Troubleshoot** +- **Troubleshoot** - **Switch to Windows containers** -- **About Docker Desktop**. Contains information on the versions you are running, and links to the Subscription Service Agreement for example. +- **About Docker Desktop**. Contains information on the versions you are running, and links to the Subscription Service Agreement for example. - **Documentation** - **Quick Start Guide**. Launches the Quick Start Guide. - **Docker Hub** @@ -54,4 +54,3 @@ To display the Docker menu, right-click on the ![whale menu](../../assets/images - **Pause** - **Restart** - **Quit Docker Desktop** - diff --git a/desktop/windows/permission-requirements.md b/desktop/windows/permission-requirements.md index 929058b992..e91b81d219 100644 --- a/desktop/windows/permission-requirements.md +++ b/desktop/windows/permission-requirements.md @@ -6,7 +6,7 @@ redirect_from: - /desktop/windows/privileged-helper/ --- -This page contains information about the permission requirements for running and installing Docker Desktop on Windows, the functionality of the privileged helper process `com.docker.service.exe` and the reasoning behind this approach. +This page contains information about the permission requirements for running and installing Docker Desktop on Windows, the functionality of the privileged helper process `com.docker.service` and the reasoning behind this approach. It also provides clarity on running containers as `root` as opposed to having `Administrator` access on the host and the privileges of the Windows Docker engine and Windows containers. 
@@ -14,11 +14,11 @@ It also provides clarity on running containers as `root` as opposed to having `A While Docker Desktop on Windows can be run without having `Administrator` privileges, it does require them during installation. On installation the user gets a UAC prompt which allows a privileged helper service to be installed. After that, Docker Desktop can be run by users without administrator privileges, provided they are members of the `docker-users` group. The user who performs the installation is automatically added to this group, but other users must be added manually. This allows the administrator to control who has access to Docker Desktop. -The reason for this approach is that Docker Desktop needs to perform a limited set of privileged operations which are conducted by the privileged helper process `com.docker.service.exe`. This approach allows, following the principle of least privilege, `Administrator` access to be used only for the operations for which it is absolutely necessary, while still being able to use Docker Desktop as an unprivileged user. +The reason for this approach is that Docker Desktop needs to perform a limited set of privileged operations which are conducted by the privileged helper process `com.docker.service`. This approach allows, following the principle of least privilege, `Administrator` access to be used only for the operations for which it is absolutely necessary, while still being able to use Docker Desktop as an unprivileged user. ## Privileged Helper -The privileged helper `com.docker.service.exe` is a Windows service which runs in the background with `SYSTEM` privileges. It listens on the named pipe `//./pipe/dockerBackendV2`. The developer runs the Docker Desktop application, which connects to the named pipe and sends commands to the service. This named pipe is protected, and only users that are part of the `docker-users` group can have access to it. 
+The privileged helper `com.docker.service` is a Windows service which runs in the background with `SYSTEM` privileges. It listens on the named pipe `//./pipe/dockerBackendV2`. The developer runs the Docker Desktop application, which connects to the named pipe and sends commands to the service. This named pipe is protected, and only users that are part of the `docker-users` group can have access to it. The service performs the following functionalities: - Ensuring that `kubernetes.docker.internal` is defined in the Win32 hosts file. Defining the DNS name `kubernetes.docker.internal` allows Docker to share Kubernetes contexts with containers. @@ -33,18 +33,19 @@ The service performs the following functionalities: - Checking if required Windows features are both installed and enabled. - Conducting healthchecks and retrieving the version of the service itself. +The service start mode depends on which container engine is selected: +- With Windows containers, or Hyper-v Linux containers the service is started when the system boots and runs all the time, even when Docker Desktop isn't running. This is required for the user to be able to launch Docker Desktop without admin privileges. If the user switches to WSL2 Linux containers, the service is stopped and doesn't start automatically upon next Windows boot. +- With WSL2 Linux containers, the service isn't necessary and therefore doesn't run automatically when the system boots. If the user switches to Windows containers or Hyper-v Linux containers, a UAC prompt is displayed which asks the user to accept the privileged operation to start the service. If accepted, the service is started and set to start automatically upon the next Windows boot. + ## Containers running as root within the Linux VM The Linux Docker daemon and containers run in a minimal, special-purpose Linux VM managed by Docker. It is immutable so users can’t extend it or change the installed software. 
-This means that although containers run by default as `root`, this does not allow altering the VM and does not grant `Administrator` access to the Windows host machine. The Linux VM serves as a security boundary and limits what resources from the host can be accessed. File sharing uses a user-space crafted file server and any directories from the host bind mounted into Docker containers still retain their original permissions. It does not give the user access to any files that it doesn’t already have access to.
+This means that although containers run by default as `root`, this doesn't allow altering the VM and doesn't grant `Administrator` access to the Windows host machine. The Linux VM serves as a security boundary and limits what resources from the host can be accessed. File sharing uses a user-space crafted file server and any directories from the host bind mounted into Docker containers still retain their original permissions. It doesn't give the user access to any files that it doesn’t already have access to.
 
 ## Windows Containers
 
-Unlike the Linux Docker engine and containers which run in a VM, Windows containers are an operating system feature, and run directly on the Windows host with `Administrator` privileges. For organizations which do not want their developers to run Windows containers, a `–no-windows-containers` installer flag is available from version 4.11 to disable their use.
+Unlike the Linux Docker engine and containers which run in a VM, Windows containers are an operating system feature, and run directly on the Windows host with `Administrator` privileges. For organizations who don't want their developers to run Windows containers, a `--no-windows-containers` installer flag is available from version 4.11 to disable their use.
 
 ## Networking
 
 For network connectivity, Docker Desktop uses a user-space process (`vpnkit`), which inherits constraints like firewall rules, VPN, HTTP proxy properties etc. from the user that launched it.
- - - diff --git a/develop/remote-development.md b/develop/remote-development.md index 5e7b8037e2..6f248c0850 100644 --- a/develop/remote-development.md +++ b/develop/remote-development.md @@ -32,7 +32,7 @@ remote resources helps simplify and speed up the inner loop. There are several tools available, commercial and open-source, that you can use to enable a hybrid local-and-remote development environment. For example: -- [Telepresence](https://www.cncf.io/projects/telepresence/){: target="_blank" rel="noopener" class="_" } +- [Telepresence](https://app.getambassador.io/auth/realms/production/protocol/openid-connect/auth?client_id=docker-docs&response_type=code&redirect_uri=https%3A%2F%2Fapp.getambassador.io&utm_source=docker-docs&utm_medium=dockerwebsite&utm_campaign=Docker%26TP){: target="_blank" rel="noopener" class="_" } - [CodeZero](https://www.codezero.io/){: target="_blank" rel="noopener" class="_" } - [Gefyra](https://gefyra.dev/){: target="_blank" rel="noopener" class="_" } - [kubefwd](https://kubefwd.com/){: target="_blank" rel="noopener" class="_" } @@ -72,9 +72,9 @@ You have a few options for how the local containers can integrate with the clust Telepresence is free and open-source, and you can try it out by heading to the -[Telepresence quickstart guide](https://www.telepresence.io/docs/latest/quick-start/){: target="_blank" rel="noopener" class="_" }. +[Telepresence quickstart guide](https://www.telepresence.io/docs/latest/quick-start/?utm_source=docker-docs&utm_medium=dockerwebsite&utm_campaign=Docker-TP){: target="_blank" rel="noopener" class="_" }. 
There’s also a -[Telepresence extension for Docker Desktop](https://www.getambassador.io/docs/telepresence/latest/extension/intro){: target="_blank" rel="noopener" class="_" }, +[Telepresence extension for Docker Desktop](https://www.getambassador.io/docs/telepresence/latest/extension/intro?utm_source=docker-docs&utm_medium=dockerwebsite&utm_campaign=Docker-TP){: target="_blank" rel="noopener" class="_" }, which helps you manage intercepts for your containers. ### Docker × Ambassador @@ -92,7 +92,7 @@ local-remote development environment easy and seamless. You can now connect your Docker ID to Ambassador Cloud to sign in and use Telepresence. To get started: 1. Go to the - [Docker × Ambassador](https://app.getambassador.io/auth/realms/production/protocol/openid-connect/auth?client_id=docker-docs&response_type=code&redirect_uri=https://app.getambassador.io) + [Docker × Ambassador](https://app.getambassador.io/auth/realms/production/protocol/openid-connect/auth?client_id=docker-docs&response_type=code&redirect_uri=https%3A%2F%2Fapp.getambassador.io&utm_source=docker-docs&utm_medium=dockerwebsite&utm_campaign=Docker%26TP) page. 2. Sign in using your Docker ID. 3. Authorize the Ambassador Cloud app. diff --git a/docker-hub/2fa/disable-2fa.md b/docker-hub/2fa/disable-2fa.md index 2b5c8bcc07..2e6c51bc22 100644 --- a/docker-hub/2fa/disable-2fa.md +++ b/docker-hub/2fa/disable-2fa.md @@ -4,25 +4,16 @@ keywords: Docker, docker, registry, security, Docker Hub, authentication, two-fa title: Disable two-factor authentication on Docker Hub --- -> **Note:** -> Disabling two-factor authentication will result in decreased security for your +> **Warning** +> +> Disabling two-factor authentication results in decreased security for your > Docker Hub account. {: .warning } - -## Prerequisites -Two-factor authentication is enabled on your Docker Hub account. - ## Disable two-factor authentication -To disable two-factor authentication, log in to your Docker Hub account. 
Click -on your username and select **Account Settings**. Go to Security and click on -**Disable 2FA**. +1. Sign in to your Docker Hub account. +2. Select your username and then from the dropdown menu, select **Account Settings**. +3. Navigate to the **Security** tab and select **Disable 2FA**. +4. Enter your password and select **Disable 2FA**. -![Disable 2FA button](../images/2fa-disable-2fa.png) -You will be prompted to input your Docker ID password. Enter your password and -click **Disable 2FA**. - -![Enter your password view](../images/2fa-enter-pw-disable-2fa.png){:width="250px"} - -You have successfully disabled two-factor authentication. diff --git a/docker-hub/2fa/index.md b/docker-hub/2fa/index.md index 40a70a4f70..16600a6004 100644 --- a/docker-hub/2fa/index.md +++ b/docker-hub/2fa/index.md @@ -4,12 +4,11 @@ keywords: Docker, docker, registry, security, Docker Hub, authentication, two-fa title: Enable two-factor authentication for Docker Hub --- -## About two-factor authentication Two-factor authentication adds an extra layer of security to your Docker Hub -account by requiring a unique security code when you log into your account. The -security code will be required in addition to your password. +account by requiring a unique security code when you sign in to your account. The +security code is required in addition to your password. -When you enable two-factor authentication, you will also be provided a recovery +When you enable two-factor authentication, you are also provided with a recovery code. Each recovery code is unique and specific to your account. You can use this code to recover your account in case you lose access to your authenticator app. See [Recover your Docker Hub account](recover-hub-account/). @@ -21,45 +20,20 @@ You need a mobile phone with a time-based one-time password authenticator application installed. Common examples include Google Authenticator or Yubico Authenticator with a registered YubiKey. 
-> **Note:**
-> Two-factor authentication is currently in beta. Feel free to provide feedback
-> at the [Docker Hub feedback repo](https://github.com/docker/hub-feedback/issues).
-{: .important}
-
 ## Enable two-factor authentication
 
-To enable two-factor authentication, log in to your Docker Hub account. Click
-on your username and select **Account Settings**. Go to Security and click
-**Enable Two-Factor Authentication**.
+1. Sign in to your Docker Hub account.
+2. Select your username and then from the dropdown menu, select **Account Settings**.
+3. Select the **Security** tab and then select **Enable Two-Factor Authentication**.
+   The next page reminds you to download an authenticator app.
+4. Select **Set up using an app**.
+   Your unique recovery code is sent to you.
+5. Save your recovery code and store it somewhere safe.
+   Your recovery code can be used to recover your account in the event you lose access to your authenticator app.
 
-![Two-factor home](../images/2fa-security-home.png)
+6. Select **Next** and then open your authenticator app.
+   You can choose between scanning the QR code or entering a text code into your authenticator app.
+7. Once you have linked your authenticator app, enter the six-digit code in the text field and then select **Next**.
 
-The next page will remind you to download an authenticator app. Click **Set up**
-**using an app**. You will receive your unique recovery code.
-
-> **Save your recovery code and store it somewhere safe.**
->
-> Your recovery code can be used to recover your account in the event you lose
-> access to your authenticator app.
-{: .important }
-
-![Recovery code example](../images/2fa-recovery-code.png)
-
-After you have saved your code, click **Next**.
-
-Open your authenticator app. You can choose between scanning the QR code or
-entering a text code into your authenticator app. Once you have linked your
-authenticator app, it will give you a six-digit code to enter in text field.
-Click **Next**.
- -![Enter special code view](../images/2fa-enter-code.png) - -You have successfully enabled two-factor authentication. The next time you log +Two-factor authentication is now enabled. The next time you sign in to your Docker Hub account, you will be asked for a security code. - -> **Note:** -> Now that you have two-factor authentication enabled on your account, you must -> create at least one personal access token. Otherwise, you will be unable to -> log in to your account from the Docker CLI. See [Managing access tokens](../access-tokens) -> for more information. -{: .important } diff --git a/docker-hub/2fa/new-recovery-code.md b/docker-hub/2fa/new-recovery-code.md index 9c1f5142a1..d5431dc018 100644 --- a/docker-hub/2fa/new-recovery-code.md +++ b/docker-hub/2fa/new-recovery-code.md @@ -7,20 +7,12 @@ title: Generate a new recovery code If you have lost your two-factor authentication recovery code and still have access to your Docker Hub account, you can generate a new recovery code. -## Prerequisites - -Two-factor authentication is enabled on your Docker Hub account. - ## Generate a new recovery code -To disable two-factor authentication, log in to your Docker Hub account. Click -on your username and select **Account Settings**. Go to **Security** and **Click here to generate a new code**. +1. Sign in to your Docker Hub account. +2. Select your username and then from the dropdown menu, select **Account Settings**. +3. Navigate to the **Security** tab and select **Click here to generate a new code**. +4. Enter your password. -![New recovery code link](../images/2fa-disable-2fa.png) - -Enter your password. - -![Enter your password view](../images/2fa-pw-new-code.png){:width="250px"} - -Your new recovery code will be displayed. Remember to save your recovery code +Your new recovery code is displayed. Remember to save your recovery code and store it somewhere safe. 
diff --git a/docker-hub/2fa/recover-hub-account.md b/docker-hub/2fa/recover-hub-account.md index fc4bc5271b..1c544f4754 100644 --- a/docker-hub/2fa/recover-hub-account.md +++ b/docker-hub/2fa/recover-hub-account.md @@ -5,9 +5,9 @@ title: Recover your Docker Hub account --- -If you have lost access to both your two-factor authentication application and -your recovery code, +If you have lost access to both your two-factor authentication application and your recovery code: 1. Navigate to [Docker Hub](https://hub.docker.com){: target="_blank" rel="noopener" class="_"} and enter your username and password. -2. Click **I've lost my authentication device** and **I've lost my recovery code**. -3. Complete the [Contact Support form](https://hub.docker.com/support/contact/?category=2fa-lockout){:target="_blank" rel="noopener" class="_"}. You must enter the primary email address associated with your Docker ID in the Contact Support form for recovery instructions. +2. Select **I've lost my authentication device** and **I've lost my recovery code**. +3. Complete the [Contact Support form](https://hub.docker.com/support/contact/?category=2fa-lockout){:target="_blank" rel="noopener" class="_"}. + You must enter the primary email address associated with your Docker ID in the Contact Support form for recovery instructions. diff --git a/docker-hub/access-tokens.md b/docker-hub/access-tokens.md index 82a5334162..b2214e7b10 100644 --- a/docker-hub/access-tokens.md +++ b/docker-hub/access-tokens.md @@ -1,67 +1,49 @@ --- -title: Manage access tokens +title: Create and manage access tokens description: Learn how to create and manage your personal Docker Hub access tokens to securely push and pull images programmatically. keywords: docker hub, hub, security, PAT, personal access token --- -Docker Hub lets you create personal access tokens as alternatives to your password. You can use tokens to access Hub images from the Docker CLI. 
+If you are using the [Docker Hub CLI](https://github.com/docker/hub-tool#readme){: target="_blank" rel="noopener" class="_"} +tool (currently experimental) to access Hub images from the Docker CLI, you can create personal access tokens (PAT) as alternatives to your password. -Using personal access tokens provides some advantages over a password: +Compared to passwords, personal access tokens provide the following advantages: -* You can investigate the last usage of the access token and disable or delete - it if you find any suspicious activity. -* When using an access token, you can't perform any admin activity on the account, including changing the password. It protects your account if your computer is compromised. +- You can investigate when the PAT was last used and then disable or delete it if you find any suspicious activity. +- When using an access token, you can't perform any admin activity on the account, including changing the password. It protects your account if your computer is compromised. -Docker provides a [Docker Hub CLI](https://github.com/docker/hub-tool#readme){: target="_blank" rel="noopener" class="_"} -tool (currently experimental) and an API that allows you to interact with Docker Hub. Browse through the [Docker Hub API](/docker-hub/api/latest/){: target="_blank" rel="noopener" class="_"} documentation to explore the supported endpoints. - -> **Important** -> -> Treat access tokens like your password and keep them secret. Store your -> tokens securely (for example, in a credential manager). -{: .important} - -Access tokens are valuable for building integrations, as you can issue -multiple tokens – one for each integration – and revoke them at +Access tokens are also valuable for building integrations, as you can issue multiple tokens, one for each integration, and revoke them at any time. - > **Note** > > If you have [two-factor authentication (2FA)](2fa/index.md) enabled on > your account, you must create at least one personal access token. 
Otherwise,
-> you will be unable to log in to your account from the Docker CLI.
-
+> you won't be able to sign in to your account from the Docker CLI.
 
 ## Create an access token
 
-The following video walks you through the process of managing access tokens.
+> **Important**
+>
+> Treat access tokens like your password and keep them secret. Store your tokens securely in a credential manager for example.
+{: .important}
 
-
+1. Sign in to [Docker Hub](https://hub.docker.com){: target="_blank" rel="noopener" class="_"}.
 
-To create your access token:
+2. Select your username in the top-right corner and from the dropdown menu select **Account Settings**.
 
-1. Log in to [hub.docker.com](https://hub.docker.com){: target="_blank" rel="noopener" class="_"}.
-
-2. Click on your username in the top right corner and select **[Account Settings](https://hub.docker.com/settings/general)**.
-
-3. Select **[Security](https://hub.docker.com/settings/security) > New Access Token**.
-
-4. Add a description for your token. Use something that indicates where the token
-   will be used, or set a purpose for the token. You can view the following access
-   permissions from the drop-down:
-
-   ![New access token menu](images/hub-create-token.png){:width="700px"}
+3. Select the **Security** tab and then **New Access Token**.
 
+4. Add a description for your token. Use something that indicates the use case or purpose of the token.
+
+5. Set the access permissions. The access permissions are scopes that set restrictions in your repositories. For example, for Read & Write permissions, an automation pipeline can build an image and then push it to a repository. However, it cannot delete the repository.
 
-5. Copy the token that appears on the screen and save it. You will not be able
+6. Select **Generate** and then copy the token that appears on the screen and save it. You won't be able
    to retrieve the token once you close this prompt.
- ![Copy access token view](images/hub-copy-token.png){:width="700px"} - ## Use an access token You can use an access token anywhere that requires your Docker Hub @@ -81,16 +63,11 @@ a password. You can rename, activate, deactivate, or delete a token as needed. -1. Access your tokens under **[Account Settings > Security](https://hub.docker.com/settings/security){: target="_blank" rel="noopener" class="_"}**. +1. Access your tokens under **Account Settings > Security**. This page shows an overview of all your tokens. You can also view the number of tokens that are activated and deactivated in the toolbar. - ![Delete or edit and access token](images/hub-delete-edit-token.png){:width="700px"} +2. Choose a token and then select **Delete** or **Edit**, or use the menu on the far right of a token row to bring up the edit screen. + You can also select multiple tokens to delete at once. -2. Select a token and click **Delete** or **Edit**, or use the menu on - the far right of a token row to bring up the edit screen. You can also - select multiple tokens to delete at once. - - ![Modify an access token](images/hub-edit-token.png){:width="700px"} - -3. After modifying the token, click the **Save** button to save your changes. +3. After modifying the token, select **Save**. diff --git a/docker-hub/admin-overview.md b/docker-hub/admin-overview.md new file mode 100644 index 0000000000..6f5f0a29f2 --- /dev/null +++ b/docker-hub/admin-overview.md @@ -0,0 +1,70 @@ +--- +title: Administration +description: landing page for administration topic +keywords: administration, docker hub, admin +--- + +Sign in to Docker Hub to change account settings and carry out administrative or security-related tasks. + +
    + +
    +
    +
    +
    + Set-up-an-org +
    +

    Set up an organization

    +

    Explore and set up an organization in Docker Hub

    +
    +
    +
    +
    +
    + onboard +
    +

    Onboard users to your teams and organizations

    +

    Learn how to onboard with Docker Team or Docker Business subscription.

    +
    +
    +
    +
    +
    + Release notes +
    +

    Use Hardened Docker Desktop

    +

    Explore the security model for Docker Desktop.

    +
    +
    +
    + +
    +
    +
    +
    + sign-in +
    +

    Enforce sign in

    +

    Configure sign in for members of your teams and organizations.

    +
    +
    +
    +
    +
    + SSO +
    +

    Enable Single Sign-On

    +

    Understand and use Single Sign-On.

    +
    +
    +
    +
    +
    + 2fa +
    +

    Set up two-factor authentication

    +

    Add an extra layer of authentication to your Docker account.

    +
    +
    +
    +
    diff --git a/docker-hub/audit-log.md b/docker-hub/audit-log.md index f2bc71b20c..737054d2eb 100644 --- a/docker-hub/audit-log.md +++ b/docker-hub/audit-log.md @@ -4,30 +4,33 @@ keywords: Team, organization, activity, log, audit, activities title: Audit logs --- -{% include upgrade-cta.html - body="Audit logs are available for users subscribed to a Docker Team or a Business subscription. Upgrade now to start tracking events across your organization." - header-text="This feature requires a paid Docker subscription" - target-url="https://www.docker.com/pricing?utm_source=docker&utm_medium=webreferral&utm_campaign=docs_driven_upgrade_audit_log" -%} +> **Note** +> +> Audit logs requires a [Docker Team, or Business subscription](../subscription/index.md). -Audit logs display a chronological list of activities that occur at organization and repository levels. It provides owners of Docker Team accounts a report of all their team member activities. This allows the team owners to view and track what changes were made, the date when a change was made, and who initiated the change. For example, the audit logs display activities such as the date when a repository was created or deleted, the team member who created the repository, the name of the repository, and when there was a change to the privacy settings. +Audit logs display a chronological list of activities that occur at organization and repository levels. It provides a report to owners of Docker Team on all their team member activities. + +With audit logs, team owners can view and track: + - What changes were made + - The date when a change was made + - Who initiated the change + + For example, Audit logs display activities such as the date when a repository was created or deleted, the team member who created the repository, the name of the repository, and when there was a change to the privacy settings. 
Team owners can also see the audit logs for their repository if the repository is part of the organization subscribed to a Docker Team plan. +Audit logs began tracking activities from the date the feature went live, that is from 25 January 2021. Activities that took place before this date are not captured. + ## View the audit logs To view the audit logs: -1. Sign into an owner account for the organization in Docker Hub. -2. Select your organization from the list and then click on the **Activity** tab. - - ![Organization activity tab](images/org-activity-tab.png){:width="700px"} - -The audit logs begin tracking activities from the date the feature is live, that is from **25 January 2021**. Activities that took place before this date are not captured. +1. Sign in to Docker Hub. +2. Select your organization from the list and then select the **Activity** tab. > **Note** > -> Docker will retain the activity data for a period of three months. +> Docker retains the activity data for a period of three months. ## Customize the audit logs @@ -39,10 +42,9 @@ By default, all activities that occur at organization and repository levels are > > Activities created by the Docker Support team as part of resolving customer issues appear in the audit logs as **dockersupport**. -Click the **All Activities** drop-down list to view activities that are specific to an organization or a repository. After choosing **Organization** or **Repository**, you can further refine the results using the **All Actions** drop-down list. If you select the **Activities** tab from the **Repository** view, you can only filter repository-level activities. - -![Refine organization activities](images/org-all-actions.png){:width="600px"} +Select the **All Activities** dropdown to view activities that are specific to an organization, repository, or billing. If you select the **Activities** tab from the **Repository** view, you can only filter repository-level activities. 
+After choosing **Organization**, **Repository**, or **Billing**, you can further refine the results using the **All Actions** dropdown. ## Audit logs event definitions diff --git a/docker-hub/builds/advanced.md b/docker-hub/builds/advanced.md index 112e063ef1..d508fca7dc 100644 --- a/docker-hub/builds/advanced.md +++ b/docker-hub/builds/advanced.md @@ -6,11 +6,10 @@ redirect_from: - /docker-cloud/builds/advanced/ --- -{% include upgrade-cta.html - body="The Automated Builds feature is available for Docker Pro, Team, and Business users. Upgrade now to automatically build and push your images. If you are using automated builds for an open-source project, you can join our [Open Source Community](https://www.docker.com/community/open-source/application){: target='_blank' rel='noopener' class='_'} program to learn how Docker can support your project on Docker Hub." - header-text="This feature requires a Docker subscription" - target-url="https://www.docker.com/pricing?utm_source=docker&utm_medium=webreferral&utm_campaign=docs_driven_upgrade_auto_builds" -%} +> **Note** +> +> Automated builds require a +> [Docker Pro, Team, or Business subscription](../../subscription/index.md). The following options allow you to customize your automated build and automated test processes. @@ -21,8 +20,10 @@ Several utility environment variables are set by the build process, and are available during automated builds, automated tests, and while executing hooks. -> **Note**: These environment variables are only available to the build and test -processes and do not affect your service's run environment. +> **Note** +> +> These environment variables are only available to the build and test +processes and don't affect your service's run environment. * `SOURCE_BRANCH`: the name of the branch or the tag that is currently being tested. * `SOURCE_COMMIT`: the SHA1 hash of the commit being tested. 
@@ -51,12 +52,15 @@ services: Docker Hub allows you to override and customize the `build`, `test` and `push` commands during automated build and test processes using hooks. For example, you might use a build hook to set build arguments used only during the build -process. (You can also set up [custom build phase hooks](#custom-build-phase-hooks) -to perform actions in between these commands.) +process. You can also set up [custom build phase hooks](#custom-build-phase-hooks) +to perform actions in between these commands. -**Use these hooks with caution.** The contents of these hook files replace the +> **Important** +> +>Use these hooks with caution. The contents of these hook files replace the basic `docker` commands, so you must include a similar build, test or push command in the hook or your automated process does not complete. +{: .important} To override these phases, create a folder called `hooks` in your source code repository at the same directory level as your Dockerfile. Create a file called @@ -64,7 +68,7 @@ repository at the same directory level as your Dockerfile. Create a file called builder process can execute, such as `docker` and `bash` commands (prefixed appropriately with `#!/bin/bash`). -These hooks will be running on an instance of [Amazon Linux 2](https://aws.amazon.com/amazon-linux-2/){:target="_blank" rel="noopener" class="_"}, +These hooks run on an instance of [Amazon Linux 2](https://aws.amazon.com/amazon-linux-2/){:target="_blank" rel="noopener" class="_"}, a distro based on Red Hat Enterprise Linux (RHEL), which includes interpreters such as Perl or Python, and utilities such as `git` or `curl`. 
Refer to the [Amazon Linux 2 documentation](https://aws.amazon.com/amazon-linux-2/faqs/){:target="_blank" rel="noopener" class="_"} @@ -89,15 +93,15 @@ The following hooks are available: * `hooks/post_build` * `hooks/pre_test` * `hooks/post_test` -* `hooks/pre_push` (only used when executing a build rule or [automated build](index.md) ) -* `hooks/post_push` (only used when executing a build rule or [automated build](index.md) ) +* `hooks/pre_push` (only used when executing a build rule or [Automated build](index.md) ) +* `hooks/post_push` (only used when executing a build rule or [Automated build](index.md) ) ### Build hook examples #### Override the "build" phase to set variables Docker Hub allows you to define build environment variables either in the hook -files, or from the automated build interface (which you can then reference in hooks). +files, or from the automated build interface, which you can then reference in hooks. In the following example, we define a build hook that uses `docker build` arguments to set the variable `CUSTOM` based on the value of variable we defined using the @@ -109,9 +113,11 @@ the image being built. $ docker build --build-arg CUSTOM=$VAR -f $DOCKERFILE_PATH -t $IMAGE_NAME . ``` -> **Caution**: A `hooks/build` file overrides the basic [docker build](../../engine/reference/commandline/build.md) command -used by the builder, so you must include a similar build command in the hook or +> **Important** +> +> A `hooks/build` file overrides the basic [docker build](../../engine/reference/commandline/build.md) command used by the builder, so you must include a similar build command in the hook or the automated build fails. +{: .important} Refer to the [docker build documentation](../../engine/reference/commandline/build.md#build-arg) to learn more about Docker build-time variables. 
@@ -128,15 +134,15 @@ $ docker tag $IMAGE_NAME $DOCKER_REPO:$SOURCE_COMMIT $ docker push $DOCKER_REPO:$SOURCE_COMMIT ``` -## Source Repository / Branch Clones +## Source repository or branch clones When Docker Hub pulls a branch from a source code repository, it performs -a shallow clone (only the tip of the specified branch). This has the advantage +a shallow clone, which means it clones only the tip of the specified branch. This has the advantage of minimizing the amount of data transfer necessary from the repository and speeding up the build because it pulls only the minimal code necessary. -Because of this, if you need to perform a custom action that relies on a different -branch (such as a `post_push` hook), you can't checkout that branch, unless +As a result, if you need to perform a custom action that relies on a different +branch, such as a `post_push` hook, you can't check out that branch unless you do one of the following: * You can get a shallow checkout of the target branch by doing the following: diff --git a/docker-hub/builds/automated-testing.md b/docker-hub/builds/automated-testing.md index 49fd00937c..f0fdd1e928 100644 --- a/docker-hub/builds/automated-testing.md +++ b/docker-hub/builds/automated-testing.md @@ -7,18 +7,17 @@ redirect_from: title: Automated repository tests --- -{% include upgrade-cta.html - body="The Automated Builds feature is available for Docker Pro, Team, and Business users. Upgrade now to automatically build and push your images. If you are using automated builds for an open-source project, you can join our [Open Source Community](https://www.docker.com/community/open-source/application){: target='_blank' rel='noopener' class='_'} program to learn how Docker can support your project on Docker Hub." 
- header-text="This feature requires a Docker subscription" - target-url="https://www.docker.com/pricing?utm_source=docker&utm_medium=webreferral&utm_campaign=docs_driven_upgrade_auto_builds" -%} +> **Note** +> +> Automated builds require a +> [Docker Pro, Team, or Business subscription](../../subscription/index.md). Docker Hub can automatically test changes to your source code repositories using containers. You can enable `Autotest` on [any Docker Hub repository](../repos/index.md) to run tests on each pull request to the source code repository to create a continuous integration testing service. -Enabling `Autotest` builds an image for testing purposes, but does **not** +Enabling `Autotest` builds an image for testing purposes, but does not automatically push the built image to the Docker repository. If you want to push built images to your Docker Hub repository, enable [Automated Builds](index.md). @@ -43,28 +42,27 @@ a container using the built image. You can define any number of linked services in this file. The only requirement is that `sut` is defined. Its return code determines if tests passed or not. -Tests **pass** if the `sut` service returns `0`, and **fail** otherwise. +Tests pass if the `sut` service returns `0`, and fail otherwise. > **Note** > > Only the `sut` service and all other services listed in -> [`depends_on`](../../compose/compose-file/compose-file-v2.md#depends_on) are +> [`depends_on`](../../compose/compose-file/05-services.md#depends_on) are > started. If you have services that poll for changes in other services, be sure -> to include the polling services in the [`depends_on`](../../compose/compose-file/compose-file-v2.md#depends_on) +> to include the polling services in the [`depends_on`](../../compose/compose-file/05-services.md#depends_on) > list to make sure all of your services start. 
-> Also make sure to include a compose file version from 2.0 upward as `depends_on` -> was added in [version 2.0](../../compose/compose-file/compose-versioning.md#version-2) -> file format. You can define more than one `docker-compose.test.yml` file if needed. Any file that ends in `.test.yml` is used for testing, and the tests run sequentially. You can also use [custom build hooks](advanced.md#override-build-test-or-push-commands) to further customize your test behavior. -> **Note**: If you enable Automated builds, they also run any tests defined +> **Note** +> +> If you enable Automated builds, they also run any tests defined in the `test.yml` files. -## Enable automated tests on a repository +## Enable Automated tests on a repository To enable testing on a source code repository, you must first create an associated build-repository in Docker Hub. Your `Autotest` settings are @@ -72,24 +70,24 @@ configured on the same page as [automated builds](index.md), however you do not need to enable Autobuilds to use `Autotest`. Autobuild is enabled per branch or tag, and you do not need to enable it at all. -Only branches that are configured to use **Autobuild** push images to the +Only branches that are configured to use Autobuild push images to the Docker repository, regardless of the Autotest settings. -1. Log in to Docker Hub and select **Repositories** in the top navigation. +1. Sign in to Docker Hub and select **Repositories**. 2. Select the repository you want to enable `Autotest` on. -3. From the repository view, click the **Builds** tab. +3. From the repository view, select the **Builds** tab. -4. Click **Configure automated builds**. +4. Select **Configure automated builds**. 5. Configure the automated build settings as explained in [Automated Builds](index.md). At minimum you must configure: * The source code repository - * the build location - * at least one build rule + * The build location + * At least one build rule 6. Choose your **Autotest** option. 
@@ -106,19 +104,22 @@ Docker repository, regardless of the Autotest settings. pull requests to branches that match a build rule, including when the pull request originated in an external source repository. - > **Note**: For security purposes, autotest on _external pull requests_ is + > **Important** + > + > For security purposes, autotest on external pull requests is limited on public repositories. Private images are not pulled and environment variables defined in Docker Hub are not available. Automated builds continue to work as usual. + {: .important} -7. Click **Save** to save the settings, or click **Save and build** to save and +7. Select **Save** to save the settings, or select **Save and build** to save and run an initial test. ## Check your test results -From the repository's details page, click **Timeline**. +From the repository's details page, select **Timeline**. From this tab you can see any pending, in-progress, successful, and failed builds and test runs for the repository. -You can click any timeline entry to view the logs for each test run. +You can choose any timeline entry to view the logs for each test run. diff --git a/docker-hub/builds/how-builds-work.md b/docker-hub/builds/how-builds-work.md new file mode 100644 index 0000000000..7176bf0a00 --- /dev/null +++ b/docker-hub/builds/how-builds-work.md @@ -0,0 +1,41 @@ +--- +description: how automated builds work +keywords: docker hub, automated builds +title: How Automated builds work +--- + +> **Note** +> +> Automated builds require a +> [Docker Pro, Team, or Business subscription](../../subscription/index.md). + +Docker Hub can automatically build images from source code in an external +repository and automatically push the built image to your Docker repositories. + +![An automated build dashboard](images/index-dashboard.png){:width="750px"} + +When you set up Automated builds, also called autobuilds, you create a list of +branches and tags that you want to build into Docker images. 
When you push code +to a source-code branch, for example in GitHub, for one of those listed image +tags, the push uses a webhook to trigger a new build, which produces a Docker +image. The built image is then pushed to Docker Hub. + +> **Note** +> +> You can still use `docker push` to push pre-built images to +repositories with Automated builds configured. + +If you have automated tests configured, these run after building but before +pushing to the registry. You can use these tests to create a continuous +integration workflow where a build that fails its tests doesn't push the built +image. Automated tests don't push images to the registry on their own. [Learn about automated image testing](automated-testing.md). + +Depending on your [subscription](https://www.docker.com/pricing){: target="_blank" rel="noopener" class="_"}, +you may get concurrent builds, which means that `N` autobuilds can be run at the +same time. `N` is configured according to your subscription. Once `N+1` builds +are running, any additional builds go into a queue to be run later. + +The maximum number of pending builds in the queue is 30 and Docker Hub discards further +requests. The number of concurrent builds for Pro is 5 and +for Team and Business is 15. + diff --git a/docker-hub/builds/index.md b/docker-hub/builds/index.md index 7b91a099fc..2c3e67bf11 100644 --- a/docker-hub/builds/index.md +++ b/docker-hub/builds/index.md @@ -1,5 +1,5 @@ --- -description: Set up automated builds +description: Set up Automated builds keywords: automated, build, images, Docker Hub redirect_from: - /docker-hub/builds/automated-build/ @@ -10,59 +10,28 @@ redirect_from: title: Set up Automated Builds --- -{% include upgrade-cta.html - body="The Automated Builds feature is available for Docker Pro, Team, and Business users. Upgrade now to automatically build and push your images. 
If you are using automated builds for an open-source project, you can join our [Open Source Community](https://www.docker.com/community/open-source/application){: target='_blank' rel='noopener' class='_'} program to learn how Docker can support your project on Docker Hub." - header-text="This feature requires a Docker subscription" - target-url="https://www.docker.com/pricing?utm_source=docker&utm_medium=webreferral&utm_campaign=docs_driven_upgrade_auto_builds" -%} - -## How Automated Builds work - -Docker Hub can automatically build images from source code in an external -repository and automatically push the built image to your Docker repositories. - -When you set up automated builds (also called autobuilds), you create a list of -branches and tags that you want to build into Docker images. When you push code -to a source code branch (for example in GitHub) for one of those listed image -tags, the push uses a webhook to trigger a new build, which produces a Docker -image. The built image is then pushed to the Docker Hub registry. - > **Note** > -> You can still use `docker push` to push pre-built images to -repositories with Automated Builds configured. +> Automated builds require a +> [Docker Pro, Team, or Business subscription](../../subscription/index.md). -If you have automated tests configured, these run after building but before -pushing to the registry. You can use these tests to create a continuous -integration workflow where a build that fails its tests doesn't push the built -image. Automated tests don't push images to the registry on their own. [Learn about automated image testing](automated-testing.md). 
+This page contains information on: +- [Configuring Automated builds](#configure-automated-builds) +- [Advanced Automated build options](#advanced-automated-build-options) +- [Automated builds for teams](#autobuild-for-teams) -Depending on your [subscription](https://www.docker.com/pricing){: target="_blank" rel="noopener" class="_"}, -you may get concurrent builds, which means that `N` autobuilds can be run at the -same time. `N` is configured according to your subscription. Once `N+1` builds -are running, any additional builds go into a queue to be run later. - -The maximum number of pending builds in the queue is 30 and Docker Hub discards further -requests. The number of concurrent builds for Pro is 5 and -for Team and Business is 15. - -![An automated build dashboard](images/index-dashboard.png) - -## Configure automated build settings +## Configure Automated builds You can configure repositories in Docker Hub so that they automatically build an image each time you push new code to your source provider. If you have [automated tests](automated-testing.md) configured, the new image is only pushed when the tests succeed. -You can add builds to existing repositories, or add them when you create a repository. - 1. From the **Repositories** section, select a repository to view its details. 2. Select the **Builds** tab. -3. If you are setting up automated builds for the first time, select the code - repository service (GitHub or Bitbucket) where the image's source code is stored. +3. Select either GitHub or Bitbucket to connect where the image's source code is stored. > Note > @@ -73,31 +42,32 @@ You can add builds to existing repositories, or add them when you create a repos 4. Select the **source repository** to build the Docker images from. > Note - > You might need to specify an organization or user (the _namespace_) from - > the source code provider. 
Once you select a namespace, its source code + > You might need to specify an organization or user from + > the source code provider. Once you select a user, source code > repositories appear in the **Select repository** dropdown list. -5. Optionally, enable [autotests](automated-testing.md#enable-automated-tests-on-a-repository). +5. Optional: Enable [autotests](automated-testing.md#enable-automated-tests-on-a-repository). -6. Review the default **Build Rules**, and optionally select the - **plus sign** to add and configure more build rules. +6. Review the default **Build Rules** - _Build rules_ control what Docker Hub builds into images from the contents + Build rules control what Docker Hub builds into images from the contents of the source code repository, and how the resulting images are tagged within the Docker repository. A default build rule is set up for you, which you can edit or delete. This - default set builds from the `Branch` in your source code repository called - `master`, and creates a Docker image tagged with `latest`. + default rule sets builds from the `Branch` in your source code repository called + `master` or `main`, and creates a Docker image tagged with `latest`. For more information, see [set up build rules](#set-up-build-rules) -7. For each branch or tag, enable or disable the **Autobuild** toggle. +7. Optional: Select the **plus** icon to add and [configure more build rules](#set-up-build-rules). + +8. For each branch or tag, enable or disable the **Autobuild** toggle. Only branches or tags with autobuild enabled are built, tested, and have the resulting image pushed to the repository. Branches with autobuild disabled are built for test purposes (if enabled at the repository level), but the built Docker image isn't pushed to the repository. -8. For each branch or tag, enable or disable the **Build Caching** toggle. +9. For each branch or tag, enable or disable the **Build Caching** toggle. 
[Build caching](../../develop/develop-images/dockerfile_best-practices.md#leverage-build-cache) can save time if you are building a large image frequently or have @@ -105,20 +75,20 @@ You can add builds to existing repositories, or add them when you create a repos make sure all of your dependencies are resolved at build time, or if you have a large layer that's quicker to build locally. -9. Click **Save** to save the settings, or click **Save and build** to save and +10. Select **Save** to save the settings, or select **Save and build** to save and run an initial test. > Note > > A webhook is automatically added to your source code repository to notify - > Docker Hub on every push. Only pushes to branches that's listed as the - > source for one or more tags trigger a build. + > Docker Hub on every push. Only pushes to branches that are listed as the + > source for one or more tags, trigger a build. ### Set up build rules -By default when you set up automated builds, a basic build rule is created for you. -This default rule watches for changes to the `master` branch in your source code -repository, and builds the `master` branch into a Docker image tagged with +By default when you set up Automated builds, a basic build rule is created for you. +This default rule watches for changes to the `master` or `main` branch in your source code +repository, and builds the `master` or `main` branch into a Docker image tagged with `latest`. In the **Build Rules** section, enter one or more sources to build. @@ -130,34 +100,34 @@ For each source: * Enter the name of the **Source** branch or tag you want to build. - The first time you configure automated builds, a default build rule is set up + The first time you configure Automated builds, a default build rule is set up for you. This default set builds from the `Branch` in your source code called `master`, and creates a Docker image tagged with `latest`. You can also use a regex to select which source branches or tags to build. 
To learn more, see - [regexes](index.md#regexes-and-automated-builds). + [regexes](#regexes-and-automated-builds). * Enter the tag to apply to Docker images built from this source. If you configured a regex to select the source, you can reference the capture groups and use its result as part of the tag. To learn more, see - [regexes](index.md#regexes-and-automated-builds). + [regexes](#regexes-and-automated-builds). * Specify the **Dockerfile location** as a path relative to the root of the source code repository. If the Dockerfile is at the repository root, leave this path set to `/`. > **Note** > > When Docker Hub pulls a branch from a source code repository, it performs a -> shallow clone (only the tip of the specified branch). Refer to -> [Advanced options for Autobuild and Autotest](advanced.md#source-repository--branch-clones) +> shallow clone - only the tip of the specified branch. Refer to +> [Advanced options for Autobuild and Autotest](advanced.md#source-repository-or-branch-clones) > for more information. ### Environment variables for builds You can set the values for environment variables used in your build processes when you configure an automated build. Add your build environment variables by -clicking the plus sign next to the **Build environment variables** section, and +selecting the **plus** icon next to the **Build environment variables** section, and then entering a variable name and the value. When you set variable values from the Docker Hub UI, you can use them by the @@ -169,99 +139,29 @@ should remain secret. > > The variables set on the build configuration screen are used during > the build processes only and shouldn't get confused with the environment -> values used by your service (for example to create service links). - -## Check your active builds - -A summary of a repository's builds appears both on the repository **General** -tab, and in the **Builds** tab. 
The **Builds** tab also displays a color coded -bar chart of the build queue times and durations. Both views display the -pending, in progress, successful, and failed builds for any tag of the -repository. - -![Active builds](images/index-active.png) - -From either location, you can select a build job to view its build report. The -build report shows information about the build job. This includes the source -repository and branch (or tag), the build logs, the build duration, creation time and location, and the user namespace the build occurred in. - ->**Note** -> -> You can now view the progress of your builds every 30 seconds when you -> refresh the Builds page. With the in-progress build logs, you can debug your -> builds before they're finished. - -![Build report](/docker-hub/images/index-report.png) - -## Cancel or retry a build - -While a build is in queue or running, a **Cancel** icon appears next to its build -report link on the General tab and on the Builds tab. You can also click the -**Cancel** on the build report page, or from the Timeline tab's logs -display for the build. - -![List of builds showing the cancel icon](images/build-cancelicon.png) - -## Failing builds - -If a build fails, a **Retry** icon appears next to the build report line on the -**General** and **Builds** tabs. The **Build report** page and **Timeline logs** also display a **Retry** button. - -![Timeline view showing the retry build button](images/retry-build.png) - -> **Note** -> -> If you are viewing the build details for a repository that belongs to an -> Organization, the Cancel and Retry buttons only appear if you have `Read & Write` access to the repository. - -Automated builds have a 4-hour execution time limit. If a build reaches this time limit, it's -automatically cancelled, and the build logs display the following message: - -```text -2022-11-02T17:42:27Z The build was cancelled or exceeded the maximum execution time. 
-``` - -This log message is the same as when you actively cancel a build. To identify -whether a build was automatically cancelled, check the build duration. - -## Disable an automated build - -Automated builds are enabled per branch or tag, and can be disabled and -re-enabled. You might do this when you want to only build manually for -a while, for example when you are doing major refactoring in your code. When you disable autobuilds doesn't disable [autotests](automated-testing.md). - -To disable an automated build: - -1. From the **Repositories** page, select a repository, and select the **Builds** tab. - -2. Click **Configure automated builds** to edit the repository's build settings. - -3. In the **Build Rules** section, locate the branch or tag you no longer want -to automatically build. - -4. Click the **autobuild** toggle next to the configuration line. When disabled the toggle is gray. - -5. Click **Save** to save your changes. +> values used by your service, for example to create service links. ## Advanced automated build options -At the minimum you need a build rule composed of a source branch (or tag) and -destination Docker tag to set up an automated build. You can also change where -the build looks for the Dockerfile, set a path to the files the build use -(the build context), set up multiple static tags or branches to build from, and -use regular expressions (regexes) to dynamically select source code to build and -create dynamic tags. +At the minimum you need a build rule composed of a source branch, or tag, and a +destination Docker tag to set up an automated build. 
You can also: + +- Change where the build looks for the Dockerfile +- Set a path to the files the build should use (the build context) +- Set up multiple static tags or branches to build from +- Use regular expressions (regexes) to dynamically select source code to build and +create dynamic tags All of these options are available from the **Build configuration** screen for -each repository. Select **Repositories** from the left navigation, and select the name of the repository you want to edit. Select the **Builds** tab, and click **Configure Automated builds**. +each repository. Select **Repositories** from the left navigation, and select the name of the repository you want to edit. Select the **Builds** tab, and then select **Configure Automated builds**. ### Tag and branch builds You can configure your automated builds so that pushes to specific branches or tags triggers a build. -1. In the **Build Rules** section, click the plus sign to add more sources to build. +1. In the **Build Rules** section, select the **plus** icon to add more sources to build. -2. Select the **Source type** to build: either a tag or a branch. +2. Select the **Source type** to build either a tag or a branch. > Note > @@ -291,7 +191,7 @@ Depending on how you arrange the files in your source code repository, the files required to build your images may not be at the repository root. If that's the case, you can specify a path where the build looks for the files. -The _build context_ is the path to the files needed for the build, relative to +The build context is the path to the files needed for the build, relative to the root of the repository. Enter the path to these files in the **Build context** field. Enter `/` to set the build context as the root of the source code repository. > **Note** @@ -303,17 +203,16 @@ the root of the repository. Enter the path to these files in the **Build context You can specify the **Dockerfile location** as a path relative to the build context. 
If the Dockerfile is at the root of the build context path, leave the -Dockerfile path set to `/`. (If the build context field is blank, set the path -to the Dockerfile from the root of the source repository.) +Dockerfile path set to `/`. If the build context field is blank, set the path +to the Dockerfile from the root of the source repository. -### Regexes and automated builds +### Regexes and Automated builds You can specify a regular expression (regex) so that only matching branches or tags are built. You can also use the results of the regex to create the Docker tag that's applied to the built image. -You can use up to nine regular expression capture groups -(expressions enclosed in parentheses) to select a source to build, and reference +You can use up to nine regular expression capture groups, or expressions enclosed in parentheses, to select a source to build, and reference these in the **Docker Tag** field using `{\1}` through `{\9}`. @@ -33,7 +68,7 @@ Docker provides a [Docker Hub CLI](https://github.com/docker/hub-tool#readme){:
    Docker ID
    -

    Create a Docker ID

    +

    Create an account

    Sign up and create a new Docker ID

    @@ -60,12 +95,12 @@ Docker provides a [Docker Hub CLI](https://github.com/docker/hub-tool#readme){:
    -

    Manage access tokens

    -

    Create personal access tokens as an alternative to your password.

    +

    Use Automated builds

    +

    Create and manage automated builds and autotesting.

    diff --git a/docker-hub/manage-a-team.md b/docker-hub/manage-a-team.md index 6d68668512..9481871f01 100644 --- a/docker-hub/manage-a-team.md +++ b/docker-hub/manage-a-team.md @@ -1,14 +1,12 @@ --- description: Docker Hub Teams & Organizations keywords: Docker, docker, registry, teams, organizations, plans, Dockerfile, Docker Hub, docs, documentation -title: Teams and Organizations +title: Create and manage a team --- -## Create a team - -A **Team** is a group of Docker users that belong to an organization. An +A team is a group of Docker users that belong to an organization. An organization can have multiple teams. When you first create an organization, -you’ll see that you have a team, the **owners** team, with a single member. An +you’ll see that you have a team, the owners team, with a single member. An organization owner can then create new teams and add members to an existing team using their Docker ID or email address and by selecting a team the user should be part of. @@ -16,21 +14,22 @@ The org owner can add additional org owners to the owners team to help them manage users, teams, and repositories in the organization. See [Owners team](#the-owners-team) for details. -To create a team: +## Create a team 1. Go to **Organizations** in Docker Hub, and select your organization. -2. Open the **Teams** tab and click **Create Team**. -3. Fill out your team's information and click **Create**. +2. Select the **Teams** tab and then select **Create Team**. +3. Fill out your team's information and select **Create**. +4. [Add members to your team](members.md#add-a-member-to-a-team) -### The owners team +## The owners team -The **owners** team is a special team created by default during the org creation +The owners team is a special team created by default during the org creation process. The owners team has full access to all repositories in the organization. 
An organization owner is an administrator who is responsible to manage repositories and add team members to the organization. They have full access to private repositories, all teams, billing information, and org settings. An org -owner can also specify [permissions](../docker-hub/repos/configure/index.md#permissions-reference) for each team in +owner can also specify [permissions](#permissions-reference) for each team in the organization. Only an org owner can enable [SSO](../single-sign-on/index.md) for the organization. When SSO is enabled for your organization, the org owner can @@ -40,30 +39,60 @@ enforcement. The org owner can also add additional org owners to help them manage users, teams, and repositories in the organization. -## Configure repository permissions +## Configure repository permissions for a team Organization owners can configure repository permissions on a per-team basis. -For example, you can specify that all teams within an organization have Read and -Write access to repositories A and B, whereas only specific teams have Admin +For example, you can specify that all teams within an organization have "Read and +Write" access to repositories A and B, whereas only specific teams have "Admin" access. Note that org owners have full administrative access to all repositories within the organization. To give a team access to a repository 1. Navigate to **Organizations** in Docker Hub, and select your organization. -2. Click on the **Teams** tab and select the team that you'd like to configure repository access to. -3. Click on the **Permissions** tab and select a repository from the +2. Select the **Teams** tab and select the team that you'd like to configure repository access to. +3. Select the **Permissions** tab and select a repository from the **Repository** drop-down. -4. Choose a permission from the **Permissions** drop-down list and click +4. Choose a permission from the **Permissions** dropdown list and select **Add**. 
![Team Repo Permissions](images/team-repo-permission.png){:width="700px"} -### View a team's permissions for all repositories + +### Permissions reference + +- `Read-only` access lets users view, search, and pull a private repository in the same way as they can a public repository. +- `Read & Write` access lets users pull, push, and view a repository. In addition, it lets users view, cancel, retry, or trigger builds. +- `Admin` access lets users pull, push, view, edit, and delete a + repository. You can also edit build settings, and update the repository's description, collaborators' rights, public/private visibility, and delete. + +Permissions are cumulative. For example, if you have "Read & Write" permissions, +you automatically have "Read-only" permissions: + +| Action | Read-only | Read & Write | Admin | +|:------------------:|:---------:|:------------:|:-----:| +| Pull a Repository | ✅ | ✅ | ✅ | +| View a Repository | ✅ | ✅ | ✅ | +| Push a Repository | ❌ | ✅ | ✅ | +| Edit a Repository | ❌ | ❌ | ✅ | +| Delete a Repository | ❌ | ❌ | ✅ | +| Update a Repository Description | ❌ | ❌ | ✅ | +| View Builds | ✅ | ✅ | ✅ | +| Cancel Builds | ❌ | ✅ | ✅ | +| Retry Builds | ❌ | ✅ | ✅ | +| Trigger Builds | ❌ | ✅ | ✅ | +| Edit Build Settings | ❌ | ❌ | ✅ | + +> **Note** +> +> A user who hasn't verified their email address only has +> `Read-only` access to the repository, regardless of the rights their team +> membership has given them. + +## View a team's permissions for all repositories To view a team's permissions across all repositories: 1. Open **Organizations** > **_Your Organization_** > **Teams** > **_Team Name_**. -2. Click on the **Permissions** tab, where you can view the repositories this team can access. +2. Select the **Permissions** tab, where you can view the repositories this team can access. 
## Videos diff --git a/docker-hub/members.md b/docker-hub/members.md index ee890982d6..f1293523a1 100644 --- a/docker-hub/members.md +++ b/docker-hub/members.md @@ -22,8 +22,8 @@ Use the following steps to invite members to your organization via Docker ID or 5. Select a team from the drop-down list to add all invited users to that team. > **Note** > - > It is recommended that you invite non-administrative users to a team other than the owners team. Members in the owners team will have full access to your organization’s administrative settings. To create a new team, see [Create a team](../docker-hub/orgs.md/#create-a-team). -6. Click **Invite** to confirm. + > It is recommended that you invite non-administrative users to a team other than the owners team. Members in the owners team will have full access to your organization’s administrative settings. To create a new team, see [Create a team](manage-a-team.md). +6. Select **Invite** to confirm. > **Note** > > You can view the pending invitations in the **Members** tab. The invitees receive an email with a link to the organization in Docker Hub where they can accept or decline the invitation. @@ -39,7 +39,7 @@ To invite multiple members to your organization via a CSV file containing email 4. Select a team from the drop-down list to add all invited users to that team. > **Note** > - > It is recommended that you invite non-administrative users to a team other than the owners team. Members in the owners team will have full access to your organization’s administrative settings. To create a new team, see [Create a team](../docker-hub/orgs.md/#create-a-team). + > It is recommended that you invite non-administrative users to a team other than the owners team. Members in the owners team will have full access to your organization’s administrative settings. To create a new team, see [Create a team](manage-a-team.md). 5. Select **Download the template CSV file** to optionally download an example CSV file. 
The following is an example of the contents of a valid CSV file. ``` email @@ -63,7 +63,7 @@ To invite multiple members to your organization via a CSV file containing email - **Already invited**: The user has already been sent an invite email and another invite email will not be sent. - **Member**: The user is already a member of your organization and an invite email will not be sent. - **Duplicate**: The CSV file has multiple occurrences of the same email address. The user will be sent only one invite email. -9. Click **Send invites**. +9. Select **Send invites**. > **Note** > > You can view the pending invitations in the **Members** tab. The invitees receive an email with a link to the organization in Docker Hub where they can accept or decline the invitation. @@ -75,13 +75,13 @@ Organization owners can add a member to one or more teams within an organization To add a member to a team: 1. Navigate to **Organizations** in [Docker Hub](https://hub.docker.com){: target="_blank" rel="noopener" class="_"}, and select your organization. -2. In the **Members** tab, click the additional options from the table menu and select **Add to team**. +2. In the **Members** tab, select the additional options from the table menu and select **Add to team**. > **Note** > - > You can also navigate to **Organizations** > **Your Organization** > **Teams** > **Your Team Name** and click **Add Member**. Select a member from the drop-down list to add them to the team or search by Docker ID or email. + > You can also navigate to **Organizations** > **Your Organization** > **Teams** > **Your Team Name** and select **Add Member**. Select a member from the drop-down list to add them to the team or search by Docker ID or email. -3. Select the team and click **Add**. +3. Select the team and then select **Add**. > **Note** > @@ -93,7 +93,7 @@ To resend an invitation if the invite is pending or declined: 1. 
Navigate to **Organizations** in [Docker Hub](https://hub.docker.com){: target="_blank" rel="noopener" class="_"} and select your organization. 2. In the **Members** tab, locate the invitee and select **Resend invitation** from the table menu. -3. Click **Invite** to confirm. +3. Select **Invite** to confirm. ## Remove members @@ -101,20 +101,20 @@ To remove a member from an organization: 1. Navigate to **Organizations** in [Docker Hub](https://hub.docker.com){: target="_blank" rel="noopener" class="_"}, and select your organization. 2. In the **Members** tab, select Remove member from the table menu. -3. When prompted, click **Remove** to confirm. +3. When prompted, select **Remove** to confirm. To remove an invitee from an organization: 1. Navigate to **Organizations** in [Docker Hub](https://hub.docker.com){: target="_blank" rel="noopener" class="_"}, and select your organization. 2. In the **Members** tab, locate the invitee you would like to remove and select **Remove invitee** from the table menu. -3. When prompted, click **Remove** to confirm. To remove a member from a specific team: 1. Navigate to **Organizations** in [Docker Hub](https://hub.docker.com){: target="_blank" rel="noopener" class="_"}, and select your organization. -2. Click on the **Teams** tab and select the team from the list. -3. Click the **X** next to the user’s name to remove them from the team. -4. When prompted, click **Remove** to confirm. +2. Select the **Teams** tab and select the team from the list. +3. Select the **X** next to the user’s name to remove them from the team. +4. When prompted, select **Remove** to confirm. 
## Export members diff --git a/docker-hub/onboard-business.md b/docker-hub/onboard-business.md index 5059b212bf..447c85941b 100644 --- a/docker-hub/onboard-business.md +++ b/docker-hub/onboard-business.md @@ -57,8 +57,8 @@ organization’s settings apply to the user’s session, you can use a `registry Configure security settings and manage your repositories: - Create [repositories](../docker-hub/repos/index.md) to share container images. -- [Consolidate a repository](../docker-hub/repos/index.md/#consolidating-a-repository) from your personal account to your organization. -- Create [teams](../docker-hub/orgs.md/#create-a-team) and configure [repository permissions](../docker-hub/orgs.md/#configure-repository-permissions). +- [Move images](../docker-hub/repos/index.md/#move-images-between-repositories) from your personal account to your organization. +- Create [teams](manage-a-team.md) and configure [repository permissions](manage-a-team.md#configure-repository-permissions-for-a-team). - Configure [Hardened Docker Desktop](../desktop/hardened-desktop/index.md) to improve your organization’s security posture for containerized development. Hardened Docker Desktop includes: - [Settings Management](../desktop/hardened-desktop/settings-management/index.md), which helps you to confidently manage and control the usage of Docker Desktop within your organization. - [Enhanced Container Isolation](../desktop/hardened-desktop/enhanced-container-isolation/index.md), a setting that instantly enhances security by preventing containers from running as root in Docker Desktop’s Linux VM. diff --git a/docker-hub/onboard-team.md b/docker-hub/onboard-team.md index 90233b849e..c3a5a9a913 100644 --- a/docker-hub/onboard-team.md +++ b/docker-hub/onboard-team.md @@ -49,8 +49,8 @@ organization’s settings apply to the user’s session, you can use a `registry Create and manage your repositories: - Create [repositories](../docker-hub/repos/index.md) to share container images. 
-- [Consolidate a repository](../docker-hub/repos/index.md/#consolidating-a-repository) from your personal account to your organization. -- Create [teams](../docker-hub/orgs.md/#create-a-team) and configure [repository permissions](../docker-hub/orgs.md/#configure-repository-permissions). +- [Consolidate a repository](../docker-hub/repos/index.md/#move-images-between-repositories) from your personal account to your organization. +- Create [teams](manage-a-team.md#create-a-team) and configure [repository permissions](manage-a-team.md#configure-repository-permissions-for-a-team). Your Docker Team subscription provides many more additional features. [Learn more](../subscription/index.md). diff --git a/docker-hub/onboarding-faqs.md b/docker-hub/onboarding-faqs.md index 1ae83439ed..f7456eeb24 100644 --- a/docker-hub/onboarding-faqs.md +++ b/docker-hub/onboarding-faqs.md @@ -39,14 +39,14 @@ The organization name, sometimes referred to as the organization namespace or th ### What’s a team? -A **Team** is a group of Docker users that belong to an organization. An organization can have multiple teams. When you first create an organization, you’ll see that you have a team, the owners team, with a single member. An organization owner can then create new teams and add members to an existing team using Docker IDs or email address and by selecting a team the user should be part of. [Learn more](orgs.md#create-a-team). +A **Team** is a group of Docker users that belong to an organization. An organization can have multiple teams. When you first create an organization, you’ll see that you have a team, the owners team, with a single member. An organization owner can then create new teams and add members to an existing team using Docker IDs or email address and by selecting a team the user should be part of. [Learn more](manage-a-team.md). ### Who is an organization owner? 
An organization owner is an administrator who is responsible to manage repositories and add team members to the organization. They have full access to private repositories, all teams, billing information, and organization settings. -An organization owner can also specify [permissions](orgs.md#configure-repository-permissions) for each team in the +An organization owner can also specify [permissions](manage-a-team.md#configure-repository-permissions-for-a-team) for each team in the organization. Only an organization owner can enable SSO for the organization. When SSO is enabled for your organization, the organization owner can also manage users. @@ -62,7 +62,7 @@ An existing owner can add additional team members as organization owners. All they need to do is select the organization from the [Organizations](https://hub.docker.com/orgs){: target="_blank" rel="noopener" class="_"} page in Docker Hub, add the Docker ID/Email of the user, and then -select the **Owners** team from the drop-down menu. [Learn more](orgs.md#the-owners-team). +select the **Owners** team from the drop-down menu. [Learn more](manage-a-team.md#the-owners-team). ### Do users first need to authenticate with Docker before an owner can add them to an organization? @@ -127,7 +127,7 @@ Yes. You can configure repository access on a per-team basis. For example, you can specify that all teams within an organization have **Read and Write** access to repositories A and B, whereas only specific teams have **Admin** access. Org owners have full administrative access to all repositories within the -organization. [Learn more](orgs.md#configure-repository-permissions). +organization. [Learn more](manage-a-team.md#configure-repository-permissions-for-a-team). ### Can I configure multiple SSO identity providers (IdPs) to authenticate users to a single org? 
diff --git a/docker-hub/orgs.md b/docker-hub/orgs.md index 9492459483..6212b72d6f 100644 --- a/docker-hub/orgs.md +++ b/docker-hub/orgs.md @@ -6,21 +6,17 @@ redirect_from: - /docker-cloud/orgs/ --- -Docker Hub organizations let you create teams so you can give your team access -to shared image repositories. - -An **Organization** is a collection of teams and repositories -that can be managed together. A **Team** is a group of Docker members that belong to an organization. +An organization in Docker Hub is a collection of teams and repositories +that can be managed together. A team is a group of Docker members that belong to an organization. An organization can have multiple teams. Docker users become members of an organization when they are assigned to at least one team in the organization. When you first -create an organization, you’ll see that you have a team, the **owners** (Admins) -team, with a single member. An organization owner is someone that is part of the +create an organization, you have one team, the "owners" team, that has a single member. An organization owner is someone that is part of the owners team. They can create new teams and add members to an existing team using their Docker ID or email address and by -selecting a team the user should be part of. An org owner can also add -additional org owners to help them manage users, teams, and repositories in the +selecting a team the user should be part of. An organization owner can also add +additional owners to help them manage users, teams, and repositories in the organization. ## Create an organization @@ -36,12 +32,12 @@ To create an organization: 1. Sign into [Docker Hub](https://hub.docker.com/){: target="_blank" rel="noopener" class="_"} using your [Docker ID](../docker-id/index.md) or your email address. -2. Select **Organizations**. Click **Create Organization** to create a new organization. +2. Select **Organizations** and then **Create Organization** to create a new organization. 3. 
Choose a plan for your organization. See [Docker Pricing](https://www.docker.com/pricing/){: target="_blank" rel="noopener" class="_" id="dkr_docs_subscription_btl"} for details on the features offered in the Team and Business plan. 4. Enter a name for your organization. This is the official, unique name for -your organization in Docker Hub. Note that it is not possible to change the name +your organization in Docker Hub. It is not possible to change the name of the organization after you've created it. > **Note** @@ -51,21 +47,19 @@ of the organization after you've created it. 5. Enter the name of your company. This is the full name of your company. This info is displayed on your organization page, and in the details of any public images you publish. You can update the company name anytime by navigating -to your organization's **Settings** page. Click **Continue to Org size**. -6. On the Organization Size page, specify the number of users (seats) you'd -require and click **Continue to payment**. +to your organization's **Settings** page. +6. Select **Continue to Org size** and then specify the number of users (seats) you'd +require. +7. Select **Continue to payment** and follow the onscreen instructions. -You've now created an organization. Select the newly created organization from -the Organizations page. You'll now see that you have a team, the **owners** team -with a single member (you). +You've now created an organization with one team, the owners team, with you as the single member. -### View an organization +## View an organization To view an organization: -1. Log into Docker Hub with a user account that is a member of any team in the - organization. You must be part of the **owners** team to access the - organization's **Settings** page. +1. Sign in to Docker Hub with a user account that is a member of any team in the + organization. > **Note** > @@ -77,21 +71,19 @@ To view an organization: > conversion or another account that was added as a member. 
If you > don't see the organization after logging in, > then you are neither a member or an owner of it. An organization - > administrator will need to add you as a member of the organization. + > administrator needs to add you as a member of the organization. -2. Click **Organizations** in the top navigation bar, then choose your +2. Select **Organizations** in the top navigation bar, then choose your organization from the list. - ![View organization details](images/view-org.png){:width="700px"} - -The Organization landing page displays various options that allow you to +The organization landing page displays various options that allow you to configure your organization. - **Members**: Displays a list of team members. You can invite new members using the **Invite members** button. See [Manage members](../docker-hub/members.md) for details. - **Teams**: Displays a list of existing teams and the number of - members in each team. See [Create a team](#create-a-team) for details. + members in each team. See [Create a team](manage-a-team.md) for details. - **Repositories**: Displays a list of repositories associated with the organization. See [Repositories](../docker-hub/repos/index.md) for detailed information about @@ -105,75 +97,14 @@ configure your organization. - **Settings**: Displays information about your organization, and allows you to view and change your repository privacy settings, configure org permissions such as - [Image Access Management](image-access-management.md), configure notification settings, and [deactivate](deactivate-account.md#deactivating-an-organization) your - organization. You can also update your organization name and company name that appear on your organization landing page. - + [Image Access Management](image-access-management.md), configure notification settings, and [deactivate](deactivate-account.md#deactivate-an-organization) your + organization. 
You can also update your organization name and company name that appear on your organization landing page. You must be part of the owners team to access the + organization's **Settings** page. - **Billing**: Displays information about your existing [Docker subscription (plan)](../subscription/index.md) and your billing history. You can also access your invoices from this tab. -## Create a team - -A **Team** is a group of Docker users that belong to an organization. An -organization can have multiple teams. When you first create an organization, -you’ll see that you have a team, the **owners** team, with a single member. An -organization owner can then create new teams and add members to an existing team -using their Docker ID or email address and by selecting a team the user should be part of. - -The org owner can add additional org owners to the owners team to help them -manage users, teams, and repositories in the organization. See [Owners -team](#the-owners-team) for details. - -To create a team: - -1. Go to **Organizations** in Docker Hub, and select your organization. -2. Open the **Teams** tab and click **Create Team**. -3. Fill out your team's information and click **Create**. - -### The owners team - -The **owners** team is a special team created by default during the org creation -process. The owners team has full access to all repositories in the organization. - -An organization owner is an administrator who is responsible to manage -repositories and add team members to the organization. They have full access to -private repositories, all teams, billing information, and org settings. An org -owner can also specify [permissions](../docker-hub/repos/configure/index.md#permissions-reference) for each team in -the organization. Only an org owner can enable [SSO](../single-sign-on/index.md) -for -the organization. When SSO is enabled for your organization, the org owner can -also manage users. 
Docker can auto-provision Docker IDs for new end-users or -users who'd like to have a separate Docker ID for company use through SSO -enforcement. - -The org owner can also add additional org owners to help them manage users, teams, and repositories in the organization. - -## Configure repository permissions - -Organization owners can configure repository permissions on a per-team basis. -For example, you can specify that all teams within an organization have Read and -Write access to repositories A and B, whereas only specific teams have Admin -access. Note that org owners have full administrative access to all repositories within the organization. - -To give a team access to a repository - -1. Navigate to **Organizations** in Docker Hub, and select your organization. -2. Click on the **Teams** tab and select the team that you'd like to configure repository access to. -3. Click on the **Permissions** tab and select a repository from the - **Repository** drop-down. -4. Choose a permission from the **Permissions** drop-down list and click - **Add**. - - ![Team repository permissions view](images/team-repo-permission.png){:width="700px"} - -### View a team's permissions for all repositories - -To view a team's permissions across all repositories: - -1. Open **Organizations** > **_Your Organization_** > **Teams** > **_Team Name_**. -2. Click on the **Permissions** tab, where you can view the repositories this team can access. 
- ## Videos You can also check out the following videos for information about creating Teams diff --git a/docker-hub/publish/index.md b/docker-hub/publish/index.md index 446585c5c0..f4825c92ec 100644 --- a/docker-hub/publish/index.md +++ b/docker-hub/publish/index.md @@ -16,7 +16,7 @@ redirect_from: - /docker-hub/publish/certify-plugins-logging/ - /docker-hub/publish/trustchain/ - /docker-hub/publish/byol/ - - /docker-hub/publish/publisher-center-migration/ + - /docker-hub/publish/publisher-center-migration/ --- The Verified Publisher Program provides several features and benefits to Docker diff --git a/docker-hub/quickstart.md b/docker-hub/quickstart.md index a9a6e2eb53..f922c8db10 100644 --- a/docker-hub/quickstart.md +++ b/docker-hub/quickstart.md @@ -76,11 +76,11 @@ redirect_from: - /apidocs/overview/ --- -The following section contains step-by-step instructions on how to easily get started with Docker Hub. +The following section contains step-by-step instructions on how to get started with Docker Hub. ### Step 1: Sign up for a Docker account -Let's start by creating a [Docker ID](https://hub.docker.com/signup){: target="_blank" rel="noopener" class="_"}. +Start by creating a [Docker ID](https://hub.docker.com/signup){: target="_blank" rel="noopener" class="_"}. A Docker ID grants you access to Docker Hub repositories and allows you to explore images that are available from the community and verified publishers. You'll also need a Docker ID to share images on Docker Hub. @@ -89,29 +89,24 @@ A Docker ID grants you access to Docker Hub repositories and allows you to explo To create a repository: 1. Sign in to [Docker Hub](https://hub.docker.com){: target="_blank" rel="noopener" class="_"}. -2. Click **Create a Repository** on the Docker Hub welcome page. +2. Select **Create a Repository** on the Docker Hub welcome page. 3. Name it **<your-username>/my-private-repo**. 4. Set the visibility to **Private**. +5. Select **Create**. 
- ![Create a repository](images/index-create-repo.png) - -5. Click **Create**. - - You've created your first repository. You should see: - - ![Repository created](images/index-repo-created.png) + You've created your first repository. ### Step 3: Download and install Docker Desktop -You'll need to download Docker Desktop to build, push, and pull container images. +You need to download Docker Desktop to build, push, and pull container images. 1. Download and install [Docker Desktop](../desktop/index.md). -2. Sign in to the Docker Desktop application using the Docker ID you've just created. +2. Sign in to Docker Desktop using the Docker ID you created in step one. ### Step 4: Pull and run a container image from Docker Hub -1. Run `docker pull hello-world` to pull the image from Docker Hub. You should see output similar to: +1. In your terminal, run `docker pull hello-world` to pull the image from Docker Hub. You should see output similar to: ```console $ docker pull hello-world @@ -177,7 +172,7 @@ Docker image locally. ![Tag created](images/index-tag.png) -Congratulations! You've successfully: +You've successfully: - Signed up for a Docker account - Created your first repository diff --git a/docker-hub/release-notes.md b/docker-hub/release-notes.md index ded8131476..41da5d13d5 100644 --- a/docker-hub/release-notes.md +++ b/docker-hub/release-notes.md @@ -11,6 +11,12 @@ known issues for each Docker Hub release. Take a look at the [Docker Public Roadmap](https://github.com/docker/roadmap/projects/1){: target="_blank" rel="noopener" class="_"} to see what's coming next. +## 2023-03-07 + +### New + +- You can now automatically sync user updates with your Docker organizations and teams with [Group Mapping](group-mapping.md) for SSO and SCIM provisioning. 
+ ## 2022-12-12 ### New @@ -22,7 +28,7 @@ Take a look at the [Docker Public Roadmap](https://github.com/docker/roadmap/pro ### New -- The new [autobuild feature](../docker-hub/builds/index.md#check-your-active-builds) lets you view your in-progress logs every 30 seconds instead of when the build is complete. +- The new [autobuild feature](../docker-hub/builds/manage-builds.md#check-your-active-builds) lets you view your in-progress logs every 30 seconds instead of when the build is complete. ## 2022-09-21 @@ -134,7 +140,7 @@ For more information about this feature and for instructions on how to use it, s ### New feature -The **Repositories** view now shows which images have gone stale because they haven't been pulled or pushed recently. For more information, see [repository tags](repos/access/index.md#viewing-repository-tags). +The **Repositories** view now shows which images have gone stale because they haven't been pulled or pushed recently. For more information, see [repository tags](repos/access/index.md#view-repository-tags). # 2020-10-07 diff --git a/docker-hub/repos/access/index.md b/docker-hub/repos/access/index.md index b35991c38e..f55d0acaf2 100644 --- a/docker-hub/repos/access/index.md +++ b/docker-hub/repos/access/index.md @@ -4,14 +4,15 @@ keywords: Docker, docker, trusted, registry, accounts, plans, Dockerfile, Docker title: Access repositories --- - Within your repository, you can give others access to push and pull to your repository, and you can assign permissions. You can also view your repository tags and the associated images. +Within your repository, you can give others access to push and pull to your repository, and you can assign permissions. You can also view your repository tags and the associated images. ## Collaborators and their role -A collaborator is someone you want to give access to a private repository. Once -designated, they can `push` and `pull` to your repositories. 
They're not -allowed to perform any administrative tasks such as deleting the repository or -changing its status from private to public. +A collaborator is someone you want to give access to a private repository. Once designated, they can `push` and `pull` to your repositories. They're not +allowed to perform any administrative tasks such as deleting the repository or changing its status from private to public. + +You can choose collaborators and manage their access to a private +repository from that repository's **Settings** page. > **Note** > @@ -22,23 +23,12 @@ You can also assign more granular collaborator rights ("Read", "Write", or "Admin") on Docker Hub by using organizations and teams. For more information see the [organizations documentation](../../../docker-hub/orgs.md#create-an-organization). -## Viewing repository tags +## View repository tags -Docker Hub's individual repositories view shows you the available tags and the -size of the associated image. Go to the **Repositories** view and select repository to see its tags. To view individual tags, select the **Tags** tab. +You can view the available tags and the size of the associated image. Go to the **Repositories** view and select a repository to see its tags. To view individual tags, select the **Tags** tab. ![View repo tags](../../images/repo-overview.png) -Image sizes are the cumulative space taken up by the image and all its parent -images. This is also the disk space used by the contents of the `.tar` file -created when you `docker save` an image. An image is stale if there has been no push/pull activity for more than one month. For example: - -* It hasn't been pulled for more than one month -* And it hasn't been pushed for more than one month - -A multi-architecture image is stale if all single-architecture images -part of its manifest are stale. - To delete a tag, select the corresponding checkbox and select **Delete** from the **Action** drop-down list. 
> **Note** @@ -48,13 +38,16 @@ To delete a tag, select the corresponding checkbox and select **Delete** from th You can select a tag's digest to view more details. -![View tag](../../images/repo-image-layers.png) +Image sizes are the cumulative space taken up by the image and all its parent +images. This is also the disk space used by the contents of the `.tar` file +created when you `docker save` an image. -## Searching for repositories +An image is stale if there has been no push or pull activity for more than one month. A multi-architecture image is stale if all single-architecture images part of its manifest are stale. + +## Search for repositories You can search the [Docker Hub](https://hub.docker.com) registry through its -search interface or by using the command line interface. Searching can find -images by image name, username, or description: +search interface or by using the command line interface. You can search by image name, username, or description: ```console $ docker search centos @@ -66,7 +59,8 @@ tutum/centos Centos image with SSH access. For the root. ... ``` -There you can see two example results: `centos` and `ansible/centos7-ansible`. +In the example above, you can see two example results, `centos` and `ansible/centos7-ansible`. + The second result shows that it comes from the public repository of a user, named `ansible/`, while the first result, `centos`, doesn't explicitly list a repository which means that it comes from the top-level namespace for @@ -89,6 +83,6 @@ Status: Downloaded newer image for centos:latest You now have an image from which you can run containers. -## Starring repositories +## Star repositories -Your can star your repositories. Stars are a way to show that you like a repository. They're also an easy way of bookmarking your favorites. +Stars are a way to show that you like a repository. They're also an easy way of bookmarking your favorites. 
diff --git a/docker-hub/repos/configure/index.md b/docker-hub/repos/configure/index.md deleted file mode 100644 index fafd1552a5..0000000000 --- a/docker-hub/repos/configure/index.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -description: Configuring repositories on Docker Hub -keywords: Docker, docker, trusted, registry, accounts, plans, Dockerfile, Docker Hub, webhooks, docs, documentation, creating, deleting, consolidating -title: Configure repositories ---- - -When configuring repositories, your private repositories let you keep your container images private, to your personal account or within an organization or team. You can also view your permissions or manage your users' access to those repositories. - - -## Private repositories - -You can make an existing repository private by going to its **Settings** tab and select **Make private**. - -![Make a repo private](../../images/repo-make-private.png){: style="max-width: 60%"} - -You get one free private repository with your Docker Hub user account (not -available for organizations you're a member of). If you need more private -repositories for your user account, upgrade your Docker Hub subscription from your [Billing Information](https://hub.docker.com/billing/plan){: target="_blank" rel="noopener" class="_"} page. - -Once you've created a private repository, you can `push` and `pull` images to and -from it using Docker. - -> **Note**: You must sign in and have access to work with a -> private repository. Private repositories aren't available to search through -> the top-level search or `docker search`. - -You can choose collaborators and manage their access to a private -repository from that repository's **Settings** page. You can also toggle the -repository's status between public and private, if you have an available -repository slot open. Otherwise, you can upgrade your -[Docker Hub](https://hub.docker.com/account/billing-plans/){: target="_blank" rel="noopener" class="_"} subscription. 
- -### Permissions reference - -Permissions are cumulative. For example, if you have Read & Write permissions, -you automatically have Read-only permissions: - -- `Read-only` access lets users view, search, and pull a private repository in the same way as they can a public repository. -- `Read & Write` access lets users pull, push, and view a repository. In addition, it lets users view, cancel, retry or trigger builds -- `Admin` access lets users pull, push, view, edit, and delete a - repository. You can also edit build settings, and update the repositories description, collaborators rights, public/private visibility, and delete. - -> **Note** -> -> A user who hasn't verified their email address only has -> `Read-only` access to the repository, regardless of the rights their team -> membership has given them. - - diff --git a/docker-hub/repos/create.md b/docker-hub/repos/create.md new file mode 100644 index 0000000000..32da7bca95 --- /dev/null +++ b/docker-hub/repos/create.md @@ -0,0 +1,66 @@ +--- +description: Creating repositories on Docker Hub +keywords: Docker, docker, trusted, registry, accounts, plans, Dockerfile, Docker Hub, webhooks, docs, documentation, manage, repos +title: Create repositories +redirect_from: +- /docker-hub/repos/configure/ +--- + +Repositories let you share container images with your team, +customers, or the Docker community at large. + +A single Docker Hub repository can hold many Docker images which are stored as **tags**. Docker images are pushed to Docker Hub through the [`docker push`](/engine/reference/commandline/push/) +command. + +## Create a repository + +1. Sign in to Docker Hub. +2. Select **Repositories**. +3. Near the top-right corner, select **Create Repository**. + +When creating a new repository: + +- You can choose to locate it under your own user account, or under any + [organization](../../docker-hub/orgs.md) where you are an [owner](../manage-a-team.md#the-owners-team). 
+- The repository name needs to: + - Be unique + - Have between 2 and 255 characters + - Only contain lowercase letters, numbers, hyphens (`-`), and underscores (`_`) + + > **Note** + > + > You can't rename a Docker Hub repository once it's created. + +- The description can be up to 100 characters. It is used in the search results. +- If you are a Docker Verified Publisher (DVP) or Docker-Sponsored Open Source (DSOS) organization, you can also add a logo to a repository. The maximum size is 1000x1000. +- You can link a GitHub or Bitbucket account now, or choose to do it later in + the repository settings. +- You can set the repository's default visibility to public or private. + + > **Note** + > + > For organizations creating a new repository, it's recommended you select **Private**. + +## Push a Docker container image to Docker Hub + +Once you have created a repository, you can start using `docker push` to push +images. + +To push an image to Docker Hub, you must first name your local image using your +Docker Hub username and the repository name that you created. + +If you want to add multiple images to a repository, add a specific `:` to them, for example `docs/base:testing`. If it's not specified, the tag defaults to `latest`. + +Name your local images using one of these methods: + +- When you build them, using `docker build -t /[:]` +- By re-tagging an existing local image `docker tag /[:]` +- By using `docker commit /[:]` to commit changes + +Now you can push this image to the repository designated by its name or tag: + +```console +$ docker push /: +``` + +The image is then uploaded and available for use by your teammates and/or the community. 
\ No newline at end of file diff --git a/docker-hub/repos/index.md b/docker-hub/repos/index.md index 797f97fd0c..204cc37d62 100644 --- a/docker-hub/repos/index.md +++ b/docker-hub/repos/index.md @@ -6,122 +6,70 @@ redirect_from: - /engine/tutorials/dockerrepos/ --- -Docker Hub repositories allow you share container images with your team, -customers, or the Docker community at large. +## Change a repository from public to private -Docker images are pushed to Docker Hub through the [`docker push`](/engine/reference/commandline/push/) -command. A single Docker Hub repository can hold many Docker images (stored as -**tags**). +1. Navigate to your repository. +2. Select the **Settings** tab. +3. Select **Make private**. +4. Enter the name of your repository to confirm. -## Creating a repository +You get one free private repository with your Docker Hub user account (not +available for organizations you're a member of). If you need more private +repositories for your user account, upgrade your Docker Hub subscription from your [Billing Information](https://hub.docker.com/billing/plan){: target="_blank" rel="noopener" class="_"} page. -To create a repository, sign into Docker Hub, select **Repositories** then -**Create Repository**: - -![Create repository](../images/repos-create.png) - -When creating a new repository: - -* You can choose to put it in your Docker ID namespace, or in any - [organization](../../docker-hub/orgs.md) where you are an [owner](../../docker-hub/orgs.md#the-owners-team). -* The repository name needs to be unique in that namespace, can be two - to 255 characters, and can only contain lowercase letters, numbers, hyphens (`-`), and underscores (`_`). - - > **Note:** - > - > You can't rename a Docker Hub repository once it's created. - -* The description can be up to 100 characters and used in the search result. -* You can link a GitHub or Bitbucket account now, or choose to do it later in - the repository settings. 
-* You can set your default visibility to public or private for your account. - - > **Note:** - > - > For organizations creating a new repository, it's recommended you select **Private**. - - > **Note** - > - > Docker Verified Publisher (DVP) and Docker-Sponsored Open Source (DDOS) organizations can also add a logo to a repository. The maximum size is 1000x1000. - -![Setting page for creating a repository](../images/repo-create-details.png) - -After you select **Create**, you can start using `docker push` to push -images to this repository. - -## Creating a private repository - -To create a private repository, navigate to Docker Hub and select **Repositories** and **Private**. - -> **Note** -> -> To update your public repository to private, navigate to your repository, select **Settings** and **Make private**. - -![Create a private repository](/docker-hub/images/repo-create-private.png){: style="max-width: 60%"} - -## Deleting a repository - -1. Navigate to [Docker Hub](https://hub.docker.com){: target="_blank" rel="noopener" class="_"} and select **Repositories**. - -2. Select a repository from the list, select **Settings**, and then Delete Repository. - - > **Note:** - > - > Deleting a repository deletes all the images it contains and its build settings. This action can't be undone. - -3. Enter the name of the repository to confirm the deletion and select **Delete**. - -## Consolidating a repository +## Move images between repositories ### Personal to personal When consolidating personal repositories, you can pull private images from the initial repository and push them into another repository owned by you. To avoid losing your private images, perform the following steps: -1. Navigate to [Docker Hub](https://hub.docker.com){: target="_blank" rel="noopener" class="_"} create a Docker ID and select the personal subscription. +1. 
Navigate to [Docker Hub](https://hub.docker.com){: target="_blank" rel="noopener" class="_"} create a new Docker ID and select the personal subscription. 2. Using `docker login` from the CLI, sign in using your original Docker ID and pull your private images. -3. Tag your private images with your newly created Docker ID using: -`docker tag namespace1/docker101tutorial new_namespace/docker101tutorial` -4. Using `docker login` from the CLI, sign in with your newly created Docker ID, and push your newly tagged private images to your new Docker ID namespace. -`docker push new_namespace/docker101tutorial` -5. The private images that existed in your previous namespace are now available in your new Docker ID namespace. +3. Tag your private images with your newly created Docker ID, for example: + + ```console + $ docker tag namespace1/docker101tutorial new_namespace/docker101tutorial + ``` +4. Using `docker login` from the CLI, sign in with your newly created Docker ID, and push your newly tagged private images to your new Docker ID namespace: + + ```console + $ docker push new_namespace/docker101tutorial + ``` + +The private images that existed in your previous account are now available in your new account. ### Personal to an organization -To avoid losing your private images, you can pull your private images from your personal namespace and push them to an organization that's owned by you. +To avoid losing your private images, you can pull your private images from your personal account and push them to an organization that's owned by you. 1. Navigate to [Docker Hub](https://hub.docker.com){: target="_blank" rel="noopener" class="_"} and select **Organizations**. 2. Select the applicable organization and verify that your user account is a member of the organization. -3. Sign in to [Docker Hub](https://hub.docker.com){: target="_blank" rel="noopener" class="_"} using your original Docker ID, and pull your images from the initial namespace. 
-`docker pull namespace1/docker101tutorial` -4. Tag your images with your new organization namespace. -`docker tag namespace1/docker101tutorial /docker101tutorial` -5. Push your newly tagged images to your new org namespace. -`docker push new_org/docker101tutorial` +3. Sign in to [Docker Hub](https://hub.docker.com){: target="_blank" rel="noopener" class="_"} using your original Docker ID, and pull your images: -The private images that existed in the initial namespace are now available for your organization. + ```console + $ docker pull namespace1/docker101tutorial + ``` +4. Tag your images with your new organization namespace: -## Pushing a Docker container image to Docker Hub + ```console + $ docker tag namespace1/docker101tutorial /docker101tutorial + ``` +5. Push your newly tagged images to your new org namespace: -To push an image to Docker Hub, you must first name your local image using your -Docker Hub username and the repository name that you created through Docker Hub -on the web. + ```console + $ docker push new_org/docker101tutorial + ``` -You can add multiple images to a repository by adding a specific `:` to -them (for example `docs/base:testing`). If it's not specified, the tag defaults -to `latest`. +The private images that existed in your user account are now available for your organization. -Name your local images using one of these methods: +## Delete a repository -* When you build them, using `docker build -t /[:]` -* By re-tagging an existing local image `docker tag /[:]` -* By using `docker commit /[:]` - to commit changes +> **Warning** +> +> Deleting a repository deletes all the images it contains and its build settings. This action can't be undone. +{: .warning} -Now you can push this repository to the registry designated by its name or tag. - -```console -$ docker push /: -``` - -The image is then uploaded and available for use by your teammates and/or -the community. \ No newline at end of file +1. Navigate to your repository. +2. 
Select the **Settings** tab. +3. Select **Delete repository**. +4. Enter the name of your repository to confirm. \ No newline at end of file diff --git a/docker-hub/scim.md b/docker-hub/scim.md index c133763d45..860a089618 100644 --- a/docker-hub/scim.md +++ b/docker-hub/scim.md @@ -4,7 +4,6 @@ keywords: SCIM, SSO title: SCIM direct_from: - /docker-hub/company-scim/ -- /docker-hub/group-mapping/ --- This section is for administrators who want to enable System for Cross-domain Identity Management (SCIM) 2.0 for their business. It is available for Docker Business customers. @@ -48,14 +47,6 @@ Follow the instructions provided by your IdP: - [Azure AD](https://learn.microsoft.com/en-us/azure/databricks/administration-guide/users-groups/scim/aad#step-2-configure-the-enterprise-application){: target="_blank" rel="noopener" class="_" } - [OneLogin](https://developers.onelogin.com/scim/create-app){: target="_blank" rel="noopener" class="_" } -### Optional step -You also have the option to use group mapping within your IdP. To take advantage of group mapping, follow the instructions provided by your IdP: -- [Okta](https://help.okta.com/en-us/Content/Topics/users-groups-profiles/usgp-about-group-push.htm){: target="_blank" rel="noopener" class="_" } -- [Azure AD](https://learn.microsoft.com/en-us/azure/active-directory/app-provisioning/customize-application-attributes){: target="_blank" rel="noopener" class="_" } -- [OneLogin](https://developers.onelogin.com/scim/create-app){: target="_blank" rel="noopener" class="_" } - -Once complete, a user who signs in to Docker through SSO is automatically added to the organizations and teams mapped in the IdP. - ## Disable SCIM If SCIM is disabled, any user provisioned through SCIM will remain in the organization. Future changes for your users will not sync from your IdP. User de-provisioning is only possible when manually removing the user from the organization. 
diff --git a/docker-hub/service-accounts.md b/docker-hub/service-accounts.md index 84b5ea43d6..1025e24735 100644 --- a/docker-hub/service-accounts.md +++ b/docker-hub/service-accounts.md @@ -3,6 +3,10 @@ description: Docker Service accounts keywords: Docker, service, accounts, Docker Hub title: Service accounts --- +> **Note** +> +> Service accounts requires a +> [Docker Team, or Business subscription](../subscription/index.md). A service account is a Docker ID used for automated management of container images or containerized applications. Service accounts are typically used in automated workflows, and don't share Docker IDs with the members in the organization. Common use cases for service accounts include mirroring content on Docker Hub, or tying in image pulls from your CI/CD process. @@ -37,9 +41,9 @@ Refer to the following table for details on the Enhanced Service Account add-on To create a new service account for your Team account: 1. Create a new Docker ID. -2. Create a [team](orgs.md#create-a-team) in your organization and grant it read-only access to your private repositories. +2. Create a [team](manage-a-team.md) in your organization and grant it read-only access to your private repositories. 3. Add the new Docker ID to your [organization](orgs.md). -4. Add the new Docker ID to the [team](orgs.md#create-a-team) you created earlier. +4. Add the new Docker ID to the [team](manage-a-team.md) you created earlier. 5. Create a new [personal access token (PAT)](/access-tokens.md) from the user account and use it for CI. 
> **Note** diff --git a/docker-hub/vulnerability-scanning.md b/docker-hub/vulnerability-scanning.md index 21e2a919f7..800a5e6e3a 100644 --- a/docker-hub/vulnerability-scanning.md +++ b/docker-hub/vulnerability-scanning.md @@ -23,7 +23,7 @@ Scan results include: - The source of the vulnerability, such as Operating System (OS) packages and libraries - The version in which it was introduced -- A recommended fixed version (if available) to remediate the vulnerabilities +- A recommended fixed version, if available, to remediate the vulnerabilities discovered. ## Changes to vulnerability scanning in Docker Hub @@ -39,7 +39,7 @@ show a higher number of vulnerabilities. If you used vulnerability scanning before February 27th, 2023, you may see that new vulnerability reports list a higher number of vulnerabilities, due to a more thorough analysis. -There is no action required on your part. Scans will continue to run as usual +There is no action required on your part. Scans continue to run as usual with no interruption or changes to pricing. Historical data continues to be available. @@ -72,10 +72,9 @@ Business tier. To enable Basic vulnerability scanning: -1. Log into your [Docker Hub](https://hub.docker.com){: target="_blank" +1. Sign in to your [Docker Hub](https://hub.docker.com){: target="_blank" rel="noopener" class="_"} account. -2. Click **Repositories** from the main menu and select a repository from the - list. +2. Select **Repositories** and then choose a repository. 3. Go to the **Settings** tab. 4. Under **Image insight settings**, select **Basic Hub vulnerability scanning**. @@ -117,7 +116,7 @@ To view the vulnerability report: ![Vulnerability scan report](images/vuln-scan-report.png){:width="700px"} -2. Click on the **Tags** tab > **Digest** > **Vulnerabilities** to view the +2. Select the **Tags** tab, then **Digest**, then **Vulnerabilities** to view the detailed scan report. 
The scan report displays vulnerabilities identified by the scan, sorting them @@ -165,10 +164,9 @@ improving image security. For more information, see Repository owners and administrators can disable Basic vulnerability scanning on a repository. To disable scanning: -1. Log into your [Docker Hub](https://hub.docker.com){: target="_blank" +1. Sign in to your [Docker Hub](https://hub.docker.com){: target="_blank" rel="noopener" class="_"} account. -2. Go to **Repositories** from the main menu and select a repository from the - list. +2. Go to **Repositories** and then select a repository from the list. 3. Go to the **Settings** tab. 4. Under **Image insight settings**, select **None**. 5. Select **Save**. diff --git a/docker-hub/webhooks.md b/docker-hub/webhooks.md index 35a6582f11..70fd896441 100644 --- a/docker-hub/webhooks.md +++ b/docker-hub/webhooks.md @@ -1,36 +1,31 @@ --- description: Docker Hub Webhooks keywords: Docker, webhooks, hub, builds -title: Docker Hub Webhooks +title: Webhooks --- You can use webhooks to cause an action in another service in response to a push event in the repository. Webhooks are POST requests sent to a URL you define in Docker Hub. -Configure webhooks through the "Webhooks" tab on your Docker Hub repository: +## Create a webhook -![Webhooks page](images/webhooks-empty.png) +To create a webhook: +1. In your chosen respository, select the **Webhooks** tabThen: +2. Provide a name for the webhook +3. Provide a destination webhook URL. This is where webhook POST requests are delivered. +4. Select **Create**. -### Create Webhooks +## View webhook delivery history -To create a webhook, visit the webhooks tab for your repository. Then: -1. Provide a name for the webhooks -2. Provide a destination webhook URL. This is where webhook POST requests will be delivered: +To view the history of the webhook: +1. Hover over your webhook under the **Current Webhooks section**. +2. Select the **Menu options** icon. +3. Select **View History**. 
-![Create webhooks](images/webhooks-create.png) +You can then view the delivery history, and whether delivering the POST request was successful or not: -### View Webhook delivery history +## Example webhook payload -You can view Webhook Delivery History by clicking on the submenu of the webhook and then clicking "View History" - -![Webhook delivery history](images/webhooks-submenu.png) - -You can then view the delivery history, and whether delivering the POST request was successful or failed: - -![Webhooks History](images/webhooks-history.png) - -### Example Webhook payload - -Docker Hub Webhook payloads have the following payload JSON format: +Webhook payloads have the following JSON format: ```json { @@ -60,23 +55,23 @@ Docker Hub Webhook payloads have the following payload JSON format: } ``` -### Validate a webhook callback +## Validate a webhook callback -To validate a callback in a webhook chain, you need to +To validate a callback in a webhook chain, you need to: 1. Retrieve the `callback_url` value in the request's JSON payload. 2. Send a POST request to this URL containing a valid JSON body. -> **Note**: A chain request is only considered complete once the last -> callback has been validated. +> **Note** +> +> A chain request is only considered complete once the last callback is validated. - -#### Callback JSON data +### Callback JSON data The following parameters are recognized in callback data: * `state` (required): Accepted values are `success`, `failure`, and `error`. - If the state isn't `success`, the Webhook chain is interrupted. + If the state isn't `success`, the webhook chain is interrupted. * `description`: A string containing miscellaneous information that is available on Docker Hub. Maximum 255 characters. * `context`: A string containing the context of the operation. 
Can be retrieved diff --git a/docker-id/index.md b/docker-id/index.md index b3715ef6c6..7016622a8b 100644 --- a/docker-id/index.md +++ b/docker-id/index.md @@ -1,27 +1,27 @@ --- description: Sign up for a Docker ID and log in keywords: accounts, docker ID, billing, paid plans, support, Hub, Store, Forums, knowledge base, beta access, email, activation, verification -title: Docker ID accounts +title: Create an account redirect_from: - /docker-cloud/dockerid/ - /docker-hub/accounts/ --- -Your free Docker ID grants you access to Docker Hub repositories and some beta programs. All you need is an email address. +All you need is an email address to create a Docker account. Once you've created your account with a unique Docker ID, you can access Docker Hub repositories and explore images that are available from the community and verified publishers. -## Register for a Docker ID +Your Docker ID becomes your username for hosted Docker services, and [Docker forums](https://forums.docker.com/). -Your Docker ID becomes your user namespace for hosted Docker services, and becomes your username on the [Docker forums](https://forums.docker.com/). To create a new Docker ID: +## Create a Docker ID 1. Go to the [Docker Hub signup page](https://hub.docker.com/signup/). -2. Enter a username that will become your Docker ID. +2. Enter a username. - Your Docker ID must be between 4 and 30 characters long, and can only contain numbers and lowercase letters. *Once you create your Docker ID you can't reuse it in the future if you deactivate this account*. + Your Docker ID must be between 4 and 30 characters long, and can only contain numbers and lowercase letters. Once you create your Docker ID you can't reuse it in the future if you deactivate this account. 3. Enter a unique, valid email address. -4. Enter a password that's at least 9 characters. +4. Enter a password that's at least 9 characters long. 5. Complete the Captcha verification and then select **Sign up**. 
@@ -33,7 +33,7 @@ Your Docker ID becomes your user namespace for hosted Docker services, and becom > > You have limited actions available until you verify your email address. -## Log in +## Sign in Once you register and verify your Docker ID email address, you can sign in to [Docker Hub](https://hub.docker.com). diff --git a/engine/install/debian.md b/engine/install/debian.md index 40101dac46..088b8d86ea 100644 --- a/engine/install/debian.md +++ b/engine/install/debian.md @@ -89,8 +89,7 @@ Docker from the repository. $ sudo apt-get install \ ca-certificates \ curl \ - gnupg \ - lsb-release + gnupg ``` 2. Add Docker's official GPG key: @@ -104,8 +103,9 @@ Docker from the repository. ```console $ echo \ - "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] {{ download-url-base }} \ - $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] {{ download-url-base }} \ + "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null ``` #### Install Docker Engine diff --git a/engine/install/index.md b/engine/install/index.md index a7e6ccd6e2..1d49b5a86b 100644 --- a/engine/install/index.md +++ b/engine/install/index.md @@ -30,16 +30,16 @@ redirect_from: > **Docker Desktop for Linux** > -> Docker Desktop helps you build, share, and run containers easily on Mac and -> Windows as you do on Linux. We are excited to share that Docker Desktop for +> Docker Desktop helps you build, share, and run containers on Mac and +> Windows as you do on Linux. Docker Desktop for > Linux is now GA. For more information, see [Docker Desktop for Linux](../../desktop/install/linux-install.md). 
{: .important} ## Supported platforms -Docker Engine is available on a variety of [Linux platforms](../../desktop/install/linux-install.md), -[macOS](../../desktop/install/mac-install.md) and [Windows 10](../../desktop/install/windows-install.md) +Docker Engine is available on a variety of [Linux distros](../../desktop/install/linux-install.md), +[macOS](../../desktop/install/mac-install.md), and [Windows 10](../../desktop/install/windows-install.md) through Docker Desktop, and as a [static binary installation](binaries.md). Find your preferred operating system below. @@ -55,7 +55,7 @@ your preferred operating system below. ### Server -Docker provides `.deb` and `.rpm` packages from the following Linux distributions +Docker provides `.deb` and `.rpm` packages from the following Linux distros and architectures: | Platform | x86_64 / amd64 | arm64 / aarch64 | arm (32-bit) | s390x | @@ -69,12 +69,12 @@ and architectures: | [Ubuntu](ubuntu.md) | [{{ yes }}](ubuntu.md) | [{{ yes }}](ubuntu.md) | [{{ yes }}](ubuntu.md) | [{{ yes }}](ubuntu.md) | | [Binaries](binaries.md) | [{{yes}}](binaries.md) | [{{yes}}](binaries.md) | [{{yes}}](binaries.md) | | -### Other Linux distributions +### Other Linux distros > **Note** > -> While the instructions below may work, Docker does not test or verify -> installation on derivatives. +> While the instructions below may work, Docker doesn't test or verify +> installation on distro derivatives. - Users of Debian derivatives such as "BunsenLabs Linux", "Kali Linux" or "LMDE" (Debian-based Mint) should follow the installation instructions for @@ -86,15 +86,15 @@ and architectures: substituting the version of their distro for the corresponding Ubuntu release. Refer to the documentation of your distro to find which Ubuntu release corresponds with your derivative version. 
-- Some Linux distributions are providing a package of Docker Engine through their +- Some Linux distros provide a package of Docker Engine through their package repositories. These packages are built and maintained by the Linux - distribution's package maintainers and may have differences in configuration - or built from modified source code. Docker is not involved in releasing these - packages and bugs or issues involving these packages should be reported in - your Linux distribution's issue tracker. + distro's package maintainers and may have differences in configuration + or built from modified source code. Docker isn't involved in releasing these + packages and you should report any bugs or issues involving these packages to + your Linux distro's issue tracker. Docker provides [binaries](binaries.md) for manual installation of Docker Engine. -These binaries are statically linked and can be used on any Linux distribution. +These binaries are statically linked and you can use them on any Linux distro. ## Release channels diff --git a/engine/install/linux-postinstall.md b/engine/install/linux-postinstall.md index 517a88b3eb..b0387ee348 100644 --- a/engine/install/linux-postinstall.md +++ b/engine/install/linux-postinstall.md @@ -127,16 +127,16 @@ To avoid issues with overusing disk for log data, consider one of the following options: - Configure the `json-file` logging driver to turn on - [log rotation](../../config/containers/logging/json-file.md) + [log rotation](../../config/containers/logging/json-file.md). - Use an [alternative logging driver](../../config/containers/logging/configure.md#configure-the-default-logging-driver) such as the ["local" logging driver](../../config/containers/logging/local.md) - that performs log rotation by default + that performs log rotation by default. - Use a logging driver that sends logs to a remote logging aggregator. 
## Next steps -- Take a look at the [Get started](../../get-started/index.md) training modules +- Read the [Get started](../../get-started/index.md) training modules to learn how to build an image and run it as a containerized application. - Review the topics in [Develop with Docker](../../develop/index.md) to learn how to build new applications using Docker. diff --git a/engine/install/ubuntu.md b/engine/install/ubuntu.md index a96994d1f8..c2aa4b01e3 100644 --- a/engine/install/ubuntu.md +++ b/engine/install/ubuntu.md @@ -88,8 +88,7 @@ Docker from the repository. $ sudo apt-get install \ ca-certificates \ curl \ - gnupg \ - lsb-release + gnupg ``` 2. Add Docker's official GPG key: @@ -103,8 +102,9 @@ Docker from the repository. ```console $ echo \ - "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] {{ download-url-base }} \ - $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] {{ download-url-base }} \ + "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null ``` #### Install Docker Engine diff --git a/engine/reference/commandline/init.md b/engine/reference/commandline/init.md new file mode 100644 index 0000000000..b93e7beb72 --- /dev/null +++ b/engine/reference/commandline/init.md @@ -0,0 +1,17 @@ +--- +datafolder: init-cli +datafile: docker_init +title: docker init +--- + +> **Beta** +> +> The Docker Init plugin is currently in [Beta](../../../release-lifecycle.md#beta). Docker recommends that you do not use this in production environments. 
+ +{% include cli.md datafolder=page.datafolder datafile=page.datafile %} diff --git a/engine/reference/commandline/scout_compare.md b/engine/reference/commandline/scout_compare.md new file mode 100644 index 0000000000..f3c365159b --- /dev/null +++ b/engine/reference/commandline/scout_compare.md @@ -0,0 +1,13 @@ +--- +datafolder: scout-cli +datafile: docker_scout_compare +title: docker scout compare +--- + +{% include cli.md datafolder=page.datafolder datafile=page.datafile %} diff --git a/engine/reference/commandline/scout_quickview.md b/engine/reference/commandline/scout_quickview.md new file mode 100644 index 0000000000..ec6821b938 --- /dev/null +++ b/engine/reference/commandline/scout_quickview.md @@ -0,0 +1,13 @@ +--- +datafolder: scout-cli +datafile: docker_scout_quickview +title: docker scout quickview +--- + +{% include cli.md datafolder=page.datafolder datafile=page.datafile %} diff --git a/engine/reference/commandline/scout_recommendations.md b/engine/reference/commandline/scout_recommendations.md new file mode 100644 index 0000000000..2e7c7226ef --- /dev/null +++ b/engine/reference/commandline/scout_recommendations.md @@ -0,0 +1,13 @@ +--- +datafolder: scout-cli +datafile: docker_scout_recommendations +title: docker scout recommendations +--- + +{% include cli.md datafolder=page.datafolder datafile=page.datafile %} diff --git a/engine/reference/commandline/scout_sbom.md b/engine/reference/commandline/scout_sbom.md new file mode 100644 index 0000000000..09e7068915 --- /dev/null +++ b/engine/reference/commandline/scout_sbom.md @@ -0,0 +1,13 @@ +--- +datafolder: scout-cli +datafile: docker_scout_sbom +title: docker scout sbom +--- + +{% include cli.md datafolder=page.datafolder datafile=page.datafile %} diff --git a/engine/release-notes/20.10.md b/engine/release-notes/20.10.md index ac8c4dcb55..998d365b01 100644 --- a/engine/release-notes/20.10.md +++ b/engine/release-notes/20.10.md @@ -10,6 +10,34 @@ skip_read_time: true This document describes the latest 
changes, additions, known issues, and fixes for Docker Engine version 20.10. +## 20.10.24 +{% include release-date.html date="2023-04-04" %} + +### Updates + +- Update Go runtime to [1.19.7](https://go.dev/doc/devel/release#go1.19.minor). +- Update Docker Buildx to [v0.10.4](https://github.com/docker/buildx/releases/tag/v0.10.4). +- Update containerd to [v1.6.20](https://github.com/containerd/containerd/releases/tag/v1.6.20). +- Update runc to [v1.1.5](https://github.com/opencontainers/runc/releases/tag/v1.1.5). + +### Bug fixes and enhancements + +- Fixed a number of issues that can cause Swarm encrypted overlay networks + to fail to uphold their guarantees, addressing [CVE-2023-28841](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-28841), + [CVE-2023-28840](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-28840), and + [CVE-2023-28842](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-28842). + - A lack of kernel support for encrypted overlay networks now reports + as an error. + - Encrypted overlay networks are eagerly set up, rather than waiting for + multiple nodes to attach. + - Encrypted overlay networks are now usable on Red Hat Enterprise Linux 9 + through the use of the `xt_bpf` kernel module. + - Users of Swarm overlay networks should review [GHSA-vwm3-crmr-xfxw](https://github.com/moby/moby/security/advisories/GHSA-vwm3-crmr-xfxw) + to ensure that unintentional exposure has not occurred. +- Upgrade github.com/containerd/fifo to v1.1.0 to fix a potential panic [moby/moby#45242](https://github.com/moby/moby/pull/45242). +- Fix missing Bash completion for installed cli-plugins [docker/cli#4091](https://github.com/docker/cli/pull/4091). 
+ + ## 20.10.23 {% include release-date.html date="2023-01-19" %} diff --git a/engine/release-notes/23.0.md b/engine/release-notes/23.0.md index 67eaae294f..2d4c388d92 100644 --- a/engine/release-notes/23.0.md +++ b/engine/release-notes/23.0.md @@ -41,6 +41,73 @@ Changing the version format is a stepping-stone towards Go module compatibility, but the repository doesn't yet use Go modules, and still requires using a "+incompatible" version. Work continues towards Go module compatibility in a future release. +## 23.0.3 + +{% include release-date.html date="2023-04-04" %} + +> **Note** +> +> Due to an issue with CentOS 9 Stream's package repositories, packages for +> CentOS 9 are currently unavailable. Packages for CentOS 9 may be added later, +> or as part of the next (23.0.4) patch release. + +### Bug fixes and enhancements + +- Fixed a number of issues that can cause Swarm encrypted overlay networks + to fail to uphold their guarantees, addressing [CVE-2023-28841](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-28841), + [CVE-2023-28840](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-28840), and + [CVE-2023-28842](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-28842). + - A lack of kernel support for encrypted overlay networks now reports + as an error. + - Encrypted overlay networks are eagerly set up, rather than waiting for + multiple nodes to attach. + - Encrypted overlay networks are now usable on Red Hat Enterprise Linux 9 + through the use of the `xt_bpf` kernel module. + - Users of Swarm overlay networks should review [GHSA-vwm3-crmr-xfxw](https://github.com/moby/moby/security/advisories/GHSA-vwm3-crmr-xfxw) + to ensure that unintentional exposure has not occurred. + +### Packaging Updates + +- Upgrade `containerd` to [v1.6.20](https://github.com/containerd/containerd/releases/tag/v1.6.20). +- Upgrade `runc` to [v1.1.5](https://github.com/opencontainers/runc/releases/tag/v1.1.5). 
+ + +## 23.0.2 + +{% include release-date.html date="2023-03-28" %} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 23.0.2 milestone](https://github.com/docker/cli/milestone/75?closed=1) +- [moby/moby, 23.0.2 milestone](https://github.com/moby/moby/milestone/114?closed=1) + +### Bug fixes and enhancements + +- Fully resolve missing checks for `apparmor_parser` when an AppArmor enabled kernel is detected. [containerd/containerd#8087](https://github.com/containerd/containerd/pull/8087), [moby/moby#45043](https://github.com/moby/moby/pull/45043) +- Ensure that credentials are redacted from Git URLs when generating BuildKit buildinfo. Fixes [CVE-2023-26054](https://github.com/moby/buildkit/security/advisories/GHSA-gc89-7gcr-jxqc). [moby/moby#45110](https://github.com/moby/moby/pull/45110) +- Fix anonymous volumes created by a `VOLUME` line in a Dockerfile being excluded from volume prune. [moby/moby#45159](https://github.com/moby/moby/pull/45159) +- Fix a failure to properly propagate errors during removal of volumes on a Swarm node. [moby/moby#45155](https://github.com/moby/moby/pull/45155) +- Temporarily work around a bug in BuildKit `COPY --link` by disabling mergeop/diffop optimization. [moby/moby#45112](https://github.com/moby/moby/pull/45112) +- Properly clean up child tasks when a parent Swarm job is removed. [moby/swarmkit#3112](https://github.com/moby/swarmkit/pull/3112), [moby/moby#45107](https://github.com/moby/moby/pull/45107) +- Fix Swarm service creation logic so that both a GenericResource and a non-default network can be used together. [moby/swarmkit#3082](https://github.com/moby/swarmkit/pull/3082), [moby/moby#45107](https://github.com/moby/moby/pull/45107) +- Fix Swarm CSI support requiring the CSI plugin to offer staging endpoints in order to publish a volume. 
[moby/swarmkit#3116](https://github.com/moby/swarmkit/pull/3116), [moby/moby#45107](https://github.com/moby/moby/pull/45107) +- Fix a panic caused by log buffering in some configurations. [containerd/fifo#47](https://github.com/containerd/fifo/pull/47), [moby/moby#45051](https://github.com/moby/moby/pull/45051) +- Log errors in the REST to Swarm gRPC API translation layer at the debug level to reduce redundancy and noise. [moby/moby#45016](https://github.com/moby/moby/pull/45016) +- Fix a DNS resolution issue affecting containers created with `--dns-opt` or `--dns-search` when `systemd-resolved` is used outside the container. [moby/moby#45000](https://github.com/moby/moby/pull/45000) +- Fix a panic when logging errors in handling DNS queries originating from inside a container. [moby/moby#44980](https://github.com/moby/moby/pull/44980) +- Improve the speed of `docker ps` by allowing users to opt out of size calculations with `--size=false`. [docker/cli#4107](https://github.com/docker/cli/pull/4107) +- Extend support for Bash completion to all plugins. [docker/cli#4092](https://github.com/docker/cli/pull/4092) +- Fix `docker stack deploy` failing on Windows when special environment variables set by `cmd.exe` are present. [docker/cli#4083](https://github.com/docker/cli/pull/4083) +- Add forward compatibility for future API versions by considering empty image tags to be the same as ``. [docker/cli#4065](https://github.com/docker/cli/pull/4065) +- Atomically write context files to greatly reduce the probability of corruption, and improve the error message for a corrupt context. [docker/cli#4063](https://github.com/docker/cli/pull/4063) + +### Packaging + +- Upgrade Go to `1.19.7`. [docker/docker-ce-packaging#857](https://github.com/docker/docker-ce-packaging/pull/857), [docker/cli#4086](https://github.com/docker/cli/pull/4086), [moby/moby#45137](https://github.com/moby/moby/pull/45137) +- Upgrade `containerd` to `v1.6.19`. 
[moby/moby#45084](https://github.com/moby/moby/pull/45084), [moby/moby#45099](https://github.com/moby/moby/pull/45099) +- Upgrade Buildx to `v0.10.4`. [docker/docker-ce-packaging#855](https://github.com/docker/docker-ce-packaging/pull/855) +- Upgrade Compose to `v2.17.2`. [docker/docker-ce-packaging#867](https://github.com/docker/docker-ce-packaging/pull/867) + ## 23.0.1 {% include release-date.html date="2023-02-09" %} diff --git a/get-started/02_our_app.md b/get-started/02_our_app.md index 42dadcbedd..03609f610b 100644 --- a/get-started/02_our_app.md +++ b/get-started/02_our_app.md @@ -7,8 +7,8 @@ description: Containerize and run a simple application to learn Docker --- For the rest of this guide, you'll be working with a simple todo -list manager that's running in Node.js. If you're not familiar with Node.js, -don't worry. This guide doesn't require JavaScript experience. +list manager that runs on Node.js. If you're not familiar with Node.js, -don't worry. This guide doesn't require JavaScript experience. +don't worry. This guide doesn't require any prior experience with JavaScript. To complete this guide, you'll need the following: @@ -34,7 +34,7 @@ Before you can run the application, you need to get the application source code ## Build the app's container image -In order to build the [container image](../get-started/overview.md/#docker-objects){:target="_blank" rel="noopener" class="_"}, you'll need to use a `Dockerfile`. A Dockerfile is simply a text-based file with no file extension. A Dockerfile contains a script of instructions that Docker uses to create a container image. +To build the [container image](../get-started/overview.md/#docker-objects){:target="_blank" rel="noopener" class="_"}, you'll need to use a `Dockerfile`. A Dockerfile is simply a text-based file with no file extension that contains a script of instructions. Docker uses this script to build a container image. 1. In the `app` directory, the same location as the `package.json` file, create a file named `Dockerfile`. 
You can use the following commands below to create a Dockerfile based on your operating system. @@ -130,7 +130,7 @@ Now that you have an image, you can run the application in a [container](../get- ![Empty todo list](images/todo-list-empty.png){: style="width:450px;margin-top:20px;"} {: .text-center } -3. Go ahead and add an item or two and see that it works as you expect. You can mark items as complete and remove items. Your frontend is successfully storing items in the backend. +3. Go ahead and add an item or two and see that it works as you expect. You can mark items as complete and remove them. Your frontend is successfully storing items in the backend. At this point, you should have a running todo list manager with a few items, all built by you. diff --git a/get-started/07_multi_container.md b/get-started/07_multi_container.md index 31a3370aae..adbe724dc7 100644 --- a/get-started/07_multi_container.md +++ b/get-started/07_multi_container.md @@ -195,7 +195,7 @@ The todo app supports the setting of a few environment variables to specify MySQ You can now start your dev-ready container. -1. Specify each of the environment variables above, as well as connect the container to your app network. +1. Specify each of the environment variables above, as well as connect the container to your app network. Make sure that you are in the `getting-started/app` directory when you run this command.