Merge branch 'main' into ENGDOCS-1054

jerae-duffin committed on 2022-10-26 23:37:47 -05:00 (committed by GitHub)
commit 9cd8845c36
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
134 changed files with 4751 additions and 1797 deletions


@ -1,5 +1,4 @@
.DS_Store
.git
.github
.gitignore
.idea


@ -3,5 +3,5 @@ message: "Write short, concise sentences. (<=30 words)"
scope: sentence
link: https://docs.docker.com/contribute/checklist/
level: warning
max: 30
max: 31
token: \b(\w+)\b


@ -25,6 +25,7 @@ Kubernetes
Lightstreamer
Linux
Logstash
LTS
Mac
Mail(chimp|gun)
Microsoft
@ -38,9 +39,11 @@ Postgres
PowerShell
Python
QEMU
Raspbian
RHEL
S3
SQLite
SLES
Slack
Snyk
Solr


@ -1,11 +1,16 @@
APIs?
DHCP
DNS
Ethernet
GRUB
Git
GPG
HTTP
IPs?
IPv[46]
IPvlan
MAC
RPM
SDKs?
SSO
TCP
@ -13,6 +18,8 @@ UDP
Unix
VLAN
VM
[Ll]oopback
[Nn]ameserver
[Nn]amespace
cgroup
config
@ -20,6 +27,7 @@ containerd
deserialization
deserialize
filepath
firewalld
glibc
goroutine
hostname
@ -43,6 +51,7 @@ stdout
subnet
swappable
systemd
umask
ungated
virtiofs
virtualize


@ -4,7 +4,7 @@ on:
push:
# needs push event on default branch otherwise cache is evicted when pull request is merged
branches:
- master
- main
pull_request:
permissions:


@ -5,7 +5,7 @@ on:
push:
branches:
- lab
- master
- main
- published
# these permissions are needed to interact with GitHub's OIDC Token endpoint.
@ -22,14 +22,14 @@ jobs:
run: |
JEKYLL_ENV=development
DOCS_AWS_REGION=us-east-1
if [ "${{ github.ref }}" = "refs/heads/master" ]; then
if [ "${{ github.ref }}" = "refs/heads/main" ]; then
DOCS_URL="https://docs-stage.docker.com"
DOCS_AWS_IAM_ROLE="arn:aws:iam::710015040892:role/stage-docs-docs.docker.com-20220818202135984800000001"
DOCS_S3_BUCKET="stage-docs-docs.docker.com"
DOCS_S3_CONFIG="s3-config.json"
DOCS_CLOUDFRONT_ID="E1R7CSW3F0X4H8"
DOCS_LAMBDA_FUNCTION_REDIRECTS="DockerDocsRedirectFunction-stage"
DOCS_SLACK_MSG="Successfully deployed docs-stage from master branch. $DOCS_URL"
DOCS_SLACK_MSG="Successfully deployed docs-stage from main branch. $DOCS_URL"
elif [ "${{ github.ref }}" = "refs/heads/published" ]; then
JEKYLL_ENV=production
DOCS_URL="https://docs.docker.com"


@ -1,7 +1,7 @@
# syntax=docker/dockerfile:1
# This Dockerfile builds the docs for https://docs.docker.com/
# from the master branch of https://github.com/docker/docs
# from the main branch of https://github.com/docker/docs
# Use same ruby version as the one in .ruby-version
# that is used by Netlify
@ -47,9 +47,15 @@ ARG JEKYLL_ENV
ARG DOCS_URL
ENV TARGET=/out
RUN --mount=type=bind,target=.,rw \
--mount=type=cache,target=/tmp/docker-docs-clone \
--mount=type=cache,target=/src/.jekyll-cache <<EOT
set -eu
CONFIG_FILES=_config.yml$([ "$JEKYLL_ENV" = "production" ] && echo ",_config_production.yml" || true)
CONFIG_FILES="_config.yml"
if [ "${JEKYLL_ENV}" = "production" ]; then
CONFIG_FILES="${CONFIG_FILES},_config_production.yml"
elif [ "${DOCS_URL}" = "https://docs-stage.docker.com" ]; then
CONFIG_FILES="${CONFIG_FILES},_config_stage.yml"
fi
set -x
bundle exec jekyll build --profile -d ${TARGET} --config ${CONFIG_FILES}
EOT
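For illustration only, a build invocation along these lines would exercise the staging branch of the logic above (the `docs:stage` tag is hypothetical; `JEKYLL_ENV` and `DOCS_URL` are the build args declared earlier in the Dockerfile):

```console
$ docker buildx build \
    --build-arg JEKYLL_ENV=development \
    --build-arg DOCS_URL="https://docs-stage.docker.com" \
    -t docs:stage .
```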


@ -15,8 +15,8 @@ end
# more info: https://github.com/docker/docs/issues/14788
gem 'rouge', '3.27.0'
gem 'archive-zip', '0.12.0'
gem 'front_matter_parser', '1.0.1'
gem 'git', '1.12.0'
gem 'html-proofer', '3.19.4'
gem 'mdl', '0.11.0'
gem 'octopress-hooks', '2.6.2'


@ -1,11 +1,9 @@
GEM
remote: https://rubygems.org/
specs:
addressable (2.8.0)
public_suffix (>= 2.0.2, < 5.0)
archive-zip (0.12.0)
io-like (~> 0.3.0)
chef-utils (17.10.0)
addressable (2.8.1)
public_suffix (>= 2.0.2, < 6.0)
chef-utils (18.0.161)
concurrent-ruby
colorator (1.1.0)
concurrent-ruby (1.1.10)
@ -18,6 +16,9 @@ GEM
ffi (1.15.5)
forwardable-extended (2.6.0)
front_matter_parser (1.0.1)
git (1.12.0)
addressable (~> 2.8)
rchardet (~> 1.8)
html-proofer (3.19.4)
addressable (~> 2.3)
mercenary (~> 0.3)
@ -29,7 +30,6 @@ GEM
http_parser.rb (0.8.0)
i18n (1.12.0)
concurrent-ruby (~> 1.0)
io-like (0.3.1)
jekyll (4.2.2)
addressable (~> 2.4)
colorator (~> 1.0)
@ -76,25 +76,26 @@ GEM
tomlrb
mixlib-shellout (3.2.7)
chef-utils
nokogiri (1.13.8)
nokogiri (1.13.9)
mini_portile2 (~> 2.8.0)
racc (~> 1.4)
nokogiri (1.13.8-aarch64-linux)
nokogiri (1.13.9-aarch64-linux)
racc (~> 1.4)
nokogiri (1.13.8-x86_64-linux)
nokogiri (1.13.9-x86_64-linux)
racc (~> 1.4)
octopress-hooks (2.6.2)
jekyll (>= 2.0)
parallel (1.22.1)
pathutil (0.16.2)
forwardable-extended (~> 2.6)
public_suffix (4.0.7)
public_suffix (5.0.0)
racc (1.6.0)
rainbow (3.1.1)
rake (13.0.6)
rb-fsevent (0.11.1)
rb-fsevent (0.11.2)
rb-inotify (0.10.1)
ffi (~> 1.0)
rchardet (1.8.0)
rexml (3.2.5)
rouge (3.27.0)
safe_yaml (1.0.5)
@ -114,8 +115,8 @@ PLATFORMS
x86_64-linux
DEPENDENCIES
archive-zip (= 0.12.0)
front_matter_parser (= 1.0.1)
git (= 1.12.0)
html-proofer (= 3.19.4)
jekyll (= 4.2.2)
jekyll-redirect-from


@ -41,7 +41,7 @@ exclude:
latest_engine_api_version: "1.41"
docker_ce_version: "20.10"
compose_v1_version: "1.29.2"
compose_version: "v2.11.2"
compose_version: "v2.12.2"
compose_file_v3: "3.9"
compose_file_v2: "2.4"
machine_version: "0.16.0"
@ -102,6 +102,24 @@ defaults:
toc_min: 2
toc_max: 4
# Exclude from sitemap
- scope:
path: "assets/**"
values:
sitemap: false
- scope:
path: "**/nav.html"
values:
sitemap: false
- scope:
path: "google*.html"
values:
sitemap: false
- scope:
path: "**/*.pdf"
values:
sitemap: false
# Set the correct edit-URL for some local and remote resources. We usually don't create a direct
# edit link for these, and instead point to the directory that contains the file.
- scope:

_config_stage.yml Normal file

@ -0,0 +1,5 @@
##
# This file overrides options set in _config.yml for staging environment (https://docs-stage.docker.com/)
##
hotjar_id: 3218181


@ -33,6 +33,17 @@ options:
experimentalcli: false
kubernetes: false
swarm: false
- option: quiet
shorthand: q
value_type: bool
default_value: "false"
description: Push without printing progress information
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
deprecated: false
experimental: false
experimentalcli: false
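For illustration, the new flag suppresses progress output during a push (the image name below is a placeholder):

```console
$ docker push --quiet registry.example.com/myorg/myimage:latest
```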


@ -561,13 +561,13 @@ examples: |-
uploaded context. The builder reference contains detailed information on
[creating a .dockerignore file](../builder.md#dockerignore-file).
When using the [BuildKit backend](../builder.md#buildkit), `docker build` searches
for a `.dockerignore` file relative to the Dockerfile name. For example, running
`docker build -f myapp.Dockerfile .` will first look for an ignore file named
`myapp.Dockerfile.dockerignore`. If such a file is not found, the `.dockerignore`
file is used if present. Using a Dockerfile based `.dockerignore` is useful if a
project contains multiple Dockerfiles that expect to ignore different sets of
files.
When using the [BuildKit backend](https://docs.docker.com/build/buildkit/),
`docker build` searches for a `.dockerignore` file relative to the Dockerfile
name. For example, running `docker build -f myapp.Dockerfile .` will first look
for an ignore file named `myapp.Dockerfile.dockerignore`. If such a file is not
found, the `.dockerignore` file is used if present. Using a Dockerfile based
`.dockerignore` is useful if a project contains multiple Dockerfiles that
expect to ignore different sets of files.
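A minimal sketch of that lookup order, assuming a hypothetical `myapp.Dockerfile` alongside a matching ignore file and BuildKit enabled:

```console
$ ls
Dockerfile  myapp.Dockerfile  myapp.Dockerfile.dockerignore  src
$ DOCKER_BUILDKIT=1 docker build -f myapp.Dockerfile .
# BuildKit reads myapp.Dockerfile.dockerignore here; it only falls back to
# .dockerignore when no Dockerfile-specific ignore file exists.
```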
### Tag an image (-t)
@ -823,8 +823,9 @@ examples: |-
> **Note**
>
> This feature requires the BuildKit backend. You can either
> [enable BuildKit](../builder.md#buildkit) or use the [buildx](https://github.com/docker/buildx)
> plugin which provides more output type options.
> [enable BuildKit](https://docs.docker.com/build/buildkit/#getting-started) or
> use the [buildx](https://github.com/docker/buildx) plugin which provides more
> output type options.
### Specifying external cache sources
@ -865,9 +866,9 @@ examples: |-
> **Note**
>
> This feature requires the BuildKit backend. You can either
> [enable BuildKit](../builder.md#buildkit) or use the [buildx](https://github.com/docker/buildx)
> plugin. The previous builder has limited support for reusing cache from
> pre-pulled images.
> [enable BuildKit](https://docs.docker.com/build/buildkit/#getting-started) or
> use the [buildx](https://github.com/docker/buildx) plugin. The previous
> builder has limited support for reusing cache from pre-pulled images.
### Squash an image's layers (--squash) (experimental)


@ -1352,14 +1352,15 @@ examples: |-
### Add host device to container (--device)
```console
$ docker run --device=/dev/sdc:/dev/xvdc \
--device=/dev/sdd --device=/dev/zero:/dev/nulo \
-i -t \
ubuntu ls -l /dev/{xvdc,sdd,nulo}
$ docker run -it --rm \
--device=/dev/sdc:/dev/xvdc \
--device=/dev/sdd \
--device=/dev/zero:/dev/foobar \
ubuntu ls -l /dev/{xvdc,sdd,foobar}
brw-rw---- 1 root disk 8, 2 Feb 9 16:05 /dev/xvdc
brw-rw---- 1 root disk 8, 3 Feb 9 16:05 /dev/sdd
crw-rw-rw- 1 root root 1, 5 Feb 9 16:05 /dev/nulo
crw-rw-rw- 1 root root 1, 5 Feb 9 16:05 /dev/foobar
```
It is often necessary to directly expose devices to a container. The `--device`


@ -29,9 +29,9 @@ guides:
path: /get-docker/
- sectiontitle: Get started
section:
- title: "Part 1: Getting started"
- title: "Part 1: Orientation and setup"
path: /get-started/
- title: "Part 2: Sample application"
- title: "Part 2: Containerize an application"
path: /get-started/02_our_app/
- title: "Part 3: Update the application"
path: /get-started/03_updating_app/
@ -115,6 +115,20 @@ guides:
path: /language/golang/configure-ci-cd/
- title: "Deploy your app"
path: /language/golang/deploy/
- sectiontitle: C# (.NET)
section:
- title: "Overview"
path: /language/dotnet/
- title: "Build images"
path: /language/dotnet/build-images/
- title: "Run containers"
path: /language/dotnet/run-containers/
- title: "Develop your app"
path: /language/dotnet/develop/
- title: "Configure CI/CD"
path: /language/dotnet/configure-ci-cd/
- title: "Deploy your app"
path: /language/dotnet/deploy/
- sectiontitle: Develop with Docker
section:
- path: /develop/
@ -125,8 +139,6 @@ guides:
section:
- path: /develop/develop-images/dockerfile_best-practices/
title: Dockerfile best practices
- path: /develop/develop-images/build_enhancements/
title: Build images with BuildKit
- path: /develop/develop-images/image_management/
title: Manage images
- path: /develop/develop-images/baseimages/
@ -1269,18 +1281,38 @@ manuals:
title: Run Docker Desktop for Windows in a VM or VDI environment
- path: /desktop/uninstall/
title: Uninstall Docker Desktop
- sectiontitle: Hardened Desktop
section:
- path: /desktop/hardened-desktop/
title: Overview
- sectiontitle: Settings Management
section:
- path: /desktop/hardened-desktop/settings-management/
title: What is Settings Management?
- path: /desktop/hardened-desktop/settings-management/configure/
title: Configure Settings Management
- sectiontitle: Enhanced Container Isolation
section:
- path: /desktop/hardened-desktop/enhanced-container-isolation/
title: What is Enhanced Container Isolation?
- path: /desktop/hardened-desktop/enhanced-container-isolation/how-eci-works/
title: How does it work?
- path: /desktop/hardened-desktop/enhanced-container-isolation/features-benefits/
title: Key features and benefits
- path: /desktop/hardened-desktop/enhanced-container-isolation/faq/
title: FAQs and known issues
- path: /desktop/hardened-desktop/registry-access-management/
title: Registry Access Management
- sectiontitle: Dev Environments (Beta)
section:
- path: /desktop/dev-environments/
title: Overview
- path: /desktop/dev-environments/create-dev-env/
title: Create a Dev Environment
title: Create a simple dev environment
- path: /desktop/dev-environments/create-compose-dev-env/
title: Create a Compose Dev Environment
title: Create an advanced dev environment
- path: /desktop/dev-environments/share/
title: Share your Dev Environment
- path: /desktop/dev-environments/specify/
title: Specify a Dockerfile or base image
title: Distribute your dev environment
- sectiontitle: Extensions (Beta)
section:
- path: /desktop/extensions/
@ -1359,6 +1391,8 @@ manuals:
path: /desktop/extensions-sdk/dev/api/reference/README/
- path: /desktop/containerd/
title: Containerd Image Store (Beta)
- path: /desktop/wasm/
title: Wasm (Beta)
- sectiontitle: FAQs
section:
- path: /desktop/faqs/general/
@ -1395,10 +1429,10 @@ manuals:
section:
- path: /engine/
title: Overview
- path: /engine/install/
title: Install
- sectiontitle: Installation per distro
- sectiontitle: Install
section:
- path: /engine/install/
title: Installation Overview
- path: /engine/install/centos/
title: Install on CentOS
- path: /engine/install/debian/
@ -1414,7 +1448,9 @@ manuals:
- path: /engine/install/binaries/
title: Install binaries
- path: /engine/install/linux-postinstall/
title: Optional post-installation steps
title: Post-installation steps
- path: /engine/install/troubleshoot/
title: Troubleshoot installation
- path: /engine/deprecated/
title: Deprecated features
- path: /engine/context/working-with-contexts/
@ -1521,6 +1557,12 @@ manuals:
title: Build contexts and linking targets
- path: /build/customize/bake/compose-file/
title: Building from Compose file
- sectiontitle: BuildKit
section:
- path: /build/buildkit/
title: Overview
- path: /build/buildkit/dockerfile-frontend/
title: Custom Dockerfile syntax
- sectiontitle: Buildx
section:
- path: /build/buildx/install/
@ -1533,18 +1575,26 @@ manuals:
section:
- path: /compose/
title: Overview
- path: /compose/features-uses/
title: Key features and use cases
- sectiontitle: Install Docker Compose
section:
- path: /compose/install/
title: Install overview
title: Overview
- path: /compose/install/linux/
title: Install on Linux
title: Install the Compose plugin
- path: /compose/install/other/
title: Other install scenarios
title: Install the Compose standalone
- path: /compose/install/uninstall/
title: Uninstall Docker Compose
title: Uninstall Compose
- path: /compose/gettingstarted/
title: Getting started
title: Try Docker Compose
- sectiontitle: Compose V2
section:
- path: /compose/compose-v2/
title: Overview
- path: /compose/cli-command-compatibility/
title: Compose v2 compatibility
- sectiontitle: Environment variables
section:
- path: /compose/environment-variables/
@ -1567,8 +1617,6 @@ manuals:
title: Control startup order
- path: /compose/samples-for-compose/
title: Sample apps with Compose
- path: /compose/cli-command-compatibility/
title: Compose v2 compatibility
- path: /compose/release-notes/
title: Release notes
@ -1686,8 +1734,7 @@ manuals:
title: SCIM
- path: /docker-hub/image-access-management/
title: Image Access Management
- path: /docker-hub/registry-access-management/
title: Registry Access Management
- sectiontitle: Security
section:
@ -1811,6 +1858,8 @@ contribute:
title: Voice and tone
- path: /contribute/file-conventions/
title: Source file conventions
- path: /contribute/ui/
title: UI elements in content
- sectiontitle: Useful components
section:
- path: /contribute/components/accordions/


@ -7,7 +7,7 @@
"font_family": "Open Sans, sans serif",
"font_color": "b9c2cc",
"font_align": "center",
"permalink": "{{ site.repo }}/blob/master/{{ page.path }}"
"permalink": "{{ site.repo }}/blob/main/{{ page.path }}"
};
(function (d, c, j) {
if (!document.getElementById(j)) {

_includes/beta.md Normal file

@ -0,0 +1,7 @@
Beta features provide early access to future product functionality.
These features are intended for testing and feedback only as they may change
between releases without warning or can be removed entirely from a future
release. Beta features must not be used in production environments.
Docker does not offer support for beta features.
For a list of current experimental features in the Docker CLI, see [Docker CLI Experimental features](https://github.com/docker/cli/blob/master/experimental/README.md).


@ -13,7 +13,7 @@
<a class="btn btn-primary" href="https://desktop.docker.com/win/main/amd64/Docker%20Desktop%20Installer.exe?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-win-amd64" role="button">Download file</a>
<br>
<br>
<b>Checksum:</b> SHA-256 996a4c5fff5b80b707ecfc0121d7ebe70d96c0bd568f058fd96f32cdec0c10cf
<b>Checksum:</b> SHA-256 2452d4c9e315d36ad4cade724c962dd18515b8e2f0c2e7f66290648e0319d72b
</div>
</div>
</div>
@ -31,7 +31,7 @@
<a class="btn btn-primary" href="https://desktop.docker.com/mac/main/amd64/Docker.dmg?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-mac-amd64" role="button">Download file</a>
<br>
<br>
<b>Checksum:</b> SHA-256 41085009458ba1741c6a86c414190780ff3b288879aa27821fc4a985d229653c
<b>Checksum:</b> SHA-256 5a9e7b810bc9937a0945f9cbbb7ec00c2c5c386b5897c59c1c93187eaf0f2081
</div>
</div>
</div>
@ -49,7 +49,7 @@
<a class="btn btn-primary" href="https://desktop.docker.com/mac/main/arm64/Docker.dmg?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-mac-arm64" role="button">Download file</a>
<br>
<br>
<b>Checksum:</b> SHA-256 7eb63b4819cd1f87c61d5e8f54613692e07fb203d81bcf8d66f5de55489d3b81
<b>Checksum:</b> SHA-256 f33037ef7b02946d5714012398848654bd7a759ee729b0346acc46a8e73a76ed
</div>
</div>
</div>
@ -64,10 +64,10 @@
</div>
<div id="collapseFour" class="panel-collapse collapse" role="tabpanel" aria-labelledby="headingFour">
<div class="panel-body">
<a class="btn btn-primary" href="https://desktop.docker.com/linux/main/amd64/docker-desktop-4.12.0-amd64.deb?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64" role="button">Download file</a>
<a class="btn btn-primary" href="https://desktop.docker.com/linux/main/amd64/docker-desktop-4.13.0-amd64.deb?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64" role="button">Download file</a>
<br>
<br>
<b>Checksum:</b> SHA-256 4407023db032219d6ac6031f81da6389ab192d3d06084ee6dad1ba4f4c64a4fe
<b>Checksum:</b> SHA-256 aa1d4ee1c2c72bf7be05c88d33d8f1f6344ab4e6d9de52f50367d9e799641102
</div>
</div>
</div>
@ -82,10 +82,10 @@
</div>
<div id="collapseFive" class="panel-collapse collapse" role="tabpanel" aria-labelledby="headingFive">
<div class="panel-body">
<a class="btn btn-primary" href="https://desktop.docker.com/linux/main/amd64/docker-desktop-4.12.0-x86_64.rpm?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64" role="button">Download file</a>
<a class="btn btn-primary" href="https://desktop.docker.com/linux/main/amd64/docker-desktop-4.13.0-x86_64.rpm?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64" role="button">Download file</a>
<br>
<br>
<b>Checksum:</b> SHA-256 05e91f2a9763089acdfe710140893cb096bec955bcd99279bbe3aea035d09bc5
<b>Checksum:</b> SHA-256 6f70246d11d06f123b8011eeafb7b0a161d60764719b44b817a49dee7da4a06e
</div>
</div>
</div>
@ -100,10 +100,10 @@
</div>
<div id="collapseSix" class="panel-collapse collapse" role="tabpanel" aria-labelledby="headingSix">
<div class="panel-body">
<a class="btn btn-primary" href="https://desktop.docker.com/linux/main/amd64/docker-desktop-4.12.0-x86_64.pkg.tar.zst?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64" role="button">Download file</a>
<a class="btn btn-primary" href="https://desktop.docker.com/linux/main/amd64/docker-desktop-4.13.0-x86_64.pkg.tar.zst?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64" role="button">Download file</a>
<br>
<br>
<b>Checksum:</b> SHA-256 7c6b43c8ab140c755e6c8ce4ec494b3f5c4f3b0c1ab3cee8bfd0b6864f795d8a
<b>Checksum:</b> SHA-256 8aa42c18d643b5dc333cbea096f9585d89b912344a26fc9ea902f30d2c5140d8
</div>
</div>
</div>

_includes/feedback.html Normal file

@ -0,0 +1,13 @@
{%- if site.hotjar_id != '' and page.skip_feedback != true -%}
<!-- Hotjar Tracking Code for https://docs.docker.com -->
<script>
(function(h,o,t,j,a,r){
h.hj=h.hj||function(){(h.hj.q=h.hj.q||[]).push(arguments)};
h._hjSettings={hjid:{{ site.hotjar_id }},hjsv:6};
a=o.getElementsByTagName('head')[0];
r=o.createElement('script');r.async=1;
r.src=t+h._hjSettings.hjid+j+h._hjSettings.hjsv;
a.appendChild(r);
})(window,document,'https://static.hotjar.com/c/hotjar-','.js?sv=');
</script>
{%- endif -%}


@ -1,39 +0,0 @@
A Dockerfile is a text document that contains the instructions to assemble a
Docker image. When we tell Docker to build our image by executing the `docker build`
command, Docker reads these instructions, executes them, and creates a Docker
image as a result.
Lets walk through the process of creating a Dockerfile for our application. In
the root of your project, create a file named `Dockerfile` and open this file in
your text editor.
> **What to name your Dockerfile?**
>
> The default filename to use for a Dockerfile is `Dockerfile` (without a file-
> extension). Using the default name allows you to run the `docker build` command
> without having to specify additional command flags.
>
> Some projects may need distinct Dockerfiles for specific purposes. A common
> convention is to name these `Dockerfile.<something>` or `<something>.Dockerfile`.
> Such Dockerfiles can then be used through the `--file` (or `-f` shorthand)
> option on the `docker build` command. Refer to the
> ["Specify a Dockerfile" section](/engine/reference/commandline/build/#specify-a-dockerfile--f)
> in the `docker build` reference to learn about the `--file` option.
>
> We recommend using the default (`Dockerfile`) for your project's primary
> Dockerfile, which is what we'll use for most examples in this guide.
The first line to add to a Dockerfile is a [`# syntax` parser directive](/engine/reference/builder/#syntax).
While _optional_, this directive instructs the Docker builder what syntax to use
when parsing the Dockerfile, and allows older Docker versions with BuildKit enabled
to upgrade the parser before starting the build. [Parser directives](/engine/reference/builder/#parser-directives)
must appear before any other comment, whitespace, or Dockerfile instruction in
your Dockerfile, and should be the first line in Dockerfiles.
```dockerfile
# syntax=docker/dockerfile:1
```
We recommend using `docker/dockerfile:1`, which always points to the latest release
of the version 1 syntax. BuildKit automatically checks for updates of the syntax
before building, making sure you are using the most current version.


@ -1,30 +0,0 @@
<!-- This text will be included in Build images topic in the Get started guides -->
### Enable BuildKit
Before we start building images, ensure you have enabled BuildKit on your machine.
BuildKit allows you to build Docker images efficiently. For more information,
see [Building images with BuildKit](/develop/develop-images/build_enhancements/).
BuildKit is enabled by default for all users on Docker Desktop. If you have
installed Docker Desktop, you don't have to manually enable BuildKit. If you are
running Docker on Linux, you can enable BuildKit either by using an environment
variable or by making BuildKit the default setting.
To set the BuildKit environment variable when running the `docker build` command,
run:
```console
$ DOCKER_BUILDKIT=1 docker build .
```
To enable docker BuildKit by default, set daemon configuration in `/etc/docker/daemon.json` feature to `true` and restart the daemon.
If the `daemon.json` file doesn't exist, create new file called `daemon.json` and then add the following to the file.
```json
{
"features":{"buildkit" : true}
}
```
Restart the Docker daemon.


@ -73,6 +73,7 @@
{%- if site.local_search -%}
<script defer src="/assets/js/search.js"></script>
{%- endif -%}
{%- include feedback.html -%}
{%- comment -%}
preload fonts: https://www.freecodecamp.org/news/web-fonts-in-2018-f191a48367e8/


@ -1,16 +1,17 @@
<!-- This file is included in Docker Engine - Community or EE installation docs for Linux. -->
### Install using the convenience script
Docker provides a convenience script at [get.docker.com](https://get.docker.com/)
to install Docker into development environments quickly and non-interactively.
The convenience script is not recommended for production environments, but can be
used as an example to create a provisioning script that is tailored to your needs.
Also refer to the [install using the repository](#install-using-the-repository)
steps to learn about installation steps to install using the package repository.
The source code for the script is open source, and can be found in the
[`docker-install` repository on GitHub](https://github.com/docker/docker-install){:target="_blank" rel="noopener" class="_"}.
Docker provides a convenience script at
[https://get.docker.com/](https://get.docker.com/) to install Docker into
development environments non-interactively. The convenience script isn't
recommended for production environments, but it's useful for creating a
provisioning script tailored to your needs. Also refer to the
[install using the repository](#install-using-the-repository) steps to learn
how to install using the package repository. The source
code for the script is open source, and can be found in the
[`docker-install` repository on GitHub](https://github.com/docker/docker-install){:target="_blank"
rel="noopener" class="_"}.
<!-- prettier-ignore -->
Always examine scripts downloaded from the internet before running them locally.
Before installing, make yourself familiar with potential risks and limitations
of the convenience script:
@ -18,31 +19,32 @@ of the convenience script:
- The script requires `root` or `sudo` privileges to run.
- The script attempts to detect your Linux distribution and version and
configure your package management system for you, and does not allow you to
customize most installation parameters.
configure your package management system for you.
- The script doesn't allow you to customize most installation parameters.
- The script installs dependencies and recommendations without asking for
confirmation. This may install a large number of packages, depending on the
current configuration of your host machine.
- By default, the script installs the latest stable release of Docker, containerd,
and runc. When using this script to provision a machine, this may result in
unexpected major version upgrades of Docker. Always test (major) upgrades in
- By default, the script installs the latest stable release of Docker,
containerd, and runc. When using this script to provision a machine, this may
result in unexpected major version upgrades of Docker. Always test upgrades in
a test environment before deploying to your production systems.
- The script is not designed to upgrade an existing Docker installation. When
- The script isn't designed to upgrade an existing Docker installation. When
using the script to update an existing installation, dependencies may not be
updated to the expected version, causing outdated versions to be used.
updated to the expected version, resulting in outdated versions.
> Tip: preview script steps before running
>
> You can run the script with the `DRY_RUN=1` option to learn what steps the
> script will execute during installation:
> script will run when invoked:
>
> ```console
> $ curl -fsSL https://get.docker.com -o get-docker.sh
> $ DRY_RUN=1 sh ./get-docker.sh
> $ DRY_RUN=1 sudo sh ./get-docker.sh
> ```
This example downloads the script from [get.docker.com](https://get.docker.com/)
and runs it to install the latest stable release of Docker on Linux:
This example downloads the script from
[https://get.docker.com/](https://get.docker.com/) and runs it to install the
latest stable release of Docker on Linux:
```console
$ curl -fsSL https://get.docker.com -o get-docker.sh
@ -51,41 +53,42 @@ Executing docker install script, commit: 7cae5f8b0decc17d6571f9f52eb840fbc13b273
<...>
```
Docker is installed. The `docker` service starts automatically on Debian based
distributions. On `RPM` based distributions, such as CentOS, Fedora, RHEL or SLES,
you need to start it manually using the appropriate `systemctl` or `service` command.
As the message indicates, non-root users cannot run Docker commands by default.
You have now successfully installed and started Docker Engine. The `docker`
service starts automatically on Debian based distributions. On `RPM` based
distributions, such as CentOS, Fedora, RHEL or SLES, you need to start it
manually using the appropriate `systemctl` or `service` command. As the message
indicates, non-root users can't run Docker commands by default.
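For example, on an `RPM` based distribution that uses systemd, you could start the service (and enable it at boot) with:

```console
$ sudo systemctl enable --now docker
```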
> **Use Docker as a non-privileged user, or install in rootless mode?**
>
> The installation script requires `root` or `sudo` privileges to install and
> use Docker. If you want to grant non-root users access to Docker, refer to the
> [post-installation steps for Linux](/engine/install/linux-postinstall/#manage-docker-as-a-non-root-user).
> Docker can also be installed without `root` privileges, or configured to run
> in rootless mode. For instructions on running Docker in rootless mode, refer to
> You can also install Docker without `root` privileges, or configure it to run in
> rootless mode. For instructions on running Docker in rootless mode, refer to
> [run the Docker daemon as a non-root user (rootless mode)](/engine/security/rootless/).
#### Install pre-releases
Docker also provides a convenience script at [test.docker.com](https://test.docker.com/)
to install pre-releases of Docker on Linux. This script is equivalent to the
script at `get.docker.com`, but configures your package manager to enable the
"test" channel from our package repository, which includes both stable and
pre-releases (beta versions, release-candidates) of Docker. Use this script to
get early access to new releases, and to evaluate them in a testing environment
before they are released as stable.
Docker also provides a convenience script at
[https://test.docker.com/](https://test.docker.com/) to install pre-releases of
Docker on Linux. This script is equivalent to the script at `get.docker.com`, but
configures your package manager to use the test channel of the Docker package
repository. The test channel includes both stable and pre-releases (beta
versions, release-candidates) of Docker. Use this script to get early access to
new releases, and to evaluate them in a testing environment before they're
released as stable.
To install the latest version of Docker on Linux from the "test" channel, run:
To install the latest version of Docker on Linux from the test channel, run:
```console
$ curl -fsSL https://test.docker.com -o test-docker.sh
$ sudo sh test-docker.sh
<...>
```
#### Upgrade Docker after using the convenience script
If you installed Docker using the convenience script, you should upgrade Docker
using your package manager directly. There is no advantage to re-running the
convenience script, and it can cause issues if it attempts to re-add
repositories which have already been added to the host machine.
using your package manager directly. There's no advantage to re-running the
convenience script. Re-running it can cause issues if it attempts to re-install
repositories which already exist on the host machine.


@ -0,0 +1,19 @@
<section class="cta-banner desktop">
<div class="container">
<div class="col-xs-12 col-md-6 col-lg-offset-1 col-lg-7">
<h2>
Hardened Desktop for Docker Business customers
</h2>
<br>
<p>Two new security features have been introduced for Docker Business users, Settings Management and Enhanced Container Isolation, as part of Docker Desktop's new Hardened Desktop security model.</p>
</div>
<div class="col-xs-12 col-md-6 col-lg-4 text-center">
<img src="/assets/images/hardened-desktop.PNG" alt="extensions" />
</div>
<div class="col-xs-12 col-md-6 col-lg-4 text-center">
<a class="btn" href="/desktop/hardened-desktop/" target="_blank">
Learn more
</a>
</div>
</div>
</section>


@ -3,7 +3,7 @@
{%- if page.edit_url -%}
{%- assign edit_url = page.edit_url -%}
{%- else -%}
{% capture edit_url %}{{ site.repo }}/edit/master/{{ page.path }}{% endcapture %}
{% capture edit_url %}{{ site.repo }}/edit/main/{{ page.path }}{% endcapture %}
{%- endif -%}
{%- if page.issue_url -%}
{%- assign issue_url = page.issue_url -%}


@ -143,7 +143,7 @@
</div>
</section>
{% include landing-page/extensions-banner.html %}
{% include landing-page/hardened-desktop.html %}
<section class="container help-by-product">
<div class="row">


@ -1,9 +1,8 @@
require 'archive/zip'
require 'front_matter_parser'
require 'git'
require 'jekyll'
require 'json'
require 'octopress-hooks'
require 'open-uri'
require 'rake'
require_relative 'util.rb'
@ -12,14 +11,6 @@ module Jekyll
class FetchRemote < Octopress::Hooks::Site
priority :highest
def self.download(url, dest)
uri = URI.parse(url)
result = File.join(dest, File.basename(uri.path))
puts " Downloading #{url}"
IO.copy_stream(URI.open(url), result)
return result
end
def self.copy(src, dest)
if (tmp = Array.try_convert(src))
tmp.each do |s|
@ -50,16 +41,27 @@ module Jekyll
beginning_time = Time.now
puts "Starting plugin fetch_remote.rb..."
site.config['fetch-remote'].each do |entry|
puts " Repo #{entry['repo']} (#{entry['ref']})"
Dir.mktmpdir do |tmpdir|
tmpfile = FetchRemote.download("#{entry['repo']}/archive/#{entry['ref']}.zip", tmpdir)
Dir.mktmpdir do |ztmpdir|
puts " Extracting #{tmpfile}"
Archive::Zip.extract(
tmpfile,
ztmpdir,
:create => true
)
puts " Repo #{entry['repo']}"
gituri = Git::URL.parse(entry['repo'])
clonedir = "#{Dir.tmpdir}/docker-docs-clone#{gituri.path}"
if Dir.exist?(clonedir)
puts " Opening #{clonedir}"
begin
git = Git.open(clonedir)
puts " Fetching #{entry['ref']}"
git.fetch
git.checkout(entry['ref'])
rescue => e
FileUtils.rm_rf(clonedir)
puts " Cloning repository into #{clonedir}"
git = Git.clone("#{entry['repo']}.git", Pathname.new(clonedir), branch: entry['ref'], depth: 1)
end
else
puts " Cloning repository into #{clonedir}"
git = Git.clone("#{entry['repo']}.git", Pathname.new(clonedir), branch: entry['ref'], depth: 1)
end
entry['paths'].each do |path|
if File.extname(path['dest']) != ""
if path['src'].size > 1
@ -76,9 +78,9 @@ module Jekyll
files = FileList[]
path['src'].each do |src|
if "#{src}".start_with?("!")
files.exclude(File.join(ztmpdir, "*/"+"#{src}".delete_prefix("!")))
files.exclude(File.join(clonedir, "/"+"#{src}".delete_prefix("!")))
else
files.include(File.join(ztmpdir, "*/#{src}"))
files.include(File.join(clonedir, "/#{src}"))
end
end
@ -87,7 +89,7 @@ module Jekyll
s = File.realpath(s)
# traverse source directory
FileUtils::Entry_.new(s, nil, false).wrap_traverse(proc do |ent|
file_clean = ent.path.delete_prefix(ztmpdir).split("/").drop(2).join("/")
file_clean = ent.path.delete_prefix(clonedir).split("/").drop(1).join("/")
destent = FileUtils::Entry_.new(d, ent.rel, false)
puts " #{file_clean} => #{destent.path}"
@ -107,13 +109,16 @@ module Jekyll
# set edit and issue url and remote info for markdown files in site config defaults
edit_url = "#{entry['repo']}/edit/#{entry['default_branch']}/#{file_clean}"
issue_url = "#{entry['repo']}/issues/new?body=File: [#{file_clean}](#{get_docs_url}/#{destent.path.sub(/#{File.extname(destent.path)}$/, '')}/)"
last_modified_at = git.log.path(file_clean).first.date.strftime(LastModifiedAt::DATE_FORMAT)
puts " edit_url: #{edit_url}"
puts " issue_url: #{issue_url}"
puts " last_modified_at: #{last_modified_at}"
site.config['defaults'] << {
"scope" => { "path" => destent.path },
"values" => {
"edit_url" => edit_url,
"issue_url" => issue_url
"issue_url" => issue_url,
"last_modified_at" => last_modified_at,
},
}
end, proc do |_| end)
@ -121,8 +126,6 @@ module Jekyll
end
end
end
end
end
end_time = Time.now
puts "done in #{(end_time - beginning_time)} seconds"


@ -0,0 +1,35 @@
require 'git'
require 'jekyll'
require 'octopress-hooks'
module Jekyll
class LastModifiedAt < Octopress::Hooks::Site
DATE_FORMAT = '%Y-%m-%d %H:%M:%S %z'
def pre_render(site)
if get_docs_url == "http://localhost:4000"
# Do not generate last_modified_at for local development
return
end
beginning_time = Time.now
Jekyll.logger.info "Starting plugin last_modified_at.rb..."
git = Git.open(site.source)
site.pages.each do |page|
next if page.relative_path == "redirect.html"
next unless File.extname(page.relative_path) == ".md" || File.extname(page.relative_path) == ".html"
unless page.data.key?('last_modified_at')
begin
page.data['last_modified_at'] = git.log.path(page.relative_path).first.date.strftime(DATE_FORMAT)
rescue => e
# Ignored
end
end
puts" #{page.relative_path}\n last_modified_at: #{page.data['last_modified_at']}"
end
end_time = Time.now
Jekyll.logger.info "done in #{(end_time - beginning_time)} seconds"
end
end
end
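The `git.log.path(...).first.date` lookup roughly corresponds to the following git command, rendered with the plugin's `DATE_FORMAT` (the page path is a placeholder):

```console
$ git log -1 --format=%ad --date=format:'%Y-%m-%d %H:%M:%S %z' -- path/to/page.md
```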


assets/images/lock.svg Normal file

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" height="24px" viewBox="0 0 24 24" width="24px" fill="#677285"><g fill="none"><path d="M0 0h24v24H0V0z"/><path d="M0 0h24v24H0V0z" opacity=".87"/></g><path d="M18 8h-1V6c0-2.76-2.24-5-5-5S7 3.24 7 6v2H6c-1.1 0-2 .9-2 2v10c0 1.1.9 2 2 2h12c1.1 0 2-.9 2-2V10c0-1.1-.9-2-2-2zm-6 9c-1.1 0-2-.9-2-2s.9-2 2-2 2 .9 2 2-.9 2-2 2zM9 8V6c0-1.66 1.34-3 3-3s3 1.34 3 3v2H9z"/></svg>



@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" height="24px" viewBox="0 0 24 24" width="24px" fill="#677285"><path d="M0 0h24v24H0V0z" fill="none"/><path d="M19 3H5c-1.1 0-2 .9-2 2v7c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2zm0 6h-3.14c-.47 0-.84.33-.97.78C14.53 11.04 13.35 12 12 12s-2.53-.96-2.89-2.22c-.13-.45-.5-.78-.97-.78H5V6c0-.55.45-1 1-1h12c.55 0 1 .45 1 1v3zm-3.13 7H20c.55 0 1 .45 1 1v2c0 1.1-.9 2-2 2H5c-1.1 0-2-.9-2-2v-2c0-.55.45-1 1-1h4.13c.47 0 .85.34.98.8.35 1.27 1.51 2.2 2.89 2.2s2.54-.93 2.89-2.2c.13-.46.51-.8.98-.8z"/></svg>


assets/images/secure.svg Normal file

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" height="24px" viewBox="0 0 24 24" width="24px" fill="#677285"><path d="M11.19 1.36l-7 3.11C3.47 4.79 3 5.51 3 6.3V11c0 5.55 3.84 10.74 9 12 5.16-1.26 9-6.45 9-12V6.3c0-.79-.47-1.51-1.19-1.83l-7-3.11c-.51-.23-1.11-.23-1.62 0zM12 11.99h7c-.53 4.12-3.28 7.79-7 8.94V12H5V6.3l7-3.11v8.8z"/></svg>



@ -15,7 +15,9 @@ function copyCodeBlock(event) {
const copyButton = event.currentTarget
const codeBlock = copyButton.parentElement.querySelector("pre.highlight code")
const code = codeBlock.innerText.trim()
window.navigator.clipboard.writeText(code)
// remove "$ " prompt at start of lines in code
const strippedCode = code.replace(/^[\s]?\$\s+/gm, "")
window.navigator.clipboard.writeText(strippedCode)
// change the button text temporarily
copyButton.textContent = "Copied!"
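The regular expression removes a leading `$ ` prompt from each copied line. As an illustration of the same transformation in shell terms (using `sed`, not the JavaScript above):

```console
$ printf '$ docker version\n$ docker info\n' | sed -E 's/^[[:space:]]?\$[[:space:]]+//'
docker version
docker info
```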


@ -205,3 +205,79 @@ RUN g++ -o /binary source.cpp
## Version compatibility
Multi-stage build syntax was introduced in Docker Engine 17.05.
## Differences between legacy builder and BuildKit
The legacy Docker Engine builder processes all stages of a Dockerfile leading
up to the selected `--target`. It will build a stage even if the selected
target doesn't depend on that stage.
[BuildKit](../buildkit/index.md) only builds the stages that the target stage
depends on.
For example, given the following Dockerfile:
```dockerfile
# syntax=docker/dockerfile:1
FROM ubuntu AS base
RUN echo "base"
FROM base AS stage1
RUN echo "stage1"
FROM base AS stage2
RUN echo "stage2"
```
With [BuildKit enabled](../buildkit/index.md#getting-started), building the
`stage2` target in this Dockerfile means only `base` and `stage2` are processed.
There is no dependency on `stage1`, so it's skipped.
```console
$ DOCKER_BUILDKIT=1 docker build --no-cache -f Dockerfile --target stage2 .
[+] Building 0.4s (7/7) FINISHED
=> [internal] load build definition from Dockerfile 0.0s
=> => transferring dockerfile: 36B 0.0s
=> [internal] load .dockerignore 0.0s
=> => transferring context: 2B 0.0s
=> [internal] load metadata for docker.io/library/ubuntu:latest 0.0s
=> CACHED [base 1/2] FROM docker.io/library/ubuntu 0.0s
=> [base 2/2] RUN echo "base" 0.1s
=> [stage2 1/1] RUN echo "stage2" 0.2s
=> exporting to image 0.0s
=> => exporting layers 0.0s
=> => writing image sha256:f55003b607cef37614f607f0728e6fd4d113a4bf7ef12210da338c716f2cfd15 0.0s
```
On the other hand, building the same target without BuildKit results in all
stages being processed:
```console
$ DOCKER_BUILDKIT=0 docker build --no-cache -f Dockerfile --target stage2 .
Sending build context to Docker daemon 219.1kB
Step 1/6 : FROM ubuntu AS base
---> a7870fd478f4
Step 2/6 : RUN echo "base"
---> Running in e850d0e42eca
base
Removing intermediate container e850d0e42eca
---> d9f69f23cac8
Step 3/6 : FROM base AS stage1
---> d9f69f23cac8
Step 4/6 : RUN echo "stage1"
---> Running in 758ba6c1a9a3
stage1
Removing intermediate container 758ba6c1a9a3
---> 396baa55b8c3
Step 5/6 : FROM base AS stage2
---> d9f69f23cac8
Step 6/6 : RUN echo "stage2"
---> Running in bbc025b93175
stage2
Removing intermediate container bbc025b93175
---> 09fc3770a9c4
Successfully built 09fc3770a9c4
```
`stage1` gets executed when BuildKit is disabled, even if `stage2` does not
depend on it.


@ -29,6 +29,21 @@ multi-layer image builds based on your unique configurations. Dockerfiles can
start simple and grow with your needs and support images that require complex
instructions. For all the possible instructions, see the [Dockerfile reference](../../engine/reference/builder.md).
The default filename to use for a Dockerfile is `Dockerfile`, without a file
extension. Using the default name allows you to run the `docker build` command
without having to specify additional command flags.
Some projects may need distinct Dockerfiles for specific purposes. A common
convention is to name these `<something>.Dockerfile`. Such Dockerfiles can then
be used through the `--file` (or `-f` shorthand) option on the `docker build` command.
Refer to the ["Specify a Dockerfile" section](../../engine/reference/commandline/build.md#specify-a-dockerfile--f)
in the `docker build` reference to learn about the `--file` option.
> **Note**
>
> We recommend using the default (`Dockerfile`) for your project's primary
> Dockerfile.
Docker images consist of **read-only layers**, each resulting from an
instruction in the Dockerfile. Layers are stacked sequentially and each one is
a delta representing the changes applied to the previous layer.
@ -80,16 +95,23 @@ EXPOSE 8000
CMD flask run --host 0.0.0.0 --port 8000
```
We start by specifying the [syntax directive](../../engine/reference/builder.md#syntax).
It pins the exact version of the Dockerfile syntax we're using:
The first line to add to a Dockerfile is a [`# syntax` parser directive](../../engine/reference/builder.md#syntax).
While optional, this directive instructs the Docker builder what syntax to use
when parsing the Dockerfile, and allows older Docker versions with [BuildKit enabled](../buildkit/index.md#getting-started)
to use a specific [Dockerfile frontend](../buildkit/dockerfile-frontend.md)
before starting the build. [Parser directives](../../engine/reference/builder.md/#parser-directives)
must appear before any other comment, whitespace, or Dockerfile instruction in
your Dockerfile, and should be the first line in Dockerfiles.
```dockerfile
# syntax=docker/dockerfile:1
```
As a [best practice](../../develop/dev-best-practices.md), this should be the
very first line in all our Dockerfiles as it informs BuildKit the right version
of the Dockerfile to use.
> **Note**
>
> We recommend using `docker/dockerfile:1`, which always points to the latest
> release of the version 1 syntax. BuildKit automatically checks for updates of
> the syntax before building, making sure you are using the most current version.
Next we define the first instruction:
@ -185,11 +207,18 @@ To test our Dockerfile, we'll first build it using the [`docker build` command](
$ docker build -t test:latest .
```
* `-t test:latest` option specifies the name (required) and tag (optional) of
the image we're building.
* `.` specifies the build context as the current directory. In this example,
this is where build expects to find the Dockerfile and the local files the
Dockerfile needs to access, in this case your python application.
Here `-t test:latest` option specifies the name (required) and tag (optional)
of the image we're building. `.` specifies the build context as the current
directory. In this example, this is where build expects to find the Dockerfile
and the local files the Dockerfile needs to access, in this case your Python
application.
> **Warning**
>
> Avoid using your root directory, `/`, as the `PATH` for your build context,
> as it causes the build to transfer the entire contents of your hard drive to
> the daemon.
{:.warning}
So, in accordance with the build command issued and how build context works,
your Dockerfile and python app need to be in the same directory.


@ -0,0 +1,102 @@
---
title: Custom Dockerfile syntax
keywords: build, buildkit, dockerfile, frontend
---
## Dockerfile frontend
BuildKit supports loading frontends dynamically from container images. To use
an external Dockerfile frontend, the first line of your [Dockerfile](../../engine/reference/builder.md)
needs to set the [`syntax` directive](../../engine/reference/builder.md#syntax)
pointing to the specific image you want to use:
```dockerfile
# syntax=[remote image reference]
```
For example:
```dockerfile
# syntax=docker/dockerfile:1
# syntax=docker.io/docker/dockerfile:1
# syntax=example.com/user/repo:tag@sha256:abcdef...
```
This defines the location of the Dockerfile syntax that is used to build the
Dockerfile. The BuildKit backend allows seamlessly using external
implementations that are distributed as Docker images and execute inside a
container sandbox environment.
Custom Dockerfile implementations allow you to:
- Automatically get bugfixes without updating the Docker daemon
- Make sure all users are using the same implementation to build your Dockerfile
- Use the latest features without updating the Docker daemon
- Try out new features or third-party features before they are integrated in the Docker daemon
- Use [alternative build definitions, or create your own](https://github.com/moby/buildkit#exploring-llb){:target="_blank" rel="noopener" class="_"}
> **Note**
>
> BuildKit also ships with a built-in Dockerfile frontend, but it's recommended
> to use an external image to make sure that all users use the same version on
> the builder and to pick up bugfixes automatically without waiting for a new
> version of BuildKit or Docker Engine.
## Official releases
Docker distributes official versions of the images that can be used for building
Dockerfiles under `docker/dockerfile` repository on Docker Hub. There are two
channels where new images are released: `stable` and `labs`.
### Stable channel
The `stable` channel follows [semantic versioning](https://semver.org){:target="_blank" rel="noopener" class="_"}.
For example:
- `docker/dockerfile:1` - kept updated with the latest `1.x.x` minor _and_ patch
release.
- `docker/dockerfile:1.2` - kept updated with the latest `1.2.x` patch release,
and stops receiving updates once version `1.3.0` is released.
- `docker/dockerfile:1.2.1` - immutable: never updated.
We recommend using `docker/dockerfile:1`, which always points to the latest
stable release of the version 1 syntax, and receives both "minor" and "patch"
updates for the version 1 release cycle. BuildKit automatically checks for
updates of the syntax when performing a build, making sure you are using the
most current version.
If a specific version is used, such as `1.2` or `1.2.1`, the Dockerfile needs
to be updated manually to continue receiving bugfixes and new features. Old
versions of the Dockerfile remain compatible with the new versions of the
builder.
### Labs channel
The `labs` channel provides early access to Dockerfile features that are not yet
available in the `stable` channel. `labs` images are released at the same time
as stable releases, and follow the same version pattern, but use the `-labs`
suffix, for example:
- `docker/dockerfile:labs` - latest release on `labs` channel.
- `docker/dockerfile:1-labs` - same as `dockerfile:1`, with experimental
features enabled.
- `docker/dockerfile:1.2-labs` - same as `dockerfile:1.2`, with experimental
features enabled.
- `docker/dockerfile:1.2.1-labs` - immutable: never updated. Same as
`dockerfile:1.2.1`, with experimental features enabled.
Choose a channel that best fits your needs. If you want to benefit from
new features, use the `labs` channel. Images in the `labs` channel contain
all the features in the `stable` channel, plus early access features.
Stable features in the `labs` channel follow
[semantic versioning](https://semver.org){:target="_blank" rel="noopener" class="_"},
but early access features don't, and newer releases may not be backwards compatible.
Pin the version to avoid having to deal with breaking changes.
## Other resources
For documentation on "labs" features, master builds, and nightly feature
releases, refer to the description in [the BuildKit source repository on GitHub](https://github.com/moby/buildkit/blob/master/README.md){:target="_blank" rel="noopener" class="_"}.
For a full list of available images, visit the [`docker/dockerfile` repository on Docker Hub](https://hub.docker.com/r/docker/dockerfile){:target="_blank" rel="noopener" class="_"},
and the [`docker/dockerfile-upstream` repository on Docker Hub](https://hub.docker.com/r/docker/dockerfile-upstream){:target="_blank" rel="noopener" class="_"}
for development builds.

build/buildkit/index.md Normal file

@ -0,0 +1,104 @@
---
title: BuildKit
description: Introduction and overview of BuildKit
keywords: build, buildkit
---
## Overview
[BuildKit](https://github.com/moby/buildkit){:target="_blank" rel="noopener" class="_"}
is an improved backend to replace the legacy builder. It comes with new
functionality that improves both your builds' performance and the reusability
of your Dockerfiles. It also introduces support for handling more
complex scenarios:
- Detect and skip executing unused build stages
- Parallelize building independent build stages
- Incrementally transfer only the changed files in your build context between builds
- Detect and skip transferring unused files in your build context
- Use [Dockerfile frontend](dockerfile-frontend.md) implementations with many new features
- Avoid side effects with the rest of the API (intermediate images and containers)
- Prioritize your build cache for automatic pruning
Apart from many new features, the main areas BuildKit improves on the current
experience are performance, storage management, and extensibility. From the
performance side, a significant update is a new fully concurrent build graph
solver. It can run build steps in parallel when possible and optimize out
commands that don't have an impact on the final result. We have also optimized
the access to the local source files. By tracking only the updates made to these
files between repeated build invocations, there is no need to wait for local
files to be read or uploaded before the work can begin.
## LLB
At the core of BuildKit is a [Low-Level Build (LLB)](https://github.com/moby/buildkit#exploring-llb){:target="_blank" rel="noopener" class="_"}
definition format. LLB is an intermediate binary format that allows developers
to extend BuildKit. LLB defines a content-addressable dependency graph that can
be used to put together very complex build definitions. It also supports
features not exposed in Dockerfiles, like direct data mounting and nested
invocation.
![Directed acyclic graph (DAG)](../images/buildkit-dag.svg){:class="invertible" style="width:60%"}
Everything about execution and caching of your builds is defined in LLB. The
caching model is entirely rewritten compared to the legacy builder. Rather than
using heuristics to compare images, LLB directly tracks the checksums of build
graphs and content mounted to specific operations. This makes it much faster,
more precise, and portable. The build cache can even be exported to a registry,
where it can be pulled on-demand by subsequent invocations on any host.
LLB can be generated directly using a [golang client package](https://pkg.go.dev/github.com/moby/buildkit/client/llb)
that allows defining the relationships between your build operations using Go
language primitives. This gives you full power to run anything you can imagine,
but will probably not be how most people will define their builds. Instead,
most users would use a frontend component, or LLB nested invocation, to run
a prepared set of build steps.
## Frontend
A frontend is a component that takes a human-readable build format and converts
it to LLB so BuildKit can execute it. Frontends can be distributed as images,
and the user can target a specific version of a frontend that is guaranteed to
work for the features used by their definition.
For example, to build a [Dockerfile](../../engine/reference/builder.md) with
BuildKit, you would [use an external Dockerfile frontend](dockerfile-frontend.md).
## Getting started
BuildKit is enabled by default for all users on [Docker Desktop](../../desktop/index.md).
If you have installed Docker Desktop, you don't have to manually enable
BuildKit. If you are running Docker on Linux, you can enable BuildKit either by
using an environment variable or by making BuildKit the default setting.
To set the BuildKit environment variable when running the `docker build`
command, run:
```console
$ DOCKER_BUILDKIT=1 docker build .
```
>**Note**
>
> Buildx always enables BuildKit.
To enable Docker BuildKit by default, set the `buildkit` feature to `true` in
the `/etc/docker/daemon.json` daemon configuration file and restart the daemon.
If the `daemon.json` file doesn't exist, create a new file called `daemon.json`
and then add the following to it.
```json
{
"features": {
"buildkit" : true
}
}
```
And restart the Docker daemon.
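On Linux distributions that use systemd, for example:

```console
$ sudo systemctl restart docker
```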
> **Warning**
>
> BuildKit only supports building Linux containers. Windows support is tracked
> in [`moby/buildkit#616`](https://github.com/moby/buildkit/issues/616){:target="_blank" rel="noopener" class="_"}
{: .warning}


@ -0,0 +1,74 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg width="985pt" height="491pt" viewBox="0 0 985 491" version="1.1" xmlns="http://www.w3.org/2000/svg">
<g id="#ffffffff">
</g>
<g id="#272425ff">
<path fill="#272425" opacity="1.00" d=" M 421.49 19.51 C 423.29 11.15 431.34 4.45 440.01 4.95 C 451.32 4.48 460.98 16.49 458.25 27.44 C 456.96 35.06 450.38 41.04 442.93 42.51 C 439.96 42.76 436.94 42.60 434.07 41.75 C 425.11 38.89 419.08 28.71 421.49 19.51 M 435.30 14.10 C 433.27 15.55 431.56 17.38 430.04 19.35 C 429.67 21.82 429.10 24.39 429.91 26.84 C 431.49 32.91 439.43 36.11 444.80 32.89 C 449.51 30.80 450.82 25.03 450.02 20.36 C 448.16 16.29 444.34 13.71 440.02 12.90 C 438.45 13.34 436.88 13.75 435.30 14.10 Z" />
<path fill="#272425" opacity="1.00" d=" M 391.90 78.83 C 398.63 74.47 408.06 75.00 414.19 80.23 C 416.62 82.20 418.43 84.85 419.57 87.76 C 423.86 97.63 417.59 110.41 407.07 112.87 C 398.11 115.64 387.64 110.42 384.50 101.57 C 381.11 93.47 384.51 83.45 391.90 78.83 M 394.10 88.04 C 389.57 92.88 391.65 101.95 397.89 104.26 C 403.82 107.20 412.14 102.72 412.36 95.93 C 413.09 91.43 410.19 87.50 406.67 85.09 C 402.34 83.23 397.14 84.40 394.10 88.04 Z" />
<path fill="#272425" opacity="1.00" d=" M 465.37 80.15 C 469.58 75.61 476.23 75.50 481.94 76.21 C 489.01 77.90 494.74 83.94 496.20 91.04 C 496.62 94.00 496.53 97.02 495.89 99.95 C 495.53 100.82 495.16 101.68 494.77 102.54 C 492.42 107.74 487.71 111.88 482.09 113.13 C 473.37 115.79 463.18 110.73 459.77 102.33 C 457.82 97.80 457.25 92.32 459.61 87.83 C 461.23 85.07 463.14 82.46 465.37 80.15 M 474.19 84.86 C 468.29 86.54 465.14 94.30 468.46 99.54 C 470.56 104.14 475.91 105.74 480.62 105.04 C 485.72 102.88 489.62 97.10 487.56 91.55 C 485.93 86.28 479.41 82.44 474.19 84.86 Z" />
<path fill="#272425" opacity="1.00" d=" M 547.53 78.52 C 553.20 75.19 560.75 74.89 566.47 78.29 C 571.13 80.56 574.11 85.21 575.63 90.03 C 576.22 92.98 576.28 96.01 575.77 98.97 C 573.94 106.11 567.85 112.18 560.45 113.25 C 551.26 115.17 541.65 108.85 539.07 99.95 C 538.32 97.04 538.30 94.00 538.76 91.05 C 539.78 85.87 543.17 81.41 547.53 78.52 M 554.05 84.82 C 548.60 86.40 546.04 92.30 546.97 97.62 C 548.50 100.91 551.08 103.52 554.38 105.04 C 557.21 105.23 560.05 104.97 562.71 103.93 C 564.78 102.07 566.77 99.95 567.44 97.15 C 568.78 93.65 566.72 90.05 564.82 87.18 C 561.71 85.09 557.79 82.82 554.05 84.82 Z" />
<path fill="#272425" opacity="1.00" d=" M 716.19 100.45 C 719.75 93.65 727.37 89.04 735.14 90.11 C 745.96 91.14 754.41 102.90 751.24 113.45 C 749.66 120.14 744.15 125.66 737.45 127.22 C 731.25 129.07 724.61 126.59 719.78 122.62 C 718.05 120.44 716.29 118.22 715.34 115.58 C 713.47 110.65 713.94 105.17 716.19 100.45 M 725.16 102.11 C 722.32 104.85 722.46 109.06 723.00 112.66 C 724.45 115.37 726.64 117.56 729.35 119.00 C 731.77 119.29 734.22 119.28 736.66 119.01 C 739.35 117.56 741.57 115.38 743.01 112.67 C 743.63 109.05 743.67 104.83 740.84 102.07 C 737.02 97.33 728.95 97.34 725.16 102.11 Z" />
<path fill="#272425" opacity="1.00" d=" M 203.09 120.09 C 202.21 110.84 208.95 101.36 218.27 99.88 C 228.29 97.66 238.83 105.03 240.56 115.07 C 240.89 118.04 240.79 121.07 240.00 123.97 C 237.99 130.26 232.54 135.32 226.11 136.80 C 221.93 137.21 217.38 137.48 213.62 135.26 C 207.67 132.62 203.80 126.43 203.09 120.09 M 219.43 108.00 C 216.84 109.31 213.82 110.58 212.61 113.45 C 210.50 117.48 211.72 122.21 214.17 125.79 C 216.59 127.29 219.12 129.08 222.10 129.02 C 227.62 128.77 232.75 123.66 232.27 117.98 C 232.36 115.47 230.92 113.35 229.84 111.20 C 228.12 110.08 226.38 109.00 224.60 108.00 C 222.87 108.00 221.15 108.00 219.43 108.00 Z" />
<path fill="#272425" opacity="1.00" d=" M 283.66 115.12 C 285.06 105.03 295.76 97.73 305.68 99.85 C 313.06 100.96 318.65 107.10 320.70 114.04 C 321.17 117.00 321.13 120.02 320.59 122.97 C 318.77 129.87 313.03 135.41 306.04 136.86 C 302.19 137.14 298.05 137.45 294.49 135.63 C 289.61 133.33 285.94 128.94 284.09 123.92 C 283.26 121.06 283.20 118.05 283.66 115.12 M 299.39 107.99 C 296.76 109.41 294.09 111.14 292.76 113.94 C 291.40 116.28 291.98 119.07 291.98 121.63 C 293.64 125.24 296.82 127.65 300.45 129.05 C 303.92 129.34 307.08 127.79 309.82 125.83 C 312.34 122.62 313.45 118.27 311.85 114.39 C 310.05 109.44 304.32 107.22 299.39 107.99 Z" />
<path fill="#272425" opacity="1.00" d=" M 539.15 136.97 C 541.19 128.65 549.61 122.76 558.07 123.15 C 566.89 123.35 574.48 130.45 575.99 139.01 C 576.34 142.00 576.09 145.03 575.26 147.92 C 573.21 153.73 568.43 158.62 562.41 160.20 C 557.24 161.91 551.53 160.63 546.99 157.85 C 540.18 153.59 536.86 144.65 539.15 136.97 M 548.06 137.25 C 544.57 143.20 549.00 152.09 556.04 152.38 C 560.55 153.13 564.50 150.21 566.90 146.67 C 568.76 142.32 567.59 137.11 563.93 134.07 C 559.28 129.74 550.66 131.47 548.06 137.25 Z" />
<path fill="#272425" opacity="1.00" d=" M 676.24 125.17 C 683.36 121.30 692.61 122.74 698.57 128.09 C 700.79 130.22 702.57 132.82 703.53 135.75 C 704.99 139.84 704.92 144.30 703.53 148.39 C 702.46 151.49 700.56 154.25 698.14 156.45 C 696.97 157.30 695.73 158.03 694.51 158.79 C 684.43 164.65 670.31 158.96 666.83 147.92 C 666.03 145.02 665.80 142.00 666.10 139.02 C 667.13 133.12 671.17 128.14 676.24 125.17 M 677.14 135.07 C 673.02 139.32 673.79 146.96 678.60 150.37 C 681.39 152.77 685.27 152.63 688.68 152.01 C 691.16 150.47 693.85 148.63 694.62 145.63 C 696.28 141.17 694.43 135.95 690.64 133.20 C 686.45 130.64 680.34 131.17 677.14 135.07 Z" />
<path fill="#272425" opacity="1.00" d=" M 289.24 152.21 C 295.01 146.05 305.32 145.21 312.30 149.80 C 316.82 152.38 319.55 157.19 320.77 162.13 C 321.20 165.06 321.05 168.05 320.56 170.98 C 318.20 177.26 313.00 182.93 306.15 184.10 C 296.53 186.62 285.83 179.74 283.90 170.03 C 282.21 163.72 284.43 156.60 289.24 152.21 M 292.00 162.37 C 291.44 166.04 292.07 169.68 294.16 172.78 C 298.09 176.48 304.62 177.54 308.87 173.79 C 312.56 170.87 313.27 165.69 311.91 161.40 C 311.14 160.37 310.43 159.31 309.75 158.23 C 307.47 156.56 304.88 155.27 302.03 155.05 C 297.62 155.49 293.89 158.43 292.00 162.37 Z" />
<path fill="#272425" opacity="1.00" d=" M 406.46 162.09 C 408.21 152.33 418.36 145.35 428.11 147.10 C 435.30 148.20 441.56 153.93 443.28 161.01 C 443.97 163.95 444.01 166.99 443.57 169.97 C 441.66 176.75 436.27 182.80 429.14 184.09 C 419.73 186.56 409.44 180.27 406.85 170.99 C 406.09 168.08 406.03 165.04 406.46 162.09 M 416.21 160.19 C 414.20 162.93 414.62 166.46 415.04 169.61 C 416.47 171.56 417.71 173.90 420.07 174.88 C 426.02 178.52 435.11 174.08 435.34 166.95 C 436.10 162.44 433.17 158.53 429.66 156.10 C 424.88 153.99 419.01 155.78 416.21 160.19 Z" />
<path fill="#272425" opacity="1.00" d=" M 152.73 160.75 C 158.66 153.99 169.80 152.96 177.05 158.16 C 180.87 160.64 183.44 164.73 184.70 169.05 C 185.34 171.97 185.38 175.00 184.81 177.93 C 183.21 184.29 178.22 189.76 171.93 191.68 C 165.18 193.85 157.30 191.72 152.49 186.53 C 146.04 179.51 145.77 167.52 152.73 160.75 M 163.11 163.81 C 157.97 165.36 154.69 171.35 156.33 176.50 C 157.18 180.09 160.33 182.37 163.39 184.03 C 166.22 184.22 169.06 183.98 171.72 182.94 C 173.79 181.07 175.81 178.96 176.48 176.15 C 179.00 168.78 170.30 160.58 163.11 163.81 Z" />
<path fill="#272425" opacity="1.00" d=" M 719.77 161.32 C 725.71 156.21 734.44 154.43 741.54 158.19 C 744.70 159.94 747.45 162.45 749.34 165.55 C 750.49 167.55 751.48 169.71 751.76 172.03 C 751.92 174.65 751.86 177.29 751.54 179.91 C 750.12 185.21 746.50 190.05 741.41 192.30 C 736.31 194.86 729.70 194.84 724.60 192.31 C 715.92 188.35 711.61 177.16 715.23 168.36 C 716.26 165.73 717.99 163.48 719.77 161.32 M 730.43 164.99 C 726.11 166.49 722.65 170.32 722.63 175.06 C 722.12 182.06 730.33 188.35 736.83 185.11 C 742.18 183.00 744.50 176.74 742.95 171.39 C 740.63 166.77 735.58 164.05 730.43 164.99 Z" />
<path fill="#272425" opacity="1.00" d=" M 538.74 186.01 C 540.25 176.29 550.24 169.19 559.90 170.71 C 567.50 171.61 573.89 177.71 575.74 185.04 C 576.28 187.99 576.23 191.02 575.67 193.96 C 573.75 200.80 568.15 206.59 561.05 207.87 C 557.16 208.14 552.99 208.45 549.43 206.57 C 544.70 204.23 540.95 199.94 539.19 194.97 C 538.40 192.05 538.25 188.99 538.74 186.01 M 546.97 186.39 C 546.98 188.89 546.69 191.52 547.82 193.85 C 549.20 197.06 552.48 198.62 555.45 200.05 C 559.98 200.21 565.24 198.18 566.83 193.58 C 570.43 187.11 564.18 178.42 557.01 178.96 C 552.37 178.87 548.88 182.52 546.97 186.39 Z" />
<path fill="#272425" opacity="1.00" d=" M 74.09 194.10 C 77.50 192.52 81.39 192.95 85.04 193.10 C 92.52 194.33 98.41 200.74 99.93 208.03 C 100.34 211.00 100.18 214.02 99.57 216.94 C 97.28 223.81 91.05 229.63 83.66 230.20 C 74.01 231.82 64.14 224.28 62.82 214.66 C 61.07 206.22 66.38 197.47 74.09 194.10 M 79.46 200.94 C 76.60 202.13 74.00 203.84 72.06 206.28 C 69.18 212.04 71.97 219.78 78.33 221.59 C 85.98 224.45 94.59 215.42 91.11 208.02 C 89.69 203.09 84.25 200.69 79.46 200.94 Z" />
<path fill="#272425" opacity="1.00" d=" M 283.96 208.03 C 286.01 201.34 291.43 195.51 298.14 193.37 C 302.58 192.67 307.42 192.50 311.56 194.55 C 316.68 196.91 320.26 201.84 321.75 207.19 L 321.43 208.22 C 320.62 211.36 321.04 214.64 320.88 217.85 C 318.27 227.66 306.64 234.55 296.83 231.06 C 290.29 229.25 285.14 223.59 283.74 216.98 C 283.18 214.02 283.23 210.95 283.96 208.03 M 298.38 203.03 C 296.20 204.54 293.76 206.02 292.81 208.65 C 290.06 213.93 293.11 220.62 298.32 223.00 C 301.99 223.79 306.20 223.65 309.04 220.87 C 313.74 217.00 313.73 209.02 308.99 205.19 C 306.23 202.33 302.01 202.47 298.38 203.03 Z" />
<path fill="#272425" opacity="1.00" d=" M 410.22 201.20 C 413.74 196.76 419.21 193.88 424.93 194.00 C 433.40 193.79 441.26 200.25 443.43 208.30 C 444.17 211.40 444.16 214.64 443.45 217.74 C 441.79 224.48 436.18 230.00 429.45 231.62 C 424.28 232.67 418.53 232.09 414.24 228.78 C 405.22 223.11 403.05 209.17 410.22 201.20 M 421.35 202.99 C 417.93 204.83 414.75 207.89 414.54 212.02 C 413.63 218.34 419.65 224.39 425.98 223.48 C 430.10 223.25 433.17 220.09 435.01 216.65 C 435.24 213.75 435.57 210.62 433.92 208.04 C 431.70 203.43 426.02 202.08 421.35 202.99 Z" />
<path fill="#272425" opacity="1.00" d=" M 115.74 235.74 C 119.59 231.74 125.42 230.16 130.84 230.71 C 138.51 231.51 145.01 237.72 147.01 245.03 C 147.56 247.99 147.55 251.03 146.98 253.98 C 145.23 260.30 140.19 265.71 133.87 267.61 C 127.62 268.86 120.42 268.01 115.76 263.24 C 107.91 256.38 107.84 242.59 115.74 235.74 M 124.05 240.08 C 120.88 241.38 119.36 244.56 117.96 247.46 C 117.43 253.11 121.08 258.30 126.44 259.98 C 127.81 259.98 129.18 259.99 130.56 260.00 C 135.24 258.41 139.42 254.17 139.04 248.94 C 139.06 245.18 136.42 242.32 133.71 240.08 C 130.62 238.97 127.02 238.23 124.05 240.08 Z" />
<path fill="#272425" opacity="1.00" d=" M 876.63 236.54 C 886.71 234.97 896.59 243.02 897.68 253.07 C 897.89 256.00 897.56 258.98 896.55 261.75 C 893.77 268.78 886.75 274.33 878.99 274.05 C 870.86 274.44 863.33 268.51 860.80 260.95 C 860.01 258.05 859.83 255.01 860.18 252.03 C 860.52 250.07 861.44 248.29 862.21 246.48 C 865.02 241.05 870.53 237.18 876.63 236.54 M 868.71 253.04 C 867.49 257.82 870.41 262.40 874.33 264.90 C 880.01 267.52 886.57 264.10 889.02 258.65 C 889.86 253.93 888.49 248.13 883.73 246.04 C 878.01 242.69 869.70 246.40 868.71 253.04 Z" />
<path fill="#272425" opacity="1.00" d=" M 935.80 253.01 C 937.02 245.28 943.38 238.81 951.08 237.45 C 960.70 235.43 970.89 242.34 972.83 251.91 C 973.17 256.09 973.50 260.63 971.26 264.38 C 967.57 272.54 957.25 277.05 948.80 274.01 C 942.92 272.35 938.31 267.49 936.47 261.74 C 935.58 258.92 935.39 255.93 935.80 253.01 M 947.20 248.16 C 944.90 251.14 943.46 254.73 943.99 258.55 C 945.26 261.69 947.38 264.30 950.38 265.95 C 954.34 267.05 958.45 266.17 961.79 263.83 C 963.27 261.45 965.02 259.00 965.02 256.07 C 964.94 250.44 959.76 245.23 954.03 245.65 C 951.50 245.61 949.34 247.04 947.20 248.16 Z" />
<path fill="#272425" opacity="1.00" d=" M 772.97 241.18 C 774.78 240.06 776.50 238.02 778.83 238.34 C 782.26 238.64 785.71 238.51 789.15 238.61 L 789.14 238.00 L 789.13 237.03 C 795.70 239.04 801.98 244.11 803.13 251.19 C 803.75 254.40 804.00 257.69 803.56 260.94 C 802.13 268.22 796.15 274.16 788.98 275.86 C 786.01 276.35 782.98 276.27 780.05 275.66 C 775.74 274.20 771.63 271.54 769.20 267.62 C 763.75 259.52 765.24 247.32 772.97 241.18 M 780.89 247.73 C 778.16 248.67 776.56 251.10 775.04 253.37 C 774.32 256.99 774.33 261.15 777.04 263.99 C 778.91 266.55 782.01 267.47 784.98 268.07 C 786.52 267.63 788.09 267.23 789.67 266.89 C 793.17 264.46 796.08 260.53 795.27 256.04 C 794.77 249.35 786.92 244.79 780.89 247.73 Z" />
<path fill="#272425" opacity="1.00" d=" M 505.82 270.60 C 507.36 263.70 513.01 257.80 519.92 256.20 C 524.50 255.68 529.49 255.61 533.52 258.20 C 538.38 260.78 541.57 265.75 542.81 271.01 C 543.21 273.99 543.14 277.05 542.32 279.96 C 541.88 281.18 541.29 282.33 540.79 283.52 C 538.35 288.07 534.04 291.66 528.99 292.86 C 521.25 294.96 512.25 291.50 508.19 284.51 C 505.80 280.37 504.61 275.31 505.82 270.60 M 519.33 265.11 C 517.14 266.83 515.29 268.92 513.98 271.37 C 513.92 273.45 513.92 275.54 513.96 277.63 C 515.58 280.72 517.91 283.84 521.51 284.68 C 525.66 286.00 530.22 284.02 532.80 280.69 C 534.91 277.65 534.86 273.77 533.95 270.33 C 531.36 265.05 524.69 262.44 519.33 265.11 Z" />
<path fill="#272425" opacity="1.00" d=" M 585.95 271.02 C 587.41 263.97 592.95 257.88 600.04 256.28 C 604.69 255.54 609.69 255.75 613.85 258.16 C 618.21 260.79 621.60 265.07 622.91 270.01 C 623.48 272.97 623.50 276.01 622.97 278.98 C 620.93 286.57 614.04 292.86 606.07 293.41 C 597.32 294.36 589.02 288.11 586.30 279.98 C 585.65 277.03 585.51 274.00 585.95 271.02 M 594.02 272.43 C 594.00 273.80 594.00 275.18 594.00 276.56 C 595.56 280.81 598.94 285.03 603.87 285.02 C 610.59 285.70 616.11 278.91 615.03 272.44 C 613.61 269.15 611.58 265.77 607.88 264.77 C 602.08 262.23 595.73 266.83 594.02 272.43 Z" />
<path fill="#272425" opacity="1.00" d=" M 250.90 293.81 C 252.78 284.11 263.61 277.42 273.13 279.90 C 280.62 281.21 286.26 287.78 287.82 295.02 C 288.16 298.01 287.99 301.05 287.20 303.96 C 285.16 310.01 280.08 314.87 273.92 316.58 C 267.98 317.87 261.11 316.76 256.74 312.28 C 251.59 307.85 249.00 300.41 250.90 293.81 M 264.25 289.07 C 259.51 291.16 258.13 296.94 258.98 301.64 C 260.70 305.06 263.64 307.90 267.50 308.66 C 274.30 310.04 281.01 302.89 279.31 296.13 C 278.36 289.53 269.93 285.69 264.25 289.07 Z" />
<path fill="#272425" opacity="1.00" d=" M 344.89 279.99 C 354.77 277.09 365.56 284.27 367.94 294.02 C 368.43 296.98 368.39 300.02 367.76 302.95 C 366.17 308.62 362.08 313.59 356.52 315.70 C 346.77 320.12 334.43 314.08 331.37 303.95 C 330.59 301.04 330.38 297.99 330.82 295.00 C 332.22 287.91 337.69 281.57 344.89 279.99 M 346.38 287.97 C 341.50 290.13 338.11 295.11 338.97 300.56 C 340.52 304.80 343.87 308.19 348.47 308.89 C 356.41 309.69 362.62 300.41 358.90 293.32 C 357.17 291.14 355.07 289.30 352.62 287.98 C 350.54 287.94 348.46 287.94 346.38 287.97 Z" />
<path fill="#272425" opacity="1.00" d=" M 108.34 292.22 C 112.95 289.03 119.06 288.06 124.41 289.77 C 129.41 291.11 133.31 295.07 135.79 299.47 C 136.27 300.65 136.81 301.82 137.21 303.05 C 137.89 305.95 137.89 308.98 137.52 311.93 C 135.88 317.68 131.94 322.94 126.28 325.20 C 119.48 328.27 110.96 326.55 105.75 321.25 C 102.67 318.34 100.94 314.25 100.20 310.13 C 99.28 303.28 102.54 296.02 108.34 292.22 M 109.57 312.55 C 112.10 317.99 119.61 319.93 124.55 316.59 C 128.75 314.17 129.89 308.81 129.03 304.34 C 126.92 299.89 122.16 296.48 117.08 297.58 C 110.42 298.57 106.47 306.61 109.57 312.55 Z" />
<path fill="#272425" opacity="1.00" d=" M 585.95 321.95 C 585.00 310.00 598.15 300.06 609.45 303.76 C 616.38 305.41 621.61 311.45 623.11 318.30 C 623.59 321.46 623.44 324.71 622.46 327.77 C 619.88 335.92 611.43 341.66 602.88 340.79 C 593.54 340.10 585.24 331.48 585.95 321.95 M 600.42 312.08 C 599.37 312.84 598.31 313.56 597.22 314.25 C 595.79 316.27 594.36 318.42 594.02 320.93 C 593.41 326.95 598.94 332.86 605.05 332.33 C 611.78 332.25 617.18 324.35 614.20 318.13 C 613.48 315.22 610.90 313.65 608.62 312.05 C 605.90 311.50 603.12 311.46 600.42 312.08 Z" />
<path fill="#272425" opacity="1.00" d=" M 713.59 318.31 C 715.22 309.79 723.25 302.95 731.98 303.01 C 740.31 302.83 748.11 309.08 750.29 317.02 C 751.01 319.93 751.09 322.98 750.60 325.95 C 748.92 332.79 743.38 338.63 736.46 340.24 C 727.21 342.86 716.97 336.75 714.06 327.75 C 713.10 324.70 713.00 321.44 713.59 318.31 M 721.99 318.35 C 721.82 321.07 721.28 324.03 722.61 326.56 C 723.76 329.02 726.09 330.61 728.34 332.00 C 730.77 332.30 733.22 332.32 735.66 332.03 C 739.08 330.17 742.28 327.09 742.41 322.95 C 743.28 316.70 737.35 310.73 731.10 311.59 C 726.94 311.70 723.84 314.91 721.99 318.35 Z" />
<path fill="#272425" opacity="1.00" d=" M 25.64 328.75 C 29.41 326.61 33.95 326.70 38.11 327.25 C 45.28 328.85 51.03 335.00 52.41 342.18 C 52.93 345.10 52.81 348.11 52.08 350.98 C 50.24 357.10 45.21 362.16 39.03 363.84 C 32.72 365.72 25.43 363.89 20.77 359.23 C 17.66 356.31 15.94 352.20 15.17 348.06 C 14.13 340.32 18.48 332.02 25.64 328.75 M 29.31 336.10 C 24.63 338.84 22.37 344.46 24.02 349.67 C 26.35 354.78 32.89 357.81 38.10 355.24 C 44.56 352.84 46.46 343.18 41.38 338.52 C 39.50 336.37 36.64 335.68 34.01 334.90 C 32.46 335.34 30.89 335.75 29.31 336.10 Z" />
<path fill="#272425" opacity="1.00" d=" M 238.69 337.59 C 242.69 328.44 254.50 324.35 263.48 328.35 C 269.07 330.86 273.23 336.13 274.65 342.07 C 275.01 344.97 274.96 347.95 274.26 350.81 C 272.73 355.89 269.25 360.34 264.53 362.81 C 254.82 368.46 240.65 362.84 237.86 351.87 C 237.08 348.67 236.94 345.35 237.16 342.07 C 237.28 340.47 238.08 339.04 238.69 337.59 M 251.31 336.09 C 249.64 337.46 247.81 338.79 246.78 340.76 C 244.06 345.60 245.85 352.67 251.14 354.92 C 257.51 358.74 266.48 353.44 266.29 346.03 C 266.93 338.46 257.98 332.88 251.31 336.09 Z" />
<path fill="#272425" opacity="1.00" d=" M 343.57 327.72 C 346.27 326.69 349.23 326.95 352.06 327.02 C 359.96 327.76 366.47 334.37 368.02 342.02 C 368.45 345.00 368.30 348.03 367.69 350.98 C 365.32 357.78 359.21 363.50 351.89 364.30 C 341.89 365.92 331.53 357.81 330.82 347.70 C 329.50 339.10 335.31 330.23 343.57 327.72 M 338.96 346.03 C 339.09 352.62 346.34 357.85 352.63 355.59 C 356.49 354.60 358.52 350.89 360.05 347.54 C 360.26 342.99 358.21 337.78 353.65 336.11 C 347.09 332.50 338.42 338.80 338.96 346.03 Z" />
<path fill="#272425" opacity="1.00" d=" M 463.64 328.82 C 467.36 326.59 471.85 326.80 476.01 327.13 C 483.11 328.40 488.75 334.19 490.65 341.04 C 491.19 343.98 491.24 347.00 490.73 349.96 C 488.81 356.95 483.00 363.04 475.67 364.15 C 466.47 366.02 456.68 359.94 454.09 350.94 C 453.28 348.03 453.29 344.98 453.76 342.02 C 454.82 336.41 458.75 331.61 463.64 328.82 M 470.45 334.95 C 468.00 336.19 465.30 337.29 463.70 339.65 C 461.68 342.18 461.87 345.57 461.96 348.61 C 464.17 353.98 470.55 357.97 476.19 355.24 C 480.33 353.81 482.32 349.64 482.97 345.58 C 482.58 344.17 482.22 342.75 481.91 341.33 C 480.03 336.88 475.05 334.76 470.45 334.95 Z" />
<path fill="#272425" opacity="1.00" d=" M 585.89 366.01 C 587.37 357.41 595.33 350.61 604.03 350.37 C 612.75 350.12 620.94 356.57 622.98 365.01 C 623.48 367.96 623.53 371.00 622.85 373.93 C 620.83 381.85 613.27 388.19 604.99 388.05 C 596.80 388.60 588.95 382.76 586.60 375.00 C 585.64 372.10 585.48 369.02 585.89 366.01 M 601.37 358.98 C 598.81 360.50 596.08 362.28 594.90 365.15 C 592.38 370.35 595.52 377.49 601.09 379.12 C 606.86 381.79 613.25 377.11 615.03 371.55 C 615.15 369.06 614.77 366.62 613.93 364.29 C 612.21 362.09 610.08 360.28 607.63 358.98 C 605.54 358.95 603.45 358.95 601.37 358.98 Z" />
<path fill="#272425" opacity="1.00" d=" M 67.01 371.01 C 70.63 367.27 75.65 364.44 81.01 364.95 C 89.61 364.24 97.63 370.93 99.46 379.19 C 100.14 382.06 100.09 385.05 99.60 387.95 C 97.91 393.98 93.48 399.30 87.44 401.29 C 79.75 404.19 70.23 401.29 65.71 394.36 C 60.70 387.50 61.32 377.30 67.01 371.01 M 78.56 373.37 C 72.45 374.92 68.72 382.41 71.76 388.09 C 73.86 392.47 78.85 394.65 83.56 394.02 C 85.94 392.70 88.77 391.64 90.11 389.09 C 91.87 386.16 91.63 382.60 90.92 379.40 C 90.16 378.35 89.44 377.29 88.76 376.20 C 85.88 374.08 82.21 372.05 78.56 373.37 Z" />
<path fill="#272425" opacity="1.00" d=" M 343.03 375.24 C 352.97 371.13 365.71 377.67 368.06 388.16 C 368.30 391.37 368.30 394.60 368.10 397.82 C 367.51 403.33 363.30 408.01 358.40 410.30 C 353.28 412.82 346.70 412.85 341.59 410.30 C 336.35 408.08 332.84 403.18 331.12 397.91 C 330.49 394.99 330.46 391.97 330.92 389.03 C 332.44 382.85 336.85 377.23 343.03 375.24 M 345.38 383.05 C 342.38 384.70 340.27 387.30 339.00 390.45 C 338.46 394.26 339.91 397.86 342.21 400.84 C 344.34 401.94 346.47 403.37 348.98 403.34 C 354.71 403.79 359.92 398.61 360.02 392.98 C 360.08 390.03 358.29 387.55 356.79 385.17 C 353.45 382.82 349.35 381.94 345.38 383.05 Z" />
<path fill="#272425" opacity="1.00" d=" M 457.28 381.48 C 461.98 375.33 470.75 372.37 478.13 375.02 C 484.16 376.90 488.98 382.12 490.46 388.25 C 491.32 391.32 491.32 394.57 490.50 397.64 C 488.81 404.55 482.83 410.18 475.86 411.52 C 468.59 413.00 460.61 409.63 456.55 403.43 C 452.17 396.90 452.45 387.68 457.28 381.48 M 461.90 393.97 C 462.12 397.97 465.08 401.05 468.38 402.96 C 471.28 403.29 474.47 403.79 477.12 402.20 C 481.91 399.57 484.15 393.37 481.89 388.34 C 479.49 384.83 475.60 381.90 471.12 382.61 C 465.49 382.80 461.29 388.58 461.90 393.97 Z" />
<path fill="#272425" opacity="1.00" d=" M 241.74 380.76 C 246.13 375.62 253.49 373.53 259.99 375.09 C 267.30 376.72 273.13 383.15 274.28 390.53 C 274.77 393.70 274.43 396.94 273.35 399.97 C 271.19 405.39 266.52 409.94 260.88 411.61 C 255.51 412.58 249.49 412.37 244.98 408.92 C 235.71 403.06 234.09 388.57 241.74 380.76 M 246.14 397.61 C 247.76 402.19 253.00 404.25 257.55 404.05 C 260.92 402.52 264.65 400.46 265.61 396.57 C 267.80 390.36 262.71 383.11 256.17 382.97 C 248.92 382.29 242.51 391.04 246.14 397.61 Z" />
<path fill="#272425" opacity="1.00" d=" M 166.02 404.92 C 169.48 402.39 173.67 400.64 178.03 400.96 C 186.40 400.71 193.89 407.16 196.07 415.04 C 196.70 417.98 196.76 421.03 196.18 423.99 C 194.45 430.75 189.06 436.64 182.11 438.07 C 172.86 440.67 162.44 434.49 159.77 425.38 C 157.40 418.13 160.00 409.61 166.02 404.92 M 170.20 412.20 C 168.64 414.37 167.62 416.83 167.02 419.43 C 167.52 423.18 169.25 426.61 172.28 428.93 C 174.93 429.86 177.78 430.59 180.59 429.88 C 185.03 428.36 188.79 423.84 188.09 418.96 C 188.03 413.56 183.02 409.64 177.98 409.07 C 175.11 409.26 172.53 410.61 170.20 412.20 Z" />
<path fill="#272425" opacity="1.00" d=" M 244.06 425.63 C 248.70 421.97 254.92 421.18 260.57 422.42 C 267.07 424.13 272.29 429.62 274.06 436.05 C 274.74 438.97 274.82 442.02 274.21 444.97 C 271.88 456.81 257.20 463.87 246.65 457.69 C 243.75 456.00 241.22 453.69 239.36 450.89 C 236.13 445.57 235.63 438.62 238.25 432.95 C 239.72 430.18 241.64 427.64 244.06 425.63 M 251.41 431.21 C 248.91 432.42 246.61 434.31 245.82 437.08 C 242.80 443.31 248.29 451.28 255.04 451.28 C 262.10 451.94 268.34 443.67 265.18 437.14 C 263.50 431.58 256.58 429.10 251.41 431.21 Z" />
<path fill="#272425" opacity="1.00" d=" M 163.21 455.22 C 167.91 449.06 176.70 446.56 183.94 449.27 C 190.08 451.29 194.80 456.73 196.21 463.00 C 196.73 465.91 196.68 468.91 196.08 471.81 C 194.35 477.94 189.49 483.50 183.16 484.95 C 177.89 484.64 171.81 486.42 167.34 482.79 C 158.26 477.17 156.03 463.21 163.21 455.22 M 168.10 471.67 C 169.92 474.01 172.02 476.46 175.06 477.15 C 179.84 478.48 185.37 476.09 187.19 471.37 C 190.54 464.95 185.17 456.46 177.94 456.69 C 170.45 456.16 164.95 465.04 168.10 471.67 Z" />
</g>
<g id="#70a7b9ff">
<path fill="#70a7b9" opacity="1.00" d=" M 434.07 41.75 C 436.94 42.60 439.96 42.76 442.93 42.51 C 443.07 49.21 442.97 55.91 443.00 62.61 C 449.83 69.20 458.09 74.08 465.37 80.15 C 463.14 82.46 461.23 85.07 459.61 87.83 C 458.90 86.75 458.29 85.52 457.18 84.79 C 451.37 80.62 445.72 76.20 439.98 71.95 C 434.59 75.48 429.64 79.66 424.41 83.43 C 422.74 84.80 420.61 85.78 419.57 87.76 C 418.43 84.85 416.62 82.20 414.19 80.23 C 420.71 75.26 427.15 70.10 433.97 65.57 C 434.12 57.63 433.94 49.69 434.07 41.75 Z" />
<path fill="#70a7b9" opacity="1.00" d=" M 240.56 115.07 C 254.92 114.99 269.29 114.88 283.66 115.12 C 283.20 118.05 283.26 121.06 284.09 123.92 C 269.40 124.09 254.70 123.99 240.00 123.97 C 240.79 121.07 240.89 118.04 240.56 115.07 Z" />
<path fill="#70a7b9" opacity="1.00" d=" M 698.57 128.09 C 703.65 123.26 710.13 120.25 715.34 115.58 C 716.29 118.22 718.05 120.44 719.78 122.62 L 720.32 123.58 C 716.27 126.01 712.33 128.63 708.61 131.53 C 706.87 132.89 704.57 133.68 703.53 135.75 C 702.57 132.82 700.79 130.22 698.57 128.09 Z" />
<path fill="#70a7b9" opacity="1.00" d=" M 698.14 156.45 C 700.56 154.25 702.46 151.49 703.53 148.39 C 705.82 151.18 709.06 152.97 711.99 155.01 C 714.88 156.69 717.26 159.13 720.28 160.60 L 719.77 161.32 C 717.99 163.48 716.26 165.73 715.23 168.36 C 709.73 164.12 704.02 160.16 698.14 156.45 Z" />
<path fill="#70a7b9" opacity="1.00" d=" M 184.70 169.05 C 197.22 169.00 210.22 167.97 222.01 173.02 C 227.58 175.92 233.71 178.17 238.24 182.70 C 246.59 191.18 255.10 199.50 263.43 208.00 C 270.27 208.02 277.12 207.97 283.96 208.03 C 283.23 210.95 283.18 214.02 283.74 216.98 C 264.47 217.05 245.20 216.95 225.94 217.03 C 221.12 217.19 219.28 222.35 216.21 225.20 C 208.53 232.50 201.65 240.91 192.36 246.27 C 184.59 250.64 175.89 253.26 167.04 254.03 C 160.35 253.97 153.66 254.03 146.98 253.98 C 147.55 251.03 147.56 247.99 147.01 245.03 C 153.67 244.98 160.34 244.97 167.01 245.04 C 172.04 245.27 176.72 243.10 181.47 241.77 C 186.28 239.32 191.49 237.25 195.28 233.26 C 200.60 227.79 206.12 222.52 211.39 217.01 C 174.12 216.95 136.84 217.08 99.57 216.94 C 100.18 214.02 100.34 211.00 99.93 208.03 C 150.42 207.95 200.91 208.04 251.39 207.99 C 245.95 202.32 240.28 196.86 234.78 191.25 C 226.82 184.06 216.63 179.37 206.02 177.99 C 198.95 177.95 191.88 178.09 184.81 177.93 C 185.38 175.00 185.34 171.97 184.70 169.05 Z" />
<path fill="#70a7b9" opacity="1.00" d=" M 751.76 172.03 C 757.85 172.03 763.95 171.91 770.05 172.04 C 780.97 173.69 788.81 184.33 789.03 195.06 C 789.04 209.37 788.85 223.69 789.14 238.00 L 789.15 238.61 C 785.71 238.51 782.26 238.64 778.83 238.34 C 779.60 237.04 780.21 235.64 780.03 234.10 C 779.97 223.41 780.01 212.71 780.00 202.02 C 779.88 197.49 780.64 192.78 778.79 188.49 C 776.72 184.78 773.21 181.90 769.06 180.90 C 763.37 179.17 757.37 180.40 751.54 179.91 C 751.86 177.29 751.92 174.65 751.76 172.03 Z" />
<path fill="#70a7b9" opacity="1.00" d=" M 803.13 251.19 C 804.31 251.77 805.58 252.13 806.93 252.03 C 824.68 251.98 842.43 251.98 860.18 252.03 C 859.83 255.01 860.01 258.05 860.80 260.95 C 841.72 261.02 822.64 261.05 803.56 260.94 C 804.00 257.69 803.75 254.40 803.13 251.19 Z" />
<path fill="#70a7b9" opacity="1.00" d=" M 897.68 253.07 C 910.38 252.91 923.09 253.04 935.80 253.01 C 935.39 255.93 935.58 258.92 936.47 261.74 C 923.16 261.97 909.85 261.96 896.55 261.75 C 897.56 258.98 897.89 256.00 897.68 253.07 Z" />
<path fill="#70a7b9" opacity="1.00" d=" M 780.05 275.66 C 782.98 276.27 786.01 276.35 788.98 275.86 C 788.95 285.60 789.12 295.35 788.91 305.09 C 787.94 309.01 786.99 313.19 784.29 316.32 C 780.59 322.15 773.77 325.34 767.05 326.00 C 761.57 326.04 756.08 326.01 750.60 325.95 C 751.09 322.98 751.01 319.93 750.29 317.02 C 755.20 316.99 760.12 316.97 765.04 317.04 C 772.80 317.81 779.96 310.70 780.00 303.04 C 780.04 293.91 779.93 284.78 780.05 275.66 Z" />
<path fill="#70a7b9" opacity="1.00" d=" M 287.82 295.02 C 302.15 294.97 316.49 295.01 330.82 295.00 C 330.38 297.99 330.59 301.04 331.37 303.95 C 316.65 304.03 301.92 304.03 287.20 303.96 C 287.99 301.05 288.16 298.01 287.82 295.02 Z" />
<path fill="#70a7b9" opacity="1.00" d=" M 137.21 303.05 C 142.82 303.09 148.44 303.10 154.06 303.04 C 157.75 302.93 161.29 304.31 165.00 304.14 C 175.41 306.69 185.41 311.43 193.26 318.82 C 200.93 326.59 208.71 334.24 216.37 342.02 C 223.30 342.09 230.23 342.00 237.16 342.07 C 236.94 345.35 237.08 348.67 237.86 351.87 C 236.68 351.20 235.37 350.85 234.01 350.97 C 214.82 351.03 195.63 350.98 176.44 351.00 C 168.60 358.98 160.62 366.81 152.76 374.77 C 147.74 379.27 141.68 382.31 135.44 384.73 C 130.77 386.33 125.94 387.67 121.01 388.03 C 113.87 387.96 106.73 388.06 99.60 387.95 C 100.09 385.05 100.14 382.06 99.46 379.19 C 106.98 378.76 114.52 379.11 122.05 379.01 C 126.34 378.66 130.35 376.93 134.46 375.76 C 142.99 372.17 150.00 365.97 156.23 359.26 C 158.65 356.54 161.91 354.45 163.48 351.07 C 126.35 350.89 89.21 351.07 52.08 350.98 C 52.81 348.11 52.93 345.10 52.41 342.18 C 102.73 341.82 153.06 342.21 203.39 341.99 C 197.27 335.63 190.91 329.51 184.75 323.21 C 180.20 318.78 174.10 316.69 168.41 314.22 C 163.61 313.35 158.89 311.69 153.97 311.97 C 148.48 312.01 143.00 312.04 137.52 311.93 C 137.89 308.98 137.89 305.95 137.21 303.05 Z" />
<path fill="#70a7b9" opacity="1.00" d=" M 274.65 342.07 C 280.58 342.30 286.77 341.02 292.46 343.21 C 298.42 345.70 303.31 350.57 305.79 356.54 C 306.29 358.32 307.06 360.07 307.02 361.97 C 307.00 371.31 306.99 380.65 307.00 390.00 C 312.02 389.78 317.14 390.60 322.07 389.35 C 324.98 388.77 327.97 389.05 330.92 389.03 C 330.46 391.97 330.49 394.99 331.12 397.91 C 326.38 398.25 321.60 397.60 316.90 398.27 C 313.66 399.09 310.31 399.04 307.00 399.01 C 306.98 407.68 307.01 416.36 307.01 425.03 C 306.52 432.38 301.30 438.46 295.37 442.34 C 288.97 446.29 281.30 444.73 274.21 444.97 C 274.82 442.02 274.74 438.97 274.06 436.05 C 280.89 435.57 288.98 437.86 294.32 432.28 C 300.77 425.07 298.64 414.79 299.02 405.98 C 298.68 403.71 300.01 400.56 297.68 399.09 C 292.13 399.01 286.47 398.49 281.03 399.81 C 278.48 400.16 275.90 399.96 273.35 399.97 C 274.43 396.94 274.77 393.70 274.28 390.53 C 282.53 392.32 290.73 389.57 299.00 390.00 C 298.99 382.32 299.02 374.65 299.00 366.97 C 298.48 361.75 296.87 355.78 292.01 352.96 C 286.78 349.42 280.15 351.31 274.26 350.81 C 274.96 347.95 275.01 344.97 274.65 342.07 Z" />
<path fill="#70a7b9" opacity="1.00" d=" M 196.07 415.04 C 205.38 414.96 214.69 415.03 223.99 414.99 C 231.68 415.51 240.20 418.46 244.06 425.63 C 241.64 427.64 239.72 430.18 238.25 432.95 C 236.50 429.66 233.94 426.74 230.51 425.15 C 228.69 424.67 226.90 423.95 225.00 423.98 C 215.39 423.99 205.78 424.02 196.18 423.99 C 196.76 421.03 196.70 417.98 196.07 415.04 Z" />
<path fill="#70a7b9" opacity="1.00" d=" M 239.36 450.89 C 241.22 453.69 243.75 456.00 246.65 457.69 C 244.83 459.92 243.57 462.55 241.65 464.70 C 238.70 467.24 235.18 469.11 231.61 470.63 C 219.88 472.66 207.93 471.57 196.08 471.81 C 196.68 468.91 196.73 465.91 196.21 463.00 C 205.49 462.99 214.77 462.99 224.05 463.02 C 225.42 463.08 226.73 462.70 227.97 462.14 C 228.59 462.08 229.82 461.97 230.44 461.91 C 234.72 459.48 238.07 455.73 239.36 450.89 Z" />
</g>
<g id="#cc851fff">
<path fill="#cc851f" opacity="1.00" d=" M 575.63 90.03 C 585.10 89.96 594.58 90.03 604.05 89.99 C 606.58 89.90 609.04 90.55 611.46 91.23 C 621.05 93.82 627.79 103.21 628.03 113.00 C 628.06 121.26 627.85 129.53 628.09 137.78 C 628.35 138.06 628.88 138.62 629.14 138.90 C 641.45 139.15 653.78 138.90 666.10 139.02 C 665.80 142.00 666.03 145.02 666.83 147.92 C 653.89 147.84 640.95 147.88 628.01 147.90 C 627.94 156.30 628.09 164.70 627.95 173.09 C 626.80 184.13 617.18 193.28 606.11 193.97 C 595.96 194.03 585.81 194.01 575.67 193.96 C 576.23 191.02 576.28 187.99 575.74 185.04 C 586.32 184.87 596.89 185.17 607.46 184.89 C 608.42 184.40 609.43 184.02 610.49 183.75 C 613.63 182.12 616.08 179.59 617.80 176.51 C 618.32 174.69 619.10 172.91 619.02 170.98 C 618.99 163.29 619.00 155.59 619.00 147.90 C 604.42 147.88 589.84 147.84 575.26 147.92 C 576.09 145.03 576.34 142.00 575.99 139.01 C 589.35 138.99 602.71 139.00 616.07 139.02 C 617.33 139.26 619.16 138.59 618.98 137.02 C 618.99 128.19 619.11 119.36 618.89 110.54 C 618.44 109.55 618.05 108.54 617.75 107.51 C 615.47 102.48 610.27 99.76 605.02 99.02 C 595.27 98.93 585.52 99.07 575.77 98.97 C 576.28 96.01 576.22 92.98 575.63 90.03 Z" />
<path fill="#cc851f" opacity="1.00" d=" M 496.20 91.04 C 510.38 91.10 524.57 91.08 538.76 91.05 C 538.30 94.00 538.32 97.04 539.07 99.95 C 524.68 100.03 510.28 100.04 495.89 99.95 C 496.53 97.02 496.62 94.00 496.20 91.04 Z" />
<path fill="#cc851f" opacity="1.00" d=" M 320.70 114.04 C 330.13 114.05 339.58 113.83 349.00 114.13 C 349.38 114.30 350.16 114.64 350.55 114.82 C 351.02 114.90 351.96 115.07 352.44 115.15 C 359.56 117.74 365.86 123.87 367.28 131.47 C 368.10 132.84 368.01 134.46 368.03 136.00 C 367.97 144.68 368.01 153.36 368.00 162.05 C 380.82 162.12 393.64 162.04 406.46 162.09 C 406.03 165.04 406.09 168.08 406.85 170.99 C 394.56 171.04 382.27 170.95 369.99 171.02 C 368.45 170.89 367.72 172.65 367.98 173.94 C 368.00 180.30 367.98 186.66 368.02 193.02 C 368.73 201.29 364.30 209.91 357.04 213.98 C 353.34 216.85 348.53 217.22 344.06 217.79 C 336.34 217.87 328.61 217.72 320.88 217.85 C 321.04 214.64 320.62 211.36 321.43 208.22 C 329.59 208.39 337.76 208.37 345.93 208.25 C 351.01 208.35 355.39 204.74 357.82 200.52 C 358.36 198.71 359.11 196.92 359.02 195.00 C 358.99 187.00 359.00 179.00 359.00 171.00 C 346.18 170.98 333.37 171.03 320.56 170.98 C 321.05 168.05 321.20 165.06 320.77 162.13 C 333.51 162.00 346.25 162.11 359.00 162.07 C 358.97 152.55 359.07 143.03 358.94 133.52 C 357.04 128.72 353.26 124.99 348.49 123.06 C 339.19 122.90 329.88 123.07 320.59 122.97 C 321.13 120.02 321.17 117.00 320.70 114.04 Z" />
<path fill="#cc851f" opacity="1.00" d=" M 443.28 161.01 C 452.87 160.99 462.46 161.00 472.04 160.99 C 475.61 161.24 479.29 161.96 482.45 163.72 C 488.19 166.23 492.42 171.49 494.54 177.31 C 495.15 180.18 495.65 183.08 496.05 185.99 C 510.28 186.02 524.51 185.98 538.74 186.01 C 538.25 188.99 538.40 192.05 539.19 194.97 C 524.80 195.03 510.40 194.99 496.01 194.99 C 494.82 198.50 494.91 202.38 492.73 205.53 C 490.30 210.11 486.18 213.77 481.39 215.74 C 480.37 216.04 479.36 216.38 478.37 216.75 C 475.60 217.27 472.83 217.93 470.00 217.83 C 461.15 217.74 452.29 217.89 443.45 217.74 C 444.16 214.64 444.17 211.40 443.43 208.30 C 453.59 208.26 463.76 208.47 473.92 208.21 C 479.26 208.01 483.34 203.92 485.81 199.51 C 487.58 192.98 487.51 185.99 485.85 179.45 C 484.03 175.94 481.15 172.88 477.55 171.19 C 476.48 170.94 475.45 170.58 474.47 170.11 C 464.17 169.84 453.87 170.11 443.57 169.97 C 444.01 166.99 443.97 163.95 443.28 161.01 Z" />
<path fill="#cc851f" opacity="1.00" d=" M 622.91 270.01 C 632.62 269.97 642.32 270.03 652.03 269.97 C 653.56 269.94 655.13 270.06 656.55 270.74 C 664.50 271.96 671.23 278.02 673.85 285.56 C 673.93 286.04 674.10 286.99 674.18 287.47 C 675.16 288.80 675.00 290.45 675.03 292.00 C 674.96 300.73 675.01 309.46 675.00 318.19 C 687.86 318.44 700.73 318.20 713.59 318.31 C 713.00 321.44 713.10 324.70 714.06 327.75 C 701.04 327.85 688.02 327.65 675.00 327.86 C 674.91 336.87 675.16 345.90 674.90 354.91 C 674.72 355.32 674.36 356.13 674.18 356.54 C 674.10 357.01 673.95 357.97 673.87 358.45 C 673.59 358.98 673.02 360.03 672.73 360.55 C 669.29 369.05 659.93 373.94 651.05 374.02 C 641.65 373.95 632.25 374.09 622.85 373.93 C 623.53 371.00 623.48 367.96 622.98 365.01 C 632.31 364.98 641.63 365.00 650.96 365.02 C 653.19 364.99 655.32 364.21 657.50 363.81 C 661.32 361.71 664.29 358.54 665.94 354.49 C 666.09 345.58 665.98 336.68 666.00 327.78 C 651.49 327.77 636.98 327.78 622.46 327.77 C 623.44 324.71 623.59 321.46 623.11 318.30 C 637.40 318.29 651.70 318.30 666.01 318.29 C 665.97 308.70 666.10 299.10 665.94 289.51 C 664.16 285.64 661.56 281.96 657.55 280.16 C 655.70 279.69 653.89 278.90 651.96 278.98 C 642.29 279.00 632.63 279.02 622.97 278.98 C 623.50 276.01 623.48 272.97 622.91 270.01 Z" />
<path fill="#cc851f" opacity="1.00" d=" M 542.81 271.01 C 557.19 270.99 571.57 270.99 585.95 271.02 C 585.51 274.00 585.65 277.03 586.30 279.98 C 571.64 280.01 556.97 280.03 542.32 279.96 C 543.14 277.05 543.21 273.99 542.81 271.01 Z" />
<path fill="#cc851f" opacity="1.00" d=" M 367.94 294.02 C 376.98 293.98 386.01 294.00 395.04 294.00 C 404.57 294.93 412.64 302.79 414.91 311.92 C 415.22 321.54 414.77 331.19 415.09 340.81 C 415.36 341.09 415.91 341.63 416.18 341.91 C 428.69 342.15 441.23 341.91 453.76 342.02 C 453.29 344.98 453.28 348.03 454.09 350.94 C 441.49 351.14 428.89 350.83 416.30 351.09 C 415.99 351.39 415.39 351.99 415.09 352.30 C 414.83 360.87 415.12 369.46 414.99 378.04 C 415.08 380.43 413.70 382.47 412.79 384.58 C 409.53 391.40 402.72 396.81 394.98 397.03 C 386.08 398.36 377.05 397.26 368.10 397.82 C 368.30 394.60 368.30 391.37 368.06 388.16 C 377.54 388.14 387.04 388.47 396.51 387.93 C 400.88 386.21 404.22 382.88 405.93 378.50 C 406.43 369.35 406.17 360.17 406.12 351.00 C 393.31 350.98 380.50 351.03 367.69 350.98 C 368.30 348.03 368.45 345.00 368.02 342.02 C 380.74 341.97 393.46 342.02 406.18 341.99 C 406.12 332.14 406.42 322.29 405.96 312.44 C 404.10 309.23 401.63 306.30 398.50 304.28 C 395.28 302.52 391.54 303.03 388.02 303.00 C 381.27 302.99 374.51 303.05 367.76 302.95 C 368.39 300.02 368.43 296.98 367.94 294.02 Z" />
<path fill="#cc851f" opacity="1.00" d=" M 490.65 341.04 C 500.09 340.94 509.54 341.05 518.99 340.97 C 526.93 340.99 535.12 344.91 539.22 351.88 C 541.98 356.01 543.10 361.08 543.07 366.00 C 557.34 366.01 571.61 365.99 585.89 366.01 C 585.48 369.02 585.64 372.10 586.60 375.00 C 572.13 375.01 557.65 375.00 543.18 375.00 C 542.73 378.28 541.95 381.55 540.29 384.45 C 537.50 390.63 531.64 395.30 525.03 396.77 C 513.60 398.47 502.01 397.40 490.50 397.64 C 491.32 394.57 491.32 391.32 490.46 388.25 C 501.48 388.03 512.51 388.51 523.52 387.97 C 528.59 385.97 532.58 381.81 533.92 376.48 C 533.98 368.93 535.29 360.35 529.78 354.23 C 526.50 351.62 522.29 349.80 518.04 349.97 C 508.93 350.00 499.83 350.04 490.73 349.96 C 491.24 347.00 491.19 343.98 490.65 341.04 Z" />
</g>
</svg>


View File

@ -5,6 +5,7 @@ keywords: build, buildx, buildkit
redirect_from:
- /build/buildx/
- /buildx/working-with-buildx/
- /develop/develop-images/build_enhancements/
---
## Overview
@ -20,29 +21,24 @@ and tools. The most common method of executing a build is by issuing a
sends the request to Docker Engine which, in turn, executes your build.
There are now two components in Engine that can be used to build an image.
Starting with the [18.09 release](../engine/release-notes/18.09.md#18090), Engine is
shipped with Moby [BuildKit](https://github.com/moby/buildkit){:target="_blank" rel="noopener" class="_"},
the new component for executing your builds by default.
BuildKit is the backend evolution of the Legacy Builder. It comes with new and
much improved functionality that can be a powerful tool for improving your
builds' performance and the reusability of your Dockerfiles, and it also
introduces support for complex scenarios.
Starting with the [18.09 release](../engine/release-notes/18.09.md#18090),
Engine is shipped with Moby [BuildKit](buildkit/index.md), the new component for
executing your builds by default.
The new client [Docker Buildx](https://github.com/docker/buildx){:target="_blank" rel="noopener" class="_"},
is a CLI plugin that extends the docker command with the full support of the
features provided by BuildKit builder toolkit. `docker buildx build` provides
the same user experience as `docker build` with many new features like creating
scoped builder instances, building against multiple nodes concurrently, outputs
configuration, inline build caching, and specifying target platform. In
addition, Buildx also supports new features that are not yet available for
regular `docker build` like building manifest lists, distributed caching, and
exporting build results to OCI image tarballs.
features provided by [BuildKit](buildkit/index.md) builder toolkit. [`docker buildx build` command](../engine/reference/commandline/buildx_build.md)
provides the same user experience as `docker build` with many new features like
creating scoped [builder instances](building/drivers/index.md), building
against multiple nodes concurrently, outputs configuration, inline [build caching](building/cache/index.md),
and specifying target platform. In addition, Buildx also supports new features
that are not yet available for regular `docker build` like building manifest
lists, distributed caching, and exporting build results to OCI image tarballs.
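As a brief illustration (the `myimage` tag is a placeholder), building for a
specific target platform with Buildx might look like this:
```console
$ docker buildx build --platform linux/arm64 -t myimage:latest .
```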
Docker Build is much more than a simple build command, and it's not only about
packaging your code. It's a whole ecosystem of tools and features that supports
not only common workflow tasks but also more complex and
advanced scenarios:
advanced scenarios.
## Building your images
@ -114,13 +110,14 @@ to be built concurrently as part of a single request:
[High-level builds with Bake](customize/bake/index.md){: .button .outline-btn }
## Extending BuildKit
## BuildKit
### Custom syntax on Dockerfile
### Custom Dockerfile syntax
Use experimental versions of the Dockerfile frontend, or even just bring your
own to BuildKit using the power of custom frontends. See also the
[Syntax directive](../engine/reference/builder.md#syntax).
own to BuildKit using the power of custom frontends.
[Custom Dockerfile syntax](buildkit/dockerfile-frontend.md){: .button .outline-btn }
### Configure BuildKit

View File

@ -0,0 +1,45 @@
---
description: Key features and use cases of Docker Compose
keywords: documentation, docs, docker, compose, orchestration, containers, uses, features
title: Compose V2 Overview
---
## Compose V2 and the new `docker compose` command
> **Important**
>
> The new Compose V2, which supports the `compose` command as part of the Docker
> CLI, is now available.
>
> Compose V2 integrates compose functions into the Docker platform, continuing
> to support most of the previous `docker-compose` features and flags. You can
> run Compose V2 by replacing the hyphen (`-`) with a space, using `docker compose`,
> instead of `docker-compose`.
{: .important}
If you rely on using Docker Compose as `docker-compose` (with a hyphen), you can
set up Compose V2 to act as a drop-in replacement of the previous `docker-compose`.
Refer to the [Installing Compose](../install/index.md) section for detailed instructions.
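For example, once Compose V2 is set up as a drop-in replacement, the following
two invocations are equivalent:
```console
$ docker-compose up -d    # Compose V1 (standalone binary)
$ docker compose up -d    # Compose V2 (Docker CLI plugin)
```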
## Context of Docker Compose evolution
The introduction of the [Compose specification](https://github.com/compose-spec/compose-spec){:target="_blank" rel="noopener" class="_"}
makes a clean distinction between the Compose YAML file model and the `docker-compose`
implementation. This change has enabled a number of enhancements, including
adding the `compose` command directly into the Docker CLI, the ability to "up" a
Compose application on cloud platforms by simply switching the Docker context,
and the launch of the [Amazon ECS](../../cloud/ecs-integration.md) and [Microsoft ACI](../../cloud/aci-integration.md) integrations.
As the Compose specification evolves, new features land faster in the Docker CLI.
Compose V2 relies directly on the compose-go bindings, which are maintained as
part of the specification. This allows us to include community proposals and
experimental implementations by the Docker CLI and/or Engine, and to deliver
features faster to users. Compose V2 also supports some of the newer additions
to the specification,
such as [profiles](../profiles.md) and [GPU](../gpu-support.md) devices.
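As a quick sketch, assuming a Compose file that assigns some services to a
`debug` profile, you could enable that profile when starting the application:
```console
$ docker compose --profile debug up -d
```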
Compose V2 has been rewritten in [Go](https://go.dev), which improves integration
with other Docker command-line features, and allows it to run natively on
[macOS on Apple silicon](../../desktop/mac/apple-silicon.md), Windows, and Linux,
without dependencies such as Python.
For more information about compatibility with the Compose V1 command line, see the [docker-compose compatibility list](../cli-command-compatibility.md).

View File

@ -14,7 +14,7 @@ The following syntax rules apply to environment files:
- `VAR="VAL"` -> `VAL`
- `VAR='VAL'` -> `VAL`
- Lines beginning with `#` are processed as comments and ignored.
- Inline comments for unquoted values must be proceeded with a space.
- Inline comments for unquoted values must be preceded with a space.
- `VAR=VAL # comment` -> `VAL`
- `VAR=VAL# not a comment` -> `VAL# not a comment`
- Inline comments for quoted values must follow the closing quote.

View File

@ -112,6 +112,19 @@ the directory contents of the image.
There are [many examples of Compose files on
GitHub](https://github.com/search?q=in%3Apath+docker-compose.yml+extension%3Ayml&type=Code).
## Getting help
Docker Compose is under active development. If you need help, would like to
contribute, or simply want to talk about the project with like-minded
individuals, we have a number of open channels for communication.
* To report bugs or file feature requests: use the [issue tracker on GitHub](https://github.com/docker/compose/issues).
* To talk about the project with people in real time: join the
`#docker-compose` channel on the Docker Community Slack.
* To contribute code or documentation changes: submit a [pull request on GitHub](https://github.com/docker/compose/pulls).
## Compose documentation

131
compose/features-uses.md Normal file
View File

@ -0,0 +1,131 @@
---
description: Key features and use cases of Docker Compose
keywords: documentation, docs, docker, compose, orchestration, containers, uses, features
title: Key features and use cases
---
Using Compose is essentially a three-step process:
1. Define your app's environment with a `Dockerfile` so it can be reproduced
anywhere.
2. Define the services that make up your app in `docker-compose.yml`
so they can be run together in an isolated environment.
3. Run `docker compose up` and the [Docker compose command](compose-v2/index.md#compose-v2-and-the-new-docker-compose-command) starts and runs your entire app. You can alternatively run `docker-compose up` using Compose standalone (the `docker-compose` binary).
A `docker-compose.yml` looks like this:
```yaml
version: "{{ site.compose_file_v3 }}" # optional since v1.27.0
services:
web:
build: .
ports:
- "8000:5000"
volumes:
- .:/code
- logvolume01:/var/log
depends_on:
- redis
redis:
image: redis
volumes:
logvolume01: {}
```
For more information about the Compose file, see the
[Compose file reference](compose-file/index.md).
## Key features of Docker Compose
### Have multiple isolated environments on a single host
Compose uses a project name to isolate environments from each other. You can make use of this project name in several different contexts:
* on a dev host, to create multiple copies of a single environment, such as when you want to run a stable copy for each feature branch of a project
* on a CI server, to keep builds from interfering with each other, you can set
the project name to a unique build number
* on a shared host or dev host, to prevent different projects, which may use the
same service names, from interfering with each other
The default project name is the basename of the project directory. You can set
a custom project name by using the
[`-p` command line option](reference/index.md) or the
[`COMPOSE_PROJECT_NAME` environment variable](reference/envvars.md#compose_project_name).
The default project directory is the base directory of the Compose file. A custom value
for it can be defined with the `--project-directory` command line option.
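For example (the project names below are purely illustrative), you can bring up
two isolated copies of the same Compose file on one host:
```console
$ docker compose -p feature_a up -d
$ docker compose -p feature_b up -d
```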
### Preserves volume data when containers are created
Compose preserves all volumes used by your services. When `docker compose up`
runs, if it finds any containers from previous runs, it copies the volumes from
the old container to the new container. This process ensures that any data
you've created in volumes isn't lost.
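Named volumes are only removed when you explicitly ask for it. For example, the
following tears down the containers and also deletes the volumes declared in
the Compose file:
```console
$ docker compose down --volumes
```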
If you use `docker-compose` on a Windows machine, see
[Environment variables](reference/envvars.md) and adjust the necessary environment
variables for your specific needs.
### Only recreate containers that have changed
Compose caches the configuration used to create a container. When you
restart a service that has not changed, Compose re-uses the existing
containers. Re-using containers means that you can make changes to your
environment very quickly.
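If you do want fresh containers for services that haven't changed, you can ask
Compose to recreate them explicitly, for example:
```console
$ docker compose up -d --force-recreate
```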
### Supports variables and moving a composition between environments
Compose supports variables in the Compose file. You can use these variables
to customize your composition for different environments, or different users.
See [Variable substitution](compose-file/compose-file-v3.md#variable-substitution) for more
details.
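As a minimal sketch, assuming your Compose file references a `${TAG}` variable,
you can supply the value from the shell environment when starting the services:
```console
$ TAG=v1.5 docker compose up -d
```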
You can extend a Compose file using the `extends` field or by creating multiple
Compose files. See [extends](extends.md) for more details.
## Common use cases of Docker Compose
Compose can be used in many different ways. Some common use cases are outlined
below.
### Development environments
When you're developing software, the ability to run an application in an
isolated environment and interact with it is crucial. The Compose command
line tool can be used to create the environment and interact with it.
The [Compose file](compose-file/index.md) provides a way to document and configure
all of the application's service dependencies (databases, queues, caches,
web service APIs, etc). Using the Compose command line tool you can create
and start one or more containers for each dependency with a single command
(`docker-compose up`).
Together, these features provide a convenient way for developers to get
started on a project. Compose can reduce a multi-page "developer getting
started guide" to a single machine readable Compose file and a few commands.
### Automated testing environments
An important part of any Continuous Deployment or Continuous Integration process
is the automated test suite. Automated end-to-end testing requires an
environment in which to run tests. Compose provides a convenient way to create
and destroy isolated testing environments for your test suite. By defining the full environment in a [Compose file](compose-file/index.md), you can create and destroy these environments in just a few commands:
```console
$ docker compose up -d
$ ./run_tests
$ docker compose down
```
### Single host deployments
Compose has traditionally been focused on development and testing workflows,
but with each release we're making progress on more production-oriented features.
For details on using production-oriented features, see
[compose in production](production.md) in this documentation.

View File

@ -1,23 +1,23 @@
---
description: Get started with Docker Compose
keywords: documentation, docs, docker, compose, orchestration, containers
title: Get started with Docker Compose
title: Try Docker Compose
---
On this page you build a simple Python web application running on Docker
Compose. The application uses the Flask framework and maintains a hit counter in
Redis. While the sample uses Python, the concepts demonstrated here should be
understandable even if you're not familiar with it.
This tutorial is designed to introduce the key concepts of Docker Compose whilst building a simple Python web application. The application uses the Flask framework and maintains a hit counter in
Redis.
The concepts demonstrated here should be understandable even if you're not familiar with Python.
## Prerequisites
Make sure you have already installed both [Docker Engine](../get-docker.md)
and [Docker Compose](install/index.md). You don't need to install Python or Redis, as
both are provided by Docker images.
You need to have Docker Engine and Docker Compose on your machine. You can either:
- Install [Docker Engine](../get-docker.md) and [Docker Compose](install/index.md) as standalone binaries
- Install [Docker Desktop](../desktop/index.md) which includes both Docker Engine and Docker Compose
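With either option installed, you can verify that Compose is available, for
example:
```console
$ docker compose version
```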
## Step 1: Setup
You don't need to install Python or Redis, as both are provided by Docker images.
Define the application dependencies.
## Step 1: Define the application dependencies
1. Create a directory for the project:
@ -26,7 +26,7 @@ Define the application dependencies.
$ cd composetest
```
2. Create a file called `app.py` in your project directory and paste this in:
2. Create a file called `app.py` in your project directory and paste the following code in:
```python
import time
@ -62,13 +62,13 @@ Define the application dependencies.
> Note the way the `get_hit_count` function is written. This basic retry
> loop lets us attempt our request multiple times if the redis service is
> not available. This is useful at startup while the application comes
> online, but also makes our application more resilient if the Redis
> online, but also makes the application more resilient if the Redis
> service needs to be restarted anytime during the app's lifetime. In a
> cluster, this also helps handle momentary connection drops between
> nodes.
3. Create another file called `requirements.txt` in your project directory and
paste this in:
paste the following code in:
```text
flask
@ -77,12 +77,11 @@ Define the application dependencies.
## Step 2: Create a Dockerfile
In this step, you write a Dockerfile that builds a Docker image. The image
The Dockerfile is used to build a Docker image. The image
contains all the dependencies the Python application requires, including Python
itself.
In your project directory, create a file named `Dockerfile` and paste the
following:
In your project directory, create a file named `Dockerfile` and paste the following code in:
```dockerfile
# syntax=docker/dockerfile:1
@ -109,6 +108,11 @@ This tells Docker to:
* Copy the current directory `.` in the project to the workdir `.` in the image.
* Set the default command for the container to `flask run`.
> **Important**
>
> Check that the `Dockerfile` has no file extension like `.txt`. Some editors may append this file extension automatically, which results in an error when you run the application.
{: .important}
For more information on how to write Dockerfiles, see the
[Docker user guide](../develop/index.md)
and the [Dockerfile reference](/engine/reference/builder/).
@ -132,13 +136,8 @@ services:
This Compose file defines two services: `web` and `redis`.
### Web service
The `web` service uses an image that's built from the `Dockerfile` in the current directory.
It then binds the container and the host machine to the exposed port, `8000`. This example service uses the default port for
the Flask web server, `5000`.
### Redis service
It then binds the container and the host machine to the exposed port, `8000`. This example service uses the default port for the Flask web server, `5000`.
The `redis` service uses a public [Redis](https://registry.hub.docker.com/_/redis/)
image pulled from the Docker Hub registry.
@ -175,11 +174,7 @@ image pulled from the Docker Hub registry.
2. Enter http://localhost:8000/ in a browser to see the application running.
If you're using Docker natively on Linux, Docker Desktop for Mac, or Docker Desktop for
Windows, then the web app should now be listening on port 8000 on your
Docker daemon host. Point your web browser to http://localhost:8000 to
find the `Hello World` message. If this doesn't resolve, you can also try
http://127.0.0.1:8000.
If this doesn't resolve, you can also try http://127.0.0.1:8000.
You should see a message in your browser saying:
@ -346,12 +341,9 @@ container:
$ docker compose down --volumes
```
At this point, you have seen the basics of how Compose works.
## Where to go next
- Next, try the [Sample apps with Compose](samples-for-compose.md)
- Next, try the [Sample apps with Compose](https://github.com/docker/awesome-compose)
- [Explore the full list of Compose commands](reference/index.md)
- [Compose configuration file reference](compose-file/index.md)
- [Explore the Compose configuration file reference](compose-file/index.md)
- To learn more about volumes and bind mounts, see [Manage data in Docker](../storage/index.md)

View File

@ -10,216 +10,88 @@ redirect_from:
- /compose/completion/
---
> **Looking for Compose file reference?** [Find the latest version here](compose-file/index.md).
Compose is a tool for defining and running multi-container Docker applications.
With Compose, you use a YAML file to configure your application's services.
Then, with a single command, you create and start all the services
from your configuration. To learn more about all the features of Compose,
see [the list of features](#features).
from your configuration.
Compose works in all environments: production, staging, development, testing, as
well as CI workflows. You can learn more about each case in [Common Use
Cases](#common-use-cases).
Using Compose is basically a three-step process:
1. Define your app's environment with a `Dockerfile` so it can be reproduced
anywhere.
2. Define the services that make up your app in `docker-compose.yml`
so they can be run together in an isolated environment.
3. Run `docker compose up` and the [Docker compose command](#compose-v2-and-the-new-docker-compose-command) starts and runs your entire app. You can alternatively run `docker-compose up` using Compose standalone (the `docker-compose` binary).
A `docker-compose.yml` looks like this:
```yaml
version: "{{ site.compose_file_v3 }}" # optional since v1.27.0
services:
web:
build: .
ports:
- "8000:5000"
volumes:
- .:/code
- logvolume01:/var/log
depends_on:
- redis
redis:
image: redis
volumes:
logvolume01: {}
```
For more information about the Compose file, see the
[Compose file reference](compose-file/index.md).
Compose has commands for managing the whole lifecycle of your application:
well as CI workflows. It also has commands for managing the whole lifecycle of your application:
* Start, stop, and rebuild services
* View the status of running services
* Stream the log output of running services
* Run a one-off command on a service
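A minimal sketch of those lifecycle commands, assuming a project with a `web`
service, might look like this:
```console
$ docker compose up -d --build   # start services, rebuilding images if needed
$ docker compose ps              # view the status of running services
$ docker compose logs --follow   # stream the log output of running services
$ docker compose run web env     # run a one-off command on a service
$ docker compose stop            # stop services
```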
## Compose V2 and the new `docker compose` command
The key features of Compose that make it effective are:
> Important
>
> The new Compose V2, which supports the `compose` command as part of the Docker
> CLI, is now available.
>
> Compose V2 integrates compose functions into the Docker platform, continuing
> to support most of the previous `docker-compose` features and flags. You can
> run Compose V2 by replacing the hyphen (`-`) with a space, using `docker compose`,
> instead of `docker-compose`.
{: .important}
* [Have multiple isolated environments on a single host](features-uses.md#have-multiple-isolated-environments-on-a-single-host)
* [Preserves volume data when containers are created](features-uses.md#preserves-volume-data-when-containers-are-created)
* [Only recreate containers that have changed](features-uses.md#only-recreate-containers-that-have-changed)
* [Supports variables and moving a composition between environments](features-uses.md#supports-variables-and-moving-a-composition-between-environments)
If you rely on using Docker Compose as `docker-compose` (with a hyphen), you can
set up Compose V2 to act as a drop-in replacement of the previous `docker-compose`.
Refer to the [Installing Compose](install/index.md) section for detailed instructions.
## Context of Docker Compose evolution
Introduction of the [Compose specification](https://github.com/compose-spec/compose-spec){:target="_blank" rel="noopener" class="_"}
makes a clean distinction between the Compose YAML file model and the `docker-compose`
implementation. Making this change has enabled a number of enhancements, including
adding the `compose` command directly into the Docker CLI, being able to "up" a
Compose application on cloud platforms by simply switching the Docker context,
and the launch of the [Amazon ECS](../cloud/ecs-integration.md) and [Microsoft ACI](../cloud/aci-integration.md) integrations.
As the Compose specification evolves, new features land faster in the Docker CLI.
Compose V2 relies directly on the compose-go bindings which are maintained as part
of the specification. This allows us to include community proposals, experimental
implementations by the Docker CLI and/or Engine, and deliver features faster to
users. Compose V2 also supports some of the newer additions to the specification,
such as [profiles](profiles.md) and [GPU](gpu-support.md) devices.
Compose V2 has been re-written in [Go](https://go.dev), which improves integration
with other Docker command-line features, and allows it to run natively on
[macOS on Apple silicon](../desktop/mac/apple-silicon.md), Windows, and Linux,
without dependencies such as Python.
For more information about compatibility with the compose v1 command-line, see the [docker-compose compatibility list](cli-command-compatibility.md).
<div class="component-container">
<!--start row-->
<div class="row">
<div class="col-xs-12 col-sm-12 col-md-12 col-lg-4 block">
<div class="component">
<div class="component-icon">
<a href="/compose/install/"><img src="/assets/images/download.svg" alt="Download and install" width="70" height="70"></a>
</div>
<h2 id="docker-compose"><a href="/compose/install/">Install Compose </a></h2>
<p>Follow the instructions on how to install Docker Compose.</p>
</div>
</div>
<div class="col-xs-12 col-sm-12 col-md-12 col-lg-4 block">
<div class="component">
<div class="component-icon">
<a href="/compose/gettingstarted/"><img src="/assets/images/explore.svg" alt="Docker Compose" width="70" height="70"></a>
</div>
<h2 id="docker-compose"><a href="/compose/gettingstarted/">Try Compose</a></h2>
<p>Learn the key concepts of Docker Compose whilst building a simple Python web application.</p>
</div>
</div>
<div class="col-xs-12 col-sm-12 col-md-12 col-lg-4 block">
<div class="component">
<div class="component-icon">
<a href="/compose/release-notes/"><img src="/assets/images/note-add.svg" alt="Release notes" width="70" height="70"></a>
</div>
<h2 id="docker-compose"><a href="/compose/release-notes/">View the release notes</a></h2>
<p>Find out about the latest enhancements and bug fixes.</p>
</div>
</div>
</div>
<!--start row-->
<div class="row">
<div class="col-xs-12 col-sm-12 col-md-12 col-lg-4 block">
<div class="component">
<div class="component-icon">
<a href="/compose/features-uses/"><img src="/assets/images/help.svg" alt="FAQs" width="70" height="70"></a>
</div>
<h2 id="docker-compose"><a href="/compose/features-uses/">Understand key features of Compose</a></h2>
<p>Understand its key features and explore common use cases.</p>
</div>
</div>
<div class="col-xs-12 col-sm-12 col-md-12 col-lg-4 block">
<div class="component">
<div class="component-icon">
<a href="/compose/compose-file/"><img src="/assets/images/all-inbox.svg" alt="Additional resources" width="70" height="70"></a>
</div>
<h2 id="docker-compose"><a href="/compose/compose-file/">Explore the Compose file reference</a></h2>
<p>Find information on defining services, networks, and volumes for a Docker application.</p>
</div>
</div>
<div class="col-xs-12 col-sm-12 col-md-12 col-lg-4 block">
<div class="component">
<div class="component-icon">
<a href="/compose/faq/"><img src="/assets/images/sms.svg" alt="Give feedback" width="70" height="70"></a>
</div>
<h2 id="docker-compose"><a href="/compose/faq/">Browse common FAQs</a></h2>
<p>Explore general FAQs and find out how to give feedback.</p>
</div>
</div>
</div>
</div>
## Features
The features of Compose that make it effective are:
* [Multiple isolated environments on a single host](#multiple-isolated-environments-on-a-single-host)
* [Preserve volume data when containers are created](#preserve-volume-data-when-containers-are-created)
* [Only recreate containers that have changed](#only-recreate-containers-that-have-changed)
* [Variables and moving a composition between environments](#variables-and-moving-a-composition-between-environments)
### Multiple isolated environments on a single host
Compose uses a project name to isolate environments from each other. You can make use of this project name in several different contexts:
* on a dev host, to create multiple copies of a single environment, such as when you want to run a stable copy for each feature branch of a project
* on a CI server, to keep builds from interfering with each other, you can set
the project name to a unique build number
* on a shared host or dev host, to prevent different projects, which may use the
same service names, from interfering with each other
The default project name is the basename of the project directory. You can set
a custom project name by using the
[`-p` command line option](reference/index.md) or the
[`COMPOSE_PROJECT_NAME` environment variable](reference/envvars.md#compose_project_name).
The default project directory is the base directory of the Compose file. A custom value
for it can be defined with the `--project-directory` command line option.
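For example, either of the following sets a custom project name (a minimal sketch; `myproject` and the `./backend` directory are illustrative):

```console
$ docker compose -p myproject up -d                                # -p sets the project name
$ COMPOSE_PROJECT_NAME=myproject docker compose up -d              # same effect via the environment variable
$ docker compose --project-directory ./backend -p myproject up -d  # also override the project directory
```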
### Preserve volume data when containers are created
Compose preserves all volumes used by your services. When `docker compose up`
runs, if it finds any containers from previous runs, it copies the volumes from
the old container to the new container. This process ensures that any data
you've created in volumes isn't lost.
If you use `docker-compose` on a Windows machine, see
[Environment variables](reference/envvars.md) and adjust the necessary environment
variables for your specific needs.
### Only recreate containers that have changed
Compose caches the configuration used to create a container. When you
restart a service that has not changed, Compose re-uses the existing
containers. Re-using containers means that you can make changes to your
environment very quickly.
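For instance (a sketch; the `web` service name is illustrative):

```console
$ docker compose up -d                        # first run creates the containers
$ docker compose up -d                        # unchanged services are reused as-is
$ docker compose up -d --force-recreate web   # force recreation of a single service
```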
### Variables and moving a composition between environments
Compose supports variables in the Compose file. You can use these variables
to customize your composition for different environments, or different users.
See [Variable substitution](compose-file/compose-file-v3.md#variable-substitution) for more
details.
You can extend a Compose file using the `extends` field or by creating multiple
Compose files. See [extends](extends.md) for more details.
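As an illustration, the following commands show both techniques; the `TAG` variable and the `docker-compose.prod.yml` override file are hypothetical names:

```console
$ export TAG=v1.5                 # value substituted for ${TAG} in the Compose file
$ docker compose config           # preview the fully resolved configuration
$ docker compose -f docker-compose.yml -f docker-compose.prod.yml up -d   # layer an override file on top of the base file
```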
## Common use cases
Compose can be used in many different ways. Some common use cases are outlined
below.
### Development environments
When you're developing software, the ability to run an application in an
isolated environment and interact with it is crucial. The Compose command
line tool can be used to create the environment and interact with it.
The [Compose file](compose-file/index.md) provides a way to document and configure
all of the application's service dependencies (databases, queues, caches,
web service APIs, etc). Using the Compose command line tool you can create
and start one or more containers for each dependency with a single command
(`docker-compose up`).
Together, these features provide a convenient way for developers to get
started on a project. Compose can reduce a multi-page "developer getting
started guide" to a single machine readable Compose file and a few commands.
### Automated testing environments
An important part of any Continuous Deployment or Continuous Integration process
is the automated test suite. Automated end-to-end testing requires an
environment in which to run tests. Compose provides a convenient way to create
and destroy isolated testing environments for your test suite. By defining the full environment in a [Compose file](compose-file/index.md), you can create and destroy these environments in just a few commands:
```console
$ docker compose up -d
$ ./run_tests
$ docker compose down
```
### Single host deployments
Compose has traditionally been focused on development and testing workflows,
but with each release we're making progress on more production-oriented features.
For details on using production-oriented features, see
[compose in production](production.md) in this documentation.
## Release notes
To see a detailed list of changes for past and current releases of Docker
Compose, refer to the
[CHANGELOG](https://github.com/docker/compose/blob/master/CHANGELOG.md).
## Getting help
Docker Compose is under active development. If you need help, would like to
contribute, or simply want to talk about the project with like-minded
individuals, we have a number of open channels for communication.
* To report bugs or file feature requests: use the [issue tracker on GitHub](https://github.com/docker/compose/issues).
* To talk about the project with people in real time: join the
`#docker-compose` channel on the Docker Community Slack.
* To contribute code or documentation changes: submit a [pull request on GitHub](https://github.com/docker/compose/pulls).
View File
@ -1,54 +1,44 @@
---
description: How to install Docker Compose
keywords: compose, orchestration, install, installation, docker, documentation
title: Install Docker Compose
title: Overview
toc_max: 3
redirect_from:
- /compose/compose-desktop/
---
On this page you can find information on how to get and install Compose.
This page contains summary information about the available options for getting Docker Compose.
## Install Compose
## Installation scenarios
If you have Docker Desktop, you've got a full Docker installation, including Compose.
### Scenario one: Install Docker Desktop
You can check this by clicking on **About Docker Desktop** from the Docker menu ![whale menu](../../desktop/images/whale-x.svg){: .inline}.
The easiest and recommended way to get Docker Compose is to install Docker Desktop. Docker Desktop
includes Docker Compose along with Docker Engine and Docker CLI which are Compose prerequisites.
![about-desktop](../../desktop/images/about_desktop_versions.png){:width="750px"}
Docker Desktop is available on:
- [Linux](../../desktop/install/linux-install.md)
- [Mac](../../desktop/install/mac-install.md)
- [Windows](../../desktop/install/windows-install.md)
### New Docker Installation
If you have already installed Docker Desktop, you can check which version of Compose you have by selecting **About Docker Desktop** from the Docker menu ![whale menu](../../desktop/images/whale-x.svg){: .inline}
If you haven't yet installed Docker Desktop:
### Scenario two: Install the Compose plugin
{% assign yes = '![yes](/assets/images/green-check.svg){: .inline style="height: 14px; margin: 0 auto; align=right"}' %}
If you already have Docker Engine and Docker CLI installed, you can install the Compose plugin from the command line, by either:
- [Using Docker's repository](linux.md#install-using-the-repository)
- [Downloading and installing manually](linux.md#install-the-plugin-manually)
| Platform | Docker Desktop |
|:---------------|:------------------------------:|
|Linux (64-bit) |{{ yes }} [Install](../../desktop/install/linux-install.md)|
|Mac |{{ yes }} [Install](../../desktop/install/mac-install.md)|
|Windows |{{ yes }} [Install](../../desktop/install/windows-install.md)|
>Note
>
>This is only available on Linux
{: .important}
### Scenario three: Install the Compose standalone
### Pre-existing Docker Installation
You can [install the Compose standalone](other.md) on Linux or on Windows Server.
If you have a Docker installation on Linux to which you want to add the Compose plugin, you can choose one of these options:
* [Install using the repository](./linux.md#install-using-the-repository)
* [Install the plugin manually](./linux.md#install-the-plugin-manually)
### Other install scenarios
For other options, see [Other install scenarios](./other.md).
## Where to go next
- [Getting Started](../gettingstarted.md)
- [Command line reference](../../reference/index.md)
- [Compose file reference](../compose-file/index.md)
- [Sample apps with Compose](../samples-for-compose.md)
## Feedback
We'd love to hear from you about the improvements we've recently made to the Docker Compose installation instructions. Let us know what you think by completing this short [survey](https://survey.alchemer.com/s3/7002962/Compose-Install-Documentation-Feedback){:target="_blank" rel="noopener" class="_"}.
>Note
>
>This install scenario is no longer supported.
{: .important}
View File
@ -2,30 +2,24 @@
description: How to install Docker Compose on Linux
keywords: compose, orchestration, install, installation, docker, documentation
toc_max: 3
title: Install on Linux
title: Install the Compose plugin
redirect_from:
- /compose/compose-plugin/
- /compose/compose-linux/
---
On this page you can find instructions on how to install the Compose on Linux from the command line.
On this page you can find instructions on how to install the Compose plugin on Linux from the command line.
## Install Compose
To install Compose:
* Option 1: [Set up Docker's repository on your Linux system](#install-using-the-repository).
* Option 2: [Install Compose manually](#install-the-plugin-manually).
### Install using the repository
To install the Compose plugin on Linux, you can either:
- [Set up Docker's repository on your Linux system](#install-using-the-repository).
- [Install Compose manually](#install-the-plugin-manually).
> **Note**
>
> These instructions assume you already have Docker Engine and Docker CLI installed and now want to install the Compose plugin.
For Compose standalone, see [Install Compose Standalone](other.md#install-compose-standalone).
For Compose standalone, see [Install Compose Standalone](other.md).
If you have already set up the Docker repository, jump to step 2.
### Install using the repository
1. Set up the repository. Find distro-specific instructions in:
@ -36,15 +30,15 @@ If you have already set up the Docker repository, jump to step 2.
[RHEL](../../engine/install/rhel.md/#set-up-the-repository) |
[SLES](../../engine/install/sles.md/#set-up-the-repository).
2. Update the package index, and install the _latest version_ of Docker Compose:
2. Update the package index, and install the latest version of Docker Compose:
* Ubuntu, Debian:
* For Ubuntu and Debian, run:
```console
$ sudo apt-get update
$ sudo apt-get install docker-compose-plugin
```
* RPM-based distros:
* For RPM-based distros, run:
```console
$ sudo yum update
@ -62,15 +56,15 @@ Where `vN.N.N` is placeholder text standing in for the latest version.
#### Update Compose
To update Compose, run the following commands:
To update the Compose plugin, run the following commands:
* Ubuntu, Debian:
* For Ubuntu and Debian, run:
```console
$ sudo apt-get update
$ sudo apt-get install docker-compose-plugin
```
* RPM-based distros:
* For RPM-based distros, run:
```console
$ sudo yum update
@ -81,7 +75,7 @@ To update Compose, run the following commands:
> **Note**
>
> This option requires you to manage upgrades manually. We recommend setting up Docker's repository for an easier maintenance.
> This option requires you to manage upgrades manually. We recommend setting up Docker's repository for easier maintenance.
1. To download and install the Compose CLI plugin, run:
@ -115,10 +109,3 @@ To update Compose, run the following commands:
$ docker compose version
Docker Compose version {{site.compose_version}}
```
## Where to go next
- [Manage Docker as a non-root user](../../engine/install/linux-postinstall.md)
- [Command line reference](../../reference/index.md)
- [Compose file reference](../compose-file/index.md)
- [Sample apps with Compose](../samples-for-compose.md)
View File
@ -2,17 +2,16 @@
description: How to install Docker Compose - Other Scenarios
keywords: compose, orchestration, install, installation, docker, documentation
toc_max: 3
title: Other install Scenarios
title: Install the Compose standalone
---
## Install Compose Standalone
On this page you can find instructions on how to install the Compose standalone on Linux or Windows Server, from the command line.
### On Linux
> **Compose standalone**
>
> Note that Compose standalone uses the _dash compose_ syntax instead of current's standard syntax (_space compose_).
For example: type `docker-compose up` when using Compose standalone, instead of `docker compose up`.
> Note that Compose standalone uses the `-compose` syntax instead of the current standard syntax `compose`.
>For example, type `docker-compose up` when using Compose standalone, instead of `docker compose up`.
1. To download and install Compose standalone, run:
```console
@ -35,7 +34,7 @@ For example: type `docker-compose up` when using Compose standalone, instead of
Follow these instructions if you are running the Docker daemon and client directly
on Microsoft Windows Server and want to install Docker Compose.
1. Run a PowerShell as an administrator.
1. Run PowerShell as an administrator.
When asked if you want to allow this app to make changes to your device, click **Yes** in order to proceed with the installation.
2. GitHub now requires TLS1.2. In PowerShell, run the following:
View File
@ -68,14 +68,25 @@ script:
set -e
host="$1"
# Shift arguments with mapping:
# - $0 => $0
# - $1 => <discarded>
# - $2 => $1
# - $3 => $2
# - ...
# This is done for `exec "$@"` below to work correctly
shift
# Log in as the `postgres` user (`-U`) and, once logged in, execute quit (`-c \q`)
# If we cannot log in, sleep for 1 sec
until PGPASSWORD=$POSTGRES_PASSWORD psql -h "$host" -U "postgres" -c '\q'; do
>&2 echo "Postgres is unavailable - sleeping"
sleep 1
done
>&2 echo "Postgres is up - executing command"
# Print and execute all other arguments starting with `$1`
# So `exec "$1" "$2" "$3" ...`
exec "$@"
```
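For context, the script above waits for the host passed as its first argument and then execs the remaining arguments as the real command. A hypothetical invocation, assuming the script is saved as `wait-for-postgres.sh` (the `db` host and the application command are also illustrative):

```console
$ POSTGRES_PASSWORD=mysecretpassword ./wait-for-postgres.sh db python app.py
```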
View File
@ -23,8 +23,9 @@ the following:
WARNING: No swap limit support
```
Consult your operating system's documentation for enabling them.
[Learn more](../../engine/install/linux-postinstall.md#your-kernel-does-not-support-cgroup-swap-limit-capabilities).
Consult your operating system's documentation for enabling them. See also the
[Docker Engine troubleshooting guide](../../engine/install/troubleshoot.md#kernel-cgroup-swap-limit-capabilities)
for more information.
## Memory
View File
@ -40,7 +40,7 @@ the machine reboots.
The command to start Docker depends on your operating system. Check the correct
page under [Install Docker](../../engine/install/index.md). To configure Docker
to start automatically at system boot, see
[Configure Docker to start on boot](../../engine/install/linux-postinstall.md#configure-docker-to-start-on-boot).
[Configure Docker to start on boot](../../engine/install/linux-postinstall.md#configure-docker-to-start-on-boot-with-systemd).
## Start the daemon manually
View File
@ -26,7 +26,7 @@ $ sudo systemctl start docker
### Start automatically at system boot
If you want Docker to start at boot, see
[Configure Docker to start on boot](../../engine/install/linux-postinstall.md#configure-docker-to-start-on-boot).
[Configure Docker to start on boot](../../engine/install/linux-postinstall.md#configure-docker-to-start-on-boot-with-systemd).
## Custom Docker daemon options
View File
@ -4,7 +4,7 @@ description: Guidelines for contributing to Docker's docs
keywords: contribute, guide, style guide
---
The live docs are published from the `master` branch. Therefore, you must create pull requests against the `master` branch for all content updates. This includes:
The live docs are published from the `main` branch. Therefore, you must create pull requests against the `main` branch for all content updates. This includes:
- Conceptual and task-based information
- Restructuring / rewriting
@ -17,7 +17,7 @@ There are two ways to contribute a pull request to the docs repository:
This opens the GitHub editor, which means you don't need to know a lot about Git, or even about Markdown. When you save, Git prompts you to create a fork if you don't already have one, and to create a branch in your fork and submit the pull request.
2. Fork the [docs GitHub repository]({{ site.repo }}). Suggest changes or add new content on your local branch, and submit a pull request (PR) to the `master` branch.
2. Fork the [docs GitHub repository]({{ site.repo }}). Suggest changes or add new content on your local branch, and submit a pull request (PR) to the `main` branch.
This is the manual, more advanced version of clicking 'Edit this page' on a published docs page. Initiating docs changes in a PR from your own branch gives you more flexibility, as you can submit changes to multiple pages or files under a single pull request, and even create new topics.
@ -44,7 +44,7 @@ Help us review your PRs more quickly by following these guidelines.
- Don't change whitespace or line wrapping in parts of a file you are not editing for other reasons. Make sure your text editor is not configured to
automatically reformat the whole file when saving.
- We highly recommend that you [build](#build-and-preview-the-docs-locally) and [test](#test-the-docs-locally) the docs locally before submitting a PR.
- A Netlify test runs for each PR that is against the `master` branch, and deploys the result of your PR to a staging site. The URL is available in the **Conversation** tab. Check the staging site to verify how your changes look and fix issues, if necessary.
- A Netlify test runs for each PR that is against the `main` branch, and deploys the result of your PR to a staging site. The URL is available in the **Conversation** tab. Check the staging site to verify how your changes look and fix issues, if necessary.
### Collaborate on a pull request
@ -56,11 +56,11 @@ given file in the **Files** view.
If a PR consists of multiple small addendum commits on top of a more significant
one, the commit will usually be "squash-merged", so that only one commit is
merged into the `master` branch. In some scenarios where a squash and merge isn't appropriate, all commits are kept separate when merging.
merged into the `main` branch. In some scenarios where a squash and merge isn't appropriate, all commits are kept separate when merging.
### Per-PR staging on GitHub
A Netlify test runs for each PR created against the `master` branch and deploys the result of your PR to a staging site. When the site builds successfully, you will see a comment in the **Conversation** tab in the PR stating **Deploy Preview for docsdocker ready!**. Click the **Browse the preview** URL and check the staging site to verify how your changes look and fix issues, if necessary. Reviewers also check the staged site before merging the PR to protect the integrity of the docs site.
A Netlify test runs for each PR created against the `main` branch and deploys the result of your PR to a staging site. When the site builds successfully, you will see a comment in the **Conversation** tab in the PR stating **Deploy Preview for docsdocker ready!**. Click the **Browse the preview** URL and check the staging site to verify how your changes look and fix issues, if necessary. Reviewers also check the staged site before merging the PR to protect the integrity of the docs site.
## Build and preview the docs locally
View File
@ -20,15 +20,16 @@ file that starts and ends with three hyphens. It includes YAML content. The
following keys are supported. The title, description, and keywords are required.
| Key | Required | Description |
|------------------------|-----------|-----------------------------------------|
|----------------|----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| title | yes | The page title. This is added to the HTML output as a `<h1>` level header. |
| description | yes | A sentence that describes the page contents. This is added to the HTML metadata. Its not rendered on the page. |
| keywords | yes | A comma-separated list of keywords. These are added to the HTML metadata. |
| redirect_from | no | A YAML list of pages which should redirect to the current page. At build time, each page listed here is created as an HTML stub containing a 302 redirect to this page. |
| notoc | no | Either `true` or `false`. If `true`, no in-page TOC is generated for the HTML output of this page. Defaults to `false`. Appropriate for some landing pages that have no in-page headings.|
| notoc | no | Either `true` or `false`. If `true`, no in-page TOC is generated for the HTML output of this page. Defaults to `false`. Appropriate for some landing pages that have no in-page headings. |
| toc_min | no | Ignored if `notoc` is set to `true`. The minimum heading level included in the in-page TOC. Defaults to `2`, to show `<h2>` headings as the minimum. |
| toc_max | no | Ignored if `notoc` is set to `false`. The maximum heading level included in the in-page TOC. Defaults to `3`, to show `<h3>` headings. Set to the same as `toc_min` to only show `toc_min` level of headings. |
| skip_read_time | no | Set to `true` to disable the 'Estimated reading time' banner for this page. |
| skip_feedback | no | Set to `true` to disable the Feedback widget for this page. |
| sitemap | no | Exclude the page from indexing by search engines. When set to `false`, the page is excluded from `sitemap.xml`, and a `<meta name="robots" content="noindex"/>` header is added to the page. |
Here's an example of a valid (but contrived) page metadata. The order of
View File
@ -157,3 +157,6 @@ Version 4.8.2
- Avoid awkward doubling of vowels. For example, semi-independence or re-elect.
- Prevent misreading of certain words. For example, re-collect means to collect again; without a hyphen, the word recollect has a different meaning.
### Parentheses
Don't use parentheses in technical documentation. They can reduce the readability of a sentence.
43
contribute/ui.md Normal file
View File
@ -0,0 +1,43 @@
---
title: UI elements in content
description: How to refer and interact with UI content
keywords: ui, contribute, style guide
---
This page contains information on how to write technical content that involves a user interface (UI).
## Format names of UI elements
Always bold UI elements when referring to them by name.
This includes names for buttons, menus, dialogs, windows, list items, or any other feature on the page that has a visible name.
Don't make an official feature name or product name bold, except when it directly refers to an element on the page that uses the name, such as a window title or button name.
In most cases, follow the capitalization as it appears on the page. However, if labels are inconsistent or they're all uppercase, use sentence case.
## Focus on the task
When practical, state instructions in terms of what the user should accomplish, rather than focusing on the widgets and gestures. By avoiding reference to UI elements, you help the user understand the purpose of an instruction, and it can help future-proof procedures.
|Correct |Incorrect |
|:-----------|:------------|
|Expand the **Advanced options** section | Select the zippy to expand the **Advanced options** section|
## Refer to UI elements
Don't use UI elements as if they were English verbs or nouns.
|Correct |Incorrect |
|:-----------|:------------|
|In the **Name** field, enter an account name. | **Name** the account.|
|To save the settings, select **Save**.| **Save** the settings.|
## Prepositions
When documenting the UI, use the following prepositions.
|Preposition |UI element | Example |
|:-----------|:------------|:-----------|
|in | dialogs <br>fields <br>lists <br>menus <br>panes <br>windows <br>| In the **Alert** dialog, select **OK**. <br> In the **Name** field, enter `wsfc-1`. <br> In the **Item** list, select **Desktop**. <br>In the **File** menu, click **Tools**.<br> In the **Metrics** pane, select **New**. <br>In the **Task** window, select **Start**. |
| on |pages <br>tabs <br>toolbars | On the **Create an instance** page, select **Add**. <br> On the **Edit** tab, select **Save**.<br> On the **Dashboard toolbar**, select **Edit**.<br>|
View File
@ -209,27 +209,41 @@ View Tags on DockerHub to see multi-platform result:
## Containerd Image Store Release Notes
(2022-09-01)
### 2022-10-19
New commands and multiple bug fixes included with Docker Desktop 4.13.0.
#### New
* Added `docker diff` and `docker run --platform` support.
#### Bug fixes
* Fixed a bug that prevented Kubernetes from starting.
* Fixed a bug that prevented Kubernetes IN Docker (kind) from starting.
* Fixed a bug that prevented Dev Environments from working.
* Fixed a bug that prevented Insecure Registries from working.
#### Known issues
* Listing images with `docker images` returns the error `content digest not found` on ARM machines after running or pulling an image with the `--platform` parameter.
### 2022-09-01
The Containerd Image Store is shipped as a [Beta](../../release-lifecycle.md/#beta) feature on Docker Desktop 4.12.0.
### New
#### New
Initial implementation of the Docker commands: `run`, `commit`, `build`, `push`, `load`, `search` and `save`.
### Known issues
#### Known issues
* The Containerd Image Store feature requires Buildx version 0.9.0 or newer.
+ On Docker Desktop for Linux (DD4L), validate if your locally installed version meets this requirement.
>**Note**
>
> If an older version is installed, the Docker daemon will report an error: **Multiple platforms feature is currently not supported for docker driver. Please switch to a different driver**.
> If an older version is installed, the Docker daemon reports the following error: **Multiple platforms feature is currently not supported for docker driver. Please switch to a different driver**.
Install a newer version of Buildx following the instructions on [Docker Buildx Manual download](../../build/buildx/install/#manual-download).
* Containerd Image Store feature and Kubernetes cluster support in Docker Desktop 4.12.0 are incompatible at the moment. Disable the Containerd Image Store feature if you are using the Kubernetes from Docker Desktop.
* Local registry mirror configuration isn't implemented yet with the Containerd Image Store. Hence the `registry-mirrors` and `insecure-registries` aren't taken into account by the Docker daemon.
* Containerd Image Store feature and Kubernetes cluster support in Docker Desktop 4.12.0 are incompatible at the moment. Turn off the Containerd Image Store feature if you are using Kubernetes from Docker Desktop.
* Local registry mirror configuration isn't implemented yet with the Containerd Image Store. The `registry-mirrors` and `insecure-registries` aren't taken into account by the Docker daemon.
* The `reference` filter isn't implemented yet and will return the error `invalid filter 'reference'` when listing images.
* Pulling an image may fail with the error `pull access denied, repository does not exist or may require authorization: server message: insufficient_scope: authorization failed`, when the image does not contain a manifest list. To work around this issue, run the `docker login` command and pull the image again.
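A sketch of that workaround (the image name is a placeholder):

```console
$ docker login
$ docker pull myorg/myimage:latest   # placeholder image name
```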
## Feedback
Thanks for trying the new features available with `containerd`.
We'd love to hear from you! Please feel free to give feedback or report any bugs you may find through the issues tracker on the [feedback form](https://dockr.ly/3PODIhD){: target="_blank" rel="noopener" class="_"}.
Thanks for trying the new features available with `containerd`. Give feedback or report any bugs you may find through the issues tracker on the [feedback form](https://dockr.ly/3PODIhD){: target="_blank" rel="noopener" class="_"}.
View File
@ -1,12 +1,12 @@
---
description: Dev Environments
keywords: Dev Environments, share, collaborate, local, compose
title: Create a Compose Dev Environment
title: Create an advanced dev environment
---
Use Dev Environments to collaborate on any Docker Compose-based projects.
Create an advanced dev environment such as a microservice with a server, proxy and DB.
As with a simple Dev Environment, you can create a Compose Dev Environment from a:
As with a simple dev environment, you can create a more advanced dev environment from a:
- Git repository
- Branch or tag of a Git repository
- Subfolder of a Git repository
@ -22,12 +22,12 @@ The example below, taken from the `compose-dev-env` project from the [Docker Sam
>Note
>
>If you want to create a Compose Dev Environment from a subdirectory of a Git repo, you need to define your own compose file in a .docker folder located in your subdirectory as currently, Dev Environments is not able to detect the main language of the subdirectory.
>Currently, Dev Environments is not able to detect the main language of the subdirectory. You need to define your own base image or compose services in a `compose-dev.yaml` located in your subdirectory.
>
>For more information on how to configure, see the [React application with a Spring backend and a MySQL database sample](https://github.com/docker/awesome-compose/tree/master/react-java-mysql) or the [Go server with an Nginx proxy and a Postgres database sample](https://github.com/docker/awesome-compose/tree/master/nginx-golang-postgres).
1. From **Dev Environments**, select **Create**. The **Create a Dev Environment** dialog displays.
2. Click **Get Started** and then copy `https://github.com/dockersamples/compose-dev-env.git` and add it to the **Enter the Git Repository** field on the **Existing Git repo** tab.
2. Click **Get Started** and then copy `https://github.com/dockersamples/compose-dev-env.git` and add it to the **Enter the Git Repository** field with **Existing Git repo** as the source.
3. Click **Continue**. This initializes the project, clones the Git code, and builds the Compose application. This:
- Builds local images for services that are defined in the Compose file
@ -43,13 +43,21 @@ Note that VS Code doesn't open directly, unlike a simple Dev Environment, as the
You can now update your service and test it against your Compose application.
## Set up your own Compose Dev Environment
## Set up your own dev environment
To set up a Dev Environment for your own Compose-based project, there are additional configuration steps to tell Docker Desktop how to build, start, and use the right Dev Environment image for your services.
>**Changes to Dev Environments with Docker Desktop 4.13**
>
>Docker has simplified how you configure your dev environment project. All you need to get started is a `compose-dev.yaml` file. If you have an existing project with a `.docker/` folder, this is automatically migrated the next time you launch.
>
> If you are using `.docker/docker-compose.yaml`, we move it to `../compose-dev.yaml`.
>If you are using `.docker/config.json`, we create a `../compose-dev.yaml` file with a single service named "app". It is configured to use the image or Dockerfile referenced in the JSON as a starting point.
{: .important}
Dev Environments use an additional `docker-compose.yaml` file located in the `.docker` directory at the root of your project. This file allows you to define the image required for a dedicated service, the ports you'd like to expose, along with additional configuration options dedicated to Dev Environments coming in the future.
To set up a dev environment, there are additional configuration steps to tell Docker Desktop how to build, start, and use the right image for your services.
Take a detailed look at the `docker-compose.yaml` file used in the [compose-dev-env](https://github.com/dockersamples/compose-dev-env/blob/main/.docker/docker-compose.yaml){:target="_blank" rel="noopener" class="_"} sample project.
Dev Environments use a `compose-dev.yaml` file located at the root of your project. This file allows you to define the image required for a dedicated service, the ports you'd like to expose, along with additional configuration options dedicated to Dev Environments that are coming in the future.
Take a detailed look at the `compose-dev.yaml` file used in the [compose-dev-env](https://github.com/dockersamples/compose-dev-env/blob/main/.docker/docker-compose.yaml){:target="_blank" rel="noopener" class="_"} sample project.
```yaml
version: "3.7"
@ -120,4 +128,4 @@ In the example, the Docker Compose files are the same. However, they could be di
## What's next?
Learn how to [share your Dev Environment](share.md)
Learn how to [distribute your dev environment](share.md)
View File
@ -1,10 +1,10 @@
---
description: Dev Environments
keywords: Dev Environments, share, collaborate, local
title: Create a Dev Environment
title: Create a simple dev environment
---
You can create a Dev Environment from a:
You can create a dev environment from a:
- Git repository
- Branch or tag of a Git repository
- Subfolder of a Git repository
@ -12,7 +12,7 @@ You can create a Dev Environment from a:
This does not conflict with any of the local files or local tooling set up on your host.
## Create a Dev Environment from a Git repository
## Create a dev environment from a Git repository
The simplest way to get started with Dev Environments is to create a new environment by cloning the Git repository of the project you are working on.
@ -55,10 +55,10 @@ fi
</div>
</div>
To create a Dev Environment:
To create a dev environment:
1. From **Dev Environments** in Docker Dashboard, click **Create**. The **Create a Dev Environment** dialog displays.
2. Select **Get Started** and then copy `https://github.com/dockersamples/single-dev-env.git` and add it to the **Enter the Git Repository** field on the **Existing Git repo** tab.
2. Select **Get Started** and then copy `https://github.com/dockersamples/single-dev-env.git` and add it to the **Enter the Git Repository** field with **Existing Git repo** as the source.
3. Select **Continue**.
This detects the main language of your repository, clones the Git code inside a volume, determines the best image for your Dev Environment, and opens VS Code inside the Dev Environment container.
@ -68,7 +68,7 @@ To create a Dev Environment:
5. To launch the application, run the command `make run` in your terminal. This opens an http server on port 8080. Open [http://localhost:8080](http://localhost:8080) in your browser to see the running application.
## Create a Dev Environment from a specific branch or tag
## Create a dev environment from a specific branch or tag
You can create a dev environment from a specific branch (for example, a branch corresponding to a Pull Request) or a tag by adding `@mybranch` or `@tag` as a suffix to your Git URL:
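For example, using the sample repository referenced on this page (the branch name `mybranch` is illustrative):

```
https://github.com/dockersamples/single-dev-env.git@mybranch
```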
@ -80,14 +80,14 @@ You can create a dev environment from a specific branch (for example, a branch c
Docker then clones the repository with your specified branch or tag.
## Create a Dev Environment from a subdirectory of a Git repository
## Create a dev environment from a subdirectory of a Git repository
>Note
>
>Currently, Dev Environments is not able to detect the main language of the subdirectory. You need to define your own base image or compose file in a .docker folder located in your subdirectory. For more information on how to configure, see the [React application with a Spring backend and a MySQL database sample](https://github.com/docker/awesome-compose/tree/master/react-java-mysql) or the [Go server with an Nginx proxy and a Postgres database sample](https://github.com/docker/awesome-compose/tree/master/nginx-golang-postgres).
>Currently, Dev Environments is not able to detect the main language of the subdirectory. You need to define your own base image or services in a `compose-dev.yaml` file located in your subdirectory. For more information on how to configure, see the [React application with a Spring backend and a MySQL database sample](https://github.com/docker/awesome-compose/tree/master/react-java-mysql) or the [Go server with an Nginx proxy and a Postgres database sample](https://github.com/docker/awesome-compose/tree/master/nginx-golang-postgres).
1. From **Dev Environments** in Docker Dashboard, click **Create**. The **Create a Dev Environment** dialog displays.
2. Select **Get Started** and then copy your Git subfolder link into the **Enter the Git Repository** field on the **Existing Git repo** tab.
2. Select **Get Started** and then copy your Git subfolder link into the **Enter the Git Repository** field with **Existing Git repo** as the source.
3. Select **Continue**.
This clones the Git code inside a volume, determines the best image for your Dev Environment, and opens VS Code inside the Dev Environment container.
@ -96,10 +96,10 @@ Docker then clones the repository with your specified branch or tag.
5. To launch the application, run the command `make run` in your terminal. This opens an http server on port 8080. Open [http://localhost:8080](http://localhost:8080) in your browser to see the running application.
## Create a Dev Environment from a local folder
## Create a dev environment from a local folder
1. From **Dev Environments** in Docker Dashboard, click **Create**. The **Create a Dev Environment** dialog displays.
2. Select **Get Started** and then the **Local Folder** tab.
2. Select **Get Started** and then choose **Local Folder** as the source.
3. Select **Select directory** to open the root of the code that you would like to work on.
4. Select **Continue**.
@ -107,8 +107,8 @@ Docker then clones the repository with your specified branch or tag.
> **Note**
>
> When using a local folder for a Dev Environment, file changes are synchronized between your Dev Environment container and your local files. This can affect the performance inside the container, depending on the number of files in your local folder and the operations performed in the container.
> When using a local folder for a dev environment, file changes are synchronized between your environment container and your local files. This can affect the performance inside the container, depending on the number of files in your local folder and the operations performed in the container.
## What's next?
Learn how to [share your Dev Environment](share.md)
Learn how to [distribute your dev environment](share.md)
View File
@ -3,17 +3,19 @@ description: Dev Environments
keywords: Dev Environments, share, collaborate, local
title: Overview
---
Dev Environments boosts collaboration by allowing you to share work-in-progress code with your team members. This removes any potential merge conflicts while moving between Git branches to get your code on to their machine.
Dev Environments uses tools built into code editors that allow Docker to access code mounted into a container rather than on your local host. This isolates the tools, files, and running services on your machine, allowing multiple versions of them to exist side by side.
You can also switch between your developer environments or your team members' environments, move between branches to look at changes that are in progress, without moving off your current Git branch. This makes reviewing PRs as simple as opening a new environment.
> **Beta**
>
> The Dev Environments feature is currently in [Beta](../../release-lifecycle.md#beta). We recommend that you do not use this in production environments.
Dev Environments lets you create a configurable developer environment with all the code and tools you need to quickly get up and running.
It uses tools built into code editors that allow Docker to access code mounted into a container rather than on your local host. This isolates the tools, files, and running services on your machine, allowing multiple versions of them to exist side by side.
>**Changes to Dev Environments with Docker Desktop 4.13**
>
>Docker has simplified how you configure your dev environment project. All you need to get started is a `compose-dev.yaml` file. If you have an existing project with a `.docker/` folder, this is automatically migrated the next time you launch.
{: .important}
![Dev environment intro](../images/dev-env.PNG){:width="700px"}
## Prerequisites
@ -46,12 +48,11 @@ If it doesn't detect Git as a valid command, you must reinstall Git and ensure y
The following section lists known issues and workarounds:
1. When sharing a Dev Environment between Mac and Windows, the VS Code terminal may not function correctly in some cases. To work around this issue, use the Exec in CLI option in the Docker Dashboard.
2. When sharing a Dev Environment between ARM64 and AMD64 machines, the environment is emulated.
1. When sharing a dev environment between Mac and Windows, the VS Code terminal may not function correctly in some cases. To work around this issue, use the Exec in CLI option in the Docker Dashboard.
## What's next?
Learn how to:
- [Create a Dev Environment](create-dev-env.md)
- [Create a Compose Dev Environment](create-compose-dev-env.md)
- [Share your Dev Environment](share.md)
- [Create a simple dev environment](create-dev-env.md)
- [Create an advanced dev environment](create-compose-dev-env.md)
- [Distribute your dev environment](share.md)
View File
@ -1,27 +1,25 @@
---
description: Dev Environments
keywords: Dev Environments, share, collaborate, local, share
title: Share your Dev Environment
title: Distribute your dev environment
---
{% include upgrade-cta.html
body="Docker Team and Business users can now share Dev Environments with their team members."
header-text="This feature requires a paid Docker subscription"
target-url="https://www.docker.com/pricing?utm_source=docker&utm_medium=webreferral&utm_campaign=docs_driven_upgrade"
%}
The `compose-dev.yaml` config file makes distributing your dev environment easy so everyone can access the same code and any dependencies.
Sharing a Dev Environment lets your team members access the code, any dependencies, and the current Git branch you are working on. They can also review your changes and provide feedback before you create a pull request.
### Distribute your dev environment
## Share your Dev Environment
When you are ready to share your environment, simply copy the link to the GitHub repo where your project is stored, and share the link with your team members.
When you are ready to share your environment, hover over your Dev Environment, select the **Share** icon, and specify the Docker Hub namespace where you'd like to push your Dev Environment to.
You can also create a link that automatically starts your dev environment when opened. This can then be placed on a GitHub README or pasted into a Slack channel, for example.
This creates an image of your Dev Environment, uploads it to the Docker Hub namespace you have specified, and provides a tiny URL to share with your team members.
To create the link, append your dev environment's GitHub repository URL to the following base URL:
![Dev environment shared](../images/dev-share.PNG){:width="700px"}
`https://open.docker.com/dashboard/dev-envs?url=`
## Open a Dev Environment that has been shared with you
The following example opens a [Compose sample](https://github.com/docker/awesome-compose/tree/master/nginx-golang-mysql), a Go server with an Nginx proxy and a MariaDB/MySQL database, in Docker Desktop.
To open a Dev Environment that was shared with you, select the **Create** button in the top right-hand corner, select the **Existing Dev Environment** tab, and then paste the URL.
[https://open.docker.com/dashboard/dev-envs?url=https://github.com/docker/awesome-compose/tree/master/nginx-golang-mysql](https://open.docker.com/dashboard/dev-envs?url=https://github.com/docker/awesome-compose/tree/master/nginx-golang-mysql)
Using this shared Dev Environment, your team members can access the code, any dependencies, and the current Git branch you are working on. They can also review your changes and give feedback even before you create a pull request!
### Open a dev environment that has been distributed to you
To open a dev environment that has been shared with you, select the **Create** button in the top right-hand corner, select source **Existing Git repo**, and then paste the URL.
View File
@ -1,45 +0,0 @@
---
description: Dev Environments
keywords: Dev Environments, share, collaborate, local, Dockerfile, specify, base image
title: Specify a Dockerfile or base image
---
## Specify a Dockerfile
Use a JSON file to specify a Dockerfile which in turn defines your Dev Environment. You must include this as part of the `.docker` folder and then add it as a `config.json` file. For example:
```jsx
{
"dockerfile": "Dockerfile.devenv"
}
```
Next, define the dependencies you want to include in your `Dockerfile.devenv`.
While some images or Dockerfiles include a non-root user, many base images and Dockerfiles do not. Fortunately, you can add a non-root user named `vscode`. If you include Docker tooling, for example the Docker CLI or `docker compose`, in the `Dockerfile.devenv`, you need the `vscode` user to be included in the `docker` group.
```dockerfile
# syntax=docker/dockerfile:1
FROM <your base image>
RUN useradd -s /bin/bash -m vscode \
&& groupadd docker \
&& usermod -aG docker vscode
USER vscode
```
## Specify a base image
If you already have an image built, you can specify it as a base image to define your Dev Environment. You must include this as part of the `.docker` folder and then add it as a `config.json` file. For example, to use the Jekyll base image, add:
```jsx
{
"image": "jekyll/jekyll"
}
```
> **Note**
>
> This configuration is to unblock users for the Beta release only. We may move this configuration for single and multi-container applications to a Compose-based implementation in future releases.
View File
@ -56,8 +56,8 @@ This includes:
- [Vulnerability scanning](../../docker-hub/vulnerability-scanning.md)
- Viewing remote images in the Docker Dashboard
- Setting up [Dev Environments](../dev-environments/index.md)
- Docker build when using [Buildkit](../../develop/develop-images/build_enhancements.md). You can work around this by disabling
BuildKit. Run `DOCKER_BUILDKIT=0 docker build .` to disable BuildKit.
- Docker build when using [BuildKit](../../build/buildkit/index.md#getting-started).
You can work around this by disabling BuildKit. Run `DOCKER_BUILDKIT=0 docker build .` to disable BuildKit.
- Deploying an app to the cloud through Compose
[ACI](../../cloud/aci-integration.md) and [ECS](../../cloud/ecs-integration.md)
integrations
View File
@ -0,0 +1,129 @@
---
title: FAQs and known issues
description: FAQ for Enhanced Container Isolation
keywords: enhanced container isolation, security, faq, sysbox
toc_max: 2
---
<ul class="nav nav-tabs">
<li class="active"><a data-toggle="tab" data-target="#tab3">FAQs</a></li>
<li><a data-toggle="tab" data-target="#tab4">Limitations and Known Issues</a></li>
</ul>
<div class="tab-content">
<div id="tab3" class="tab-pane fade in active" markdown="1">
#### Do I need to change the way I use Docker when Enhanced Container Isolation is enabled?
No, you can continue to use Docker as usual. Enhanced Container Isolation will be mostly transparent to you.
#### Do all container workloads work well with Enhanced Container Isolation?
Most container workloads do, but a few do not (yet). For the few workloads that
don't yet work with Enhanced Container Isolation, Docker will continue to improve the feature to reduce
this to a minimum.
#### Can I run privileged containers with Enhanced Container Isolation?
Yes, you can use the `--privileged` flag in containers, but unlike privileged
containers without Enhanced Container Isolation, the container can only use its elevated privileges to
access resources assigned to the container. It can't access global kernel
resources in the Docker Desktop Linux VM. This allows you to run privileged
containers securely. For more information, see [Key features and benefits](features-benefits.md#privileged-containers-are-also-secured).
#### Will all privileged container workloads run with Enhanced Container Isolation?
No. Privileged container workloads, or non-namespaced workloads, that wish to access global kernel resources inside the Docker Desktop Linux VM won't
work. For example, you can't use a privileged container to load a kernel module.
#### Why not just restrict usage of the `--privileged` flag?
Privileged containers are typically used to run advanced workloads in
containers, for example Docker-in-Docker or Kubernetes-in-Docker, to
perform kernel operations such as loading modules, or to access hardware
devices.
Enhanced Container Isolation allows running advanced workloads, but denies the ability to perform
kernel operations or access hardware devices.
#### Does Enhanced Container Isolation restrict bind mounts inside the container?
Yes, it restricts bind mounts of directories located in the Docker Desktop Linux
VM into the container.
It does not restrict bind mounts of your host machine files into the container,
as configured via Docker Desktop's **Settings** > **Resources** > **File Sharing**.
#### Does Enhanced Container Isolation protect all containers launched with Docker Desktop?
It protects all containers launched by users via `docker create` and `docker run`. It does not yet protect Docker Desktop Kubernetes pods, Extension
Containers, and Dev Environments.
#### Does Enhanced Container Isolation affect performance of containers?
Enhanced Container Isolation has very little impact on the performance of containers. The exception is
for containers that perform lots of `mount` and `umount` system calls, as these
are trapped and vetted by the Sysbox container runtime.
#### With Enhanced Container Isolation, can the user still override the `--runtime` flag from the CLI?
No. With Enhanced Container Isolation enabled, Sysbox is locked as the default (and only) runtime for
containers deployed by Docker Desktop users. If a user attempts to override the
runtime (e.g., `docker run --runtime=runc`), this request is ignored and the
container is created through the Sysbox runtime.
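For example, with ECI enabled, a container started with `--runtime=runc` still shows the user-namespace mapping rather than true root (a sketch; the exact ID range varies):

```console
$ docker run --rm --runtime=runc alpine cat /proc/self/uid_map
0 100000 65536
```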
The reason `runc` is disallowed with Enhanced Container Isolation is that it
allows users to run as "true root" on the Docker Desktop Linux VM, thereby
giving them implicit control of the VM and the ability to modify the
administrative configurations for Docker Desktop, for example.
#### How is ECI different from Docker Engine's userns-remap mode?
See [How does it work](how-eci-works.md#enhanced-container-isolation-vs-docker-userns-remap-mode).
#### How is ECI different from Rootless Docker?
See [How does it work](how-eci-works.md#enhanced-container-isolation-vs-rootless-docker).
<hr>
</div>
<div id="tab4" class="tab-pane fade" markdown="1">
#### Incompatibility with Windows Subsystem for Linux (WSL)
Enhanced Container Isolation (ECI) does not currently work when Docker Desktop runs on
Windows with WSL/WSL2. This is due to some limitations of the WSL/WSL2 Linux
Kernel. As a result, to use Enhanced Container Isolation on Windows, you must
configure Docker Desktop to use Hyper-V. This can be enforced using Admin
Controls. For more information, see [Settings Management](../settings-management/index.md).
#### Docker build and buildx has some restrictions
With ECI enabled, Docker build `--network=host` and Docker buildx entitlements
(`network.host`, `security.insecure`) are not allowed. Builds that require
these will not work properly.
#### Kubernetes pods are not yet protected
Kubernetes pods are not yet protected by ECI. A malicious or privileged pod can
compromise the Docker Desktop Linux VM and bypass security controls. We expect
to improve on this in future versions of Docker Desktop.
#### Extension Containers are not yet protected
Extension containers are also not yet protected by ECI. Ensure your extension
containers come from trusted entities to avoid issues. We expect to improve on
this in future versions of Docker Desktop.
#### Docker Desktop dev environments are not yet protected
Containers launched by the Docker Desktop Dev Environments feature are not yet
protected either. We expect to improve on this in future versions of Docker
Desktop.
#### Use in production
In general, users should not experience differences between running a container
in Docker Desktop with ECI enabled, which uses the Sysbox runtime, and running
that same container in production, through the standard OCI `runc` runtime.
However, in some cases, typically when running advanced or privileged workloads in
containers, users may experience some differences. In particular, the container
may run with ECI but not with `runc`, or vice-versa.
<hr>
</div>
</div>
View File
@ -0,0 +1,289 @@
---
description: Instructions on how to set up enhanced container isolation
title: Key features and benefits
keywords: set up, enhanced container isolation, rootless, security
---
### Linux User Namespace on all Containers
With Enhanced Container Isolation, all user containers leverage the [Linux user-namespace](https://man7.org/linux/man-pages/man7/user_namespaces.7.html)
for extra isolation. This means that the root user in the container maps to an unprivileged
user in the Docker Desktop Linux VM.
For example:
```
$ docker run -it --rm --name=first alpine
/ # cat /proc/self/uid_map
0 100000 65536
```
The output `0 100000 65536` is the signature of the Linux user-namespace. It
means that the root user (0) in the container is mapped to unprivileged user
100000 in the Docker Desktop Linux VM, and the mapping extends for a continuous
range of 64K user IDs. The same applies to group IDs.
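You can confirm the same behavior for group IDs from within the container by checking `gid_map`, which should show the equivalent mapping:
```
/ # cat /proc/self/gid_map
0 100000 65536
```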
Each container gets an exclusive range of mappings, managed by Sysbox. For
example, if a second container is launched the mapping range is different:
```
$ docker run -it --rm --name=second alpine
/ # cat /proc/self/uid_map
0 165536 65536
```
In contrast, without Enhanced Container Isolation, the container's root user is
in fact root on the host (aka "true root") and this applies to all containers:
```
$ docker run -it --rm alpine
/ # cat /proc/self/uid_map
0 0 4294967295
```
By virtue of using the Linux user-namespace, Enhanced Container Isolation
ensures the container processes never run as user ID 0 (true root) in the Linux
VM. In fact, they never run with any valid user ID in the Linux VM. Thus, their
Linux capabilities are constrained to resources within the container only,
significantly increasing both container-to-host and cross-container isolation
compared to regular containers.
### Privileged containers are also secured
Privileged containers (`docker run --privileged ...`) are insecure because they
give the container full access to the Linux kernel. That is, the container runs
as true root with all capabilities enabled, seccomp and AppArmor restrictions
disabled, and all hardware devices exposed.
For organizations that wish to secure Docker Desktop on their developers'
machines, privileged containers are problematic as they allow container
workloads, whether benign or malicious, to gain control of the Linux kernel
inside the Docker Desktop VM and thus modify security-related settings, for example
registry access management and network proxies.
With Enhanced Container Isolation, privileged containers can no longer do this. The combination of the Linux user-namespace and other security techniques used
by Sysbox ensures that processes inside a privileged container can only access
resources assigned to the container.
> Note
>
> Enhanced Container Isolation does not prevent users from launching privileged
> containers, but rather runs them securely by ensuring that they can only
> modify resources associated with the container. Privileged workloads that
> modify global kernel settings, for example loading a kernel module or changing BPF
> settings, will not work properly as they receive a "permission denied" error
> when attempting such operations.
For example, Enhanced Container Isolation ensures privileged containers can't
access Docker Desktop network settings in the Linux VM configured via Berkeley
Packet Filters (BPF):
```
$ docker run --privileged djs55/bpftool map show
Error: can't get next map: Operation not permitted
```
In contrast, without Enhanced Container Isolation, privileged containers
can easily do this:
```
$ docker run --privileged djs55/bpftool map show
17: ringbuf name blocked_packets flags 0x0
key 0B value 0B max_entries 16777216 memlock 0B
18: hash name allowed_map flags 0x0
key 4B value 4B max_entries 10000 memlock 81920B
20: lpm_trie name allowed_trie flags 0x1
key 8B value 8B max_entries 1024 memlock 16384B
```
Note that some advanced container workloads require privileged containers, for
example Docker-in-Docker and Kubernetes-in-Docker. With Enhanced Container
Isolation you can still run such workloads, but much more securely than
before.
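For example, a Docker-in-Docker sketch such as the following can still be run, but the inner Docker Engine is now confined by the outer container's user-namespace. The container name is illustrative, and the inner engine may need a few seconds to start before the `exec`:
```
$ docker run -d --privileged --name inner-docker docker:dind
$ docker exec inner-docker docker version
```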
### Containers can't share namespaces with the Linux VM
When Enhanced Container Isolation is enabled, containers can't share Linux
namespaces with the host (for example pid, network, or uts), as that essentially
breaks isolation.
For example, sharing the pid namespace fails:
```
$ docker run -it --rm --pid=host alpine
docker: Error response from daemon: failed to create shim task: OCI runtime create failed: error in the container spec: invalid or unsupported container spec: sysbox containers can't share namespaces [pid] with the host (because they use the linux user-namespace for isolation): unknown.
```
Similarly, sharing the network namespace fails:
```
$ docker run -it --rm --network=host alpine
docker: Error response from daemon: failed to create shim task: OCI runtime create failed: error in the container spec: invalid or unsupported container spec: sysbox containers can't share a network namespace with the host (because they use the linux user-namespace for isolation): unknown.
```
In addition, the `--userns=host` flag, used to disable the user-namespace on the
container, is ignored:
```
$ docker run -it --rm --userns=host alpine
/ # cat /proc/self/uid_map
0 100000 65536
```
Finally, Docker build `--network=host` and Docker buildx entitlements
(`network.host`, `security.insecure`) are not allowed. Builds that require these
won't work properly.
### Bind mount restrictions
When Enhanced Container Isolation is enabled, Docker Desktop users can continue
to bind mount host directories into containers as configured via **Settings** >
**Resources** > **File sharing**, but they are no longer allowed to bind mount
arbitrary Linux VM directories into containers.
This prevents containers from modifying sensitive files inside the Docker
Desktop Linux VM, files that can hold configurations for registry access
management, proxies, Docker Engine configurations, and more.
For example, the following bind mount of the Docker Engine's configuration file
(`/etc/docker/daemon.json` inside the Linux VM) into a container is restricted
and therefore fails:
```
$ docker run -it --rm -v /etc/docker/daemon.json:/mnt/daemon.json alpine
docker: Error response from daemon: failed to create shim task: OCI runtime create failed: error in the container spec: can't mount /etc/docker/daemon.json because it's configured as a restricted host mount: unknown
```
In contrast, without Enhanced Container Isolation this mount works and gives the
container full read and write access to the Docker Engine's configuration.
Of course, bind mounts of host files continue to work as usual. For example,
if a user configures Docker Desktop to file share their `$HOME` directory,
they can bind mount it into the container:
```
$ docker run -it --rm -v $HOME:/mnt alpine
/ #
```
> Note
>
> Enhanced Container Isolation won't allow bind mounting the Docker socket
> (`/var/run/docker.sock`) into a container, as doing so essentially grants the
> container control of Docker, thus breaking container isolation. Containers
> that rely on this will not work with Enhanced Container Isolation enabled.
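For example, an attempt such as the following is expected to be rejected when Enhanced Container Isolation is enabled (the exact error message may vary):
```
$ docker run -it --rm -v /var/run/docker.sock:/var/run/docker.sock alpine
```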
### Vetting sensitive system calls
Another feature of Enhanced Container Isolation is that it intercepts and vets a
few highly sensitive system calls inside containers, such as `mount` and
`umount`. This ensures that processes that have capabilities to execute these
system calls can't use them to breach the container.
For example, a container that has `CAP_SYS_ADMIN` (required to execute the
`mount` system call) can't use that capability to change a read-only bind mount
into a read-write mount:
```
$ docker run -it --rm --cap-add SYS_ADMIN -v $HOME:/mnt:ro alpine
/ # mount -o remount,rw /mnt /mnt
mount: permission denied (are you root?)
```
Since the `$HOME` directory was mounted into the container's `/mnt` directory as
read-only, it can't be changed from within the container to read-write, even if the container process has the capability to do so. This
ensures container processes can't use `mount` or `umount` to breach the container's
root filesystem.
Note, however, that in the example above the container can still create mounts
within the container, and mount them read-only or read-write as needed. Those
mounts are allowed since they occur within the container, and therefore don't
breach its root filesystem:
```
/ # mkdir /root/tmpfs
/ # mount -t tmpfs tmpfs /root/tmpfs
/ # mount -o remount,ro /root/tmpfs /root/tmpfs
/ # findmnt | grep tmpfs
├─/root/tmpfs tmpfs tmpfs ro,relatime,uid=100000,gid=100000
/ # mount -o remount,rw /root/tmpfs /root/tmpfs
/ # findmnt | grep tmpfs
├─/root/tmpfs tmpfs tmpfs rw,relatime,uid=100000,gid=100000
```
This feature, together with the user-namespace, ensures that even if a container
process has all Linux capabilities, they can't be used to breach the container.
Finally, Enhanced Container Isolation performs system call vetting in a way
that does not affect the performance of containers in the great majority of
cases. It intercepts control-path system calls that are rarely used in most
container workloads, but does not intercept data-path system calls.
### Filesystem user-ID mappings
As mentioned above, Enhanced Container Isolation enables the Linux
user-namespace on all containers and this ensures that the container's user-ID
range (0->64K) maps to an unprivileged range of "real" user-IDs in the Docker
Desktop Linux VM (e.g., 100000->165535).
Moreover, each container gets an exclusive range of real user-IDs in the Linux
VM (for example, the first container could map to 100000->165535, the second to
165536->231071, the third to 231072->296607, and so on). The same applies to
group IDs. In addition, if a container is stopped and restarted, there is no
guarantee it will receive the same mapping as before. This is by design and further
improves security.
However, the above presents a problem when mounting Docker volumes into
containers, as the files written to such volumes will have the real
user/group-IDs and therefore won't be accessible across a container's
start/stop/restart, or between containers, due to the different real
user-ID/group-ID of each container.
To solve this problem, Sysbox uses "filesystem user-ID remapping" via the Linux
Kernel's ID-mapped mounts feature (added in 2021) or an alternative module
called shiftfs. These technologies map filesystem accesses from the container's
real user-ID (e.g., range 100000->165535) to the range (0->65535) inside Docker
Desktop's Linux VM. This way, volumes can now be mounted or shared across
containers, even if each container uses an exclusive range of user-IDs. Users
need not worry about the container's real user-IDs.
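As an illustrative sketch (the volume name is arbitrary), a file written to a named volume from one container remains readable from another container, even though each container uses a different real user-ID range:
```
$ docker volume create shared-data
$ docker run --rm -v shared-data:/data alpine sh -c "echo hello > /data/file"
$ docker run --rm -v shared-data:/data alpine cat /data/file
hello
```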
Note that although filesystem user-ID remapping may cause containers to access
Linux VM files mounted into the container with real user-ID 0 (i.e., root), the
[restricted mounts feature](#bind-mount-restrictions) described above ensures
that no Linux VM sensitive files can be mounted into the container.
### Procfs and sysfs emulation
Another feature of Enhanced Container Isolation is that inside each container,
the procfs (`/proc`) and sysfs (`/sys`) filesystems are partially emulated. This
serves several purposes, such as hiding sensitive host information inside the
container and namespacing host kernel resources that are not yet namespaced by
the Linux kernel itself.
As a simple example, when Enhanced Container Isolation is enabled the
`/proc/uptime` file shows the uptime of the container itself, not that of the
Docker Desktop Linux VM:
```
$ docker run -it --rm alpine
/ # cat /proc/uptime
5.86 5.86
```
In contrast, without Enhanced Container Isolation you see the uptime of
the Docker Desktop Linux VM. Though this is a trivial example, it shows how
Enhanced Container Isolation aims to prevent the Linux VM's configuration and
information from leaking into the container so as to make it more difficult to
breach the VM.
In addition, several other resources under `/proc/sys` that are not namespaced by
the Linux kernel are also emulated inside the container. Each container
sees a separate view of each such resource and Sysbox reconciles the values
across the containers when programming the corresponding Linux kernel setting.
This enables container workloads that need access to such non-namespaced kernel
resources, and that would otherwise require truly privileged containers, to run
with Enhanced Container Isolation enabled, thereby improving security.

View File

@ -0,0 +1,94 @@
---
description: Instructions on how to set up enhanced container isolation
title: How does it work?
keywords: set up, enhanced container isolation, rootless, security
---
>**Note**
>
>Enhanced Container Isolation is available to Docker Business customers only.
Enhanced Container Isolation hardens container isolation using the [Sysbox
container runtime](https://github.com/nestybox/sysbox). Sysbox is a fork of the
standard OCI runc runtime that was modified to enhance container isolation and
workloads. For more details see [Under the hood](#under-the-hood).
Starting with version 4.13, Docker Desktop includes a customized version of
Sysbox.
When [Enhanced Container Isolation is enabled](index.md#how-do-i-enable-enhanced-container-isolation), containers
created by users through `docker run` or `docker create` are automatically
launched using Sysbox instead of the standard OCI runc runtime. Users need not
do anything else and can continue to use containers as usual. For exceptions,
see [FAQs and known issues](faq.md).
Even containers that use the insecure `--privileged` flag can now be run
securely with Enhanced Container Isolation, such that they can no longer be used
to breach the Docker Desktop Virtual Machine (VM) or other containers.
>Note
>
> When Enhanced Container Isolation is enabled in Docker Desktop, the Docker CLI
> "--runtime" flag is ignored. Docker's default runtime continues to be "runc",
> but all user containers are implicitly launched with Sysbox.
Enhanced Container Isolation is not the same as Docker Engine's userns-remap
mode or Rootless Docker. This is explained further below.
### Under the hood
Sysbox enhances container isolation by using techniques such as:
* Enabling the Linux user-namespace on all containers (root user in the container maps to an unprivileged user in the Linux VM).
* Restricting the container from mounting sensitive VM directories.
* Vetting sensitive system-calls between the container and the Linux kernel.
* Mapping filesystem user/group IDs between the container's user-namespace and the Linux VM.
* Emulating portions of the procfs and sysfs filesystems inside the container.
Some of these are made possible by recent advances in the Linux kernel which
Docker Desktop now incorporates. Sysbox applies these techniques with minimal
functional or performance impact to containers.
These techniques complement Docker's traditional container security mechanisms
such as using other Linux namespaces, cgroups, restricted Linux capabilities,
seccomp, and AppArmor. They add a strong layer of isolation between the
container and the Linux kernel inside the Docker Desktop VM.
For more information, see [Key features and benefits](features-benefits.md).
### Enhanced Container Isolation vs Docker Userns-Remap Mode
The Docker Engine includes a feature called [userns-remap mode](/engine/security/userns-remap/)
that enables the user-namespace in all containers. However, it suffers from a few
[limitations](/engine/security/userns-remap/) and it's
not supported within Docker Desktop.
Userns-remap mode is similar to Enhanced Container Isolation in that both improve
container isolation by leveraging the Linux user-namespace.
However, Enhanced Container Isolation is much more advanced since it assigns
exclusive user-namespace mappings per container automatically and adds several
other [container isolation features](#under-the-hood) meant to secure Docker
Desktop in organizations with stringent security requirements.
### Enhanced Container Isolation vs Rootless Docker
[Rootless Docker](/engine/security/rootless/) allows the Docker Engine, and by
extension the containers, to run without root privileges natively on a Linux host. This
allows non-root users to install and run Docker natively on Linux.
Rootless Docker is not supported within Docker Desktop. While it's a valuable
feature when running Docker natively on Linux, its value within Docker Desktop
is reduced since Docker Desktop runs the Docker Engine within a Linux VM. That
is, Docker Desktop already allows non-root host users to run Docker and
isolates the Docker Engine from the host using a virtual machine.
Unlike Rootless Docker, Enhanced Container Isolation does not run Docker Engine
within a Linux user-namespace. Rather it runs the containers generated by that
engine within a user-namespace. This has the advantage of bypassing [the
limitations](/engine/security/rootless/#known-limitations) of Rootless Docker
and creates a stronger boundary between the containers and the Docker Engine.
Enhanced Container Isolation is meant to ensure containers launched with Docker
Desktop can't easily breach the Docker Desktop Linux VM and therefore modify
security settings within it.

View File

@ -0,0 +1,118 @@
---
description: Enhanced Container Isolation - benefits, why use it, how it differs to Docker rootless, who it is for
keywords: containers, rootless, security, sysbox, runtime
title: What is Enhanced Container Isolation?
---
>**Note**
>
>Enhanced Container Isolation is available to Docker Business customers only.
Enhanced Container Isolation provides an additional layer of security that uses a variety of advanced techniques to harden container isolation without impacting developer productivity. It is available with [Docker Desktop 4.13.0 or later](../../release-notes.md).
These techniques include:
- Running all containers unprivileged through the Linux user-namespace.
- Restricting containers from modifying Docker Desktop VM settings.
- Vetting some critical system calls to prevent container escapes, and partially virtualizing portions of `/proc` and `/sys` inside the container for further isolation.
- Preventing console access to the Docker Desktop VM.
This is done automatically and with minimal functional or performance impact.
Enhanced Container Isolation helps ensure strong container isolation and also locks in any security configurations that have been created, for instance through [Registry Access Management policies](../registry-access-management.md) or with [Settings Management](../settings-management/index.md).
>**Note**
>
> Enhanced Container Isolation is in addition to other container security techniques used by Docker, for example reduced Linux capabilities, seccomp, and AppArmor.
### Who is it for?
- For organizations that want to prevent container attacks and reduce vulnerabilities.
- For organizations that want to ensure stronger container isolation that is easy and intuitive to implement on developers' machines.
### What happens when Enhanced Container Isolation is enabled?
When Enhanced Container Isolation is enabled using [Settings Management](../settings-management/index.md), the following features are enabled:
- All user containers are automatically run in Linux User Namespaces which ensures stronger isolation.
- The root user in the container maps to an unprivileged user at VM level.
- Users can continue using containers as usual, including bind mounting host directories, volumes, networking configurations, etc.
- Privileged containers work, but they are only privileged within the container's Linux User Namespace, not in the Docker Desktop VM.
- Containers can no longer share namespaces with the Docker Desktop VM. For example, `--network=host`, `--pid=host`.
- Containers can no longer modify configuration files in the Docker Desktop VM.
- Console access to the Desktop VM is forbidden for all users.
- Containers become harder to breach. For example, sensitive system calls are vetted and portions of `/proc` and `/sys` are emulated.
For more information on how Enhanced Container Isolation works, see [How does it work](how-eci-works.md).
>**Important**
>
>Enhanced Container Isolation is currently incompatible with WSL and does not protect Kubernetes pods. For more information on known limitations and workarounds, see [FAQs and known issues](faq.md).
{: .important}
### How do I enable Enhanced Container Isolation?
As an admin, you first need to [configure a `registry.json` file to enforce sign-in](../../../docker-hub/configure-sign-in.md). This is because the Enhanced Container Isolation feature requires a Docker Business subscription and therefore your Docker Desktop users must authenticate to your organization for this configuration to take effect.
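For reference, a minimal `registry.json` that enforces sign-in could look like the following, where `myorg` is a placeholder for your Docker Hub organization name:
```JSON
{
  "allowedOrgs": ["myorg"]
}
```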
Next, you must [create and configure the `admin-settings.json` file](../settings-management/configure.md) and specify:
```JSON
{
"configurationFileVersion": 2,
"enhancedContainerIsolation": {
"value": true,
"locked": true
}
}
```
For this to take effect:
- On a new install, developers need to launch Docker Desktop and authenticate to their organization.
- On an existing install, developers need to quit Docker Desktop through the Docker menu, and then relaunch Docker Desktop. If they are already signed in, they don't need to sign in again for the changes to take effect.
>Important
>
>Selecting **Restart** from the Docker menu isn't enough as it only restarts some components of Docker Desktop.
{: .important}
### What do users see when this setting is enforced?
When Enhanced Container Isolation is enabled, users see that containers run within a Linux user namespace.
To check, run:
```
$ docker run --rm alpine cat /proc/self/uid_map
```
The following output displays:
```
0 100000 65536
```
This indicates that the container's root user (0) maps to unprivileged user (100000) in the Docker Desktop VM, and that the mapping extends for a range of 64K user-IDs.
In contrast, without Enhanced Container Isolation the Linux user namespace is not used, and the following displays:
```
0 0 4294967295
```
This means that the root user in the container (0) is in fact the root user in the Docker Desktop VM (0), which reduces container isolation. With Enhanced Container Isolation, the user-ID mapping varies with each new container, as each container gets an exclusive range of host user IDs for isolation. User-ID mapping is automatically managed by Docker Desktop.
With Enhanced Container Isolation, if a process were to escape the container, it would find itself without privileges at the VM level. For further details, see [How Enhanced Container Isolation works](how-eci-works.md).
Since Enhanced Container Isolation [uses the Sysbox container runtime](how-eci-works.md) embedded in the Docker Desktop Linux VM, another way to determine if a container is running with Enhanced Container Isolation is by using `docker inspect`:
{% highlight liquid %}
docker inspect --format={% raw %}'{{.HostConfig.Runtime}}'{% endraw %} my_container
{% endhighlight %}
It outputs:
```
sysbox-runc
```
Without Enhanced Container Isolation, `docker inspect` outputs `runc`, which is the standard OCI runtime.

View File

@ -0,0 +1,55 @@
---
title: Hardened Desktop
description: Overview of what Hardened Desktop is
keywords: security, hardened desktop, enhanced container isolation, registry access management, admin controls, root access, admins, docker desktop
---
>Note
>
>Hardened Desktop is available to Docker Business customers only.
Hardened Desktop is a security model for Docker Desktop. It's designed to provide admins with a simple and powerful way to improve their organization's security posture for containerized development, without impacting the developer experience that Docker Desktop offers.
It is for security-conscious organizations who don't give their users root or admin access on their machines, and who would like Docker Desktop to be within their organization's centralized control.
The Hardened Desktop security model moves the ownership boundary for containers to the organization, meaning that any security controls admins set cannot be altered by the user of Docker Desktop.
Hardened Desktop includes:
- Settings Management, which helps admins to confidently manage and control the usage of Docker Desktop within their organization.
- Enhanced Container Isolation, a setting that instantly enhances security by preventing containers from running as root in Docker Desktop's Linux VM and ensuring that any configurations set using Settings Management cannot be modified by containers.
- Registry Access Management, which allows admins to control the registries developers can access.
Docker plans to continue adding more security enhancements to the Hardened Desktop security model.
<div class="component-container">
<!--start row-->
<div class="row">
<div class="col-xs-12 col-sm-12 col-md-12 col-lg-4 block">
<div class="component">
<div class="component-icon">
<a href="/desktop/hardened-desktop/settings-management/"><img src="/assets/images/lock.svg" alt="Hardened Desktop" width="70" height="70"></a>
</div>
<h2 id="hardened-desktop"><a href="/desktop/hardened-desktop/settings-management/">Settings Management </a></h2>
<p>Learn how Settings Management can secure your developers' workflows.</p>
</div>
</div>
<div class="col-xs-12 col-sm-12 col-md-12 col-lg-4 block">
<div class="component">
<div class="component-icon">
<a href="/desktop/hardened-desktop/enhanced-container-isolation"><img src="/assets/images/secure.svg" alt="Release notes" width="70" height="70"></a>
</div>
<h2 id="hardened-desktop"><a href="/desktop/hardened-desktop/enhanced-container-isolation">Enhanced Container Isolation</a></h2>
<p>Understand how Enhanced Container Isolation can prevent container attacks. </p>
</div>
</div>
<div class="col-xs-12 col-sm-12 col-md-12 col-lg-4 block">
<div class="component">
<div class="component-icon">
<a href="/desktop/hardened-desktop/registry-access-management/"><img src="/assets/images/registry.svg" alt="Hardened Desktop" width="70" height="70"></a>
</div>
<h2 id="hardened-desktop"><a href="/desktop/hardened-desktop/registry-access-management/">Registry Access Management</a></h2>
<p>Control the registries developers can access while using Docker Desktop.</p>
</div>
</div>
</div>
</div>

View File

@ -0,0 +1,63 @@
---
description: Registry Access Management
keywords: registry, access, management
title: Registry Access Management
redirect_from:
- /docker-hub/registry-access-management/
---
>Note
>
>Registry Access Management is available to Docker Business customers only.
With Registry Access Management, administrators can ensure that their developers using Docker Desktop only access registries that are allowed. This is done through the Registry Access Management dashboard on Docker Hub.
Below are some example registries administrators can allow:
- Docker Hub. This is enabled by default.
- Amazon ECR
- GitHub Container Registry
- Google Container Registry
Administrators can ensure registries are locked in and cannot be edited by developers if Enhanced Container Isolation is switched on. To learn more, see [Enhanced Container Isolation](enhanced-container-isolation/index.md).
## Prerequisites
You need to [configure a registry.json to enforce sign-in](../../docker-hub/configure-sign-in.md). For Registry Access Management to take effect, Docker Desktop users must authenticate to your organization.
## Configure Registry Access Management permissions
To configure Registry Access Management permissions:
1. Sign in to your [Docker Hub](https://hub.docker.com){: target="_blank" rel="noopener" class="_"} account as an organization owner.
2. Select an organization and then navigate to the **Settings** tab on the **Organizations** page and select **Registry Access**.
3. Toggle on Registry Access Management to set the permissions for your registry.
> **Note**
>
> When enabled, the Docker Hub registry is set by default. However, you can also restrict this registry for your developers.
4. To add registries to your list, select **Add** and enter your registry details in the applicable fields, then select **Create**.
5. Verify that the registry appears in your list and select **Save & Apply**. You can verify that your changes are saved in the **Activity** tab. There is no limit on the number of registries you can add.
> **Note**
>
> Once you add a registry, it takes up to 24 hours for the changes to be enforced on your developers' machines. If you want to apply the changes sooner, you must force a Docker logout on your developers' machines and have the developers re-authenticate for Docker Desktop.
![Registry Access Management](../../docker-hub/images/registry-access-management.png){:width="700px"}
## Verify the restrictions
The new Registry Access Management policy takes effect after the developer successfully authenticates to Docker Desktop using their organization credentials. If a developer attempts to pull an image from a disallowed registry via the Docker CLI, they receive an error message that the organization has disallowed this registry.
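For example, if GitHub Container Registry is not on the allowed list, a pull along these lines is expected to fail with an error stating that the organization has disallowed this registry (the image name is a placeholder):
```
$ docker pull ghcr.io/example-org/example-image:latest
```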
## Known issues
There are certain limitations when using Registry Access Management:
- Windows image pulls and image builds are not restricted
- Builds such as `docker buildx` using a Kubernetes driver are not restricted
- Builds such as `docker buildx` using a custom docker-container driver are not restricted
- Blocking is DNS-based; you must use a registry's access control mechanisms to distinguish between “push” and “pull”
- WSL 2 requires at least a 5.4 series Linux kernel (this does not apply to earlier Linux kernel series)
- Under the WSL 2 network, traffic from all Linux distributions is restricted (this will be resolved in the updated 5.15 series Linux kernel)
Also, Registry Access Management operates on the level of hosts, not IP addresses. Developers can bypass this restriction by manipulating domain resolution, for example by running Docker against a local proxy or modifying their operating system's `hosts` file. Docker Desktop does not support blocking these forms of manipulation.

View File

@ -0,0 +1,137 @@
---
description: settings management for desktop
keywords: admin, controls, rootless, enhanced container isolation
title: Configure Settings Management
---
>**Note**
>
>Settings Management is available to Docker Business customers only.
This page contains information for admins on how to configure Settings Management to specify and lock configuration parameters to create a standardized Docker Desktop environment across the organization.
Settings Management is designed specifically for organizations who don't give developers root access to their machines.
### Prerequisites
- [Download and install Docker Desktop 4.13.0 or later](../../release-notes.md).
- As an admin, you need to [configure a registry.json to enforce sign-in](../../../docker-hub/configure-sign-in.md). This is because this feature requires a Docker Business subscription and therefore your Docker Desktop users must authenticate to your organization for this configuration to take effect.
### Step one: Create the `admin-settings.json` file and save it in the correct location
You can either use the `--admin-settings` installer flag on [macOS](../../install/mac-install.md#install-from-the-command-line) or [Windows](../../install/windows-install.md#install-from-the-command-line) to automatically create the `admin-settings.json` and save it in the correct location, or set it up manually.
To set it up manually:
1. Create a new, empty JSON file and name it `admin-settings.json`.
2. Save the `admin-settings.json` file on your developers' machines in the following locations:
- Mac: `/Library/Application\ Support/com.docker.docker/admin-settings.json`
- Windows: `C:\ProgramData\DockerDesktop\admin-settings.json`
- Linux: `/usr/share/docker-desktop/admin-settings.json`
By placing this file in the above protected directories, end users are unable to modify it.
>**Note**
>
> It is assumed that you have the ability to push the `admin-settings.json` settings file to the locations specified above through a device management software such as [Jamf](https://www.jamf.com/lp/en-gb/apple-mobile-device-management-mdm-jamf-shared/?attr=google_ads-brand-search-shared&gclid=CjwKCAjw1ICZBhAzEiwAFfvFhEXjayUAi8FHHv1JJitFPb47C_q_RCySTmF86twF1qJc_6GST-YDmhoCuJsQAvD_BwE).
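For a quick manual test on a single machine, you can copy the file into place yourself, assuming Docker Desktop is already installed so the target directories exist. A sketch for macOS and Linux:
```
# macOS
$ sudo cp admin-settings.json "/Library/Application Support/com.docker.docker/admin-settings.json"
# Linux
$ sudo cp admin-settings.json /usr/share/docker-desktop/admin-settings.json
```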
### Step two: Configure the settings you want to lock in
>**Note**
>
>Some of the configuration parameters only apply to Windows. This is highlighted in the table below.
The `admin-settings.json` file requires a nested list of configuration parameters, each of which must contain the `locked` parameter. You can add or remove configuration parameters as per your requirements.
If `locked: true`, users aren't able to edit this setting from Docker Desktop or the CLI.
If `locked: false`, it's similar to setting a factory default in that:
- For new installs, `locked: false` pre-populates the relevant settings in the Docker Desktop UI, but users are able to modify it.
- If Docker Desktop is already installed and being used, `locked: false` is ignored. This is because existing users of Docker Desktop may have already updated a setting, which in turn will have been written to the relevant config file, for example the `settings.json` or `daemon.json`. In these instances, the user's preferences are respected and we don't alter these values. These can be controlled by the admin by setting `locked: true`.
The following `admin-settings.json` code and table provides an example of the required syntax and descriptions for parameters and values:
```json
{
"configurationFileVersion": 2,
"exposeDockerAPIOnTCP2375": {
"locked": true,
"value": false
},
"proxy": {
"locked": true,
"mode": "system",
"http": "",
"https": "",
"exclude": []
},
"enhancedContainerIsolation": {
"locked": true,
"value": true
},
"linuxVM": {
"wslEngineEnabled": {
"locked": false,
"value": false
},
"dockerDaemonOptions": {
"locked": false,
"value":"{\"debug\": false}"
},
"vpnkitCIDR": {
"locked": false,
"value":"192.168.65.0/24"
}
},
"windowsContainers": {
"dockerDaemonOptions": {
"locked": false,
"value":"{\"debug\": false}"
}
},
"disableUpdate": {
"locked": false,
"value": false
},
"analyticsEnabled": {
"locked": false,
"value": true
}
}
```
| Parameter | | Description |
| :------------------------------- |---| :------------------------------- |
| `configurationFileVersion` | |Specifies the version of the configuration file format. |
| `exposeDockerAPIOnTCP2375` | <span class="badge badge-info">Windows only</span>| Exposes the Docker API on a specified port. If `value` is set to true, the Docker API is exposed on port 2375. Note: This is unauthenticated and should only be enabled if protected by suitable firewall rules.|
| `proxy` | |If `mode` is set to `system` instead of `manual`, Docker Desktop gets the proxy values from the system and ignores any values set for `http`, `https` and `exclude`. Change `mode` to `manual` to manually configure proxy servers. If the proxy port is custom, specify it in the `http` or `https` property, for example `"https": "http://myotherproxy.com:4321"`. The `exclude` property specifies a comma-separated list of hosts and domains to bypass the proxy. |
| `enhancedContainerIsolation` | | If `value` is set to true, Docker Desktop runs all containers as unprivileged, via the Linux user-namespace, prevents them from modifying sensitive configurations inside the Docker Desktop VM, and uses other advanced techniques to isolate them. For more information, see [Enhanced Container Isolation](../enhanced-container-isolation/index.md). Note: Enhanced Container Isolation is currently [incompatible with WSL](../enhanced-container-isolation/faq.md#incompatibility-with-windows-subsystem-for-linux-wsl). |
| `linuxVM` | |Parameters and settings related to Linux VM options - grouped together here for convenience. |
| &nbsp; &nbsp; &nbsp; &nbsp;`wslEngineEnabled` | <span class="badge badge-info">Windows only</span> | If `value` is set to true, Docker Desktop uses the WSL 2 based engine. This overrides anything that may have been set at installation using the `--backend=<backend name>` flag. It is also incompatible with Enhanced Container Isolation. See [Known issues](../enhanced-container-isolation/faq.md) for more information.|
| &nbsp;&nbsp; &nbsp; &nbsp;`dockerDaemonOptions`| |Overrides the options in the Docker Engine config file. See the [Docker Engine reference](/engine/reference/commandline/dockerd/#daemon-configuration-file). Note that for added security, a few of the config attributes may be overridden when Enhanced Container Isolation is enabled. |
| &nbsp;&nbsp; &nbsp; &nbsp;`vpnkitCIDR` | |Overrides the network range used for vpnkit DHCP/DNS for `*.docker.internal` |
| `windowsContainers` | | Parameters and settings related to `windowsContainers` options - grouped together here for convenience. |
| &nbsp; &nbsp; &nbsp; &nbsp;`dockerDaemonOptions` | | Overrides the options in the linux daemon config file. See the [Docker Engine reference](/engine/reference/commandline/dockerd/#daemon-configuration-file).|
|`disableUpdate`| |If `value` is set to true, checking for Docker Desktop updates and related update notifications are disabled.|
|`analyticsEnabled`| |If `value` is set to false, Docker Desktop doesn't send usage statistics to Docker. |
### Step three: Re-launch Docker Desktop
>**Note**
>
>Administrators should test the changes made through the `admin-settings.json` file locally to see if the settings work as expected.
For settings to take effect:
- On a new install, developers need to launch Docker Desktop and authenticate to their organization.
- On an existing install, developers need to quit Docker Desktop through the Docker menu, and then relaunch Docker Desktop. If they are already signed in, they don't need to sign in again for the changes to take effect.
>**Important**
>
>Selecting **Restart** from the Docker menu isn't enough as it only restarts some components of Docker Desktop.
{: .important}
Docker doesn't automatically mandate that developers re-launch and sign in once a change has been made so as not to disrupt your developers' workflow.
In Docker Desktop, developers see the relevant settings grayed out and the message **Locked by your administrator**.
![Proxy settings grayed out](/assets/images/grayed-setting.png){:width="750px"}

View File

@ -0,0 +1,58 @@
---
description: Settings Management for desktop
keywords: Settings Management, rootless, docker desktop, hardened desktop
title: What is Settings Management?
---
>**Note**
>
>Settings Management is available to Docker Business customers only.
Settings Management is a feature that helps admins to control certain Docker Desktop settings on client machines within their organization.
With a few lines of JSON, admins can configure controls for Docker Desktop settings such as proxies and network settings. For an extra layer of security, admins can also use Settings Management to enable [Enhanced Container Isolation](../enhanced-container-isolation/index.md) which ensures that any configurations set with Settings Management cannot be modified by containers.
It is available with [Docker Desktop 4.13.0 or later](../../release-notes.md).
### Who is it for?
- For organizations who wish to configure Docker Desktop to be within their organization's centralized control.
- For organizations who want to create a standardized Docker Desktop environment at scale.
- For Docker Business customers who want to confidently manage their use of Docker Desktop within tightly regulated environments.
### How does it work?
Administrators can configure several Docker Desktop settings using an `admin-settings.json` file. This file is located on the Docker Desktop host and can only be accessed by users with root or admin privileges.
Values that are set to `locked: true` within the `admin-settings.json` override any previous values set by users and ensure that these cannot be modified. For more information, see [Configure Settings Management](../settings-management/configure.md#step-two-configure-the-settings-you-want-to-lock-in).
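For instance, a minimal `admin-settings.json` that locks Enhanced Container Isolation on looks like this; see [Configure Settings Management](configure.md) for the full syntax:
```json
{
  "configurationFileVersion": 2,
  "enhancedContainerIsolation": {
    "value": true,
    "locked": true
  }
}
```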
### What features can I configure with Settings Management?
Using the `admin-settings.json` file, admins can:
- Enable [Enhanced Container Isolation](../enhanced-container-isolation/index.md) (currently incompatible with WSL)
- Configure HTTP proxies
- Configure network settings
- Enforce the use of WSL2 based engine or Hyper-V
- Configure Docker Engine
- Turn off Docker Desktop's ability to check for updates
- Turn off Docker Desktop's ability to send usage statistics
For more details on the syntax and options admins can set, see [Configure Settings Management](configure.md).
### How do I set up and enforce Settings Management?
As an administrator, you first need to [configure a registry.json to enforce sign-in](../../../docker-hub/configure-sign-in.md). This is because the Settings Management feature requires a Docker Business subscription and therefore your Docker Desktop users must authenticate to your organization for this configuration to take effect.
Next, you must either manually [create and configure the admin-settings.json file](configure.md), or use the `--admin-settings` installer flag on [macOS](../../install/mac-install.md#install-from-the-command-line) or [Windows](../../install/windows-install.md#install-from-the-command-line) to automatically create the `admin-settings.json` and save it in the correct location.
Once this is done, Docker Desktop users receive the changed settings when they either:
- Quit, re-launch, and sign in to Docker Desktop
- Launch and sign in to Docker Desktop for the first time
Docker doesn't automatically mandate that developers re-launch and re-authenticate once a change has been made, so as not to disrupt your developers' workflow.
### What do users see when the settings are enforced?
Any settings that are enforced are grayed out in Docker Desktop and the user is unable to edit them, either via the Docker Desktop UI, CLI, or the `settings.json` file. In addition, if Enhanced Container Isolation is enforced, users can't use privileged containers or similar techniques to modify enforced settings within the Docker Desktop Linux VM, for example to reconfigure the proxy and networking, or to reconfigure the Docker Engine.
![Proxy settings grayed out](/assets/images/grayed-setting.png){:width="750px"}

Binary file not shown.

Before

Width:  |  Height:  |  Size: 61 KiB

After

Width:  |  Height:  |  Size: 90 KiB

View File

@ -8,7 +8,7 @@ redirect_from:
This topic discusses installation of Docker Desktop from an [Arch package](https://desktop-stage.docker.com/linux/main/amd64/78459/docker-desktop-4.8.0-x86_64.pkg.tar.zst?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64) that Docker provides in addition to the supported platforms. Docker has not tested or verified the installation.
[Arch package (experimental)](https://desktop.docker.com/linux/main/amd64/docker-desktop-4.12.0-x86_64.pkg.tar.zst?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64){: .button .primary-btn }
[Arch package (experimental)](https://desktop.docker.com/linux/main/amd64/docker-desktop-4.13.0-x86_64.pkg.tar.zst?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64){: .button .primary-btn }
*For checksums, see [Release notes](../release-notes.md)*

View File

@ -9,7 +9,7 @@ redirect_from:
This page contains information on how to install, launch, and upgrade Docker Desktop on a Debian distribution.
[DEB package](https://desktop.docker.com/linux/main/amd64/docker-desktop-4.12.0-amd64.deb?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64){: .button .primary-btn }
[DEB package](https://desktop.docker.com/linux/main/amd64/docker-desktop-4.13.0-amd64.deb?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64){: .button .primary-btn }
*For checksums, see [Release notes](../release-notes.md)*

View File

@ -9,7 +9,7 @@ redirect_from:
This page contains information on how to install, launch and upgrade Docker Desktop on a Fedora distribution.
[RPM package](https://desktop.docker.com/linux/main/amd64/docker-desktop-4.12.0-x86_64.rpm?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64){: .button .primary-btn }
[RPM package](https://desktop.docker.com/linux/main/amd64/docker-desktop-4.13.0-x86_64.rpm?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64){: .button .primary-btn }
## Prerequisites
@ -31,7 +31,7 @@ To install Docker Desktop on Fedora:
1. Set up [Docker's package repository](../../engine/install/fedora.md#set-up-the-repository).
2. Download latest [RPM package](https://desktop.docker.com/linux/main/amd64/docker-desktop-4.12.0-x86_64.rpm?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64).
2. Download latest [RPM package](https://desktop.docker.com/linux/main/amd64/docker-desktop-4.13.0-x86_64.rpm?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64).
3. Install the package with dnf as follows:

View File

@ -100,6 +100,10 @@ The `install` command accepts the following flags:
- `--accept-license`: accepts the [Docker Subscription Service Agreement](https://www.docker.com/legal/docker-subscription-service-agreement){: target="_blank" rel="noopener" class="_"} now, rather than requiring it to be accepted when the application is first run
- `--allowed-org=<org name>`: requires the user to sign in and be part of the specified Docker Hub organization when running the application
- `--user=<username>`: Runs the privileged helper service once during installation, then disables it at runtime. This removes the need for the user to grant root privileges on first run. For more information, see [Privileged helper permission requirements](../mac/permission-requirements.md#permission-requirements){: target="_blank" rel="noopener" class="_"}. To find the username, enter `ls /Users` in the CLI.
- `--admin-settings`: Automatically creates an `admin-settings.json` file which is used by admins to control certain Docker Desktop settings on client machines within their organization. For more information, see [Settings Management](../hardened-desktop/settings-management/index.md).
- It must be used together with the `--allowed-org=<org name>` flag.
- For example:
`--allowed-org=<org name> --admin-settings='{"configurationFileVersion": 2, "enhancedContainerIsolation": {"value": true, "locked": false}}'`
## Where to go next

View File

@ -9,7 +9,7 @@ redirect_from:
This page contains information on how to install, launch and upgrade Docker Desktop on an Ubuntu distribution.
[DEB package](https://desktop.docker.com/linux/main/amd64/docker-desktop-4.12.0-amd64.deb?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64){: .button .primary-btn }
[DEB package](https://desktop.docker.com/linux/main/amd64/docker-desktop-4.13.0-amd64.deb?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64){: .button .primary-btn }
*For checksums, see [Release notes](../release-notes.md)*
@ -48,7 +48,7 @@ Recommended approach to install Docker Desktop on Ubuntu:
1. Set up [Docker's package repository](../../engine/install/ubuntu.md#set-up-the-repository).
2. Download latest [DEB package](https://desktop.docker.com/linux/main/amd64/docker-desktop-4.12.0-amd64.deb?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64).
2. Download latest [DEB package](https://desktop.docker.com/linux/main/amd64/docker-desktop-4.13.0-amd64.deb?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64).
3. Install the package with apt as follows:

View File

@ -151,13 +151,18 @@ If using the Windows Command Prompt:
start /w "" "Docker Desktop Installer.exe" install
```
The install command accepts the following flags:
The `install` command accepts the following flags:
- `--quiet`: suppresses information output when running the installer
- `--accept-license`: accepts the [Docker Subscription Service Agreement](https://www.docker.com/legal/docker-subscription-service-agreement){: target="_blank" rel="noopener" class="_"} now, rather than requiring it to be accepted when the application is first run
- `--no-windows-containers`: disables Windows containers integration
- `--allowed-org=<org name>`: requires the user to sign in and be part of the specified Docker Hub organization when running the application
- `--backend=<backend name>`: selects the default backend to use for Docker Desktop, `hyper-v`, `windows` or `wsl-2` (default)
- `--installation-dir=<path>`: changes the default installation location (`C:\Program Files\Docker\Docker`)
- `--admin-settings`: Automatically creates an `admin-settings.json` file which is used by admins to control certain Docker Desktop settings on client machines within their organization. For more information, see [Settings Management](../hardened-desktop/settings-management/index.md).
- It must be used together with the `--allowed-org=<org name>` flag.
- For example:
`--allowed-org=<org name> --admin-settings='{"configurationFileVersion": 2, "enhancedContainerIsolation": {"value": true, "locked": false}}'`
If your admin account is different to your user account, you must add the user to the **docker-users** group:

View File

@ -24,12 +24,108 @@ Take a look at the [Docker Public Roadmap](https://github.com/docker/roadmap/pro
For frequently asked questions about Docker Desktop releases, see [FAQs](faqs/general.md/#releases)
## Docker Desktop 4.13.0
2022-10-19
> Download Docker Desktop
>
> {% include desktop-install.html %}
### New
- Two new security features have been introduced for Docker Business users, Settings Management and Enhanced Container Isolation. Read more about Docker Desktops new [Hardened Desktop security model](hardened-desktop/index.md).
- Added the new Dev Environments CLI `docker dev`, so you can create, list, and run Dev Envs via command line. Now it's easier to integrate Dev Envs into custom scripts.
- Docker Desktop can now be installed to any drive and folder using the `--installation-dir`. Partially addresses [docker/roadmap#94](https://github.com/docker/roadmap/issues/94).
### Upgrades
- [Docker Scan v0.21.0](https://github.com/docker/scan-cli-plugin/releases/tag/v0.21.0)
- [Go 1.19.2](https://github.com/golang/go/releases/tag/go1.19.2) to address [CVE-2022-2879](https://www.cve.org/CVERecord?id=CVE-2022-2879){: target="_blank" rel="noopener"}, [CVE-2022-2880](https://www.cve.org/CVERecord?id=CVE-2022-2880){: target="_blank" rel="noopener"} and [CVE-2022-41715](https://www.cve.org/CVERecord?id=CVE-2022-41715){: target="_blank" rel="noopener"}
- Updated Docker Engine and Docker CLI to [v20.10.20](https://docs.docker.com/engine/release-notes/#201020),
which contain mitigations against a Git vulnerability, tracked in [CVE-2022-39253](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-39253){:target="_blank" rel="noopener"},
and updated handling of `image:tag@digest` image references, as well as a fix for [CVE-2022-36109](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-36109).
- [Docker Credential Helpers v0.7.0](https://github.com/docker/docker-credential-helpers/releases/tag/v0.7.0){: target="blank" rel="noopener" class=""}
- [Docker Compose v2.12.0](https://github.com/docker/compose/releases/tag/v2.12.0)
- [Kubernetes v1.25.2](https://github.com/kubernetes/kubernetes/releases/tag/v1.25.2)
- [Qemu 7.0.0](https://wiki.qemu.org/ChangeLog/7.0) used for CPU emulation inside the Docker Desktop VM.
- [Linux kernel 5.15.49](https://hub.docker.com/layers/docker/for-desktop-kernel/5.15.49-13422a825f833d125942948cf8a8688cef721ead/images/sha256-ebf1f6f0cb58c70eaa260e9d55df7c43968874d62daced966ef6a5c5cd96b493?context=explore)
### Bug fixes and minor changes
#### For all platforms
- Docker Desktop now allows the use of TLS when talking to HTTP and HTTPS proxies to encrypt proxy usernames and passwords.
- Docker Desktop now stores HTTP and HTTPS proxy passwords in the OS credential store.
- If Docker Desktop detects that the HTTP or HTTPS proxy password has changed, it prompts developers for the new password.
- The **Bypass proxy settings for these hosts and domains** setting now handles domain names correctly for HTTPS.
- The **Remote Repositories** view and Tip of the Day now work with HTTP and HTTPS proxies which require authentication.
- We've introduced dark launch for features that are in early stages of the product development lifecycle. Users that are opted in can opt out at any time in the settings under the “beta features” section.
- Added categories to the Extensions Marketplace.
- Added an indicator in the whale menu and on the **Extension** tab when extension updates are available.
- Fixed failing uninstalls of extensions with image names that do not have a namespace, as in 'my-extension'.
- Show port mapping explicitly in the **Container** tab.
- Changed the refresh rate for disk usage information for images to happen automatically once a day.
- Made the tab style consistent for the **Container** and **Volume** tabs.
- Fixed Grpcfuse filesharing mode enablement in **Settings**. Fixes [docker/for-mac#6467](https://github.com/docker/for-mac/issues/6467)
- Virtualization Framework and VirtioFS are disabled for users running macOS < 12.5.
- Ports on the **Containers** tab are now clickable.
- The Extensions SDK now allows `ddClient.extension.vm.cli.exec`, `ddClient.extension.host.cli.exec`, `ddClient.docker.cli.exec` to accept a different working directory and pass environment variables through the options parameters.
- Added a small improvement to navigate to the Extensions Marketplace when clicking on **Extensions** in the sidebar.
- Added a badge to identify new extensions in the Marketplace.
- Fixed Kubernetes not starting with the containerd integration.
- Fixed `kind` not starting with the containerd integration.
- Fixed dev environments not working with the containerd integration.
- Implemented `docker diff` in the containerd integration.
- Implemented `docker run --platform` in the containerd integration.
- Fixed insecure registries not working with the containerd integration.
- Fixed a bug that showed incorrect values on used space in **Settings**.
- Docker Desktop now installs credential helpers from GitHub releases. See [docker/for-win#10247](https://github.com/docker/for-win/issues/10247), [docker/for-win#12995](https://github.com/docker/for-win/issues/12995), [docker/for-mac#12399](https://github.com/docker/for-mac/issues/12399).
- Fixed an issue where users were logged out of Docker Desktop after 7 days.
#### For Mac
- Added **Hide**, **Hide others**, **Show all** menu items for Docker Desktop. See [docker/for-mac#6446](https://github.com/docker/for-mac/issues/6446).
- Fixed a bug which caused the application to be deleted when running the install utility from the installed application. Fixes [docker/for-mac#6442](https://github.com/docker/for-mac/issues/6442).
- By default, Docker Desktop no longer creates the `/var/run/docker.sock` symlink on the host and uses the `docker-desktop` CLI context instead.
#### For Linux
- Fixed a bug that prevented pushing images from the Dashboard.
## Docker Desktop 4.12.0
2022-09-01
> Download Docker Desktop
>
> {% include desktop-install.html %}
> [Windows](https://desktop.docker.com/win/main/amd64/85629/Docker%20Desktop%20Installer.exe) |
> [Mac with Intel chip](https://desktop.docker.com/mac/main/amd64/85629/Docker.dmg) |
> [Mac with Apple chip](https://desktop.docker.com/mac/main/arm64/85629/Docker.dmg) |
> [Debian](https://desktop.docker.com/linux/main/amd64/85629/docker-desktop-4.12.0-amd64.deb) |
> [RPM](https://desktop.docker.com/linux/main/amd64/85629/docker-desktop-4.12.0-x86_64.rpm) |
> [Arch package](https://desktop.docker.com/linux/main/amd64/85629/docker-desktop-4.12.0-x86_64.pkg.tar.zst)
<div class="panel-group" id="accordion" role="tablist" aria-multiselectable="true">
<div class="panel panel-default">
<div class="panel-heading" role="tab" id="headingSeven">
<h5 class="panel-title">
<a role="button" data-toggle="collapse" data-parent="#accordion" href="#collapseSeven" aria-expanded="true" aria-controls="collapseSeven">
Checksums
<i class="fa fa-chevron-down"></i>
</a>
</h5>
</div>
<div id="collapseSeven" class="panel-collapse collapse" role="tabpanel" aria-labelledby="headingSeven">
<div class="panel-body">
<li><b>Windows:</b> SHA-256 996a4c5fff5b80b707ecfc0121d7ebe70d96c0bd568f058fd96f32cdec0c10cf</li>
<li><b>Mac Intel:</b> SHA-256 41085009458ba1741c6a86c414190780ff3b288879aa27821fc4a985d229653c</li>
<li><b>Mac Arm:</b> SHA-256 7eb63b4819cd1f87c61d5e8f54613692e07fb203d81bcf8d66f5de55489d3b81</li>
<li><b>Linux DEB:</b> SHA-256 4407023db032219d6ac6031f81da6389ab192d3d06084ee6dad1ba4f4c64a4fe</li>
<li><b>Linux RPM:</b> SHA-256 05e91f2a9763089acdfe710140893cb096bec955bcd99279bbe3aea035d09bc5</li>
<li><b>Linux Arch:</b> SHA-256 7c6b43c8ab140c755e6c8ce4ec494b3f5c4f3b0c1ab3cee8bfd0b6864f795d8a</li>
</div>
</div>
</div>
</div>
### New
@ -693,7 +789,7 @@ For frequently asked questions about Docker Desktop releases, see [FAQs](faqs/ge
#### For Mac
- Docker Desktop 4.6.0 gives macOS users the option of enabling a new experimental file sharing technology called VirtioFS. During testing VirtioFS has been shown to drastically reduce the time taken to sync changes between the host and VM, leading to substantial performance improvements. For more information, see [VirtioFS](settings/mac.md#experimental-features).
- Docker Desktop 4.6.0 gives macOS users the option of enabling a new experimental file sharing technology called VirtioFS. During testing VirtioFS has been shown to drastically reduce the time taken to sync changes between the host and VM, leading to substantial performance improvements. For more information, see [VirtioFS](settings/mac.md#beta-features).
### Upgrades
View File
@ -34,7 +34,7 @@ On the **General** tab, you can configure when to start Docker and specify other
dashboard when starting Docker Desktop.
- **Use Docker Compose V2**. Select to enable the `docker-compose` command to
use Docker Compose V2. For more information, see [Docker Compose V2](../../compose/index.md#compose-v2-and-the-new-docker-compose-command).
use Docker Compose V2. For more information, see [Docker Compose V2](../../compose/compose-v2/index.md).
## Resources
@ -128,11 +128,13 @@ Type a JSON configuration file in the box to configure the daemon settings. For
Click **Apply & Restart** to save your settings and restart Docker Desktop.
## Experimental Features
## Beta Features
{% include experimental.md %}
{% include beta.md %}
From the **Experimental features** tab, you can sign up to the [Developer Preview program](https://www.docker.com/community/get-involved/developer-preview/).
From the **Beta features** tab, you can sign up to the [Developer Preview program](https://www.docker.com/community/get-involved/developer-preview/).
On the **Beta features** tab, you also have the option to allow version 4.13 feature flags, which are product features Docker is currently experimenting with. This is switched on by default.
## Kubernetes
View File
@ -45,7 +45,7 @@ On the **General** tab, you can configure when to start Docker and specify other
dashboard when starting Docker Desktop.
- **Use Docker Compose V2**. Select to enable the `docker-compose` command to
use Docker Compose V2. For more information, see [Docker Compose V2](../../compose/index.md#compose-v2-and-the-new-docker-compose-command).
use Docker Compose V2. For more information, see [Docker Compose V2](../../compose/compose-v2/index.md).
## Resources
@ -159,9 +159,11 @@ Type a JSON configuration file in the box to configure the daemon settings. For
Click **Apply & Restart** to save your settings and restart Docker Desktop.
## Experimental Features
## Beta Features
{% include experimental.md %}
{% include beta.md %}
On the **Beta features** tab, you also have the option to allow version 4.13 feature flags, which are product features Docker is currently experimenting with. This is switched on by default.
### Enable the new Apple Virtualization framework
View File
@ -42,7 +42,7 @@ On the **General** tab, you can configure when to start Docker and specify other
dashboard when starting Docker Desktop.
- **Use Docker Compose V2**. Select to enable the `docker-compose` command to
use Docker Compose V2. For more information, see [Docker Compose V2](../../compose/index.md#compose-v2-and-the-new-docker-compose-command).
use Docker Compose V2. For more information, see [Docker Compose V2](../../compose/compose-v2/index.md).
## Resources
@ -201,11 +201,13 @@ Type a JSON configuration file in the box to configure the daemon settings. For
Click **Apply & Restart** to save your settings and restart Docker Desktop.
## Experimental features
## Beta features
{% include experimental.md %}
{% include beta.md %}
From the **Experimental features** tab, you can sign up to the [Developer Preview program](https://www.docker.com/community/get-involved/developer-preview/).
From the **Beta features** tab, you can sign up to the [Developer Preview program](https://www.docker.com/community/get-involved/developer-preview/).
On the **Beta features** tab, you also have the option to allow version 4.13 feature flags, which are product features Docker is currently experimenting with. This is switched on by default.
## Kubernetes
174
desktop/wasm/index.md Normal file
View File
@ -0,0 +1,174 @@
---
title: Docker+Wasm (Beta)
description: How to use the Wasm integration in Docker Desktop
keywords: Docker, WebAssembly, wasm, containerd, engine
toc_max: 3
---
Wasm (short for WebAssembly) is a faster, lighter alternative to the Linux & Windows containers you're using in Docker today (with [some tradeoffs](https://www.docker.com/blog/docker-wasm-technical-preview/)).
This page provides information about the new ability to run Wasm applications alongside your Linux containers in Docker. To learn more about the launch and how the preview works, read [the launch blog post here](https://www.docker.com/blog/docker-wasm-technical-preview/).
> **Beta**
>
> The Docker+Wasm feature is currently in [Beta](../../release-lifecycle.md/#beta). We recommend that you don't use this feature in production environments, as it may change or be removed from future releases.
## Enable the Docker+Wasm integration
The Docker+Wasm integration currently requires a technical preview build of Docker Desktop.
>**Warning**
>
> With the technical preview build of Docker Desktop, things might not work as expected. Be sure to back up your containers and images before proceeding.
{: .warning}
>**Important**
>
> The technical preview build enables the [Containerd Image Store](../containerd/index.md) feature. This cannot be disabled. If you're not currently using the Containerd Image Store, then pre-existing images and containers will be inaccessible.
{: .important}
Download the technical preview build of Docker Desktop:
- [macOS Apple Silicon](https://www.docker.com/download/wasm-preview/macosxsilicon){: id="dkr_docs_dl_wasm_apple"}
- [macOS Intel](https://www.docker.com/download/wasm-preview/macintel){: id="dkr_docs_dl_wasm_intel"}
- [Windows AMD64](https://www.docker.com/download/wasm-preview/windowsamd64){: id="dkr_docs_dl_wasm_windows"}
- Linux Arm64 ([deb](https://www.docker.com/download/wasm-preview/linuxarm64deb){: id="dkr_docs_dl_wasm_arm64deb"})
- Linux AMD64 ([deb](https://www.docker.com/download/wasm-preview/linuxamd64deb){: id="dkr_docs_dl_wasm_amd64deb"}, [rpm](https://www.docker.com/download/wasm-preview/linuxamd64rpm){: id="dkr_docs_dl_wasm_amd64rpm"}, [tar](https://www.docker.com/download/wasm-preview/linuxamd64tarball){: id="dkr_docs_dl_wasm_amd64tar"})
## Usage examples
### Running a Wasm application with `docker run`
```
$ docker run -dp 8080:8080 \
--name=wasm-example \
--runtime=io.containerd.wasmedge.v1 \
--platform=wasi/wasm32 \
michaelirwin244/wasm-example
```
Note the two additional flags to the run command:
- `--runtime=io.containerd.wasmedge.v1`. This informs the Docker engine that you want to use the Wasm containerd shim instead of the standard Linux container runtime.
- `--platform=wasi/wasm32`. This specifies the architecture of the image you want to use. By leveraging a Wasm architecture, you don't need to build separate images for the different machine architectures. The Wasm runtime does the final step of converting the Wasm binary to machine instructions.
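
As a quick sanity check (not part of the original example), you can confirm the container is running and hit the published port. This assumes the example image serves HTTP on port 8080, as the port mapping above suggests:

```console
$ docker ps --filter name=wasm-example
$ curl localhost:8080
```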
### Running a Wasm application with Docker Compose
The same application can be run using the following Docker Compose file:
```yaml
services:
app:
image: michaelirwin244/wasm-example
platform: wasi/wasm32
    runtime: io.containerd.wasmedge.v1
ports:
- 8080:8080
```
Start the application using the normal Docker Compose commands:
```
docker compose up
```
### Running a multi-service application with Wasm
Networking works the same as you expect with Linux containers, giving you the flexibility to combine Wasm applications with other containerized workloads, such as a database, in a single application stack.
In the following example, the Wasm application leverages a MariaDB database running in a container.
1. Clone the repository.
```
$ git clone https://github.com/second-state/microservice-rust-mysql.git
Cloning into 'microservice-rust-mysql'...
remote: Enumerating objects: 75, done.
remote: Counting objects: 100% (75/75), done.
remote: Compressing objects: 100% (42/42), done.
remote: Total 75 (delta 29), reused 48 (delta 14), pack-reused 0
Receiving objects: 100% (75/75), 19.09 KiB | 1.74 MiB/s, done.
Resolving deltas: 100% (29/29), done.
```
2. Navigate into the cloned project and start the project using Docker Compose.
```
$ cd microservice-rust-mysql
$ docker compose up
[+] Running 0/1
⠿ server Warning 0.4s
[+] Building 4.8s (13/15)
...
microservice-rust-mysql-db-1 | 2022-10-19 19:54:45 0 [Note] mariadbd: ready for connections.
microservice-rust-mysql-db-1 | Version: '10.9.3-MariaDB-1:10.9.3+maria~ubu2204' socket: '/run/mysqld/mysqld.sock' port: 3306 mariadb.org binary distribution
```
In another terminal, you can see the Wasm image that was created.
```
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
server latest 2c798ddecfa1 2 minutes ago 3MB
```
Inspecting the image shows that it has a `wasi/wasm32` platform, a combination of OS (`wasi`) and architecture (`wasm32`).
```
$ docker image inspect server | grep -A 3 "Architecture"
"Architecture": "wasm32",
"Os": "wasi",
"Size": 3001146,
"VirtualSize": 3001146,
```
3. Open the website at http://localhost:8090 and create a few sample orders. All of these are interacting with the Wasm server.
4. When you're done, tear everything down by pressing `Ctrl+C` in the terminal where you launched the application.
### Building and pushing a Wasm module
1. Create a Dockerfile that builds your Wasm application. This varies depending on the language you are using; see the sketch after these steps for one possible setup.
2. In a separate stage in your `Dockerfile`, extract the module and set it as the `ENTRYPOINT`.
```
FROM scratch
COPY --from=build /build/hello_world.wasm /hello_world.wasm
ENTRYPOINT [ "hello_world.wasm" ]
```
3. Build and push the image specifying the `wasi/wasm32` architecture. Buildx makes this easy to do in a single command.
```
$ docker buildx build --platform wasi/wasm32 -t username/hello-world .
...
=> exporting to image 0.0s
=> => exporting layers 0.0s
=> => exporting manifest sha256:2ca02b5be86607511da8dc688234a5a00ab4d58294ab9f6beaba48ab3ba8de56 0.0s
=> => exporting config sha256:a45b465c3b6760a1a9fd2eda9112bc7e3169c9722bf9e77cf8c20b37295f954b 0.0s
=> => naming to docker.io/username/hello-world:latest 0.0s
=> => unpacking to docker.io/username/hello-world:latest 0.0s
$ docker push username/hello-world
```
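
For step 1, the build stage depends entirely on your toolchain. As a hypothetical sketch only (assuming a Rust project whose binary is named `hello_world`, compiled for the `wasm32-wasi` target), a complete multi-stage Dockerfile might look like this:

```dockerfile
# syntax=docker/dockerfile:1

# Build stage: compile the Rust project to a Wasm module (assumes a standard cargo layout).
FROM rust:1.64 AS build
RUN rustup target add wasm32-wasi
WORKDIR /build
COPY . .
RUN cargo build --release --target wasm32-wasi

# Final stage: ship only the Wasm module and set it as the entrypoint.
FROM scratch
COPY --from=build /build/target/wasm32-wasi/release/hello_world.wasm /hello_world.wasm
ENTRYPOINT [ "hello_world.wasm" ]
```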
## Docker+Wasm Release Notes
2022-10-24
Initial release
### New
- Initial implementation of Wasm integration
### Known issues
- Docker Compose may not exit cleanly when interrupted
- Workaround: Clean up `docker-compose` processes by sending them a SIGKILL (`killall -9 docker-compose`).
- Pushes to Hub might give an error stating `server message: insufficient_scope: authorization failed`, even after logging in using Docker Desktop
- Workaround: Run `docker login` in the CLI
## Feedback
Thanks for trying the new Docker+Wasm integration. Give feedback or report any bugs you may find through the issues tracker on the [public roadmap item](https://github.com/docker/roadmap/issues/426){: target="_blank" rel="noopener" class="_"}.
View File
@ -1,284 +0,0 @@
---
title: Build images with BuildKit
description: Learn the new features of Docker Build with BuildKit
keywords: build, security, engine, secret, BuildKit
---
Docker Build is one of the most used features of the Docker Engine - users
ranging from developers to build and release teams all use Docker Build.
The Docker Build enhancements in the 18.09 release introduce a much-needed overhaul of
the build architecture. By integrating BuildKit, users should see improvements
in performance, storage management, feature functionality, and security.
* Docker images created with BuildKit can be pushed to Docker Hub just like
Docker images created with legacy build
* The Dockerfile format that works on legacy build will also work with BuildKit
builds
* The new `--secret` command line option allows the user to pass secret
information for building new images with a specified Dockerfile
For more information on build options, see the reference guide on the
[command line build options](../../engine/reference/commandline/build.md) and
the [Dockerfile reference](/engine/reference/builder/) page.
## Requirements
* A current version of Docker (18.09 or higher)
* Network connection required for downloading images of custom frontends
## Limitations
* Only supported for building Linux containers
## To enable BuildKit builds
The easiest way to enable BuildKit on a fresh install of Docker is to set the `DOCKER_BUILDKIT=1`
environment variable when invoking the `docker build` command, such as:
```console
$ DOCKER_BUILDKIT=1 docker build .
```
To enable Docker BuildKit by default, set the `buildkit` feature to `true` in the
`/etc/docker/daemon.json` daemon configuration file and restart the daemon:
```json
{ "features": { "buildkit": true } }
```
## New Docker Build command line build output
New docker build BuildKit TTY output (default):
```console
$ docker build .
[+] Building 70.9s (34/59)
=> [runc 1/4] COPY hack/dockerfile/install/install.sh ./install.sh 14.0s
=> [frozen-images 3/4] RUN /download-frozen-image-v2.sh /build buildpa 24.9s
=> [containerd 4/5] RUN PREFIX=/build/ ./install.sh containerd 37.1s
=> [tini 2/5] COPY hack/dockerfile/install/install.sh ./install.sh 4.9s
=> [vndr 2/4] COPY hack/dockerfile/install/vndr.installer ./ 1.6s
=> [dockercli 2/4] COPY hack/dockerfile/install/dockercli.installer ./ 5.9s
=> [proxy 2/4] COPY hack/dockerfile/install/proxy.installer ./ 15.7s
=> [tomlv 2/4] COPY hack/dockerfile/install/tomlv.installer ./ 12.4s
=> [gometalinter 2/4] COPY hack/dockerfile/install/gometalinter.install 25.5s
=> [vndr 3/4] RUN PREFIX=/build/ ./install.sh vndr 33.2s
=> [tini 3/5] COPY hack/dockerfile/install/tini.installer ./ 6.1s
=> [dockercli 3/4] RUN PREFIX=/build/ ./install.sh dockercli 18.0s
=> [runc 2/4] COPY hack/dockerfile/install/runc.installer ./ 2.4s
=> [tini 4/5] RUN PREFIX=/build/ ./install.sh tini 11.6s
=> [runc 3/4] RUN PREFIX=/build/ ./install.sh runc 23.4s
=> [tomlv 3/4] RUN PREFIX=/build/ ./install.sh tomlv 9.7s
=> [proxy 3/4] RUN PREFIX=/build/ ./install.sh proxy 14.6s
=> [dev 2/23] RUN useradd --create-home --gid docker unprivilegeduser 5.1s
=> [gometalinter 3/4] RUN PREFIX=/build/ ./install.sh gometalinter 9.4s
=> [dev 3/23] RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.ba 4.3s
=> [dev 4/23] RUN echo source /usr/share/bash-completion/bash_completion 2.5s
=> [dev 5/23] RUN ln -s /usr/local/completion/bash/docker /etc/bash_comp 2.1s
```
New docker build BuildKit plain output:
```console
$ docker build --progress=plain .
#1 [internal] load .dockerignore
#1 digest: sha256:d0b5f1b2d994bfdacee98198b07119b61cf2442e548a41cf4cd6d0471a627414
#1 name: "[internal] load .dockerignore"
#1 started: 2018-08-31 19:07:09.246319297 +0000 UTC
#1 completed: 2018-08-31 19:07:09.246386115 +0000 UTC
#1 duration: 66.818µs
#1 started: 2018-08-31 19:07:09.246547272 +0000 UTC
#1 completed: 2018-08-31 19:07:09.260979324 +0000 UTC
#1 duration: 14.432052ms
#1 transferring context: 142B done
#2 [internal] load Dockerfile
#2 digest: sha256:2f10ef7338b6eebaf1b072752d0d936c3d38c4383476a3985824ff70398569fa
#2 name: "[internal] load Dockerfile"
#2 started: 2018-08-31 19:07:09.246331352 +0000 UTC
#2 completed: 2018-08-31 19:07:09.246386021 +0000 UTC
#2 duration: 54.669µs
#2 started: 2018-08-31 19:07:09.246720773 +0000 UTC
#2 completed: 2018-08-31 19:07:09.270231987 +0000 UTC
#2 duration: 23.511214ms
#2 transferring dockerfile: 9.26kB done
```
## Overriding default frontends
The new syntax features in `Dockerfile` are available if you override the default
frontend. To override the default frontend, set the first line of the
`Dockerfile` as a comment with a specific frontend image:
```dockerfile
# syntax=<frontend image>, e.g. # syntax=docker/dockerfile:1.2
```
The examples on this page use features that are available in `docker/dockerfile`
version 1.2.0 and up. We recommend using `docker/dockerfile:1`, which always
points to the latest release of the version 1 syntax. BuildKit automatically
checks for updates of the syntax before building, making sure you are using the
most current version. Learn more about the `syntax` directive in the
[Dockerfile reference](/engine/reference/builder/#syntax).
## New Docker Build secret information
The new `--secret` flag for `docker build` allows the user to pass secret
information to be used in the Dockerfile for building Docker images, in a safe
way that doesn't end up stored in the final image.
`id` is the identifier to pass into the `docker build --secret`. This identifier
is associated with the `RUN --mount` identifier to use in the Dockerfile. Docker
does not use the filename of where the secret is kept outside of the Dockerfile,
since this may be sensitive information.
`dst` renames the secret file to a specific file in the Dockerfile `RUN` command
to use.
For example, with a secret piece of information stored in a text file:
```console
$ echo 'WARMACHINEROX' > mysecret.txt
```
And with a Dockerfile that specifies use of a BuildKit frontend
`docker/dockerfile:1.2`, the secret can be accessed when performing a `RUN`:
```dockerfile
# syntax=docker/dockerfile:1.2
FROM alpine
# shows secret from default secret location:
RUN --mount=type=secret,id=mysecret cat /run/secrets/mysecret
# shows secret from custom secret location:
RUN --mount=type=secret,id=mysecret,dst=/foobar cat /foobar
```
The secret needs to be passed to the build using the `--secret` flag.
This Dockerfile is only to demonstrate that the secret can be accessed. As you
can see, the secret is printed in the build output. The final image built will not
have the secret file:
```console
$ docker build --no-cache --progress=plain --secret id=mysecret,src=mysecret.txt .
...
#8 [2/3] RUN --mount=type=secret,id=mysecret cat /run/secrets/mysecret
#8 digest: sha256:5d8cbaeb66183993700828632bfbde246cae8feded11aad40e524f54ce7438d6
#8 name: "[2/3] RUN --mount=type=secret,id=mysecret cat /run/secrets/mysecret"
#8 started: 2018-08-31 21:03:30.703550864 +0000 UTC
#8 1.081 WARMACHINEROX
#8 completed: 2018-08-31 21:03:32.051053831 +0000 UTC
#8 duration: 1.347502967s
#9 [3/3] RUN --mount=type=secret,id=mysecret,dst=/foobar cat /foobar
#9 digest: sha256:6c7ebda4599ec6acb40358017e51ccb4c5471dc434573b9b7188143757459efa
#9 name: "[3/3] RUN --mount=type=secret,id=mysecret,dst=/foobar cat /foobar"
#9 started: 2018-08-31 21:03:32.052880985 +0000 UTC
#9 1.216 WARMACHINEROX
#9 completed: 2018-08-31 21:03:33.523282118 +0000 UTC
#9 duration: 1.470401133s
...
```
## Using SSH to access private data in builds
> **Acknowledgment**
>
> Please see [Build secrets and SSH forwarding in Docker 18.09](https://medium.com/@tonistiigi/build-secrets-and-ssh-forwarding-in-docker-18-09-ae8161d066)
> for more information and examples.
Some commands in a `Dockerfile` may need specific SSH authentication - for example, to clone a private repository.
Rather than copying private keys into the image, which runs the risk of exposing them publicly, `docker build` provides a way to use the host system's ssh access while building the image.
There are three steps to this process.
First, run `ssh-add` to add private key identities to the authentication agent.
If you have more than one SSH key and your default `id_rsa` is not the one you use for accessing the resources in question, you'll need to add that key by path: `ssh-add ~/.ssh/<some other key>`.
(For more information on SSH agent, see the [OpenSSH man page](https://man.openbsd.org/ssh-agent).)
Second, when running `docker build`, use the `--ssh` option to pass in an existing SSH agent connection socket.
For example, `--ssh default=$SSH_AUTH_SOCK`, or the shorter equivalent, `--ssh default`.
Third, to make use of that SSH access in a `RUN` command in the `Dockerfile`, define a mount with type `ssh`.
This will set the `SSH_AUTH_SOCK` environment variable for that command to the value provided by the host to `docker build`, which will cause any programs in the `RUN` command which rely on SSH to automatically use that socket.
Only the commands in the `Dockerfile` that have explicitly requested SSH access by defining `type=ssh` mount will have access to SSH agent connections.
The other commands will have no knowledge of any SSH agent being available.
Here is an example `Dockerfile` using SSH in the container:
```dockerfile
# syntax=docker/dockerfile:1
FROM alpine
# Install ssh client and git
RUN apk add --no-cache openssh-client git
# Download public key for github.com
RUN mkdir -p -m 0700 ~/.ssh && ssh-keyscan github.com >> ~/.ssh/known_hosts
# Clone private repository
RUN --mount=type=ssh git clone git@github.com:myorg/myproject.git myproject
```
The image could be built as follows:
```console
$ docker build --ssh default .
```
As with `--mount=type=secret`, you can specify an `id` if you want to use multiple sockets per build and want to differentiate them.
For example, you could run `docker build --ssh main=$SSH_AUTH_SOCK --ssh other=$OTHER_SSH_AUTH_SOCK`.
In your `Dockerfile`, you could then have a `RUN --mount=type=ssh,id=main` and a `RUN --mount=type=ssh,id=other` to use those two sockets.
If a `--mount=type=ssh` doesn't specify an `id`, `default` is assumed.
## Troubleshooting: issues with private registries
#### x509: certificate signed by unknown authority
If you are fetching images from an insecure registry (with self-signed certificates)
and/or using such a registry as a mirror, you are facing a known issue in
Docker 18.09:
```console
[+] Building 0.4s (3/3) FINISHED
=> [internal] load build definition from Dockerfile
=> => transferring dockerfile: 169B
=> [internal] load .dockerignore
=> => transferring context: 2B
=> ERROR resolve image config for docker.io/docker/dockerfile:experimental
------
> resolve image config for docker.io/docker/dockerfile:experimental:
------
failed to do request: Head https://repo.mycompany.com/v2/docker/dockerfile/manifests/experimental: x509: certificate signed by unknown authority
```
Solution: secure your registry properly. You can get SSL certificates from
Let's Encrypt for free. See [Deploy a registry server](../../registry/deploying.md).
#### image not found when the private registry is running on Sonatype Nexus version < 3.15
If you are running a private registry using Sonatype Nexus version < 3.15, and
receive an error similar to the following:
```console
------
> [internal] load metadata for docker.io/library/maven:3.5.3-alpine:
------
------
> [1/4] FROM docker.io/library/maven:3.5.3-alpine:
------
rpc error: code = Unknown desc = docker.io/library/maven:3.5.3-alpine not found
```
you may be facing the following bug: [NEXUS-12684](https://issues.sonatype.org/browse/NEXUS-12684)
The solution is to upgrade your Nexus to version 3.15 or above.
View File
@ -183,7 +183,13 @@ repository.
From either location, you can select a build job to view its build report. The
build report shows information about the build job. This includes the source
repository and branch (or tag), the build logs, the build duration, creation time and location, and the user namespace the build occurred in. You can also refresh the page to see the current progress of your build logs.
repository and branch (or tag), the build logs, the build duration, creation time and location, and the user namespace the build occurred in.
>**Note**
>
> You can now view the progress of your builds every 30 seconds when you
> refresh the Builds page. With the in-progress build logs, you can debug your
> builds before they're finished.
![Build Report](/docker-hub/images/index-report.png)
@ -311,7 +317,7 @@ sources. For example, you might have
Autobuilds use the BuildKit build system by default. If you want to use the legacy
Docker build system, add the [environment variable](index.md#environment-variables-for-builds){: target="_blank" rel="noopener" class="_"}
`DOCKER_BUILDKIT=0`. Refer to the [build images with BuildKit](../../develop/develop-images/build_enhancements.md)
`DOCKER_BUILDKIT=0`. Refer to the [BuildKit](../../build/buildkit/index.md)
page for more information on BuildKit.
## Build repositories with linked private submodules
View File
@ -10,7 +10,10 @@ Docker Hub organizations let you create teams so you can give your team access
to shared image repositories.
An **Organization** is a collection of teams and repositories
that can be managed together. Docker users become members of an organization
that can be managed together. A **Team** is a group of Docker members that belong to an organization.
An organization can have multiple teams.
Docker users become members of an organization
when they are assigned to at least one team in the organization. When you first
create an organization, you'll see that you have a team, the **owners** (Admins)
team, with a single member. An organization owner is someone that is part of the
@ -20,12 +23,6 @@ selecting a team the user should be part of. An org owner can also add
additional org owners to help them manage users, teams, and repositories in the
organization.
A **Team** is a group of Docker users that belong to an organization. An
organization can have multiple teams. When you first create an organization,
you'll see that you have a team, the **owners** team, with a single member. An
organization owner can then create new teams and add members to an existing team
using their Docker ID or email address and by selecting a team the user should be part of.
## Create an organization
There are multiple ways to create an organization. You can create a brand new
View File
@ -11,6 +11,10 @@ known issues for each Docker Hub release.
Take a look at the [Docker Public Roadmap](https://github.com/docker/roadmap/projects/1){: target="_blank" rel="noopener" class="_"} to see what's coming next.
## 2022-09-26
### New feature
The new [autobuild feature](../docker-hub/builds/index.md#check-your-active-builds) lets you view your in-progress logs every 30 seconds instead of when the build is complete.
## 2022-09-21
### Enhancement
View File
@ -4,6 +4,8 @@ keywords: Docker, docker, trusted, registry, accounts, plans, Dockerfile, Docker
title: Access repositories
---
Within your repository, you can give others access to push to and pull from your repository, and you can assign permissions. You can also view your repository tags and the associated images.
## Collaborators and their role
A collaborator is someone you want to give access to a private repository. Once
@ -23,38 +25,28 @@ see the [organizations documentation](../../../docker-hub/orgs.md#create-an-orga
## Viewing repository tags
Docker Hub's individual repositories view shows you the available tags and the
size of the associated image. Go to the **Repositories** view and select a repository to see its tags.
![Repository View](../../images/repos-create.png)
size of the associated image. Go to the **Repositories** view and select a repository to see its tags. To view individual tags, select the **Tags** tab.
![View Repo Tags](../../images/repo-overview.png)
Image sizes are the cumulative space taken up by the image and all its parent
images. This is also the disk space used by the contents of the `.tar` file
created when you `docker save` an image.
created when you `docker save` an image. An image is stale if there has been no push/pull activity for more than one month. For example:
To view individual tags, select the **Tags** tab.
* It hasn't been pulled for more than one month
* And it hasn't been pushed for more than one month
![Manage Repo Tags](../../images/repo-tags-list.png)
An image is considered stale if there has been no push/pull activity for more
than 1 month, i.e.:
* It hasn't been pulled for more than 1 month
* And it hasn't been pushed for more than 1 month
A multi-architecture image is considered stale if all single-architecture images
A multi-architecture image is stale if all single-architecture images
part of its manifest are stale.
To delete a tag, select the corresponding checkbox and select **Delete** from the
**Action** drop-down list.
To delete a tag, select the corresponding checkbox and select **Delete** from the **Action** drop-down list.
> **Note**
>
> Only a user with administrative access (owner or team member with Admin
> permission) over the repository can delete tags.
Select a tag's digest to view details.
You can select a tag's digest to view more details.
![View Tag](../../images/repo-image-layers.png)
@ -99,6 +91,4 @@ You now have an image from which you can run containers.
## Starring repositories
Your repositories can be starred and you can star repositories in return. Stars
are a way to show that you like a repository. They're also an easy way of
bookmarking your favorites.
You can star your repositories. Stars are a way to show that you like a repository. They're also an easy way of bookmarking your favorites.
View File
@ -3,55 +3,46 @@ description: Configuring repositories on Docker Hub
keywords: Docker, docker, trusted, registry, accounts, plans, Dockerfile, Docker Hub, webhooks, docs, documentation, creating, deleting, consolidating
title: Configure repositories
---
When configuring repositories, private repositories let you keep your container images private to your personal account, or within an organization or team. You can also view your permissions or manage your users' access to those repositories.
## Private repositories
Private repositories let you keep container images private, either to your
own account or within an organization or team.
You can make an existing repository private by going to its **Settings** tab and select **Make private**.
To create a private repository, select **Private** when creating a repository:
![Repo make private](../../images/repo-make-private.png){: style="max-width: 60%"}
![Create Private Repo](../../images/repo-create-private.png){: style="max-width: 60%"}
You get one free private repository with your Docker Hub user account (not
available for organizations you're a member of). If you need more private
repositories for your user account, upgrade your Docker Hub subscription from your [Billing Information](https://hub.docker.com/billing/plan){: target="_blank" rel="noopener" class="_"} page.
You can also make an existing repository private by going to its **Settings** tab:
![Convert Repo to Private](../../images/repo-make-private.png){: style="max-width: 60%"}
You get one private repository for free with your Docker Hub user account (not
usable for organizations you're a member of). If you need more private
repositories for your user account, upgrade your Docker Hub plan from your
[Billing Information](https://hub.docker.com/billing/plan){: target="_blank" rel="noopener" class="_"} page.
Once you create the private repository, you can `push` and `pull` images to and
Once you've created a private repository, you can `push` and `pull` images to and
from it using Docker.
> **Note**: You must sign in and have access to work with a
> private repository.
> **Note**: Private repositories aren't available to search through
> private repository. Private repositories aren't available to search through
> the top-level search or `docker search`.
You can choose collaborators and manage their access to a private
repository from that repository's **Settings** page. You can also toggle the
repository's status between public and private, if you have an available
repository slot open. Otherwise, you can upgrade your
[Docker Hub](https://hub.docker.com/account/billing-plans/){: target="_blank" rel="noopener" class="_"} plan.
[Docker Hub](https://hub.docker.com/account/billing-plans/){: target="_blank" rel="noopener" class="_"} subscription.
### Permissions reference
Permissions are cumulative. For example, if you have Read & Write permissions,
you automatically have Read-only permissions:
- `Read-only` access allows users to view, search, and pull a private repository in the same way as they can a public repository.
- `Read & Write` access allows users to pull, push, and view a repository Docker
Hub. In addition, it allows users to view, cancel, retry or trigger builds
- `Admin` access allows users to Pull, push, view, edit, and delete a
repository; edit build settings; update the repository description modify the
repositories "Description", "Collaborators" rights, "Public/Private"
visibility, and "Delete".
- `Read-only` access lets users view, search, and pull a private repository in the same way as they can a public repository.
- `Read & Write` access lets users pull, push, and view a repository. In addition, it lets users view, cancel, retry or trigger builds
- `Admin` access lets users pull, push, view, edit, and delete a
repository. Admins can also edit build settings and update the repository's description, collaborators' rights, and public/private visibility.
> **Note**
>
> A User who hasn't yet verified their email address only has
> A user who hasn't verified their email address only has
> `Read-only` access to the repository, regardless of the rights their team
> membership has given them.
View File
@ -13,7 +13,7 @@ Docker images are pushed to Docker Hub through the [`docker push`](/engine/refer
command. A single Docker Hub repository can hold many Docker images (stored as
**tags**).
## Creating repositories
## Creating a repository
To create a repository, sign into Docker Hub, select **Repositories** then
**Create Repository**:
@ -31,20 +31,26 @@ When creating a new repository:
>
> You can't rename a Docker Hub repository once it's created.
* The description can be up to 100 characters and is used in the search result.
* The description can be up to 100 characters and is used in the search result.
* You can link a GitHub or Bitbucket account now, or choose to do it later in
the repository settings.
![Setting page for creating a repo](../images/repo-create-details.png)
After you hit the **Create** button, you can start using `docker push` to push
After you select **Create**, you can start using `docker push` to push
images to this repository.
## Creating a private repository
To create a private repository, navigate to Docker Hub and select **Repositories** and **Private**.
![Create Private Repo](/docker-hub/images/repo-create-private.png){: style="max-width: 60%"}
## Deleting a repository
1. Sign into [Docker Hub](https://hub.docker.com){: target="_blank" rel="noopener" class="_"} and select **Repositories**.
1. Navigate to [Docker Hub](https://hub.docker.com){: target="_blank" rel="noopener" class="_"} and select **Repositories**.
2. Select a repository from the list, select **Settings** and then Delete Repository.
2. Select a repository from the list, select **Settings**, and then Delete Repository.
> **Note:**
>
View File
@ -1,12 +1,13 @@
---
description: Instructions for installing Docker Engine on Debian
keywords: requirements, apt, installation, debian, install, uninstall, upgrade, update
keywords:
requirements, apt, installation, debian, install, uninstall, upgrade, update
redirect_from:
- /engine/installation/debian/
- /engine/installation/linux/raspbian/
- /engine/installation/linux/debian/
- /engine/installation/linux/docker-ce/debian/
- /install/linux/docker-ce/debian/
- /engine/installation/debian/
- /engine/installation/linux/raspbian/
- /engine/installation/linux/debian/
- /engine/installation/linux/docker-ce/debian/
- /install/linux/docker-ce/debian/
title: Install Docker Engine on Debian
toc_max: 4
---
@ -27,12 +28,14 @@ Raspbian versions:
- Raspbian Bullseye 11 (stable)
- Raspbian Buster 10 (oldstable)
Docker Engine is supported on `x86_64` (or `amd64`), `armhf`, and `arm64` architectures.
Docker Engine is compatible with `x86_64` (or `amd64`), `armhf`, and `arm64`
architectures.
### Uninstall old versions
Older versions of Docker were called `docker`, `docker.io`, or `docker-engine`.
If these are installed, uninstall them:
Older versions of Docker went by the names of `docker`, `docker.io`, or
`docker-engine`. Uninstall any such older versions before attempting to install
a new version:
```console
$ sudo apt-get remove docker docker-engine docker.io containerd runc
@ -40,38 +43,37 @@ $ sudo apt-get remove docker docker-engine docker.io containerd runc
It's OK if `apt-get` reports that none of these packages are installed.
The contents of `/var/lib/docker/`, including images, containers, volumes, and
networks, are preserved. If you do not need to save your existing data, and want to
start with a clean installation, refer to the [uninstall Docker Engine](#uninstall-docker-engine)
section at the bottom of this page.
Images, containers, volumes, and networks stored in `/var/lib/docker/` aren't
automatically removed when you uninstall Docker. If you want to start with a
clean installation, and prefer to clean up any existing data, refer to the
[uninstall Docker Engine](#uninstall-docker-engine) section.
## Installation methods
You can install Docker Engine in different ways, depending on your needs:
- Most users
[set up Docker's repositories](#install-using-the-repository) and install
from them, for ease of installation and upgrade tasks. This is the
recommended approach, except for Raspbian.
- Docker Engine comes bundled with
[Docker Desktop for Linux](../../desktop/install/linux-install.md). This is
the easiest and quickest way to get started.
- Some users download the DEB package and
[install it manually](#install-from-a-package) and manage
upgrades completely manually. This is useful in situations such as installing
Docker on air-gapped systems with no access to the internet.
- You can also set up and install Docker Engine from
[Docker's `apt` repository](#install-using-the-repository).
- In testing and development environments, some users choose to use automated
[convenience scripts](#install-using-the-convenience-script) to install Docker.
This is currently the only approach for Raspbian.
- [Install it manually](#install-from-a-package) and manage upgrades manually.
- Using a [convenience script](#install-using-the-convenience-script). Only
recommended for testing and development environments. This is the only
approach available for Raspbian.
### Install using the repository
Before you install Docker Engine for the first time on a new host machine, you need
to set up the Docker repository. Afterward, you can install and update Docker
from the repository.
Before you install Docker Engine for the first time on a new host machine, you
need to set up the Docker repository. Afterward, you can install and update
Docker from the repository.
> **Raspbian users cannot use this method!**
> **Raspbian users can't use this method.**
>
> For Raspbian, installing using the repository is not yet supported. You must
> For Raspbian, installing using the repository isn't yet supported. You must
> instead use the [convenience script](#install-using-the-convenience-script).
#### Set up the repository
@ -108,99 +110,142 @@ from the repository.
#### Install Docker Engine
This procedure works for Debian on `x86_64` / `amd64`, `armhf`, `arm64`, and Raspbian.
This procedure works for Debian on `x86_64` / `amd64`, `armhf`, `arm64`, and
Raspbian.
1. Update the `apt` package index, and install the _latest version_ of Docker
Engine, containerd, and Docker Compose, or go to the next step to install a specific version:
1. Update the `apt` package index:
```console
$ sudo apt-get update
$ sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin
```
> Receiving a GPG error when running `apt-get update`?
>
> Your default umask may not be set correctly, causing the public key file
> for the repo to not be detected. Run the following command and then try to
> update your repo again: `sudo chmod a+r /etc/apt/keyrings/docker.gpg`.
> Your default [umask](https://en.wikipedia.org/wiki/Umask){: target="blank"
> rel="noopener" } may be incorrectly configured, preventing detection of the
> repository public key file. Try granting read permission for the Docker
> public key file before updating the package index:
>
> ```console
> $ sudo chmod a+r /etc/apt/keyrings/docker.gpg
> $ sudo apt-get update
> ```
2. To install a _specific version_ of Docker Engine, list the available versions
in the repo, then select and install:
2. Install Docker Engine, containerd, and Docker Compose.
a. List the versions available in your repo:
<ul class="nav nav-tabs">
<li class="active"><a data-toggle="tab" data-target="#tab-latest">Latest</a></li>
<li><a data-toggle="tab" data-target="#tab-version">Specific version</a></li>
</ul>
<div class="tab-content">
<br>
<div id="tab-latest" class="tab-pane fade in active" markdown="1">
To install the latest version, run:
```console
$ apt-cache madison docker-ce
docker-ce | 5:18.09.1~3-0~debian-stretch | {{ download-url-base }} stretch/stable amd64 Packages
docker-ce | 5:18.09.0~3-0~debian-stretch | {{ download-url-base }} stretch/stable amd64 Packages
docker-ce | 18.06.1~ce~3-0~debian | {{ download-url-base }} stretch/stable amd64 Packages
docker-ce | 18.06.0~ce~3-0~debian | {{ download-url-base }} stretch/stable amd64 Packages
$ sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin
```
b. Install a specific version using the version string from the second column,
for example, `5:18.09.1~3-0~debian-stretch`.
</div>
<div id="tab-version" class="tab-pane fade" markdown="1">
To install a specific version of Docker Engine, start by listing the available
versions in the repository:
```console
$ sudo apt-get install docker-ce=<VERSION_STRING> docker-ce-cli=<VERSION_STRING> containerd.io docker-compose-plugin
# List the available versions:
$ apt-cache madison docker-ce | awk '{ print $3 }'
5:18.09.1~3-0~debian-stretch
5:18.09.0~3-0~debian-stretch
18.06.1~ce~3-0~debian
18.06.0~ce~3-0~debian
```
3. Verify that Docker Engine is installed correctly by running the `hello-world`
image.
Select the desired version and install:
```console
$ VERSION_STRING=5:18.09.0~3-0~debian-stretch
$ sudo apt-get install docker-ce=$VERSION_STRING docker-ce-cli=$VERSION_STRING containerd.io docker-compose-plugin
```
</div>
<hr>
</div>
3. Verify that the Docker Engine installation is successful by running the
`hello-world` image:
```console
$ sudo docker run hello-world
```
This command downloads a test image and runs it in a container. When the
container runs, it prints a message and exits.
container runs, it prints a confirmation message and exits.
Docker Engine is installed and running. The `docker` group is created but no users
are added to it. You need to use `sudo` to run Docker commands.
Continue to [Linux postinstall](linux-postinstall.md) to allow non-privileged
users to run Docker commands and for other optional configuration steps.
You have now successfully installed and started Docker Engine. The `docker` user
group exists but contains no users, which is why you're required to use `sudo`
to run Docker commands. Continue to [Linux post-install](linux-postinstall.md)
to allow non-privileged users to run Docker commands and for other optional
configuration steps.
#### Upgrade Docker Engine
To upgrade Docker Engine, first run `sudo apt-get update`, then follow the
[installation instructions](#install-using-the-repository), choosing the new
version you want to install.
To upgrade Docker Engine, follow the
[installation instructions](#install-docker-engine), choosing the new version
you want to install.
### Install from a package
If you cannot use Docker's repository to install Docker Engine, you can download the
`.deb` file for your release and install it manually. You need to download
a new file each time you want to upgrade Docker.
If you can't use Docker's `apt` repository to install Docker Engine, you can
download the `deb` file for your release and install it manually. You need to
download a new file each time you want to upgrade Docker Engine.
1. Go to [`{{ download-url-base }}/dists/`]({{ download-url-base }}/dists/){: target="_blank" rel="noopener" class="_" },
choose your Debian version, then browse to `pool/stable/`, choose `amd64`,
`armhf`, or `arm64`, and download the `.deb` file for the Docker Engine
version you want to install.
1. Go to [`{{ download-url-base }}/dists/`]({{ download-url-base }}/dists/){:
target="_blank" rel="noopener" class="_" }.
2. Install Docker Engine, changing the path below to the path where you downloaded
the Docker package.
2. Select your Debian version in the list.
3. Go to `pool/stable/` and select the applicable architecture (`amd64`,
`armhf`, `arm64`, or `s390x`).
4. Download the following `deb` files for the Docker Engine, CLI, containerd,
and Docker Compose packages:
- `containerd.io_<version>_<arch>.deb`
- `docker-ce_<version>_<arch>.deb`
- `docker-ce-cli_<version>_<arch>.deb`
- `docker-compose-plugin_<version>_<arch>.deb`
5. Install the `.deb` packages. Update the paths in the following example to
where you downloaded the Docker packages.
```console
$ sudo dpkg -i /path/to/package.deb
$ sudo dpkg -i ./containerd.io_<version>_<arch>.deb \
./docker-ce_<version>_<arch>.deb \
./docker-ce-cli_<version>_<arch>.deb \
./docker-compose-plugin_<version>_<arch>.deb
```
The Docker daemon starts automatically.
3. Verify that Docker Engine is installed correctly by running the `hello-world`
image.
6. Verify that the Docker Engine installation is successful by running the
`hello-world` image:
```console
$ sudo service docker start
$ sudo docker run hello-world
```
This command downloads a test image and runs it in a container. When the
container runs, it prints a message and exits.
container runs, it prints a confirmation message and exits.
Docker Engine is installed and running. The `docker` group is created but no users
are added to it. You need to use `sudo` to run Docker commands.
Continue to [Post-installation steps for Linux](linux-postinstall.md) to allow
non-privileged users to run Docker commands and for other optional configuration
steps.
You have now successfully installed and started Docker Engine. The `docker` user
group exists but contains no users, which is why you're required to use `sudo`
to run Docker commands. Continue to [Linux post-install](linux-postinstall.md)
to allow non-privileged users to run Docker commands and for other optional
configuration steps.
#### Upgrade Docker Engine
@ -211,15 +256,14 @@ To upgrade Docker Engine, download the newer package file and repeat the
## Uninstall Docker Engine
1. Uninstall the Docker Engine, CLI, Containerd, and Docker Compose packages:
1. Uninstall the Docker Engine, CLI, containerd, and Docker Compose packages:
```console
$ sudo apt-get purge docker-ce docker-ce-cli containerd.io docker-compose-plugin
```
2. Images, containers, volumes, or customized configuration files on your host
are not automatically removed. To delete all images, containers, and
volumes:
2. Images, containers, volumes, or custom configuration files on your host
aren't automatically removed. To delete all images, containers, and volumes:
```console
$ sudo rm -rf /var/lib/docker
@ -231,4 +275,5 @@ You must delete any edited configuration files manually.
## Next steps
- Continue to [Post-installation steps for Linux](linux-postinstall.md).
- Review the topics in [Develop with Docker](../../develop/index.md) to learn how to build new applications using Docker.
- Review the topics in [Develop with Docker](../../develop/index.md) to learn
how to build new applications using Docker.
View File
@ -1,5 +1,5 @@
---
title: Install Docker Engine
title: Docker Engine installation overview
description: Lists the installation methods
keywords: docker, installation, install, Docker Engine, Docker Engine, docker editions, stable, edge
redirect_from:
View File
@ -1,34 +1,40 @@
---
description: Optional post-installation steps for Linux
keywords: Docker, Docker documentation, requirements, apt, installation, ubuntu, install, uninstall, upgrade, update
keywords: >
Docker, Docker documentation, requirements, apt, installation, ubuntu,
install, uninstall, upgrade, update
title: Post-installation steps for Linux
redirect_from:
- /engine/installation/linux/docker-ee/linux-postinstall/
- /engine/installation/linux/linux-postinstall/
- /install/linux/linux-postinstall/
- /engine/installation/linux/docker-ee/linux-postinstall/
- /engine/installation/linux/linux-postinstall/
- /install/linux/linux-postinstall/
---
This section contains optional procedures for configuring Linux hosts to work
better with Docker.
These optional post-installation procedures show you how to configure your
Linux host machine to work better with Docker.
## Manage Docker as a non-root user
The Docker daemon binds to a Unix socket instead of a TCP port. By default
that Unix socket is owned by the user `root` and other users can only access it
using `sudo`. The Docker daemon always runs as the `root` user.
The Docker daemon binds to a Unix socket, not a TCP port. By default it's the
`root` user that owns the Unix socket, and other users can only access it using
`sudo`. The Docker daemon always runs as the `root` user.
If you don't want to preface the `docker` command with `sudo`, create a Unix
group called `docker` and add users to it. When the Docker daemon starts, it
creates a Unix socket accessible by members of the `docker` group.
creates a Unix socket accessible by members of the `docker` group. On some Linux
distributions, the system automatically creates this group when installing
Docker Engine using a package manager. In that case, there is no need for you to
manually create the group.
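
As a minimal sketch of the commands involved (assuming `$USER` is the account you want to add):

```console
$ sudo groupadd docker
$ sudo usermod -aG docker $USER
```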
> Warning
<!-- prettier-ignore -->
> **Warning**
>
> The `docker` group grants privileges equivalent to the `root`
> user. For details on how this impacts security in your system, see
> [*Docker Daemon Attack Surface*](../security/index.md#docker-daemon-attack-surface).
> The `docker` group grants root-level privileges to the user. For
> details on how this impacts security in your system, see
> [Docker Daemon Attack Surface](../security/index.md#docker-daemon-attack-surface).
{: .warning}
> **Note**:
> **Note**
>
> To run Docker without root privileges, see
> [Run the Docker daemon as a non-root user (Rootless mode)](../security/rootless.md).
@ -49,11 +55,10 @@ To create the `docker` group and add your user:
3. Log out and log back in so that your group membership is re-evaluated.
If testing on a virtual machine, it may be necessary to restart the virtual machine for changes to take effect.
> If you're running Linux in a virtual machine, it may be necessary to
> restart the virtual machine for changes to take effect.
On a desktop Linux environment such as X Windows, log out of your session completely and then log back in.
On Linux, you can also run the following command to activate the changes to groups:
You can also run the following command to activate the changes to groups:
```console
$ newgrp docker
@ -68,103 +73,103 @@ To create the `docker` group and add your user:
This command downloads a test image and runs it in a container. When the
container runs, it prints a message and exits.
If you initially ran Docker CLI commands using `sudo` before adding
your user to the `docker` group, you may see the following error,
which indicates that your `~/.docker/` directory was created with
incorrect permissions due to the `sudo` commands.
If you initially ran Docker CLI commands using `sudo` before adding your user
to the `docker` group, you may see the following error:
```none
WARNING: Error loading config file: /home/user/.docker/config.json -
stat /home/user/.docker/config.json: permission denied
```
To fix this problem, either remove the `~/.docker/` directory
(it is recreated automatically, but any custom settings
are lost), or change its ownership and permissions using the
following commands:
This error indicates that the permission settings for the `~/.docker/`
directory are incorrect, due to having used the `sudo` command earlier.
To fix this problem, either remove the `~/.docker/` directory (it's recreated
automatically, but any custom settings are lost), or change its ownership and
permissions using the following commands:
```console
$ sudo chown "$USER":"$USER" /home/"$USER"/.docker -R
$ sudo chmod g+rwx "$HOME/.docker" -R
```
## Configure Docker to start on boot
## Configure Docker to start on boot with systemd
Most current Linux distributions (RHEL, CentOS, Fedora, Debian, Ubuntu 16.04 and
higher) use [`systemd`](../../config/daemon/systemd.md) to manage which services
start when the system boots. On Debian and Ubuntu, the Docker service is configured
to start on boot by default. To automatically start Docker and Containerd on boot
for other distros, use the commands below:
Many modern Linux distributions use [systemd](../../config/daemon/systemd.md) to
manage which services start when the system boots. On Debian and Ubuntu, the
Docker service starts on boot by default. To automatically start Docker and
containerd on boot for other Linux distributions using systemd, run the
following commands:
```console
$ sudo systemctl enable docker.service
$ sudo systemctl enable containerd.service
```
To disable this behavior, use `disable` instead.
To stop this behavior, use `disable` instead.
```console
$ sudo systemctl disable docker.service
$ sudo systemctl disable containerd.service
```
If you need to add an HTTP Proxy, set a different directory or partition for the
If you need to add an HTTP proxy, set a different directory or partition for the
Docker runtime files, or make other customizations, see
[customize your systemd Docker daemon options](../../config/daemon/systemd.md).
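
For example, the usual approach for an HTTP proxy is a systemd drop-in file such as `/etc/systemd/system/docker.service.d/http-proxy.conf`; the proxy address below is only a placeholder:

```
[Service]
Environment="HTTP_PROXY=http://proxy.example.com:3128"
Environment="HTTPS_PROXY=http://proxy.example.com:3128"
```

After creating or editing the file, run `sudo systemctl daemon-reload` followed by `sudo systemctl restart docker` for the change to take effect.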
## Use a different storage engine
For information about the different storage engines, see
[Storage drivers](../../storage/storagedriver/index.md).
The default storage engine and the list of supported storage engines depend on
your host's Linux distribution and available kernel drivers.
## Configure default logging driver
Docker provides the [capability](../../config/containers/logging/index.md) to
collect and view log data from all containers running on a host via a series of
logging drivers. The default logging driver, `json-file`, writes log data to
JSON-formatted files on the host filesystem. Over time, these log files expand
in size, leading to potential exhaustion of disk resources.
Docker provides [logging drivers](../../config/containers/logging/index.md) for
collecting and viewing log data from all containers running on a host. The
default logging driver, `json-file`, writes log data to JSON-formatted files on
the host filesystem. Over time, these log files expand in size, leading to
potential exhaustion of disk resources.
To alleviate such issues, either configure the `json-file` logging driver to
enable [log rotation](../../config/containers/logging/json-file.md), use an
[alternative logging driver](../../config/containers/logging/configure.md#configure-the-default-logging-driver)
such as the ["local" logging driver](../../config/containers/logging/local.md)
that performs log rotation by default, or use a logging driver that sends
logs to a remote logging aggregator.
To avoid issues with overusing disk for log data, consider one of the following
options:
- Configure the `json-file` logging driver to turn on
[log rotation](../../config/containers/logging/json-file.md), as shown in the
example after this list
- Use an
[alternative logging driver](../../config/containers/logging/configure.md#configure-the-default-logging-driver)
such as the ["local" logging driver](../../config/containers/logging/local.md)
that performs log rotation by default
- Use a logging driver that sends logs to a remote logging aggregator.
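
Following the first option above, for example, a `daemon.json` that rotates `json-file` logs could look like this (the size and file-count values are only illustrative):

```json
{
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "10m",
    "max-file": "3"
  }
}
```

Restart the daemon after editing the file; the setting only applies to newly created containers.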
## Configure where the Docker daemon listens for connections
By default, the Docker daemon listens for connections on a UNIX socket to accept
requests from local clients. It is possible to allow Docker to accept requests
By default, the Docker daemon listens for connections on a Unix socket to accept
requests from local clients. It's possible to allow Docker to accept requests
from remote hosts by configuring it to listen on an IP address and port as well
as the UNIX socket. For more detailed information on this configuration option
take a look at "Bind Docker to another host/port or a unix socket" section of
the [Docker CLI Reference](/engine/reference/commandline/dockerd/) article.
as the Unix socket. For more detailed information on this configuration option,
refer to the
[dockerd CLI reference](/engine/reference/commandline/dockerd/#bind-docker-to-another-hostport-or-a-unix-socket).
<!-- prettier-ignore -->
> Secure your connection
>
> Before configuring Docker to accept connections from remote hosts it's
> critically important that you understand the security implications of opening
> Docker to the network. If steps aren't taken to secure the connection, it's
> possible for remote non-root users to gain root access on the host. For more
> information on how to use TLS certificates to secure this connection, check
> [Protect the Docker daemon socket](../security/protect-access.md).
{: .warning}
You can configure Docker to accept remote connections. This can be done using
the `docker.service` systemd unit file for Linux distributions using systemd. Or
you can use the `daemon.json` file, if your distribution doesn't use systemd.
> systemd vs `daemon.json`
>
> Configuring Docker to listen for connections using both the systemd unit file
> and the `daemon.json` file causes a conflict that prevents Docker from
> starting.
### Configuring remote access with systemd unit file
1. Use the command `sudo systemctl edit docker.service` to open an override file
for `docker.service` in a text editor.
2. Add or modify the following lines, substituting your own values.
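   A minimal sketch of what the override might contain, assuming you want the
   daemon to keep listening on the local socket and also listen on TCP port
   2375 on localhost (adjust the address and port to your needs):

   ```systemd
   [Service]
   ExecStart=
   ExecStart=/usr/bin/dockerd -H fd:// -H tcp://127.0.0.1:2375
   ```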
   Save the file, then reload the systemd configuration and restart Docker for
   the change to take effect:

   ```console
   $ sudo systemctl daemon-reload
   $ sudo systemctl restart docker.service
   ```
3. Verify that the change has gone through by confirming that `dockerd` is
   listening on the configured port:
```console
$ sudo netstat -lntp | grep dockerd
```
### Configuring remote access with `daemon.json`
1. Set the `hosts` array in the `/etc/docker/daemon.json` to connect to the UNIX
socket and an IP address, as follows:
```json
{
  "hosts": ["unix:///var/run/docker.sock", "tcp://127.0.0.1:2375"]
}
```
2. Restart Docker.
3. Verify that the change has gone through.
```console
$ sudo netstat -lntp | grep dockerd
```
To enable IPv6 on the Docker daemon, see
[Enable IPv6 support](../../config/daemon/ipv6.md).
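As a rough sketch, enabling IPv6 involves setting the following keys in
`/etc/docker/daemon.json` and restarting the daemon (the prefix shown is a
documentation example, substitute your own):

```json
{
  "ipv6": true,
  "fixed-cidr-v6": "2001:db8:1::/64"
}
```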
## Next steps
- Take a look at the [Get started](../../get-started/index.md) training modules
to learn how to build an image and run it as a containerized application.
- Review the topics in [Develop with Docker](../../develop/index.md) to learn
how to build new applications using Docker.
View File
---
title: Troubleshoot Docker Engine
description:
Diagnose and resolve error messages related to the Docker Engine installation
keywords: Docker Engine, troubleshooting, error, Linux
---
This page contains instructions for troubleshooting and diagnosing the Docker
Engine installation.
## Kernel compatibility
Docker can't run correctly if your kernel is older than version 3.10, or if it's
missing kernel modules. To check kernel compatibility, you can download and run
the
[`check-config.sh`](https://raw.githubusercontent.com/docker/docker/master/contrib/check-config.sh)
script.
```console
$ curl https://raw.githubusercontent.com/docker/docker/master/contrib/check-config.sh > check-config.sh
$ bash ./check-config.sh
```
The script only works on Linux.
## Unable to connect to the Docker daemon
```none
Cannot connect to the Docker daemon. Is 'docker daemon' running on this host?
```
This error may indicate:
- The Docker daemon isn't running on your system. Start the daemon and try
running the command again.
- Your Docker client is attempting to connect to a Docker daemon on a different
host, and that host is unreachable.
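For the first of these, a quick way to check and start the daemon on a
systemd-based host (a sketch, assuming systemd manages the `docker` service):

```console
# Check whether the Docker service is running
$ sudo systemctl status docker

# Start it if it isn't
$ sudo systemctl start docker
```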
To see which host your client is connecting to, check the value of the
`DOCKER_HOST` variable in your environment.
```console
$ env | grep DOCKER_HOST
```
If this command returns a value, the Docker client is set to connect to a Docker
daemon running on that host. If it's unset, the Docker client is set to connect
to the Docker daemon running on the local host. If it's set in error, use the
following command to unset it:
```console
$ unset DOCKER_HOST
```
You may need to edit your environment in files such as `~/.bashrc` or
`~/.profile` to prevent the `DOCKER_HOST` variable from being set erroneously.
If `DOCKER_HOST` is set as intended, verify that the Docker daemon is running on
the remote host and that a firewall or network outage isn't preventing you from
connecting.
## IP forwarding problems
If you manually configure your network using `systemd-network` with systemd
version 219 or later, Docker containers may not be able to access your network.
Beginning with systemd version 220, the forwarding setting for a given network
(`net.ipv4.conf.<interface>.forwarding`) defaults to off. This setting prevents
IP forwarding. It also conflicts with Docker's behavior of enabling the
`net.ipv4.conf.all.forwarding` setting within containers.
To work around this on RHEL, CentOS, or Fedora, edit the `<interface>.network`
file in `/usr/lib/systemd/network/` on your Docker host, for example,
`/usr/lib/systemd/network/80-container-host0.network`.
Add the following block within the `[Network]` section.
```systemd
[Network]
...
IPForward=kernel
# OR
IPForward=true
```
This configuration allows IP forwarding from the container as expected.
## DNS resolver issues
```console
DNS resolver found in resolv.conf and containers can't use it
```
Linux desktop environments often have a network manager program running that
uses `dnsmasq` to cache DNS requests. The `dnsmasq` instance runs on a loopback
address such as `127.0.0.1` or `127.0.1.1`, and the network manager adds that
address as the nameserver in `/etc/resolv.conf`. The `dnsmasq` service speeds up
DNS look-ups and provides DHCP services. Such a configuration doesn't work
within a Docker container. The container uses its own network namespace, and
resolves loopback addresses such as `127.0.0.1` to itself, and it's unlikely to
be running a DNS server on its own loopback address.
If Docker detects that no DNS server referenced in `/etc/resolv.conf` is a fully
functional DNS server, the following warning occurs:
```none
WARNING: Local (127.0.0.1) DNS resolver found in resolv.conf and containers
can't use it. Using default external servers : [8.8.8.8 8.8.4.4]
```
If you see this warning, first check to see if you use `dnsmasq`:
```console
$ ps aux | grep dnsmasq
```
If your container needs to resolve hosts which are internal to your network, the
public nameservers aren't adequate. You have two choices:
- Specify DNS servers for Docker to use.
- Turn off `dnsmasq`.
Turning off `dnsmasq` makes the network manager add the addresses of your
actual DNS nameservers to `/etc/resolv.conf`, but you lose the benefits of
`dnsmasq`.
You only need to use one of these methods.
## Specify DNS servers for Docker
The default location of the configuration file is `/etc/docker/daemon.json`. You
can change the location of the configuration file using the `--config-file`
daemon flag. The following instruction assumes that the location of the
configuration file is `/etc/docker/daemon.json`.
1. Create or edit the Docker daemon configuration file, which defaults to
   `/etc/docker/daemon.json` and controls the Docker daemon configuration.
```console
$ sudo nano /etc/docker/daemon.json
```
2. Add a `dns` key with one or more DNS server IP addresses as values.
```json
{
"dns": ["8.8.8.8", "8.8.4.4"]
}
```
If the file has existing contents, you only need to add or edit the `dns`
line. If your internal DNS server can't resolve public IP addresses, include
at least one DNS server that can. Doing so allows you to connect to Docker
Hub, and your containers to resolve internet domain names.
Save and close the file.
3. Restart the Docker daemon.
```console
$ sudo service docker restart
```
4. Verify that Docker can resolve external IP addresses by trying to pull an
image:
```console
$ docker pull hello-world
```
5. If necessary, verify that Docker containers can resolve an internal hostname
by pinging it.
```console
$ docker run --rm -it alpine ping -c4 <my_internal_host>
PING google.com (192.168.1.2): 56 data bytes
64 bytes from 192.168.1.2: seq=0 ttl=41 time=7.597 ms
64 bytes from 192.168.1.2: seq=1 ttl=41 time=7.635 ms
64 bytes from 192.168.1.2: seq=2 ttl=41 time=7.660 ms
64 bytes from 192.168.1.2: seq=3 ttl=41 time=7.677 ms
```
## Turn off `dnsmasq`
### Ubuntu
If you prefer not to change the Docker daemon's configuration to use a specific
IP address, follow these instructions to turn off `dnsmasq` in NetworkManager.
1. Edit the `/etc/NetworkManager/NetworkManager.conf` file.
2. Comment out the `dns=dnsmasq` line by adding a `#` character to the beginning
of the line.
```none
# dns=dnsmasq
```
Save and close the file.
3. Restart both NetworkManager and Docker. As an alternative, you can reboot
your system.
```console
$ sudo systemctl restart network-manager
$ sudo systemctl restart docker
```
### RHEL, CentOS, or Fedora
To turn off `dnsmasq` on RHEL, CentOS, or Fedora:
1. Turn off the `dnsmasq` service:
```console
$ sudo systemctl stop dnsmasq
$ sudo systemctl disable dnsmasq
```
2. Configure the DNS servers manually using the
[Red Hat documentation](https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/6/html/Deployment_Guide/s1-networkscripts-interfaces.html){:
target="_blank" rel="noopener" class="_"}.
## Allow access to the remote API through a firewall
If you run a firewall on the same host as you run Docker, and you want to access
the Docker Remote API from another remote host, you must configure your firewall
to allow incoming connections on the Docker port. The default port is `2376` if
you're using TLS encrypted transport, or `2375` otherwise.
Two common firewall daemons are:
- [Uncomplicated Firewall (UFW)](https://help.ubuntu.com/community/UFW), often
used for Ubuntu systems.
- [firewalld](https://firewalld.org), often used for RPM-based systems.
Consult the documentation for your OS and firewall. The following information
might help you get started. The settings used in these examples are permissive,
and you may want to use a different configuration that locks your system down
more.
- For UFW, set `DEFAULT_FORWARD_POLICY="ACCEPT"` in your configuration.
- For firewalld, add rules similar to the following to your policy, one for
  incoming requests and one for outgoing requests.
```xml
<direct>
[ <rule ipv="ipv6" table="filter" chain="FORWARD_direct" priority="0"> -i zt0 -j ACCEPT </rule> ]
[ <rule ipv="ipv6" table="filter" chain="FORWARD_direct" priority="0"> -o zt0 -j ACCEPT </rule> ]
</direct>
```
Make sure that the interface names and chain names are correct.
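Depending on your setup, you may also need to open the daemon's port itself. A
sketch for UFW, assuming the TLS port `2376` (use `2375` for unencrypted
transport):

```console
$ sudo ufw allow 2376/tcp
```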
## Kernel cgroup swap limit capabilities
On Ubuntu or Debian hosts, you may see messages similar to the following when
working with an image.
```none
WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.
```
If you don't need these capabilities, you can ignore the warning.
You can turn on these capabilities on Ubuntu or Debian by following these
instructions. Memory and swap accounting incur an overhead of about 1% of the
total available memory and a 10% overall performance degradation, even when
Docker isn't running.
1. Log into the Ubuntu or Debian host as a user with `sudo` privileges.
2. Edit the `/etc/default/grub` file. Add or edit the `GRUB_CMDLINE_LINUX` line
to add the following two key-value pairs:
```none
GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"
```
Save and close the file.
3. Update the GRUB boot loader.
```console
$ sudo update-grub
```
An error occurs if your GRUB configuration file has incorrect syntax. In this
case, repeat steps 2 and 3.
The changes take effect when you reboot the system.
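After the reboot, you can confirm that the kernel picked up the new parameters
by inspecting the kernel command line (a quick sanity check, output trimmed):

```console
$ cat /proc/cmdline
... cgroup_enable=memory swapaccount=1
```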
View File
---
description: Instructions for installing Docker Engine on Ubuntu
keywords:
requirements, apt, installation, ubuntu, install, uninstall, upgrade, update
redirect_from:
- /ee/docker-ee/ubuntu/
- /engine/installation/linux/docker-ce/ubuntu/
- /engine/installation/linux/docker-ee/ubuntu/
- /engine/installation/linux/ubuntu/
- /engine/installation/linux/ubuntulinux/
- /engine/installation/ubuntulinux/
- /install/linux/docker-ce/ubuntu/
- /install/linux/docker-ee/ubuntu/
- /install/linux/ubuntu/
- /installation/ubuntulinux/
title: Install Docker Engine on Ubuntu
toc_max: 4
---
> **Docker Desktop for Linux**
>
> Docker Desktop helps you build, share, and run containers easily on Mac and
> Windows as you do on Linux. We are excited to share that Docker Desktop for
> Linux is now GA. For more information, see
> [Docker Desktop for Linux](../../desktop/install/linux-install.md).
{: .important}
To get started with Docker Engine on Ubuntu, make sure you
[meet the prerequisites](#prerequisites), then
[install Docker](#installation-methods).
Docker Engine is supported on the 64-bit version of current Ubuntu LTS
releases, including:
- Ubuntu Focal 20.04 (LTS)
- Ubuntu Bionic 18.04 (LTS)
Docker Engine is compatible with `x86_64` (or `amd64`), `armhf`, `arm64`, and
`s390x` architectures.
### Uninstall old versions
Older versions of Docker went by the names of `docker`, `docker.io`, or
`docker-engine`. Uninstall any such older versions before attempting to install
a new version:
```console
$ sudo apt-get remove docker docker-engine docker.io containerd runc
```
It's OK if `apt-get` reports that none of these packages are installed.
Images, containers, volumes, and networks stored in `/var/lib/docker/` aren't
automatically removed when you uninstall Docker. If you want to start with a
clean installation, and prefer to clean up any existing data, refer to the
[uninstall Docker Engine](#uninstall-docker-engine) section.
## Installation methods
You can install Docker Engine in different ways, depending on your needs:
- Docker Engine comes bundled with
[Docker Desktop for Linux](../../desktop/install/linux-install.md). This is
the easiest and quickest way to get started.
- You can also set up and install Docker Engine from
[Docker's `apt` repository](#install-using-the-repository).
- [Install it manually](#install-from-a-package) and manage upgrades manually.
- Use a [convenience script](#install-using-the-convenience-script). Only
  recommended for testing and development environments.
### Install using the repository
Before you install Docker Engine for the first time on a new host machine, you
need to set up the Docker repository. Afterward, you can install and update
Docker from the repository.
#### Set up the repository
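As a rough sketch, assuming the standard Docker `apt` repository and the
`/etc/apt/keyrings/docker.gpg` keyring path referenced in the next step,
setting up the repository involves adding Docker's GPG key and the repository
definition:

```console
# Install packages that allow apt to fetch the repository over HTTPS
$ sudo apt-get update
$ sudo apt-get install ca-certificates curl gnupg

# Add Docker's official GPG key to a dedicated keyring
$ sudo mkdir -p /etc/apt/keyrings
$ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg

# Set up the repository for your architecture and Ubuntu release
$ echo \
  "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
  $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
```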
#### Install Docker Engine
1. Update the `apt` package index:
```console
$ sudo apt-get update
```
> Receiving a GPG error when running `apt-get update`?
>
> Your default [umask](https://en.wikipedia.org/wiki/Umask){: target="blank"
> rel="noopener" } may be incorrectly configured, preventing detection of the
> repository public key file. Try granting read permission for the Docker
> public key file before updating the package index:
>
> ```console
> $ sudo chmod a+r /etc/apt/keyrings/docker.gpg
> $ sudo apt-get update
> ```
2. Install Docker Engine, containerd, and Docker Compose.
<ul class="nav nav-tabs">
<li class="active"><a data-toggle="tab" data-target="#tab-latest">Latest</a></li>
<li><a data-toggle="tab" data-target="#tab-version">Specific version</a></li>
</ul>
<div class="tab-content">
<br>
<div id="tab-latest" class="tab-pane fade in active" markdown="1">
To install the latest version, run:
```console
$ sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin
```
</div>
<div id="tab-version" class="tab-pane fade" markdown="1">
To install a specific version of Docker Engine, start by listing the available
versions in the repository:
```console
# List the available versions:
$ apt-cache madison docker-ce | awk '{ print $3 }'
5:20.10.16~3-0~ubuntu-jammy
5:20.10.15~3-0~ubuntu-jammy
5:20.10.14~3-0~ubuntu-jammy
5:20.10.13~3-0~ubuntu-jammy
```
Select the desired version and install:
```console
$ VERSION_STRING=5:20.10.13~3-0~ubuntu-jammy
$ sudo apt-get install docker-ce=$VERSION_STRING docker-ce-cli=$VERSION_STRING containerd.io docker-compose-plugin
```
</div>
<hr>
</div>
3. Verify that the Docker Engine installation is successful by running the
`hello-world` image:
```console
$ sudo service docker start
$ sudo docker run hello-world
```
This command downloads a test image and runs it in a container. When the
container runs, it prints a confirmation message and exits.
You have now successfully installed and started Docker Engine. The `docker` user
group exists but contains no users, which is why you're required to use `sudo`
to run Docker commands. Continue to [Linux post-install](linux-postinstall.md)
to allow non-privileged users to run Docker commands and for other optional
configuration steps.
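As a preview of those steps, the usual way to let a non-root user run Docker
commands is to add the user to the `docker` group (a sketch; log out and back
in afterwards for the group change to take effect):

```console
$ sudo usermod -aG docker $USER
```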
#### Upgrade Docker Engine
To upgrade Docker Engine, follow the
[installation instructions](#install-docker-engine), choosing the new version
you want to install.
### Install from a package
If you can't use Docker's `apt` repository to install Docker Engine, you can
download the `deb` file for your release and install it manually. You need to
download a new file each time you want to upgrade Docker Engine.
1. Go to [`{{ download-url-base }}/dists/`]({{ download-url-base }}/dists/){:
target="_blank" rel="noopener" class="_" }.
2. Select your Ubuntu version in the list.
3. Go to `pool/stable/` and select the applicable architecture (`amd64`,
`armhf`, `arm64`, or `s390x`).
4. Download the following `deb` files for the Docker Engine, CLI, containerd,
and Docker Compose packages:
- `containerd.io_<version>_<arch>.deb`
- `docker-ce_<version>_<arch>.deb`
- `docker-ce-cli_<version>_<arch>.deb`
- `docker-compose-plugin_<version>_<arch>.deb`
5. Install the `.deb` packages. Update the paths in the following example to
where you downloaded the Docker packages.
```console
$ sudo dpkg -i ./containerd.io_<version>_<arch>.deb \
./docker-ce_<version>_<arch>.deb \
./docker-ce-cli_<version>_<arch>.deb \
./docker-compose-plugin_<version>_<arch>.deb
```
The Docker daemon starts automatically.
6. Verify that the Docker Engine installation is successful by running the
`hello-world` image:
```console
$ sudo docker run hello-world
```
This command downloads a test image and runs it in a container. When the
container runs, it prints a confirmation message and exits.
You have now successfully installed and started Docker Engine. The `docker` user
group exists but contains no users, which is why you're required to use `sudo`
to run Docker commands. Continue to [Linux post-install](linux-postinstall.md)
to allow non-privileged users to run Docker commands and for other optional
configuration steps.
#### Upgrade Docker Engine
To upgrade Docker Engine, download the newer package files and repeat the
[installation procedure](#install-from-a-package), pointing to the new files.
## Uninstall Docker Engine
1. Uninstall the Docker Engine, CLI, containerd, and Docker Compose packages:
```console
$ sudo apt-get purge docker-ce docker-ce-cli containerd.io docker-compose-plugin
```
2. Images, containers, volumes, or custom configuration files on your host
aren't automatically removed. To delete all images, containers, and volumes:
```console
$ sudo rm -rf /var/lib/docker
```

You must delete any edited configuration files manually.
## Next steps
- Continue to [Post-installation steps for Linux](linux-postinstall.md).
- Review the topics in [Develop with Docker](../../develop/index.md) to learn
how to build new applications using Docker.
View File
release branch of `docker/cli`, for example, the `19.03` branch.
After generating the YAML files, replace the YAML files in
[https://github.com/docker/docs/tree/main/_data/engine-cli](https://github.com/docker/docs/tree/main/_data/engine-cli)
with the newly-generated files. Submit a pull request.
View File
You can ignore these warnings unless you actually need the ability to
[limit these resources](../../../config/containers/resource_constraints.md), in which case you
should consult your operating system's documentation for enabling them.
View File
---
title: "Sample application"
title: "Containerize an application"
keywords: get started, setup, orientation, quickstart, intro, concepts, containers, docker desktop
redirect_from:
- /get-started/part2/
description: Containerize and run a simple application to learn Docker
---
## Build the app's container image
In order to build the application, you'll need to use a `Dockerfile`. A
Dockerfile is simply a text-based file with no file extension. A Dockerfile contains a script of instructions that are used to create a container image.
1. In the `app` folder, the same location as the `package.json` file, create a file named `Dockerfile`. You can use the following commands to create a Dockerfile, based on your operating system.
<ul class="nav nav-tabs">
<li class="active"><a data-toggle="tab" data-target="#mac-linux">Mac / Linux</a></li>
<li><a data-toggle="tab" data-target="#windows">Windows</a></li>
</ul>
<div class="tab-content">
<div id="mac-linux" class="tab-pane fade in active" markdown="1">
In the terminal, run the following commands.
```console
$ cd /path/to/app
$ touch Dockerfile
```
<hr>
</div>
<div id="windows" class="tab-pane fade" markdown="1">
In the Windows Command Prompt, run the following commands.
```console
$ cd \path\to\app
$ type nul > Dockerfile
```
<hr>
</div>
</div>
2. Add the following contents to the Dockerfile. If you've created Dockerfiles before, you might see a few flaws in the Dockerfile below. But, don't worry. We'll go over them.
```dockerfile
# syntax=docker/dockerfile:1
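# Base image and dependency installation. The exact tag and packages may
# differ; these lines are illustrative.
FROM node:12-alpine
RUN apk add --no-cache python2 g++ make
WORKDIR /app
COPY . .
RUN yarn install --production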
CMD ["node", "src/index.js"]
EXPOSE 3000
```
> **Note**
>
> Select an instruction in the Dockerfile example to learn more about the instruction.
3. Open a terminal and go to the `app` directory with the `Dockerfile`. Now build the container image using the `docker build` command.
```console
$ docker build -t getting-started .
```
![Docker Dashboard with tutorial and app containers running](images/dashboard-two-containers.png)
## Next steps
In this short section, you learned the basics about building a container image and created a
Dockerfile to do so. Once you built an image, you started the container and saw the running app.
Next, you're going to make a modification to your app and learn how to update your running application
with a new image. Along the way, you'll learn a few other useful commands.
[Update the application](03_updating_app.md){: .button .primary-btn}
View File
![Updated application with updated empty text](images/todo-list-updated-empty-text.png){: style="width:55%" }
{: .text-center }
## Next steps
While you were able to build an update, there were two things you might have noticed:
- All of the existing items in your todo list are gone! That's not a very good app! You'll fix that
shortly.
- There were a lot of steps involved for such a small change. In an upcoming section, you'll learn
how to see code updates without needing to rebuild and start a new container every time you make a change.
Before talking about persistence, you'll see how to share these images with others.
[Sharing the application](04_sharing_app.md){: .button .primary-btn}