Merge pull request #13671 from docker/master

Publish updates from master
Usha Mandya, 2021-10-08 17:10:33 +01:00, committed by GitHub
commit 561482421e
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
7 changed files with 42 additions and 48 deletions

@@ -1,11 +1,11 @@
 #!/bin/sh
 
-# Fetches upstream resources from docker/docker and docker/distribution
+# Fetches upstream resources from docker/docker and distribution/distribution
 # before handing off the site to Jekyll to build
 # Relies on the "ENGINE_BRANCH" and "DISTRIBUTION_BRANCH" environment variables,
 # which are usually set by the Dockerfile.
 : "${ENGINE_BRANCH?No release branch set for docker/docker and docker/cli}"
-: "${DISTRIBUTION_BRANCH?No release branch set for docker/distribution}"
+: "${DISTRIBUTION_BRANCH?No release branch set for distribution/distribution}"
 : "${COMPOSE_CLI_BRANCH?No release branch set for docker/compose-cli}"
 
 # Translate branches for use by svn
@@ -23,10 +23,10 @@ if [ "${compose_cli_svn_branch}" = "branches/main" ]; then
 fi
 
 # Directories to get via SVN. We use this because you can't use git to clone just a portion of a repository
 svn co "https://github.com/docker/cli/${engine_svn_branch}/docs/extend" ./engine/extend || (echo "Failed engine/extend download" && exit 1)
 svn co "https://github.com/docker/docker/${engine_svn_branch}/docs/api" ./engine/api || (echo "Failed engine/api download" && exit 1)
 svn co "https://github.com/docker/compose-cli/${compose_cli_svn_branch}/docs" ./cloud || (echo "Failed compose-cli/docs download" && exit 1)
-svn co "https://github.com/docker/distribution/${distribution_svn_branch}/docs/spec" ./registry/spec || (echo "Failed registry/spec download" && exit 1)
+svn co "https://github.com/distribution/distribution/${distribution_svn_branch}/docs/spec" ./registry/spec || (echo "Failed registry/spec download" && exit 1)
 
 # Fix up URls in swagger files
 find ./engine/api -type f -name '*.yaml' | while read i; do sed -i 's#https://docs.docker.com/#/#g' "$i"; done;
@@ -35,12 +35,12 @@ find ./engine/api -type f -name '*.yaml' | while read i; do sed -i 's#https://do
 find . -name ".svn" -print0 | xargs -0 /bin/rm -rf
 
 # Get a few one-off files that we use directly from upstream
 wget --quiet --directory-prefix=./engine/ "https://raw.githubusercontent.com/docker/cli/${ENGINE_BRANCH}/docs/deprecated.md" || (echo "Failed engine/deprecated.md download" && exit 1)
 wget --quiet --directory-prefix=./engine/reference/ "https://raw.githubusercontent.com/docker/cli/${ENGINE_BRANCH}/docs/reference/builder.md" || (echo "Failed engine/reference/builder.md download" && exit 1)
 wget --quiet --directory-prefix=./engine/reference/ "https://raw.githubusercontent.com/docker/cli/${ENGINE_BRANCH}/docs/reference/run.md" || (echo "Failed engine/reference/run.md download" && exit 1)
 wget --quiet --directory-prefix=./engine/reference/commandline/ "https://raw.githubusercontent.com/docker/cli/${ENGINE_BRANCH}/docs/reference/commandline/cli.md" || (echo "Failed engine/reference/commandline/cli.md download" && exit 1)
 wget --quiet --directory-prefix=./engine/reference/commandline/ "https://raw.githubusercontent.com/docker/cli/${ENGINE_BRANCH}/docs/reference/commandline/dockerd.md" || (echo "Failed engine/reference/commandline/dockerd.md download" && exit 1)
-wget --quiet --directory-prefix=./registry/ "https://raw.githubusercontent.com/docker/distribution/${DISTRIBUTION_BRANCH}/docs/configuration.md" || (echo "Failed registry/configuration.md download" && exit 1)
+wget --quiet --directory-prefix=./registry/ "https://raw.githubusercontent.com/distribution/distribution/${DISTRIBUTION_BRANCH}/docs/configuration.md" || (echo "Failed registry/configuration.md download" && exit 1)
 
 # Remove things we don't want in the build
 rm -f ./engine/extend/cli_plugins.md # the cli plugins api is not a stable API, and not included in the TOC for that reason.
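
The hunk above assumes the branch variables have already been translated into SVN paths (GitHub's SVN bridge serves the default branch as `trunk` and other branches under `branches/<name>`). That translation happens earlier in the script and is not part of this diff, so the following is only a hedged sketch of the idea; the example branch value and the `echo` are illustrative, not the script's actual code.

```sh
#!/bin/sh
# Illustrative only: how a release branch name maps onto GitHub's SVN layout.
ENGINE_BRANCH="${ENGINE_BRANCH:-20.10}"        # example value; normally set by the Dockerfile
engine_svn_branch="branches/${ENGINE_BRANCH}"
if [ "${engine_svn_branch}" = "branches/master" ]; then
  engine_svn_branch="trunk"
fi
echo "Would fetch from https://github.com/docker/docker/${engine_svn_branch}/docs/api"
```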

@@ -489,6 +489,7 @@ x-aws-cloudformation:
         Certificates:
           - CertificateArn: "arn:aws:acm:certificate/123abc"
         Protocol: HTTPS
+        Port: 443
 ```
 
 ## Using existing AWS network resources
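
For context on how an `x-aws-cloudformation` overlay like the one above reaches AWS, the commands below sketch the Compose ECS integration workflow this page documents. The context name `myecscontext` is an arbitrary example, and the sketch assumes the ECS integration (shipped with Docker Desktop / compose-cli) is installed and AWS credentials are configured.

```sh
# Create an ECS-backed Docker context and make it the active one.
docker context create ecs myecscontext
docker context use myecscontext

# Deploy the Compose file, including its x-aws-cloudformation overlay.
docker compose up

# Tear the stack down again when finished.
docker compose down
```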

@@ -29,7 +29,7 @@ need this level of resilience, you can work around the problem with a wrapper
 script:
 
 - Use a tool such as [wait-for-it](https://github.com/vishnubob/wait-for-it),
-  [dockerize](https://github.com/jwilder/dockerize), sh-compatible
+  [dockerize](https://github.com/powerman/dockerize), sh-compatible
   [wait-for](https://github.com/Eficode/wait-for), or [RelayAndContainers](https://github.com/jasonsychau/RelayAndContainers) template. These are small
   wrapper scripts which you can include in your application's image to
   poll a given host and port until it's accepting TCP connections.
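
To make the "poll a given host and port" idea concrete, here is a minimal sh-compatible sketch of what such a wrapper does. It is not the actual wait-for-it or dockerize interface; the script name and argument layout are made up, and it assumes `nc` (netcat) is available in the image.

```sh
#!/bin/sh
# Usage (illustrative): ./wait-for-tcp.sh db 5432 -- ./start-app.sh
HOST="$1"
PORT="$2"
shift 2
[ "$1" = "--" ] && shift

# Poll until the TCP port accepts connections, then hand off to the real command.
until nc -z "$HOST" "$PORT" 2>/dev/null; do
  echo "Waiting for $HOST:$PORT..."
  sleep 1
done

exec "$@"
```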

@@ -38,39 +38,16 @@ this in a few different ways.
 #!/bin/bash
 
 # Start the first process
-./my_first_process -D
-status=$?
-if [ $status -ne 0 ]; then
-  echo "Failed to start my_first_process: $status"
-  exit $status
-fi
+./my_first_process &
 
 # Start the second process
-./my_second_process -D
-status=$?
-if [ $status -ne 0 ]; then
-  echo "Failed to start my_second_process: $status"
-  exit $status
-fi
+./my_second_process &
 
-# Naive check runs checks once a minute to see if either of the processes exited.
-# This illustrates part of the heavy lifting you need to do if you want to run
-# more than one service in a container. The container exits with an error
-# if it detects that either of the processes has exited.
-# Otherwise it loops forever, waking up every 60 seconds
-
-while sleep 60; do
-  ps aux |grep my_first_process |grep -q -v grep
-  PROCESS_1_STATUS=$?
-  ps aux |grep my_second_process |grep -q -v grep
-  PROCESS_2_STATUS=$?
-  # If the greps above find anything, they exit with 0 status
-  # If they are not both 0, then something is wrong
-  if [ $PROCESS_1_STATUS -ne 0 -o $PROCESS_2_STATUS -ne 0 ]; then
-    echo "One of the processes has already exited."
-    exit 1
-  fi
-done
+# Wait for any process to exit
+wait -n
+
+# Exit with status of process that exited first
+exit $?
 ```
 
 Next, the Dockerfile:
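
A quick way to see why the rewritten script exits as soon as either service dies: `wait -n` (bash 4.3 or later) returns when the first background job finishes and reports that job's exit status. The snippet below is a stand-alone demo with placeholder `sleep` processes, not part of the documentation being changed.

```bash
#!/bin/bash
# Stand-in "services": one runs for 10 seconds, the other fails after 2.
sleep 10 &
(sleep 2; exit 3) &

# Returns as soon as the first background job exits...
wait -n

# ...and its exit status becomes ours, so the container stops promptly.
status=$?
echo "first job exited with status $status"
exit "$status"
```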

@@ -2,15 +2,25 @@
 description: Image Access Management
 keywords: image, access, management
 title: Image Access Management
+toc_max: 2
 ---
 
 Image Access Management is a new feature that is a part of the Docker Business subscription. This feature allows Organization owners to control which types of images (Docker Official Images, Docker Verified Publisher Images, Community images) their developers can pull from Docker Hub.
 
 For example, a developer, who is part of an organization, building a new containerized application could accidentally use an untrusted, community image as a component of their application. This image could be malicious and pose a security risk to the company. Using Image Access Management, the Organization owner could ensure that the developer can only access trusted content like Docker Official Images, Docker Verified Publisher Images, or the Organizations own images, preventing such a risk.
 
 ## Configure Image Access Management permissions
 
+The following video walks you through the process of configuring Image Access Management permissions.
+
+<iframe width="560" height="315" src="https://www.youtube-nocookie.com/embed/phFp0iqzwRQ" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
+
+<br>
+
+### Detailed instructions
+
+To configure Image Access Management permissions, perform the following steps:
+
 1. Log into your [Docker Hub](https://hub.docker.com) account as an organization administrator.
 2. Select an organization, and navigate to the **Settings** tab on the **Organizations** page and click Org Permissions.

@@ -16,9 +16,9 @@ title: Install Docker Engine on Ubuntu
 toc_max: 4
 ---
 
-> **Scan your images for vulnerabilities**
+> **Docker Desktop for Linux**
 >
-> Using open source components in your container images can introduce vulnerabilities. Run `docker scan` to start securing your images using Snyk. If you have a Docker Pro, Team, or a Business subscription, you can automatically scan images when you push an image to Docker Hub. See [Hub Vulnerability Scanning](../../docker-hub/vulnerability-scanning.md) for more information.
+> Docker Desktop helps you build, share, and run containers easily on Mac and Windows as you do on Linux. Docker handles the complex setup and allows you to focus on writing the code. Thanks to the positive support we received on the [subscription updates](https://www.docker.com/blog/updating-product-subscriptions/){: target="_blank" rel="noopener" class="_" id="dkr_docs_cta"}, we've started working on [Docker Desktop for Linux](https://www.docker.com/blog/accelerating-new-features-in-docker-desktop/){: target="_blank" rel="noopener" class="_" id="dkr_docs_cta"} which is the second-most popular feature request in our public roadmap. If you are interested in early access, sign up for our [Developer Preview program](https://www.docker.com/community/get-involved/developer-preview){: target="_blank" rel="noopener" class="_" id="dkr_docs_cta"}.
 {: .important}
 
 To get started with Docker Engine on Ubuntu, make sure you

@@ -578,6 +578,12 @@ the Docker Engine removes the `/foo` volume but not the `awesome` volume.
 $ docker run --rm -v /foo -v awesome:/bar busybox top
 ```
 
+> **Note**:
+>
+> If another container binds the volumes with
+> `--volumes-from`, the volume definitions are _copied_ and the
+> anonymous volume also stays after the first container is removed.
+
 ### Remove all volumes
 
 To remove all unused volumes and free up space:
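
One hedged way to observe the behavior described in the new note: start a second container that inherits the first container's volumes with `--volumes-from`, then remove the first. The container names below are made up for the example.

```sh
# The --rm container creates an anonymous volume at /foo and a named one, "awesome".
docker run -d --rm --name first -v /foo -v awesome:/bar busybox top

# A second container copies the same volume definitions via --volumes-from.
docker run -d --rm --name second --volumes-from first busybox top

# Stopping "first" auto-removes the container, but the anonymous volume
# survives because "second" still references it.
docker stop first
docker volume ls
```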