diff --git a/.gitignore b/.gitignore
index 366c6d8a5a..010dcf6657 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,5 +6,4 @@ _site/**
.sass-cache/**
CNAME
Gemfile.lock
-_samples/library/**
_kbase/**
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 89d5f282ea..e3bda7862a 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -30,7 +30,7 @@ you give it a try!
### Overall doc improvements
-Most commits will be made against the `master` branch. This include:
+Most commits will be made against the `master` branch. This includes:
- Conceptual and task-based information not specific to new features
- Restructuring / rewriting
diff --git a/Dockerfile b/Dockerfile
index f91e3545d8..07e4dec0e3 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -64,15 +64,6 @@ COPY --from=docs/docker.github.io:v17.12 ${TARGET} ${TARGET}
COPY --from=docs/docker.github.io:v18.03 ${TARGET} ${TARGET}
COPY --from=docs/docker.github.io:v18.09 ${TARGET} ${TARGET}
-# Fetch library samples (documentation from official images on Docker Hub)
-# Only add the files that are needed to build these reference docs, so that
-# these docs are only rebuilt if changes were made to the configuration.
-# @todo find a way to build HTML in this stage, and still have them included in the navigation tree
-FROM builderbase AS library-samples
-COPY ./_scripts/fetch-library-samples.sh ./_scripts/
-COPY ./_samples/boilerplate.txt ./_samples/
-RUN bash ./_scripts/fetch-library-samples.sh
-
# Fetch upstream resources (reference documentation)
# Only add the files that are needed to build these reference docs, so that
# these docs are only rebuilt if changes were made to the configuration.
@@ -86,7 +77,6 @@ RUN bash ./_scripts/fetch-upstream-resources.sh .
# Build the current docs from the checked out branch
FROM builderbase AS current
COPY . .
-COPY --from=library-samples /usr/src/app/md_source/. ./
COPY --from=upstream-resources /usr/src/app/md_source/. ./
# Build the static HTML, now that everything is in place
diff --git a/Jenkinsfile b/Jenkinsfile
index 0f6897f2f0..a3e6e51836 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -110,7 +110,7 @@ pipeline {
post {
unsuccessful {
sh """
- curl -X POST -H 'Content-type: application/json' --data '{"text":"Error in docker.github.io:published build. Please contact the Customer Success Engineering team for help."}' $SLACK
+ curl -X POST -H 'Content-type: application/json' --data '{"text":"Error in Jenkins build. Please contact the Customer Success Engineering team for help."}' $SLACK
"""
}
}
diff --git a/_config.yml b/_config.yml
index 77b4854621..3d6a28ef9f 100644
--- a/_config.yml
+++ b/_config.yml
@@ -111,7 +111,7 @@ defaults:
- scope:
path: "install"
values:
- win_latest_build: "docker-19.03.2"
+ win_latest_build: "docker-19.03.3"
- scope:
path: "datacenter"
values:
@@ -125,7 +125,7 @@ defaults:
dtr_org: "docker"
dtr_repo: "dtr"
ucp_version: "3.2.1"
- dtr_version: "2.7.2"
+ dtr_version: "2.7.3"
# Previous DTR Releases
- scope:
path: "datacenter/dtr/2.6"
@@ -133,7 +133,7 @@ defaults:
hide_from_sitemap: true
dtr_org: "docker"
dtr_repo: "dtr"
- dtr_version: "2.6.9"
+ dtr_version: "2.6.10"
- scope:
path: "datacenter/dtr/2.5"
values:
@@ -178,21 +178,21 @@ defaults:
hide_from_sitemap: true
ucp_org: "docker"
ucp_repo: "ucp"
- ucp_version: "3.1.10"
+ ucp_version: "3.1.11"
- scope:
path: "datacenter/ucp/3.0"
values:
hide_from_sitemap: true
ucp_org: "docker"
ucp_repo: "ucp"
- ucp_version: "3.0.14"
+ ucp_version: "3.0.15"
- scope:
path: "datacenter/ucp/2.2"
values:
hide_from_sitemap: true
ucp_org: "docker"
ucp_repo: "ucp"
- ucp_version: "2.2.21"
+ ucp_version: "2.2.22"
- scope:
path: "datacenter/ucp/2.1"
values:
diff --git a/_data/ddc_offline_files_2.yaml b/_data/ddc_offline_files_2.yaml
index a025a83786..8e05f7748d 100644
--- a/_data/ddc_offline_files_2.yaml
+++ b/_data/ddc_offline_files_2.yaml
@@ -21,6 +21,14 @@
- product: "ucp"
version: "3.1"
tar-files:
+ - description: "3.1.11 Linux"
+ url: https://packages.docker.com/caas/ucp_images_3.1.11.tar.gz
+ - description: "3.1.11 Windows Server 2016 LTSC"
+ url: https://packages.docker.com/caas/ucp_images_win_2016_3.1.11.tar.gz
+ - description: "3.1.11 Windows Server 1803"
+ url: https://packages.docker.com/caas/ucp_images_win_1803_3.1.11.tar.gz
+ - description: "3.1.11 Windows Server 2019 LTSC"
+ url: https://packages.docker.com/caas/ucp_images_win_2019_3.1.11.tar.gz
- description: "3.1.10 Linux"
url: https://packages.docker.com/caas/ucp_images_3.1.10.tar.gz
- description: "3.1.10 Windows Server 2016 LTSC"
@@ -122,6 +130,14 @@
- product: "ucp"
version: "3.0"
tar-files:
+ - description: "3.0.15 Linux"
+ url: https://packages.docker.com/caas/ucp_images_3.0.15.tar.gz
+ - description: "3.0.15 IBM Z"
+ url: https://packages.docker.com/caas/ucp_images_s390x_3.0.15.tar.gz
+ - description: "3.0.15 Windows Server 2016 LTSC"
+ url: https://packages.docker.com/caas/ucp_images_win_2016_3.0.15.tar.gz
+ - description: "3.0.15 Windows Server 1803"
+ url: https://packages.docker.com/caas/ucp_images_win_1803_3.0.15.tar.gz
- description: "3.0.14 Linux"
url: https://packages.docker.com/caas/ucp_images_3.0.14.tar.gz
- description: "3.0.14 IBM Z"
@@ -249,6 +265,12 @@
- product: "ucp"
version: "2.2"
tar-files:
+ - description: "2.2.22 Linux"
+ url: https://packages.docker.com/caas/ucp_images_2.2.22.tar.gz
+ - description: "2.2.22 IBM Z"
+ url: https://packages.docker.com/caas/ucp_images_s390x_2.2.22.tar.gz
+ - description: "2.2.22 Windows"
+ url: https://packages.docker.com/caas/ucp_images_win_2.2.22.tar.gz
- description: "2.2.21 Linux"
url: https://packages.docker.com/caas/ucp_images_2.2.21.tar.gz
- description: "2.2.21 IBM Z"
@@ -372,6 +394,8 @@
- product: "dtr"
version: "2.7"
tar-files:
+ - description: "DTR 2.7.3 Linux x86"
+ url: https://packages.docker.com/caas/dtr_images_2.7.3.tar.gz
- description: "DTR 2.7.2 Linux x86"
url: https://packages.docker.com/caas/dtr_images_2.7.2.tar.gz
- description: "DTR 2.7.1 Linux x86"
@@ -381,6 +405,8 @@
- product: "dtr"
version: "2.6"
tar-files:
+ - description: "DTR 2.6.10 Linux x86"
+ url: https://packages.docker.com/caas/dtr_images_2.6.10.tar.gz
- description: "DTR 2.6.9 Linux x86"
url: https://packages.docker.com/caas/dtr_images_2.6.9.tar.gz
- description: "DTR 2.6.8 Linux x86"
diff --git a/_data/glossary.yaml b/_data/glossary.yaml
index 2ab21dae3a..404890beda 100644
--- a/_data/glossary.yaml
+++ b/_data/glossary.yaml
@@ -218,8 +218,8 @@ overlay storage driver: |
It is supported by the Docker daemon as a storage driver.
parent image: |
An image's **parent image** is the image designated in the `FROM` directive
- in the image's Dockerfile. All subsequent commands are applied to this parent
- image. A Dockerfile with no `FROM` directive has no parent image, and is called
+ in the image's Dockerfile. All subsequent commands are based on this parent
+ image. A Dockerfile with the `FROM scratch` directive has no parent image, and is called
a **base image**.
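+  For a quick illustration (a hedged sketch; the image tag is arbitrary), you
+  can build a child image whose parent is `ubuntu:18.04` by piping a one-line
+  Dockerfile to `docker build`:
+
+  ```shell
+  echo "FROM ubuntu:18.04" | docker build -t parent-demo -
+  ```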
persistent storage: |
Persistent storage or volume storage provides a way for a user to add a persistent layer to the running container's file system. This persistent layer could live on the container host or an external device. The lifecycle of this persistent layer is not connected to the lifecycle of the container, allowing a user to retain state.
diff --git a/_data/toc.yaml b/_data/toc.yaml
index 5363352dbd..7654c98d0c 100644
--- a/_data/toc.yaml
+++ b/_data/toc.yaml
@@ -127,20 +127,18 @@ guides:
title: FAQ
- sectiontitle: Get started
section:
- - sectiontitle: Get started with Docker
+ - sectiontitle: Quickstart
section:
- - title: "Part 1: Orientation"
+ - title: "Part 1: Orientation and setup"
path: /get-started/
- - title: "Part 2: Containers"
+ - title: "Part 2: Containerizing an Application"
path: /get-started/part2/
- - title: "Part 3: Services"
+ - title: "Part 3: Deploying to Kubernetes"
path: /get-started/part3/
- - title: "Part 4: Swarms"
+ - title: "Part 4: Deploying to Swarm"
path: /get-started/part4/
- - title: "Part 5: Stacks"
+ - title: "Part 5: Sharing Images on Docker Hub"
path: /get-started/part5/
- - title: "Part 6: Deploy your app"
- path: /get-started/part6/
- path: /engine/docker-overview/
title: Docker overview
- sectiontitle: Develop with Docker
@@ -1257,9 +1255,6 @@ reference:
samples:
- path: /samples/#tutorial-labs
title: Tutorial labs
-- sectiontitle: Library references
- section:
- - generateTOC: library
- sectiontitle: Sample applications
section:
- path: /samples/
@@ -1282,6 +1277,8 @@ samples:
title: Riak
- path: /engine/examples/running_ssh_service/
title: SSHd
+- path: /samples/#library-references
+ title: Library references
manuals:
- sectiontitle: Docker Enterprise
@@ -1445,9 +1442,9 @@ manuals:
- path: /ee/ucp/authorization/pull-images/
title: Allow users to pull images
- path: /ee/ucp/authorization/ee-standard/
- title: Docker Enterprise Standard use case
+ title: Access control design
- path: /ee/ucp/authorization/ee-advanced/
- title: Docker Enterprise Advanced use case
+ title: Access control design with additional security requirements
- sectiontitle: Access UCP
section:
- path: /ee/ucp/user-access/
@@ -1513,7 +1510,7 @@ manuals:
- title: Specifying a routing mode
path: /ee/ucp/interlock/usage/interlock-vip-mode/
- title: Using routing labels
- path: /ee/ucp/interlock/usage/labels-reference.md/
+ path: /ee/ucp/interlock/usage/labels-reference/
- title: Implementing redirects
path: /ee/ucp/interlock/usage/redirects/
- title: Implementing a service cluster
@@ -1560,7 +1557,7 @@ manuals:
path: /ee/ucp/kubernetes/cluster-ingress/
- title: Install Ingress
path: /ee/ucp/kubernetes/cluster-ingress/install/
- - title: Deploy Simple Application
+ - title: Deploy a Sample Application
path: /ee/ucp/kubernetes/cluster-ingress/ingress/
- title: Deploy a Canary Deployment
path: /ee/ucp/kubernetes/cluster-ingress/canary/
@@ -2601,9 +2598,9 @@ manuals:
- path: /ee/dtr/admin/configure/use-your-own-tls-certificates/
title: Use your own TLS certificates
- path: /ee/dtr/admin/configure/enable-single-sign-on/
- title: Disable persistent cookies
- - path: /ee/dtr/admin/configure/disable-persistent-cookies/
title: Enable single sign-on
+ - path: /ee/dtr/admin/configure/disable-persistent-cookies/
+ title: Disable persistent cookies
- sectiontitle: External storage
section:
- path: /ee/dtr/admin/configure/external-storage/
@@ -3994,6 +3991,10 @@ manuals:
section:
- path: /docker-hub/
title: Quickstart
+ - sectiontitle: Security and Authentication
+ section:
+ - path: /docker-hub/access-tokens/
+ title: Managing Access Tokens
- path: /docker-hub/release-notes/
title: Release notes
- path: /docker-hub/repos/
@@ -4038,6 +4039,8 @@ manuals:
title: Trust Chain
- path: /docker-hub/publish/byol/
title: Bring Your Own License (BYOL)
+ - path: /docker-hub/deactivate-account/
+ title: Deactivate an account or an organization
- sectiontitle: Open-source projects
section:
- sectiontitle: Docker Notary
diff --git a/_includes/cli.md b/_includes/cli.md
index 8f74ccc1e8..358442337c 100644
--- a/_includes/cli.md
+++ b/_includes/cli.md
@@ -45,15 +45,15 @@ your client and daemon API versions.
{% if site.data[include.datafolder][include.datafile].experimentalcli %}
-> This command is experimental.
+> This command is experimental on the Docker client.
+>
+> **It should not be used in production environments.**
>
-> This command is experimental on the Docker client. It should not be used in
-> production environments.
> To enable experimental features in the Docker CLI, edit the
> [config.json](/engine/reference/commandline/cli.md#configuration-files)
-> and set `experimental` to `enabled`.
->
-> {% include experimental.md %}
+> and set `experimental` to `enabled`. See [Experimental features](https://docs.docker.com/engine/reference/commandline/cli/#experimental-features)
+> for more information.
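+>
+> As a minimal sketch, the resulting configuration file (assuming the default
+> location, `~/.docker/config.json`) would contain:
+>
+> ```shell
+> cat ~/.docker/config.json
+> {
+>   "experimental": "enabled"
+> }
+> ```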
+{: .important }
{% endif %}
diff --git a/_includes/generateTOC.html b/_includes/generateTOC.html
deleted file mode 100644
index d4db0fe9eb..0000000000
--- a/_includes/generateTOC.html
+++ /dev/null
@@ -1,3 +0,0 @@
-{% if include.tocToGenerate=="library"%}{% for thisPage in site.samples %}
-
-{{ thisPage.title }}
-{% endfor %}{% endif %}
diff --git a/_includes/global-header.html b/_includes/global-header.html
index b6aaf94384..5907915958 100644
--- a/_includes/global-header.html
+++ b/_includes/global-header.html
@@ -9,29 +9,27 @@
- - What is Docker?
- - Product
+ - Why Docker?
+ - Product
-
Get Docker
- - Docs
- Community
- Create Docker ID
- Sign In
diff --git a/_samples/boilerplate.txt b/_includes/library-samples.md
similarity index 74%
rename from _samples/boilerplate.txt
rename to _includes/library-samples.md
index cab6bdcba4..8cab921509 100644
--- a/_samples/boilerplate.txt
+++ b/_includes/library-samples.md
@@ -3,6 +3,6 @@
> This content is imported from
> [the official Docker Library docs](https://github.com/docker-library/docs/tree/master/{{ page.repo}}/),
> and is provided by the original uploader. You can view the Docker Hub page for this image at
-> [https://hub.docker.com/images/{{ page.repo }}](https://hub.docker.com/images/{{ page.repo }})
+> [https://hub.docker.com/_/{{ page.repo }}](https://hub.docker.com/_/{{ page.repo }})
diff --git a/_layouts/docs.html b/_layouts/docs.html
index ab5bfc179d..8f8c8ad6c3 100755
--- a/_layouts/docs.html
+++ b/_layouts/docs.html
@@ -317,7 +317,6 @@
-
-
diff --git a/get-started/part2.md b/get-started/part2.md
--- a/get-started/part2.md
+++ b/get-started/part2.md
-Here is a list of the basic Docker commands from this page, and some related
-ones if you'd like to explore a bit before moving on.
-
-```shell
-docker build -t friendlyhello . # Create image using this directory's Dockerfile
-docker run -p 4000:80 friendlyhello # Run "friendlyhello" mapping port 4000 to 80
-docker run -d -p 4000:80 friendlyhello # Same thing, but in detached mode
-docker container ls # List all running containers
-docker container ls -a # List all containers, even those not running
-docker container stop # Gracefully stop the specified container
-docker container kill # Force shutdown of the specified container
-docker container rm # Remove specified container from this machine
-docker container rm $(docker container ls -a -q) # Remove all containers
-docker image ls -a # List all images on this machine
-docker image rm # Remove specified image from this machine
-docker image rm $(docker image ls -a -q) # Remove all images from this machine
-docker login # Log in this CLI session using your Docker credentials
-docker tag username/repository:tag # Tag for upload to registry
-docker push username/repository:tag # Upload tagged image to registry
-docker run username/repository:tag # Run image from a registry
-```
+4. Once you're satisfied that your bulletin board container works correctly, delete it:
+
+ ```script
+ docker container rm --force bb
+ ```
+
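+   To confirm that the container is gone, you can list all containers filtered
+   by name (a quick optional check; `bb` is the name we gave the container above):
+
+   ```script
+   docker container ls --all --filter name=bb
+   ```
+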
+## Conclusion
+
+At this point, we've performed a simple containerization of an application and confirmed that our app runs successfully in its container. The next step is to write the Kubernetes YAML that describes how to run and manage these containers on Kubernetes, which we'll study in Part 3 of this tutorial, or to write the stack file that lets us do the same on Docker Swarm, which we discuss in Part 4.
+
+[On to Part 3 >>](part3.md){: class="button outline-btn" style="margin-bottom: 30px; margin-right: 100%"}
+
+## CLI References
+
+Further documentation for all CLI commands used in this article is available here:
+
+ - [docker image *](https://docs.docker.com/engine/reference/commandline/image/)
+ - [docker container *](https://docs.docker.com/engine/reference/commandline/container/)
+ - [Dockerfile reference](https://docs.docker.com/engine/reference/builder/)
\ No newline at end of file
diff --git a/get-started/part3.md b/get-started/part3.md
index 180f066f26..8ded5a29b4 100644
--- a/get-started/part3.md
+++ b/get-started/part3.md
@@ -1,278 +1,137 @@
---
-title: "Get Started, Part 3: Services"
-keywords: services, replicas, scale, ports, compose, compose file, stack, networking
-description: Learn how to define load-balanced and scalable service that runs containers.
+title: "Get Started, Part 3: Deploying to Kubernetes"
+keywords: kubernetes, pods, deployments, kubernetes services
+description: Learn how to describe and deploy a simple application on Kubernetes.
---
{% include_relative nav.html selected="3" %}
## Prerequisites
-- [Install Docker version 1.13 or higher](/engine/installation/index.md).
+- Work through containerizing an application in [Part 2](part2.md).
+- Make sure that Kubernetes is enabled on your Docker Desktop:
+  - **macOS**: click the Docker icon in your menu bar and make sure there's a green light beside 'Kubernetes is Running'.
+  - **Windows**: click the Docker icon in the system tray, navigate to Kubernetes, and make sure there's a green light beside 'Kubernetes is Running'.
-- Get [Docker Compose](/compose/overview.md). On [Docker Desktop for
-Mac](/docker-for-mac/index.md) and [Docker Desktop for
-Windows](/docker-for-windows/index.md) it's pre-installed, so you're good-to-go.
-On Linux systems you need to [install it
-directly](https://github.com/docker/compose/releases). On pre Windows 10 systems
-_without Hyper-V_, use [Docker
-Toolbox](/toolbox/overview.md).
-
-- Read the orientation in [Part 1](index.md).
-
-- Learn how to create containers in [Part 2](part2.md).
-
-- Make sure you have published the `friendlyhello` image you created by
-[pushing it to a registry](/get-started/part2.md#share-your-image). We use that
-shared image here.
-
-- Be sure your image works as a deployed container. Run this command,
-slotting in your info for `username`, `repo`, and `tag`: `docker run -p 4000:80
-username/repo:tag`, then visit `http://localhost:4000/`.
+ If Kubernetes isn't running, follow the instructions in [Part 1](part1.md) of this tutorial to finish setting it up.
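+
+  As an extra check, you can confirm from a terminal that the cluster responds (a hedged sketch; `kubectl` is installed as part of Docker Desktop):
+
+  ```shell
+  kubectl get nodes
+  ```
+
+  A single node, typically named `docker-desktop`, should be listed in the `Ready` state.
+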
## Introduction
-In part 3, we scale our application and enable load-balancing. To do this, we
-must go one level up in the hierarchy of a distributed application: the
-**service**.
+Now that we've demonstrated that the individual components of our application run as stand-alone containers, it's time to arrange for them to be managed by an orchestrator like Kubernetes. Kubernetes provides many tools for scaling, networking, securing and maintaining your containerized applications, above and beyond the abilities of containers themselves.
-- Stack
-- **Services** (you are here)
-- Container (covered in [part 2](part2.md))
+In order to validate that our containerized application works well on Kubernetes, we'll use Docker Desktop's built-in Kubernetes environment right on our development machine to deploy our application, before handing it off to run on a full Kubernetes cluster in production. The Kubernetes environment created by Docker Desktop is _fully featured_, meaning it has all the Kubernetes features your app will enjoy on a real cluster, accessible from the convenience of your development machine.
-## About services
+## Describing Apps Using Kubernetes YAML
-In a distributed application, different pieces of the app are called "services".
-For example, if you imagine a video sharing site, it probably includes a service
-for storing application data in a database, a service for video transcoding in
-the background after a user uploads something, a service for the front-end, and
-so on.
+All containers in Kubernetes are scheduled as _pods_, which are groups of co-located containers that share some resources. Furthermore, in a realistic application we almost never create individual pods; instead, most of our workloads are scheduled as _deployments_, which are scalable groups of pods maintained automatically by Kubernetes. Lastly, all Kubernetes objects can and should be described in manifests called _Kubernetes YAML_ files; these YAML files describe all the components and configurations of your Kubernetes app, and can be used to easily create and destroy your app in any Kubernetes environment.
-Services are really just "containers in production." A service only runs one
-image, but it codifies the way that image runs—what ports it should use,
-how many replicas of the container should run so the service has the capacity it
-needs, and so on. Scaling a service changes the number of container instances
-running that piece of software, assigning more computing resources to the
-service in the process.
+1. You already wrote a very basic Kubernetes YAML file in the first part of this tutorial; let's write a slightly more sophisticated one now, to run and manage our bulletin board. Place the following in a file called `bb.yaml` and save it in the same place you put the other YAML file.
-Luckily it's very easy to define, run, and scale services with the Docker
-platform -- just write a `docker-compose.yml` file.
+ ```yaml
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: bb-demo
+ namespace: default
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ bb: web
+ template:
+ metadata:
+ labels:
+ bb: web
+ spec:
+ containers:
+ - name: bb-site
+ image: bulletinboard:1.0
+ ---
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: bb-entrypoint
+ namespace: default
+ spec:
+ type: NodePort
+ selector:
+ bb: web
+ ports:
+ - port: 8080
+ targetPort: 8080
+ nodePort: 30001
+ ```
-## Your first `docker-compose.yml` file
+ In this Kubernetes YAML file, we have two objects, separated by the `---`:
+ - A `Deployment`, describing a scalable group of identical pods. In this case, you'll get just one `replica`, or copy, of your pod, and that pod (which is described under the `template:` key) has just one container in it, based on your `bulletinboard:1.0` image from the previous step in this tutorial.
+ - A `NodePort` service, which routes traffic from port 30001 on your host to port 8080 inside the pods it targets, allowing you to reach your bulletin board from the network.
-A `docker-compose.yml` file is a YAML file that defines how Docker containers
-should behave in production.
+ Also notice that while Kubernetes YAML can appear long and complicated at first, it almost always follows the same pattern:
+ - The `apiVersion`, which indicates the Kubernetes API that parses this object
+ - The `kind`, indicating what sort of object this is
+ - Some `metadata`, applying things like names to your objects
+ - The `spec`, specifying all the parameters and configurations of your object.
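+
+   If you want to explore any of these fields further, you can ask Kubernetes itself to describe them (a handy optional sketch; this works against any running cluster):
+
+   ```shell
+   kubectl explain deployment.spec
+   ```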
-### `docker-compose.yml`
+## Deploying and Checking Your Application
-Save this file as `docker-compose.yml` wherever you want. Be sure you have
-[pushed the image](/get-started/part2.md#share-your-image) you created in [Part
-2](part2.md) to a registry, and update this `.yml` by replacing
-`username/repo:tag` with your image details.
+1. In a terminal, navigate to where you created `bb.yaml` and deploy your application to Kubernetes:
-```yaml
-version: "3"
-services:
- web:
- # replace username/repo:tag with your name and image details
- image: username/repo:tag
- deploy:
- replicas: 5
- resources:
- limits:
- cpus: "0.1"
- memory: 50M
- restart_policy:
- condition: on-failure
- ports:
- - "4000:80"
- networks:
- - webnet
-networks:
- webnet:
-```
+ ```shell
+ kubectl apply -f bb.yaml
+ ```
-This `docker-compose.yml` file tells Docker to do the following:
+ You should see output that looks like the following, indicating your Kubernetes objects were created successfully:
-- Pull [the image we uploaded in step 2](part2.md) from the registry.
+ ```shell
+ deployment.apps/bb-demo created
+ service/bb-entrypoint created
+ ```
-- Run 5 instances of that image as a service
- called `web`, limiting each one to use, at most, 10% of a single core of
- CPU time (this could also be e.g. "1.5" to mean 1 and half core for each),
- and 50MB of RAM.
+2. Make sure everything worked by listing your deployments:
-- Immediately restart containers if one fails.
+ ```shell
+ kubectl get deployments
+ ```
-- Map port 4000 on the host to `web`'s port 80.
+ If all is well, your deployment should be listed as follows:
-- Instruct `web`'s containers to share port 80 via a load-balanced network
- called `webnet`. (Internally, the containers themselves publish to
- `web`'s port 80 at an ephemeral port.)
+ ```shell
+ NAME      DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
+ bb-demo   1         1         1            1           48s
+ ```
-- Define the `webnet` network with the default settings (which is a
- load-balanced overlay network).
+ This indicates that the one pod you asked for in your YAML is up and running. Do the same check for your services:
+ ```shell
+ kubectl get services
-## Run your new load-balanced app
+ NAME            TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE
+ bb-entrypoint   NodePort    10.106.145.116   <none>        8080:30001/TCP   53s
+ kubernetes      ClusterIP   10.96.0.1        <none>        443/TCP          138d
+ ```
-Before we can use the `docker stack deploy` command we first run:
+ In addition to the default `kubernetes` service, we see our `bb-entrypoint` service, accepting traffic on port 30001/TCP.
-```shell
-docker swarm init
-```
+3. Open a browser and visit your bulletin board at `localhost:30001`; you should see your bulletin board, the same as when we ran it as a stand-alone container in the previous step of this tutorial.
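+
+   If you prefer the command line, the same check works with curl (assuming curl is installed); it should return the bulletin board's HTML:
+
+   ```shell
+   curl localhost:30001
+   ```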
->**Note**: We get into the meaning of that command in [part 4](part4.md).
-> If you don't run `docker swarm init` you get an error that "this node is not a swarm manager."
+4. Once satisfied, tear down your application:
-Now let's run it. You need to give your app a name. Here, it is set to
-`getstartedlab`:
+ ```shell
+ kubectl delete -f bb.yaml
+ ```
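+
+   Optionally, verify the teardown by re-running the earlier checks; `bb-demo` and `bb-entrypoint` should no longer be listed:
+
+   ```shell
+   kubectl get deployments
+   kubectl get services
+   ```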
-```shell
-docker stack deploy -c docker-compose.yml getstartedlab
-```
+## Conclusion
-Our single service stack is running 5 container instances of our deployed image
-on one host. Let's investigate.
+At this point, we have successfully used Docker Desktop to deploy our application to a fully featured Kubernetes environment on our development machine. We haven't done much with Kubernetes yet, but the door is now open: you can begin adding other components to your app and taking advantage of all the features and power of Kubernetes, right on your own machine.
-Get the service ID for the one service in our application:
+In addition to deploying to Kubernetes, we have also described our application as a Kubernetes YAML file. This simple text file contains everything we need to create our application in a running state; we can check it into version control and share it with our colleagues, allowing us to distribute our applications to other clusters (like the testing and production clusters that probably come after our development environments) easily.
-```shell
-docker service ls
-```
+[On to Part 4 >>](part4.md){: class="button outline-btn" style="margin-bottom: 30px; margin-right: 100%"}
-Look for output for the `web` service, prepended with your app name. If you
-named it the same as shown in this example, the name is
-`getstartedlab_web`. The service ID is listed as well, along with the number of
-replicas, image name, and exposed ports.
+## Kubernetes References
-Alternatively, you can run `docker stack services`, followed by the name of
-your stack. The following example command lets you view all services associated with the
-`getstartedlab` stack:
+Further documentation for all new Kubernetes objects used in this article is available here:
-```bash
-docker stack services getstartedlab
-ID NAME MODE REPLICAS IMAGE PORTS
-bqpve1djnk0x getstartedlab_web replicated 5/5 username/repo:tag *:4000->80/tcp
-```
+ - [Kubernetes Pods](https://kubernetes.io/docs/concepts/workloads/pods/pod/)
+ - [Kubernetes Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/)
+ - [Kubernetes Services](https://kubernetes.io/docs/concepts/services-networking/service/)
-A single container running in a service is called a **task**. Tasks are given unique
-IDs that numerically increment, up to the number of `replicas` you defined in
-`docker-compose.yml`. List the tasks for your service:
-
-```bash
-docker service ps getstartedlab_web
-```
-
-Tasks also show up if you just list all the containers on your system, though that
-is not filtered by service:
-
-```bash
-docker container ls -q
-```
-
-You can run `curl -4 http://localhost:4000` several times in a row, or go to that URL in
-your browser and hit refresh a few times.
-
-
-
-Either way, the container ID changes, demonstrating the
-load-balancing; with each request, one of the 5 tasks is chosen, in a
-round-robin fashion, to respond. The container IDs match your output from
-the previous command (`docker container ls -q`).
-
-To view all tasks of a stack, you can run `docker stack ps` followed by your app name, as shown in the following example:
-
-```bash
-docker stack ps getstartedlab
-ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS
-uwiaw67sc0eh getstartedlab_web.1 username/repo:tag docker-desktop Running Running 9 minutes ago
-sk50xbhmcae7 getstartedlab_web.2 username/repo:tag docker-desktop Running Running 9 minutes ago
-c4uuw5i6h02j getstartedlab_web.3 username/repo:tag docker-desktop Running Running 9 minutes ago
-0dyb70ixu25s getstartedlab_web.4 username/repo:tag docker-desktop Running Running 9 minutes ago
-aocrb88ap8b0 getstartedlab_web.5 username/repo:tag docker-desktop Running Running 9 minutes ago
-```
-
-> Running Windows 10?
->
-> Windows 10 PowerShell should already have `curl` available, but if not you can
-> grab a Linux terminal emulator like
-> [Git BASH](https://git-for-windows.github.io/){: target="_blank" class="_"},
-> or download
-> [wget for Windows](http://gnuwin32.sourceforge.net/packages/wget.htm)
-> which is very similar.
-
-> Slow response times?
->
-> Depending on your environment's networking configuration, it may take up to 30
-> seconds for the containers
-> to respond to HTTP requests. This is not indicative of Docker or
-> swarm performance, but rather an unmet Redis dependency that we
-> address later in the tutorial. For now, the visitor counter isn't working
-> for the same reason; we haven't yet added a service to persist data.
-
-
-## Scale the app
-
-You can scale the app by changing the `replicas` value in `docker-compose.yml`,
-saving the change, and re-running the `docker stack deploy` command:
-
-```shell
-docker stack deploy -c docker-compose.yml getstartedlab
-```
-
-Docker performs an in-place update, no need to tear the stack down first or kill
-any containers.
-
-Now, re-run `docker container ls -q` to see the deployed instances reconfigured.
-If you scaled up the replicas, more tasks, and hence, more containers, are
-started.
-
-### Take down the app and the swarm
-
-* Take the app down with `docker stack rm`:
-
- ```shell
- docker stack rm getstartedlab
- ```
-
-* Take down the swarm.
-
- ```
- docker swarm leave --force
- ```
-
-It's as easy as that to stand up and scale your app with Docker. You've taken a
-huge step towards learning how to run containers in production. Up next, you
-learn how to run this app as a bonafide swarm on a cluster of Docker
-machines.
-
-> **Note**: Compose files like this are used to define applications with Docker, and can be uploaded to cloud providers using [Docker
-Cloud](/docker-cloud/), or on any hardware or cloud provider you choose with
-[Docker Enterprise Edition](https://www.docker.com/enterprise-edition).
-
-[On to "Part 4" >>](part4.md){: class="button outline-btn"}
-
-## Recap and cheat sheet (optional)
-
-Here's [a terminal recording of what was covered on this page](https://asciinema.org/a/b5gai4rnflh7r0kie01fx6lip):
-
-
-
-To recap, while typing `docker run` is simple enough, the true implementation
-of a container in production is running it as a service. Services codify a
-container's behavior in a Compose file, and this file can be used to scale,
-limit, and redeploy our app. Changes to the service can be applied in place, as
-it runs, using the same command that launched the service:
-`docker stack deploy`.
-
-Some commands to explore at this stage:
-
-```shell
-docker stack ls # List stacks or apps
-docker stack deploy -c # Run the specified Compose file
-docker service ls # List running services associated with an app
-docker service ps # List tasks associated with an app
-docker inspect # Inspect task or container
-docker container ls -q # List container IDs
-docker stack rm # Tear down an application
-docker swarm leave --force # Take down a single node swarm from the manager
-```
diff --git a/get-started/part4.md b/get-started/part4.md
index 2431f20456..1bfb85ccdf 100644
--- a/get-started/part4.md
+++ b/get-started/part4.md
@@ -1,585 +1,98 @@
---
-title: "Get Started, Part 4: Swarms"
-keywords: swarm, scale, cluster, machine, vm, manager, worker, deploy, ssh, orchestration
-description: Learn how to create clusters of Dockerized machines.
+title: "Get Started, Part 4: Deploying to Swarm"
+keywords: swarm, swarm services, stacks
+description: Learn how to describe and deploy a simple application on Docker Swarm.
---
{% include_relative nav.html selected="4" %}
## Prerequisites
-- [Install Docker version 1.13 or higher](/engine/installation/index.md).
+- Work through containerizing an application in [Part 2](part2.md).
+- Make sure that Swarm is enabled on your Docker Desktop by typing `docker system info`, and looking for a message `Swarm: active` (you might have to scroll up a little).
-- Get [Docker Compose](/compose/overview.md) as described in [Part 3 prerequisites](/get-started/part3.md#prerequisites).
-
-- Get [Docker Machine](/machine/overview.md), which is pre-installed with
-[Docker Desktop for Mac](/docker-for-mac/index.md) and [Docker Desktop for
-Windows](/docker-for-windows/index.md), but on Linux systems you need to
-[install it directly](/machine/install-machine/#installing-machine-directly). On pre Windows 10 systems _without Hyper-V_, as well as Windows 10 Home, use
-[Docker Toolbox](/toolbox/overview.md).
-
-- Read the orientation in [Part 1](index.md).
-
-- Learn how to create containers in [Part 2](part2.md).
-
-- Make sure you have published the `friendlyhello` image you created by
-[pushing it to a registry](/get-started/part2.md#share-your-image). We use that
-shared image here.
-
-- Be sure your image works as a deployed container. Run this command,
-slotting in your info for `username`, `repo`, and `tag`: `docker run -p 80:80
-username/repo:tag`, then visit `http://localhost/`.
-
-- Have a copy of your `docker-compose.yml` from [Part 3](part3.md) handy.
+ If Swarm isn't running, simply type `docker swarm init` at a shell prompt to set it up.
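+
+  To run this check as a one-liner (a small convenience sketch; assumes a Unix-like shell with `grep`):
+
+  ```shell
+  docker system info | grep -i swarm
+  ```
+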
## Introduction
-In [part 3](part3.md), you took an app you wrote in [part 2](part2.md), and
-defined how it should run in production by turning it into a service, scaling it
-up 5x in the process.
+Now that we've demonstrated that the individual components of our application run as stand-alone containers and shown how to deploy it using Kubernetes, let's look at how to arrange for them to be managed by Docker Swarm. Swarm provides many tools for scaling, networking, securing and maintaining your containerized applications, above and beyond the abilities of containers themselves.
-Here in part 4, you deploy this application onto a cluster, running it on
-multiple machines. Multi-container, multi-machine applications are made possible
-by joining multiple machines into a "Dockerized" cluster called a **swarm**.
+In order to validate that our containerized application works well on Swarm, we'll use Docker Desktop's built-in Swarm environment right on our development machine to deploy our application, before handing it off to run on a full Swarm cluster in production. The Swarm environment created by Docker Desktop is _fully featured_, meaning it has all the Swarm features your app will enjoy on a real cluster, accessible from the convenience of your development machine.
-## Understanding Swarm clusters
+## Describing Apps Using Stack Files
-A swarm is a group of machines that are running Docker and joined into
-a cluster. After that has happened, you continue to run the Docker commands
-you're used to, but now they are executed on a cluster by a **swarm manager**.
-The machines in a swarm can be physical or virtual. After joining a swarm, they
-are referred to as **nodes**.
+Swarm never creates individual containers the way we did in the previous step of this tutorial; instead, all Swarm workloads are scheduled as _services_, which are scalable groups of containers with added networking features maintained automatically by Swarm. Furthermore, all Swarm objects can and should be described in manifests called _stack files_; these YAML files describe all the components and configurations of your Swarm app, and can be used to easily create and destroy your app in any Swarm environment.
-Swarm managers can use several strategies to run containers, such as "emptiest
-node" -- which fills the least utilized machines with containers. Or "global",
-which ensures that each machine gets exactly one instance of the specified
-container. You instruct the swarm manager to use these strategies in the Compose
-file, just like the one you have already been using.
+1. Let's write a simple stack file to run and manage our bulletin board. Place the following in a file called `bb-stack.yaml`:
-Swarm managers are the only machines in a swarm that can execute your commands,
-or authorize other machines to join the swarm as **workers**. Workers are just
-there to provide capacity and do not have the authority to tell any other
-machine what it can and cannot do.
+ ```yaml
+ version: '3.7'
-Up until now, you have been using Docker in a single-host mode on your local
-machine. But Docker also can be switched into **swarm mode**, and that's what
-enables the use of swarms. Enabling swarm mode instantly makes the current
-machine a swarm manager. From then on, Docker runs the commands you execute
-on the swarm you're managing, rather than just on the current machine.
+ services:
+ bb-app:
+ image: bulletinboard:1.0
+ ports:
+ - "8000:8080"
+ ```
-## Set up your swarm
-A swarm is made up of multiple nodes, which can be either physical or virtual
-machines. The basic concept is simple enough: run `docker swarm init` to enable
-swarm mode and make your current machine a swarm manager, then run
-`docker swarm join` on other machines to have them join the swarm as workers.
-Choose a tab below to see how this plays out in various contexts. We use VMs
-to quickly create a two-machine cluster and turn it into a swarm.
+ In this Swarm YAML file, we have just one object: a `service`, describing a scalable group of identical containers. In this case, you'll get just one container (the default), and that container will be based on your `bulletinboard:1.0` image from step 2 of this tutorial. We've also asked Swarm to forward all traffic arriving at port 8000 on our development machine to port 8080 inside our bulletin board container.
-### Create a cluster
+ > **Kubernetes Services and Swarm Services are very different!** Despite the similar name, the two orchestrators mean very different things by the term 'service'. In Swarm, a service provides both scheduling _and_ networking facilities, creating containers and providing tools for routing traffic to them. In Kubernetes, scheduling and networking are handled separately: _deployments_ (or other controllers) handle the scheduling of containers as pods, while _services_ are responsible only for adding networking features to those pods.
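+
+   Before deploying, you can ask Compose to parse and validate the stack file (a hedged sketch; `docker-compose` ships with Docker Desktop and understands the same file format):
+
+   ```shell
+   docker-compose -f bb-stack.yaml config
+   ```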
+
+## Deploying and Checking Your Application
-
-
-
-{% capture local-content %}
+1. Deploy your application to Swarm:
-#### VMs on your local machine (Mac, Linux, Windows 7 and 8)
+ ```shell
+ docker stack deploy -c bb-stack.yaml demo
+ ```
-You need a hypervisor that can create virtual machines (VMs), so
-[install Oracle VirtualBox](https://www.virtualbox.org/wiki/Downloads) for your
-machine's OS.
+ If all goes well, Swarm will report creating all your stack objects with no complaints:
-> **Note**: If you are on a Windows system that has Hyper-V installed,
-such as Windows 10, there is no need to install VirtualBox and you should
-use Hyper-V instead. View the instructions for Hyper-V systems by clicking
-the Hyper-V tab above. If you are using
-[Docker Toolbox](/toolbox/overview.md), you should already have
-VirtualBox installed as part of it, so you are good to go.
+ ```shell
+ Creating network demo_default
+ Creating service demo_bb-app
+ ```
-Now, create a couple of VMs using `docker-machine`, using the VirtualBox driver:
+ Notice that in addition to your service, Swarm also creates a Docker network by default to isolate the containers deployed as part of your stack.
-```shell
-docker-machine create --driver virtualbox myvm1
-docker-machine create --driver virtualbox myvm2
-```
+2. Make sure everything worked by listing your service:
-{% endcapture %}
-{{ local-content | markdownify }}
+ ```shell
+ docker service ls
+ ```
-
-
-{% capture localwin-content %}
+ If all has gone well, your service is listed with 1/1 of its replicas created:
-#### VMs on your local machine (Windows 10)
+ ```shell
+ ID             NAME          MODE         REPLICAS   IMAGE               PORTS
+ il7elwunymbs   demo_bb-app   replicated   1/1        bulletinboard:1.0   *:8000->8080/tcp
+ ```
-First, quickly create a virtual switch for your virtual machines (VMs) to share,
-so they can connect to each other.
+ This indicates that the one container you asked for as part of your service is up and running. Also, we see that port 8000 on your development machine is being forwarded to port 8080 in your bulletin board container.
-1. Launch Hyper-V Manager
-2. Click **Virtual Switch Manager** in the right-hand menu
-3. Click **Create Virtual Switch** of type **External**
-4. Give it the name `myswitch`, and check the box to share your host machine's
- active network adapter
+3. Open a browser and visit your bulletin board at `localhost:8000`; you should see your bulletin board, the same as when we ran it as a stand-alone container in Step 2 of this tutorial.
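+
+   You can also inspect the task (the underlying container) that Swarm is running for this service:
+
+   ```shell
+   docker stack ps demo
+   ```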
-Now, create a couple of VMs using our node management tool,
-`docker-machine`:
+4. Once satisfied, tear down your application:
-> **Note**: you need to run the following as administrator or else you don't have the permission to create hyperv VMs!
+ ```shell
+ docker stack rm demo
+ ```
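+
+   To verify, list your stacks again; `demo` should no longer appear:
+
+   ```shell
+   docker stack ls
+   ```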
-```shell
-docker-machine create -d hyperv --hyperv-virtual-switch "myswitch" myvm1
-docker-machine create -d hyperv --hyperv-virtual-switch "myswitch" myvm2
-```
+## Conclusion
-{% endcapture %}
-{{ localwin-content | markdownify }}
-
-
-
+At this point, we have successfully used Docker Desktop to deploy our application to a fully featured Swarm environment on our development machine. We haven't done much with Swarm yet, but the door is now open: you can begin adding other components to your app and taking advantage of all the features and power of Swarm, right on your own machine.
-#### List the VMs and get their IP addresses
+In addition to deploying to Swarm, we have also described our application as a stack file. This simple text file contains everything we need to create our application in a running state; we can check it into version control and share it with our colleagues, allowing us to distribute our applications to other clusters (like the testing and production clusters that probably come after our development environments) easily.
-You now have two VMs created, named `myvm1` and `myvm2`.
+[On to Part 5 >>](part5.md){: class="button outline-btn" style="margin-bottom: 30px; margin-right: 100%"}
-Use this command to list the machines and get their IP addresses.
+## Swarm & CLI References
-> **Note**: you need to run the following as administrator or else you don't get any reasonable output (only "UNKNOWN").
+Further documentation for all new Swarm objects and CLI commands used in this article is available here:
-```shell
-docker-machine ls
-```
+ - [Swarm Services](https://docs.docker.com/engine/swarm/how-swarm-mode-works/services/)
+ - [Swarm Stacks](https://docs.docker.com/engine/swarm/stack-deploy/)
+ - [`docker stack *`](https://docs.docker.com/engine/reference/commandline/stack/)
+ - [`docker service *`](https://docs.docker.com/engine/reference/commandline/service/)
-Here is example output from this command.
-
-```shell
-$ docker-machine ls
-NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS
-myvm1 - virtualbox Running tcp://192.168.99.100:2376 v17.06.2-ce
-myvm2 - virtualbox Running tcp://192.168.99.101:2376 v17.06.2-ce
-```
-
-#### Initialize the swarm and add nodes
-
-The first machine acts as the manager, which executes management commands
-and authenticates workers to join the swarm, and the second is a worker.
-
-You can send commands to your VMs using `docker-machine ssh`. Instruct `myvm1`
-to become a swarm manager with `docker swarm init` and look for output like
-this:
-
-```shell
-$ docker-machine ssh myvm1 "docker swarm init --advertise-addr "
-Swarm initialized: current node is now a manager.
-
-To add a worker to this swarm, run the following command:
-
- docker swarm join \
- --token \
- :
-
-To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.
-```
-
-> Ports 2377 and 2376
->
-> Always run `docker swarm init` and `docker swarm join` with port 2377
-> (the swarm management port), or no port at all and let it take the default.
->
-> The machine IP addresses returned by `docker-machine ls` include port 2376,
-> which is the Docker daemon port. Do not use this port or
-> [you may experience errors](https://forums.docker.com/t/docker-swarm-join-with-virtualbox-connection-error-13-bad-certificate/31392/2){: target="_blank" class="_"}.
-
-> Having trouble using SSH? Try the --native-ssh flag
->
-> Docker Machine has [the option to let you use your own system's SSH](/machine/reference/ssh/#different-types-of-ssh), if
-> for some reason you're having trouble sending commands to your Swarm manager. Just specify the
-> `--native-ssh` flag when invoking the `ssh` command:
->
-> ```
-> docker-machine --native-ssh ssh myvm1 ...
-> ```
-
-As you can see, the response to `docker swarm init` contains a pre-configured
-`docker swarm join` command for you to run on any nodes you want to add. Copy
-this command, and send it to `myvm2` via `docker-machine ssh` to have `myvm2`
-join your new swarm as a worker:
-
-```shell
-$ docker-machine ssh myvm2 "docker swarm join \
---token \
-:2377"
-
-This node joined a swarm as a worker.
-```
-
-Congratulations, you have created your first swarm!
-
-Run `docker node ls` on the manager to view the nodes in this swarm:
-
-```shell
-$ docker-machine ssh myvm1 "docker node ls"
-ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS
-brtu9urxwfd5j0zrmkubhpkbd myvm2 Ready Active
-rihwohkh3ph38fhillhhb84sk * myvm1 Ready Active Leader
-```
-
-> Leaving a swarm
->
-> If you want to start over, you can run `docker swarm leave` from each node.
-
-## Deploy your app on the swarm cluster
-
-The hard part is over. Now you just repeat the process you used in [part
-3](part3.md) to deploy on your new swarm. Just remember that only swarm managers
-like `myvm1` execute Docker commands; workers are just for capacity.
-
-### Configure a `docker-machine` shell to the swarm manager
-
-So far, you've been wrapping Docker commands in `docker-machine ssh` to talk to
-the VMs. Another option is to run `docker-machine env ` to get
-and run a command that configures your current shell to talk to the Docker
-daemon on the VM. This method works better for the next step because it allows
-you to use your local `docker-compose.yml` file to deploy the app
-"remotely" without having to copy it anywhere.
-
-Type `docker-machine env myvm1`, then copy-paste and run the command provided as
-the last line of the output to configure your shell to talk to `myvm1`, the
-swarm manager.
-
-The commands to configure your shell differ depending on whether you are Mac,
-Linux, or Windows, so examples of each are shown on the tabs below.
-
-
-
-
- {% capture mac-linux-machine-content %}
-
-#### Docker machine shell environment on Mac or Linux
-
-Run `docker-machine env myvm1` to get the command to configure your shell to
-talk to `myvm1`.
-
-```shell
-$ docker-machine env myvm1
-export DOCKER_TLS_VERIFY="1"
-export DOCKER_HOST="tcp://192.168.99.100:2376"
-export DOCKER_CERT_PATH="/Users/sam/.docker/machine/machines/myvm1"
-export DOCKER_MACHINE_NAME="myvm1"
-# Run this command to configure your shell:
-# eval $(docker-machine env myvm1)
-```
-
-Run the given command to configure your shell to talk to `myvm1`.
-
-```shell
-eval $(docker-machine env myvm1)
-```
-
-Run `docker-machine ls` to verify that `myvm1` is now the active machine, as
-indicated by the asterisk next to it.
-
-```shell
-$ docker-machine ls
-NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS
-myvm1 * virtualbox Running tcp://192.168.99.100:2376 v17.06.2-ce
-myvm2 - virtualbox Running tcp://192.168.99.101:2376 v17.06.2-ce
-```
-
-{% endcapture %}
-{{ mac-linux-machine-content | markdownify }}
-
-
-
-{% capture win-machine-content %}
-
-#### Docker machine shell environment on Windows
-
-Run `docker-machine env myvm1` to get the command to configure your shell to
-talk to `myvm1`.
-
-```shell
-PS C:\Users\sam\sandbox\get-started> docker-machine env myvm1
-$Env:DOCKER_TLS_VERIFY = "1"
-$Env:DOCKER_HOST = "tcp://192.168.203.207:2376"
-$Env:DOCKER_CERT_PATH = "C:\Users\sam\.docker\machine\machines\myvm1"
-$Env:DOCKER_MACHINE_NAME = "myvm1"
-$Env:COMPOSE_CONVERT_WINDOWS_PATHS = "true"
-# Run this command to configure your shell:
-# & "C:\Program Files\Docker\Docker\Resources\bin\docker-machine.exe" env myvm1 | Invoke-Expression
-```
-
-Run the given command to configure your shell to talk to `myvm1`.
-
-```shell
-& "C:\Program Files\Docker\Docker\Resources\bin\docker-machine.exe" env myvm1 | Invoke-Expression
-```
-
-Run `docker-machine ls` to verify that `myvm1` is the active machine as indicated by the asterisk next to it.
-
-```shell
-PS C:PATH> docker-machine ls
-NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS
-myvm1 * hyperv Running tcp://192.168.203.207:2376 v17.06.2-ce
-myvm2 - hyperv Running tcp://192.168.200.181:2376 v17.06.2-ce
-```
-
- {% endcapture %}
- {{ win-machine-content | markdownify }}
-
-
-
-
-### Deploy the app on the swarm manager
-
-Now that you have `myvm1`, you can use its powers as a swarm manager to
-deploy your app by using the same `docker stack deploy` command you used in part
-3 to `myvm1`, and your local copy of `docker-compose.yml`. This command may take a few seconds
-to complete and the deployment takes some time to be available. Use the
-`docker service ps ` command on a swarm manager to verify that
-all services have been redeployed.
-
-You are connected to `myvm1` by means of the `docker-machine` shell
-configuration, and you still have access to the files on your local host. Make
-sure you are in the same directory as before, which includes the
-[`docker-compose.yml` file you created in part
-3](/get-started/part3/#docker-composeyml).
-
-Just like before, run the following command to deploy the app on `myvm1`.
-
-```bash
-docker stack deploy -c docker-compose.yml getstartedlab
-```
-
-And that's it, the app is deployed on a swarm cluster!
-
-> **Note**: If your image is stored on a private registry instead of Docker Hub,
-> you need to be logged in using `docker login ` and then you
-> need to add the `--with-registry-auth` flag to the above command. For example:
->
-> ```bash
-> docker login registry.example.com
->
-> docker stack deploy --with-registry-auth -c docker-compose.yml getstartedlab
-> ```
->
-> This passes the login token from your local client to the swarm nodes where the
-> service is deployed, using the encrypted WAL logs. With this information, the
-> nodes are able to log into the registry and pull the image.
->
-
-Now you can use the same [docker commands you used in part
-3](/get-started/part3.md#run-your-new-load-balanced-app). Only this time notice
-that the services (and associated containers) have been distributed between
-both `myvm1` and `myvm2`.
-
-```bash
-$ docker stack ps getstartedlab
-
-ID NAME IMAGE NODE DESIRED STATE
-jq2g3qp8nzwx getstartedlab_web.1 gordon/get-started:part2 myvm1 Running
-88wgshobzoxl getstartedlab_web.2 gordon/get-started:part2 myvm2 Running
-vbb1qbkb0o2z getstartedlab_web.3 gordon/get-started:part2 myvm2 Running
-ghii74p9budx getstartedlab_web.4 gordon/get-started:part2 myvm1 Running
-0prmarhavs87 getstartedlab_web.5 gordon/get-started:part2 myvm2 Running
-```
-
-> Connecting to VMs with `docker-machine env` and `docker-machine ssh`
->
-> * To set your shell to talk to a different machine like `myvm2`, simply re-run
-`docker-machine env` in the same or a different shell, then run the given
-command to point to `myvm2`. This is always specific to the current shell. If
-you change to an unconfigured shell or open a new one, you need to re-run the
-commands. Use `docker-machine ls` to list machines, see what state they are in,
-get IP addresses, and find out which one, if any, you are connected to. To learn
-more, see the [Docker Machine getting started topics](/machine/get-started.md#create-a-machine).
->
-> * Alternatively, you can wrap Docker commands in the form of
-`docker-machine ssh ""`, which logs directly into
-the VM but doesn't give you immediate access to files on your local host.
->
-> * On Mac and Linux, you can use `docker-machine scp :~`
-to copy files across machines, but Windows users need a Linux terminal emulator
-like [Git Bash](https://git-for-windows.github.io/){: target="_blank" class="_"} for this to work.
->
-> This tutorial demos both `docker-machine ssh` and
-`docker-machine env`, since these are available on all platforms via the `docker-machine` CLI.
-
-### Accessing your cluster
-
-You can access your app from the IP address of **either** `myvm1` or `myvm2`.
-
-The network you created is shared between them and load-balancing. Run
-`docker-machine ls` to get your VMs' IP addresses and visit either of them on a
-browser on port 4000, hitting refresh (or just `curl` them).
-
-
-
-There are five possible container IDs all cycling by randomly, demonstrating
-the load-balancing.
-
-The reason both IP addresses work is that nodes in a swarm participate in an
-ingress **routing mesh**. This ensures that a service deployed at a certain port
-within your swarm always has that port reserved to itself, no matter what node
-is actually running the container. Here's a diagram of how a routing mesh for a
-service called `my-web` published at port `8080` on a three-node swarm would
-look:
-
-
-
-> Having connectivity trouble?
->
-> Keep in mind that to use the ingress network in the swarm,
-> you need to have the following ports open between the swarm nodes
-> before you enable swarm mode:
->
-> - Port 7946 TCP/UDP for container network discovery.
-> - Port 4789 UDP for the container ingress network.
->
-> Double check what you have in the ports section under your web
-> service and make sure the ip addresses you enter in your browser
-> or curl reflects that
-
-## Iterating and scaling your app
-
-From here you can do everything you learned about in parts 2 and 3.
-
-Scale the app by changing the `docker-compose.yml` file.
-
-Change the app behavior by editing code, then rebuild, and push the new image.
-(To do this, follow the same steps you took earlier to [build the
-app](part2.md#build-the-app) and [publish the
-image](part2.md#publish-the-image)).
-
-In either case, simply run `docker stack deploy` again to deploy these changes.
-
-You can join any machine, physical or virtual, to this swarm, using the
-same `docker swarm join` command you used on `myvm2`, and capacity is added
-to your cluster. Just run `docker stack deploy` afterwards, and your app can
-take advantage of the new resources.
-
-## Cleanup and reboot
-
-### Stacks and swarms
-
-You can tear down the stack with `docker stack rm`. For example:
-
-```
-docker stack rm getstartedlab
-```
-
-> Keep the swarm or remove it?
->
-> At some point later, you can remove this swarm if you want to with
-> `docker-machine ssh myvm2 "docker swarm leave"` on the worker
-> and `docker-machine ssh myvm1 "docker swarm leave --force"` on the
-> manager, but _you need this swarm for part 5, so keep it
-> around for now_.
-
-### Unsetting docker-machine shell variable settings
-
-You can unset the `docker-machine` environment variables in your current shell
-with the given command.
-
- On **Mac or Linux** the command is:
-
- ```shell
- eval $(docker-machine env -u)
- ```
-
- On **Windows** the command is:
-
- ```shell
- & "C:\Program Files\Docker\Docker\Resources\bin\docker-machine.exe" env -u | Invoke-Expression
- ```
-
-This disconnects the shell from `docker-machine` created virtual machines,
-and allows you to continue working in the same shell, now using native `docker`
-commands (for example, on Docker Desktop for Mac or Docker Desktop for Windows). To learn more,
-see the [Machine topic on unsetting environment variables](/machine/get-started/#unset-environment-variables-in-the-current-shell).
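-
-A quick way to confirm the shell is disconnected (assuming a POSIX shell):
-
-```shell
-env | grep DOCKER_   # prints nothing once the variables are unset
-docker ps            # talks to your local Docker engine again
-```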
-
-### Restarting Docker machines
-
-If you shut down your local host, Docker machines stop running. You can check the status of machines by running `docker-machine ls`.
-
-```
-$ docker-machine ls
-NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS
-myvm1 - virtualbox Stopped Unknown
-myvm2 - virtualbox Stopped Unknown
-```
-
-To restart a machine that's stopped, run:
-
-```
-docker-machine start <machine-name>
-```
-
-For example:
-
-```
-$ docker-machine start myvm1
-Starting "myvm1"...
-(myvm1) Check network to re-create if needed...
-(myvm1) Waiting for an IP...
-Machine "myvm1" was started.
-Waiting for SSH to be available...
-Detecting the provisioner...
-Started machines may have new IP addresses. You may need to re-run the `docker-machine env` command.
-
-$ docker-machine start myvm2
-Starting "myvm2"...
-(myvm2) Check network to re-create if needed...
-(myvm2) Waiting for an IP...
-Machine "myvm2" was started.
-Waiting for SSH to be available...
-Detecting the provisioner...
-Started machines may have new IP addresses. You may need to re-run the `docker-machine env` command.
-```
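-
-Because the VMs may come back with new IPs, reconnecting your shell is usually
-just a matter of re-running the env command (Mac/Linux form shown):
-
-```shell
-docker-machine env myvm1            # prints the refreshed connection settings
-eval $(docker-machine env myvm1)    # reconnects this shell to myvm1
-```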
-
-[On to Part 5 >>](part5.md){: class="button outline-btn"}
-
-## Recap and cheat sheet (optional)
-
-Here's [a terminal recording of what was covered on this
-page](https://asciinema.org/a/113837):
-
-
-
-In part 4 you learned what a swarm is, how nodes in swarms can be managers or
-workers, created a swarm, and deployed an application on it. You saw that the
-core Docker commands didn't change from part 3; they just had to be targeted to
-run on a swarm manager. You also saw the power of Docker's networking in action,
-which kept load-balancing requests across containers, even though they were
-running on different machines. Finally, you learned how to iterate and scale
-your app on a cluster.
-
-Here are some commands you might like to run to interact with your swarm and your VMs a bit:
-
-```shell
-docker-machine create --driver virtualbox myvm1 # Create a VM (Mac, Win7, Linux)
-docker-machine create -d hyperv --hyperv-virtual-switch "myswitch" myvm1 # Win10
-docker-machine env myvm1 # View basic information about your node
-docker-machine ssh myvm1 "docker node ls" # List the nodes in your swarm
-docker-machine ssh myvm1 "docker node inspect " # Inspect a node
-docker-machine ssh myvm1 "docker swarm join-token -q worker" # View join token
-docker-machine ssh myvm1 # Open an SSH session with the VM; type "exit" to end
-docker node ls # View nodes in swarm (while logged on to manager)
-docker-machine ssh myvm2 "docker swarm leave" # Make the worker leave the swarm
-docker-machine ssh myvm1 "docker swarm leave -f" # Make master leave, kill swarm
-docker-machine ls # list VMs, asterisk shows which VM this shell is talking to
-docker-machine start myvm1 # Start a VM that is currently not running
-docker-machine env myvm1 # show environment variables and command for myvm1
-eval $(docker-machine env myvm1) # Mac command to connect shell to myvm1
-& "C:\Program Files\Docker\Docker\Resources\bin\docker-machine.exe" env myvm1 | Invoke-Expression # Windows command to connect shell to myvm1
-docker stack deploy -c <file> <app>  # Deploy an app; command shell must be set to talk to manager (myvm1), uses local Compose file
-docker-machine scp docker-compose.yml myvm1:~ # Copy file to node's home dir (only required if you use ssh to connect to manager and deploy the app)
-docker-machine ssh myvm1 "docker stack deploy -c " # Deploy an app using ssh (you must have first copied the Compose file to myvm1)
-eval $(docker-machine env -u) # Disconnect shell from VMs, use native docker
-docker-machine stop $(docker-machine ls -q) # Stop all running VMs
-docker-machine rm $(docker-machine ls -q) # Delete all VMs and their disk images
-```
diff --git a/get-started/part5.md b/get-started/part5.md
index 0b0de8f010..fb2b464d7f 100644
--- a/get-started/part5.md
+++ b/get-started/part5.md
@@ -1,302 +1,61 @@
---
-title: "Get Started, Part 5: Stacks"
-keywords: stack, data, persist, dependencies, redis, storage, volume, port
-description: Learn how to create a multi-container application that uses all the machines in a cluster.
+title: "Get Started, Part 5: Sharing Images on Docker Hub"
+keywords: docker hub, push, images
+description: Learn how to share images on Docker Hub.
---
{% include_relative nav.html selected="5" %}
## Prerequisites
-- [Install Docker version 1.13 or higher](/engine/installation/).
-- Get [Docker Compose](/compose/overview.md) as described in [Part 3 prerequisites](/get-started/part3.md#prerequisites).
-- Get [Docker Machine](/machine/overview.md) as described in [Part 4 prerequisites](/get-started/part4.md#prerequisites).
-- Read the orientation in [Part 1](index.md).
-- Learn how to create containers in [Part 2](part2.md).
-
-- Make sure you have published the `friendlyhello` image you created by
-[pushing it to a registry](/get-started/part2.md#share-your-image). We
-use that shared image here.
-
-- Be sure your image works as a deployed container. Run this command,
-slotting in your info for `username`, `repo`, and `tag`: `docker run -p 80:80
-username/repo:tag`, then visit `http://localhost/`.
-
-- Have a copy of your `docker-compose.yml` from [Part 3](part3.md) handy.
-
-- Make sure that the machines you set up in [part 4](part4.md) are running
-and ready. Run `docker-machine ls` to verify this. If the machines are
-stopped, run `docker-machine start myvm1` to boot the manager, followed
-by `docker-machine start myvm2` to boot the worker.
-
-- Have the swarm you created in [part 4](part4.md) running and ready. Run
-`docker-machine ssh myvm1 "docker node ls"` to verify this. If the swarm is up,
-both nodes report a `ready` status. If not, reinitialize the swarm and join
-the worker as described in [Set up your
-swarm](/get-started/part4.md#set-up-your-swarm).
+- Work through containerizing an application in [Part 2](part2.md).
## Introduction
-In [part 4](part4.md), you learned how to set up a swarm, which is a cluster of
-machines running Docker, and deployed an application to it, with containers
-running in concert on multiple machines.
+At this point, you've built a containerized application in [Part 2](part2.md), and potentially run it on Kubernetes in [Part 3](part3.md) or Swarm in [Part 4](part4.md), all on your local development machine thanks to Docker Desktop. The final step in developing a containerized application is to share your images on a registry like [Docker Hub](https://hub.docker.com/), so they can be easily downloaded and run on any destination cluster.
-Here in part 5, you reach the top of the hierarchy of distributed
-applications: the **stack**. A stack is a group of interrelated services that
-share dependencies, and can be orchestrated and scaled together. A single stack
-is capable of defining and coordinating the functionality of an entire
-application (though very complex applications may want to use multiple stacks).
+## Setting Up Your Docker Hub Account
-Some good news is, you have technically been working with stacks since part 3,
-when you created a Compose file and used `docker stack deploy`. But that was a
-single service stack running on a single host, which is not usually what takes
-place in production. Here, you can take what you've learned, make
-multiple services relate to each other, and run them on multiple machines.
+If you don't yet have a Docker ID, follow these steps to set one up; this will allow you to share images on Docker Hub.
-You're doing great; this is the home stretch!
+1. Visit the Docker Hub sign up page, [https://hub.docker.com/signup](https://hub.docker.com/signup).
-## Add a new service and redeploy
+2. Fill out the form and submit to create your Docker ID.
-It's easy to add services to our `docker-compose.yml` file. First, let's add
-a free visualizer service that lets us look at how our swarm is scheduling
-containers.
+3. Click on the Docker icon in your toolbar or system tray, and click **Sign In / Create Docker ID**. Fill in your new Docker ID and password. If everything worked, your Docker ID will appear in the Docker Desktop dropdown in place of the 'Sign In' option you just used.
-1. Open up `docker-compose.yml` in an editor and replace its contents
-with the following. Be sure to replace `username/repo:tag` with your image details.
+ > You can do the same thing from the command line by typing `docker login`.
- ```yaml
- version: "3"
- services:
- web:
- # replace username/repo:tag with your name and image details
- image: username/repo:tag
- deploy:
- replicas: 5
- restart_policy:
- condition: on-failure
- resources:
- limits:
- cpus: "0.1"
- memory: 50M
- ports:
- - "80:80"
- networks:
- - webnet
- visualizer:
- image: dockersamples/visualizer:stable
- ports:
- - "8080:8080"
- volumes:
- - "/var/run/docker.sock:/var/run/docker.sock"
- deploy:
- placement:
- constraints: [node.role == manager]
- networks:
- - webnet
- networks:
- webnet:
- ```
+## Creating and Pushing to a Docker Hub Repository
- The only thing new here is the peer service to `web`, named `visualizer`.
- It introduces two new keys: a `volumes` key, giving the visualizer
- access to the host's socket file for Docker, and a `placement` key, ensuring
- that this service only ever runs on a swarm manager -- never a worker.
- That's because this container, built from [an open source project created by
- Docker](https://github.com/ManoMarks/docker-swarm-visualizer), displays
- Docker services running on a swarm in a diagram.
+At this point, you've set up your Docker Hub account and connected it to your Docker Desktop. Now let's create our first repository, and share our bulletin board app there.
- We talk more about placement constraints and volumes in a moment.
+1. Click on the Docker icon in your menu bar, and navigate to **Repositories -> Create...**. You'll be taken to a Docker Hub page to create a new repository.
-2. Make sure your shell is configured to talk to `myvm1` (full examples are [here](part4.md#configure-a-docker-machine-shell-to-the-swarm-manager)).
+2. Fill out the Repository Name as `bulletinboard`. Leave all the other options alone for now, and click **Create** at the bottom.
- * Run `docker-machine ls` to list machines and make sure you are connected to `myvm1`, as indicated by an asterisk next to it.
- * If needed, re-run `docker-machine env myvm1`, then run the given command to configure the shell.
-
- On **Mac or Linux** the command is:
-
- ```shell
- eval $(docker-machine env myvm1)
- ```
-
- On **Windows** the command is:
-
- ```shell
- & "C:\Program Files\Docker\Docker\Resources\bin\docker-machine.exe" env myvm1 | Invoke-Expression
- ```
-
-3. Re-run the `docker stack deploy` command on the manager, and
-whatever services need updating are updated:
+3. Now we're ready to share our image on Docker Hub, but there's one thing we must do first: images must be *namespaced correctly* to share on Docker Hub. Specifically, images must be named like `<Docker ID>/<Repository Name>:<tag>`. We can relabel our `bulletinboard:1.0` image like this (of course, please replace `gordon` with your Docker ID):
```shell
- $ docker stack deploy -c docker-compose.yml getstartedlab
- Updating service getstartedlab_web (id: angi1bf5e4to03qu9f93trnxm)
- Creating service getstartedlab_visualizer (id: l9mnwkeq2jiononb5ihz9u7a4)
+ docker image tag bulletinboard:1.0 gordon/bulletinboard:1.0
```
-4. Take a look at the visualizer.
-
- You saw in the Compose file that `visualizer` runs on port 8080. Get the
- IP address of one of your nodes by running `docker-machine ls`. Go
- to either IP address at port 8080 and you can see the visualizer running:
-
- 
-
- The single copy of `visualizer` is running on the manager as you expect, and
- the 5 instances of `web` are spread out across the swarm. You can
- corroborate this visualization by running `docker stack ps <stack>`:
+4. Finally, push your image to Docker Hub:
```shell
- docker stack ps getstartedlab
+ docker image push gordon/bulletinboard:1.0
```
- The visualizer is a standalone service that can run in any app
- that includes it in the stack. It doesn't depend on anything else.
- Now let's create a service that *does* have a dependency: the Redis
- service that provides a visitor counter.
+ Visit your repository in Docker Hub, and you'll see your new image there. Remember, Docker Hub repositories are public by default.
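+
+   To prove the round trip works, you can remove your local copy and run the image
+   again; Docker pulls it back from Docker Hub automatically (a sketch: the port
+   mapping assumes the bulletin board app from Part 2, and `gordon` is again your
+   Docker ID):
+
+   ```shell
+   docker image rm gordon/bulletinboard:1.0
+   docker container run --publish 8000:8080 --detach --name bb gordon/bulletinboard:1.0
+   ```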
-## Persist the data
+ > **Having trouble pushing?** Remember, you must be signed in to Docker Hub through Docker Desktop or the command line, and you must also name your images correctly, per the above steps. If the push seemed to work but you don't see it in Docker Hub, refresh your browser after a couple of minutes and check again.
-Let's go through the same workflow once more to add a Redis database for storing
-app data.
+## Conclusion
-1. Save this new `docker-compose.yml` file, which finally adds a
-Redis service. Be sure to replace `username/repo:tag` with your image details.
+Now that your image is available on Docker Hub, you can run it anywhere: if you use it on a new cluster that doesn't have it yet, Docker automatically tries to download it from Docker Hub. By moving images around this way, we no longer need to install any dependencies except Docker and our orchestrator on the machines that run our software; the dependencies of our containerized applications are completely encapsulated and isolated within our images, which we can share via Docker Hub as shown above.
- ```yaml
- version: "3"
- services:
- web:
- # replace username/repo:tag with your name and image details
- image: username/repo:tag
- deploy:
- replicas: 5
- restart_policy:
- condition: on-failure
- resources:
- limits:
- cpus: "0.1"
- memory: 50M
- ports:
- - "80:80"
- networks:
- - webnet
- visualizer:
- image: dockersamples/visualizer:stable
- ports:
- - "8080:8080"
- volumes:
- - "/var/run/docker.sock:/var/run/docker.sock"
- deploy:
- placement:
- constraints: [node.role == manager]
- networks:
- - webnet
- redis:
- image: redis
- ports:
- - "6379:6379"
- volumes:
- - "/home/docker/data:/data"
- deploy:
- placement:
- constraints: [node.role == manager]
- command: redis-server --appendonly yes
- networks:
- - webnet
- networks:
- webnet:
- ```
-
- Redis has an official image in the Docker library and has been granted the
- short `image` name of just `redis`, so no `username/repo` notation here. The
- Redis port, 6379, has been pre-configured by Redis to be exposed from the
- container to the host, and here in our Compose file we expose it from the
- host to the world, so you can actually enter the IP for any of your nodes
- into Redis Desktop Manager and manage this Redis instance, if you so choose.
-
- Most importantly, there are a couple of things in the `redis` specification
- that make data persist between deployments of this stack:
-
- - `redis` always runs on the manager, so it's always using the
- same filesystem.
- - `redis` accesses an arbitrary directory in the host's file system
- as `/data` inside the container, which is where Redis stores data.
-
- Together, this is creating a "source of truth" in your host's physical
- filesystem for the Redis data. Without this, Redis would store its data in
- `/data` inside the container's filesystem, which would get wiped out if that
- container were ever redeployed.
-
- This source of truth has two components:
-
- - The placement constraint you put on the Redis service, ensuring that it
- always uses the same host.
- - The volume you created that lets the container access `./data` (on the host) as `/data` (inside the Redis container). While containers come and go, the files stored in `./data` on the specified host persist, enabling continuity.
-
- You are ready to deploy your new Redis-using stack.
-
-2. Create a `./data` directory on the manager:
-
- ```shell
- docker-machine ssh myvm1 "mkdir ./data"
- ```
-
-3. Make sure your shell is configured to talk to `myvm1` (full examples are [here](part4.md#configure-a-docker-machine-shell-to-the-swarm-manager)).
-
- * Run `docker-machine ls` to list machines and make sure you are connected to `myvm1`, as indicated by an asterisk next to it.
-
- * If needed, re-run `docker-machine env myvm1`, then run the given command to configure the shell.
-
- On **Mac or Linux** the command is:
-
- ```shell
- eval $(docker-machine env myvm1)
- ```
-
- On **Windows** the command is:
-
- ```shell
- & "C:\Program Files\Docker\Docker\Resources\bin\docker-machine.exe" env myvm1 | Invoke-Expression
- ```
-
-4. Run `docker stack deploy` one more time.
-
- ```shell
- $ docker stack deploy -c docker-compose.yml getstartedlab
- ```
-
-5. Run `docker service ls` to verify that the three services are running as expected.
-
- ```shell
- $ docker service ls
- ID NAME MODE REPLICAS IMAGE PORTS
- x7uij6xb4foj getstartedlab_redis replicated 1/1 redis:latest *:6379->6379/tcp
- n5rvhm52ykq7 getstartedlab_visualizer replicated 1/1 dockersamples/visualizer:stable *:8080->8080/tcp
- mifd433bti1d getstartedlab_web replicated 5/5 gordon/getstarted:latest *:80->80/tcp
-
- ```
-
-6. Check the web page at one of your nodes, such as `http://192.168.99.101`, and take a look at the results of the visitor counter, which is now live and storing information on Redis.
-
- 
-
- Also, check the visualizer at port 8080 on either node's IP address, and notice the `redis` service running along with the `web` and `visualizer` services.
-
- 
+Another thing to keep in mind: at the moment, we've only pushed your image to Docker Hub; what about your Dockerfiles, Kubernetes YAML, and stack files? A crucial best practice is to keep these in version control, perhaps alongside the source code for your application. Add a link or note in your Docker Hub repository description indicating where these files can be found, preserving a record not only of how your image was built, but of how it's meant to be run as a full application.
-[On to Part 6 >>](part6.md){: class="button outline-btn"}
-
-## Recap (optional)
-
-Here's [a terminal recording of what was covered on this page](https://asciinema.org/a/113840):
-
-
-
-You learned that stacks are inter-related services all running in concert, and
-that -- surprise! -- you've been using stacks since part three of this tutorial.
-You learned that to add more services to your stack, you insert them in your
-Compose file. Finally, you learned that by using a combination of placement
-constraints and volumes you can create a permanent home for persisting data, so
-that your app's data survives when the container is torn down and redeployed.
diff --git a/get-started/part6.md b/get-started/part6.md
deleted file mode 100644
index bf8e8c596f..0000000000
--- a/get-started/part6.md
+++ /dev/null
@@ -1,217 +0,0 @@
----
-title: "Get Started, Part 6: Deploy your app"
-keywords: deploy, production, datacenter, cloud, aws, azure, provider, admin, enterprise
-description: Deploy your app to production using Docker Engine - Community or EE.
----
-{% include_relative nav.html selected="6" %}
-
-## Prerequisites
-
-- [Install Docker](/install/index.md).
-- Get [Docker Compose](/compose/overview.md) as described in [Part 3 prerequisites](/get-started/part3.md#prerequisites).
-- Get [Docker Machine](/machine/overview.md) as described in [Part 4 prerequisites](/get-started/part4.md#prerequisites).
-- Read the orientation in [Part 1](index.md).
-- Learn how to create containers in [Part 2](part2.md).
-
-- Make sure you have published the `friendlyhello` image you created by
-[pushing it to a registry](/get-started/part2.md#share-your-image). We use that
-shared image here.
-
-- Be sure your image works as a deployed container. Run this command,
-slotting in your info for `username`, `repo`, and `tag`: `docker run -p 80:80
-username/repo:tag`, then visit `http://localhost/`.
-
-- Have [the final version of `docker-compose.yml` from Part 5](/get-started/part5.md#persist-the-data) handy.
-
-## Introduction
-
-You've been editing the same Compose file for this entire tutorial. Well, we
-have good news. That Compose file works just as well in production as it does
-on your machine. In this section, we will go through some options for running your
-Dockerized application.
-
-## Choose an option
-
-{% capture community %}
-
-### Install Docker Engine --- Community
-
-Find the [install instructions](/install/#supported-platforms) for Docker Engine --- Community on the platform of your choice.
-
-### Create your swarm
-
-Run `docker swarm init` to create a swarm on the node.
-
-### Deploy your app
-
-Run `docker stack deploy -c docker-compose.yml getstartedlab` to deploy
-the app on the cloud hosted swarm.
-
-```shell
-docker stack deploy -c docker-compose.yml getstartedlab
-
-Creating network getstartedlab_webnet
-Creating service getstartedlab_web
-Creating service getstartedlab_visualizer
-Creating service getstartedlab_redis
-```
-
-Your app is now running on your cloud provider.
-
-#### Run some swarm commands to verify the deployment
-
-You can use the swarm command line, as you've done already, to browse and manage
-the swarm. Here are some examples that should look familiar by now:
-
-* Use `docker node ls` to list the nodes in your swarm.
-
-```shell
-[getstartedlab] ~ $ docker node ls
-ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS
-n2bsny0r2b8fey6013kwnom3m * ip-172-31-20-217.us-west-1.compute.internal Ready Active Leader
-```
-
-* Use `docker service ls` to list services.
-
-```shell
-[getstartedlab] ~/sandbox/getstart $ docker service ls
-ID NAME MODE REPLICAS IMAGE PORTS
-ioipby1vcxzm getstartedlab_redis replicated 0/1 redis:latest *:6379->6379/tcp
-u5cxv7ppv5o0 getstartedlab_visualizer replicated 0/1 dockersamples/visualizer:stable *:8080->8080/tcp
-vy7n2piyqrtr getstartedlab_web replicated 5/5 sam/getstarted:part6 *:80->80/tcp
-```
-
-* Use `docker service ps <service>` to view tasks for a service.
-
-```shell
-[getstartedlab] ~/sandbox/getstart $ docker service ps vy7n2piyqrtr
-ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS
-qrcd4a9lvjel getstartedlab_web.1 sam/getstarted:part6 ip-172-31-20-217.us-west-1.compute.internal Running Running 20 seconds ago
-sknya8t4m51u getstartedlab_web.2 sam/getstarted:part6 ip-172-31-20-217.us-west-1.compute.internal Running Running 17 seconds ago
-ia730lfnrslg getstartedlab_web.3 sam/getstarted:part6 ip-172-31-20-217.us-west-1.compute.internal Running Running 21 seconds ago
-1edaa97h9u4k getstartedlab_web.4 sam/getstarted:part6 ip-172-31-20-217.us-west-1.compute.internal Running Running 21 seconds ago
-uh64ez6ahuew getstartedlab_web.5 sam/getstarted:part6 ip-172-31-20-217.us-west-1.compute.internal Running Running 22 seconds ago
-```
-
-#### Open ports to services on cloud provider machines
-
-At this point, your app is deployed as a swarm on your cloud provider servers,
-as evidenced by the `docker` commands you just ran. But, you still need to
-open ports on your cloud servers in order to:
-
-* If using many nodes, allow communication between the `redis` service and the `web` service.
-
-* Allow inbound traffic to the `web` service on any worker nodes so that
-Hello World and Visualizer are accessible from a web browser.
-
-* Allow inbound SSH traffic on the server that is running the `manager` (this may already be set on your cloud provider).
-
-{: id="table-of-ports"}
-
-These are the ports you need to expose for each service:
-
-| Service | Type | Protocol | Port |
-| :--- | :--- | :--- | :--- |
-| `web` | HTTP | TCP | 80 |
-| `visualizer` | HTTP | TCP | 8080 |
-| `redis` | TCP | TCP | 6379 |
-
-Methods for doing this vary depending on your cloud provider.
-
-We use Amazon Web Services (AWS) as an example.
-
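-For example, with the AWS CLI you might open the web port in a node's security
-group like this (a sketch; the security group ID is hypothetical):
-
-```shell
-aws ec2 authorize-security-group-ingress \
-  --group-id sg-0123456789abcdef0 --protocol tcp --port 80 --cidr 0.0.0.0/0
-```
-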
-> What about the redis service to persist data?
->
-> To get the `redis` service working, you need to `ssh` into
-the cloud server where the `manager` is running, and make a `data/`
-directory in `/home/docker/` before you run `docker stack deploy`.
-Another option is to change the data path in the `docker-stack.yml` to
-a pre-existing path on the `manager` server. This example does not
-include this step, so the `redis` service is not up in the example output.
-
-### Iteration and cleanup
-
-From here you can do everything you learned about in previous parts of the
-tutorial.
-
-* Scale the app by changing the `docker-compose.yml` file and redeploy
-on-the-fly with the `docker stack deploy` command.
-
-* Change the app behavior by editing code, then rebuild, and push the new image.
-(To do this, follow the same steps you took earlier to [build the
-app](part2.md#build-the-app) and [publish the
-image](part2.md#publish-the-image)).
-
-* You can tear down the stack with `docker stack rm`. For example:
-
- ```
- docker stack rm getstartedlab
- ```
-
-Unlike the scenario where you were running the swarm on local Docker machine
-VMs, your swarm and any apps deployed on it continue to run on cloud
-servers regardless of whether you shut down your local host.
-
-{% endcapture %}
-{% capture enterpriseboilerplate %}
-Customers of Docker Enterprise Edition run a stable, commercially-supported
-version of Docker Engine, and as an add-on they get our first-class management
-software, Docker Datacenter. You can manage every aspect of your application
-through the interface using Universal Control Plane, run a private image registry with Docker
-Trusted Registry, integrate with your LDAP provider, sign production images with
-Docker Content Trust, and many other features.
-
-{% endcapture %}
-{% capture enterprisedeployapp %}
-Once you're all set up and Docker Enterprise is running, you can [deploy your Compose
-file from directly within the UI](/ee/ucp/swarm/deploy-multi-service-app/){: onclick="ga('send', 'event', 'Get Started Referral', 'Enterprise', 'Deploy app in UI');"}.
-
-
-
-After that, you can see it running, and can change any aspect of the application
-you choose, or even edit the Compose file itself.
-
-
-{% endcapture %}
-{% capture enterprise %}
-{{ enterpriseboilerplate }}
-
-Bringing your own server to Docker Enterprise and setting up Docker Datacenter
-essentially involves two steps:
-
-1. [Get Docker Enterprise for your server's OS from Docker Hub](https://hub.docker.com/search?offering=enterprise&type=edition){: onclick="ga('send', 'event', 'Get Started Referral', 'Enterprise', 'Get Docker EE for your OS');"}.
-2. Follow the [instructions to install Docker Enterprise on your own host](/datacenter/install/linux/){: onclick="ga('send', 'event', 'Get Started Referral', 'Enterprise', 'BYOH setup guide');"}.
-
-> **Note**: Running Windows containers? View our [Windows Server setup guide](/install/windows/docker-ee.md){: onclick="ga('send', 'event', 'Get Started Referral', 'Enterprise', 'Windows Server setup guide');"}.
-
-{{ enterprisedeployapp }}
-{% endcapture %}
-
-
-
-
-## Congratulations!
-
-You've taken a full-stack, dev-to-deploy tour of the entire Docker platform.
-
-There is much more to the Docker platform than what was covered here, but you
-have a good idea of the basics of containers, images, services, swarms, stacks,
-scaling, load-balancing, volumes, and placement constraints.
-
-Want to go deeper? Here are some resources we recommend:
-
-- [Samples](/samples/): Our samples include multiple examples of popular software
- running in containers, and some good labs that teach best practices.
-- [User Guide](/engine/userguide/): The user guide has several examples that
- explain networking and storage in greater depth than was covered here.
-- [Admin Guide](/engine/admin/): Covers how to manage a Dockerized production
- environment.
-- [Training](https://training.docker.com/): Official Docker courses that offer
- in-person instruction and virtual classroom environments.
-- [Blog](https://blog.docker.com): Covers what's going on with Docker lately.
diff --git a/install/linux/docker-ee/rhel.md b/install/linux/docker-ee/rhel.md
index cfb50d1242..078ac50657 100644
--- a/install/linux/docker-ee/rhel.md
+++ b/install/linux/docker-ee/rhel.md
@@ -46,22 +46,34 @@ On {{ linux-dist-long }}, Docker EE supports storage drivers, `overlay2` and `de
### FIPS 140-2 cryptographic module support
-[Federal Information Processing Standards (FIPS) Publication 140-2](https://csrc.nist.gov/csrc/media/publications/fips/140/2/final/documents/fips1402.pdf) is a United States Federal security requirement for cryptographic modules.
+[Federal Information Processing Standards (FIPS) Publication 140-2](https://csrc.nist.gov/csrc/media/publications/fips/140/2/final/documents/fips1402.pdf)
+is a United States Federal security requirement for cryptographic modules.
-With Docker EE Basic license for versions 18.03 and later, Docker provides FIPS 140-2 support in RHEL 7.3, 7.4 and 7.5. This includes a FIPS supported cryptographic module. If the RHEL implementation already has FIPS support enabled, FIPS is automatically enabled in the Docker engine.
+With Docker Engine - Enterprise Basic license for versions 18.03 and later,
+Docker provides FIPS 140-2 support in RHEL 7.3, 7.4 and 7.5. This includes a
+FIPS supported cryptographic module. If the RHEL implementation already has FIPS
+support enabled, FIPS is also automatically enabled in the Docker engine. If
+FIPS support is not already enabled in your RHEL implementation, visit the
+[Red Hat Product Documentation](https://access.redhat.com/documentation/en-us/)
+for instructions on how to enable it.
-To verify the FIPS-140-2 module is enabled in the Linux kernel, confirm the file `/proc/sys/crypto/fips_enabled` contains `1`.
+To verify the FIPS-140-2 module is enabled in the Linux kernel, confirm the file
+`/proc/sys/crypto/fips_enabled` contains `1`.
```
$ cat /proc/sys/crypto/fips_enabled
1
```
-> **Note**: FIPS is only supported in the Docker Engine EE. UCP and DTR currently do not have support for FIPS-140-2.
+> **Note**: FIPS is only supported in Docker Engine - Enterprise. UCP
+> and DTR currently do not have support for FIPS-140-2.
-To enable FIPS 140-2 compliance on a system that is not in FIPS 140-2 mode, do the following:
+You can override FIPS 140-2 compliance on a system that is not in FIPS 140-2
+mode. Note that this **does not** change FIPS 140-2 mode on the system. To
+override the FIPS 140-2 mode, follow the steps below.
-Create a file called `/etc/systemd/system/docker.service.d/fips-module.conf`. It needs to contain the following:
+Create a file called `/etc/systemd/system/docker.service.d/fips-module.conf`.
+Add the following:
```
[Service]
@@ -76,7 +88,8 @@ Restart the Docker service as root.
`$ sudo systemctl restart docker`
-To confirm Docker is running with FIPS-140-2 enabled, run the `docker info` command:
+To confirm Docker is running with FIPS-140-2 enabled, run the `docker info`
+command:
{% raw %}
```
@@ -85,13 +98,13 @@ docker info --format {{.SecurityOptions}}
```
{% endraw %}
-### Disabling FIPS-140-2
+### Disabling FIPS-140-2
-If the system has the FIPS 140-2 cryptographic module installed on the operating system,
-it is possible to disable FIPS-140-2 compliance.
+If the system has the FIPS 140-2 cryptographic module installed on the operating
+system, it is possible to disable FIPS-140-2 compliance.
-To disable FIPS 140-2 in Docker but not the operating system, set the value `DOCKER_FIPS=0`
-in the `/etc/systemd/system/docker.service.d/fips-module.conf`.
+To disable FIPS 140-2 in Docker but not the operating system, set the value
+`DOCKER_FIPS=0` in the `/etc/systemd/system/docker.service.d/fips-module.conf`.
Reload the Docker configuration to systemd.
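+
+That step is typically the standard systemd pair (shown here as a sketch):
+
+```
+$ sudo systemctl daemon-reload
+$ sudo systemctl restart docker
+```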
diff --git a/install/linux/docker-ee/ubuntu.md b/install/linux/docker-ee/ubuntu.md
index e8c788503e..ccbce47f43 100644
--- a/install/linux/docker-ee/ubuntu.md
+++ b/install/linux/docker-ee/ubuntu.md
@@ -57,39 +57,11 @@ networks, are preserved. The Docker EE package is now called `docker-ee`.
#### Extra steps for aufs
-If your version supports the `aufs` storage driver, you need some preparation
-before installing Docker.
-
-
-
-
-
For Ubuntu 16.04 and higher, the Linux kernel includes support for overlay2,
and Docker EE uses it as the default storage driver. If you need
to use `aufs` instead, you need to configure it manually.
See [aufs](/engine/userguide/storagedriver/aufs-driver.md)
-
-
-
-Unless you have a strong reason not to, install the
-`linux-image-extra-*` packages, which allow Docker to use the `aufs` storage
-driver.
-
-```bash
-$ sudo apt-get update
-
-$ sudo apt-get install \
- linux-image-extra-$(uname -r) \
- linux-image-extra-virtual
-```
-
-
-
-
## Install Docker EE
You can install Docker EE in different ways, depending on your needs:
@@ -137,13 +109,17 @@ from the repository.
4. Temporarily add a `$DOCKER_EE_VERSION` variable into your environment.
- > **Note**: If you need to run something other than Docker EE 2.0, please see the following instructions:
- > * [18.03](https://docs.docker.com/v18.03/ee/supported-platforms/) - Older Docker EE Engine only release
- > * [17.06](https://docs.docker.com/v17.06/engine/installation/) - Docker Enterprise Edition 2.0 (Docker Engine,
- > UCP, and DTR).
+ > **Note**: If you need to run something other than Docker EE 3.0, please
+ > see the following instructions:
+ > * [18.09](https://docs.docker.com/v18.09/ee/supported-platforms/) - Docker
+ > Enterprise Edition 2.1 (Docker Engine, UCP, and DTR).
+ > * [18.03](https://docs.docker.com/v18.03/ee/supported-platforms/) - Older
+ > Docker EE Engine only release
+ > * [17.06](https://docs.docker.com/v17.06/engine/installation/) - Docker
+ > Enterprise Edition 2.0 (Docker Engine, UCP, and DTR).
```bash
- $ DOCKER_EE_VERSION=18.09
+ $ DOCKER_EE_VERSION={{ site.docker_ee_version }}
```
5. Add Docker's official GPG key using your customer Docker EE repository URL:
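
   The upstream command for this step is roughly the following (a sketch; the
   `$DOCKER_EE_URL` variable holds the repository URL associated with your
   subscription):

   ```bash
   curl -fsSL "${DOCKER_EE_URL}/ubuntu/gpg" | sudo apt-key add -
   ```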
diff --git a/js/collections_tocs.js b/js/collections_tocs.js
deleted file mode 100644
index 25f12b9820..0000000000
--- a/js/collections_tocs.js
+++ /dev/null
@@ -1,12 +0,0 @@
----
-layout: null
----
-var collectionsTOC = new Array()
-collectionsTOC["library"] = [
- {% for page in site.samples %}
- {
- "path":{{ page.url | jsonify }},
- "title":{{ page.title | jsonify }}
- }{% unless forloop.last%},{% endunless %}
- {% endfor %}
-]
diff --git a/js/docs.js b/js/docs.js
index 46102a04e1..13bfacadc9 100644
--- a/js/docs.js
+++ b/js/docs.js
@@ -98,9 +98,6 @@ function walkTree(tree)
var subTree = tree[j].section;
walkTree(subTree);
outputLetNav.push('
');
- } else if (tree[j].generateTOC) {
- // auto-generate a TOC from a collection
- walkTree(collectionsTOC[tree[j].generateTOC])
} else {
// just a regular old topic; this is a leaf, not a branch; render a link!
outputLetNav.push('
backup.tar
```
@@ -40,13 +43,47 @@ Note:
docker run --mount type=bind,src=/home/user/backup:/backup docker/ucp --file /backup/backup.tar
```
+### SELinux
+
+If you are installing UCP on a manager node with SELinux enabled at the daemon
+and operating system level, you need to pass `--security-opt label=disable`
+to your install command. This flag disables SELinux policies on the
+installation container. The UCP installation container mounts and configures
+the Docker socket as part of installation, so the installation fails with a
+permission denied error if you do not pass in this flag.
+
+```
+FATA[0000] unable to get valid Docker client: unable to ping Docker daemon: Got
+permission denied while trying to connect to the Docker daemon socket at
+unix:///var/run/docker.sock: Get http://%2Fvar%2Frun%2Fdocker.sock/_ping: dial
+unix /var/run/docker.sock: connect: permission denied - If SELinux is enabled
+on the Docker daemon, make sure you run UCP with "docker run --security-opt
+label=disable -v /var/run/docker.sock:/var/run/docker.sock ..."
+```
+
+For example, on a system with SELinux enabled at the daemon level, the backup
+command would be:
+
+```bash
+docker container run \
+ --rm \
+ --interactive \
+ --name ucp \
+ --security-opt label=disable \
+ --volume /var/run/docker.sock:/var/run/docker.sock \
+ docker/ucp \
+ backup [command options] > backup.tar
+```
+
## Options
-| Option | Description |
-|:-----------------------|:------------------------------------------------------------------------------|
-| `--debug, -D` | Enable debug mode |
-| `--file *value*` | Name of the file to write the backup contents to. Ignored in interactive mode |
-| `--jsonlog` | Produce json formatted output for easier parsing |
-| `--interactive, -i` | Run in interactive mode and prompt for configuration values |
-| `--no-passphrase` | Opt out to encrypt the tar file with a passphrase (not recommended) |
-| `--passphrase` *value* | Encrypt the tar file with a passphrase |
+| Option | Description |
+|:-----------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `--debug, -D` | Enable debug mode |
+| `--file *value*` | Name of the file to write the backup contents to. Ignored in interactive mode |
+| `--jsonlog` | Produce json formatted output for easier parsing |
+| `--include-logs` | Only relevant if `--file` is also included. If true, an encrypted `backup.log` file will be stored alongside the `backup.tar` in the mounted directory. Default is `true`. |
+| `--interactive, -i` | Run in interactive mode and prompt for configuration values |
+| `--no-passphrase` | Opt out to encrypt the tar file with a passphrase (not recommended) |
+| `--passphrase` *value* | Encrypt the tar file with a passphrase |
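+
+For example, a non-interactive, passphrase-encrypted backup written through a
+bind mount (a sketch, not an official example; adjust the host path to your
+system) might look like:
+
+```bash
+docker container run --rm -i --name ucp \
+  -v /var/run/docker.sock:/var/run/docker.sock \
+  -v /home/user/backup:/backup \
+  docker/ucp backup --passphrase "secret12chars" --file /backup/backup.tar
+```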
diff --git a/reference/ucp/3.2/cli/install.md b/reference/ucp/3.2/cli/install.md
index ffca866e58..1cb22955b6 100644
--- a/reference/ucp/3.2/cli/install.md
+++ b/reference/ucp/3.2/cli/install.md
@@ -76,9 +76,9 @@ docker container run \
If you are installing on a public cloud platform, there is cloud-specific UCP
installation documentation:
-- For [Microsoft Azure](./cloudproviders/install-on-azure/) this is
+- For [Microsoft Azure](/ee/ucp/admin/install/cloudproviders/install-on-azure/) this is
**mandatory**
-- For [AWS](./cloudproviders/install-on-aws/) this is optional.
+- For [AWS](/ee/ucp/admin/install/cloudproviders/install-on-aws/) this is optional.
## Options
diff --git a/reference/ucp/3.2/cli/restore.md b/reference/ucp/3.2/cli/restore.md
index 6d4f440fe7..731506c0fa 100644
--- a/reference/ucp/3.2/cli/restore.md
+++ b/reference/ucp/3.2/cli/restore.md
@@ -68,5 +68,5 @@ Notes:
| `--host-address` *value* | The network address to advertise to other nodes. Format: IP address or network interface name |
| `--passphrase` *value* | Decrypt the backup tar file with the provided passphrase |
| `--san` *value* | Add subject alternative names to certificates (e.g. --san www1.acme.com --san www2.acme.com) |
-| `--swarm-grpc-port *value* | Port for communication between nodes (default: 2377) |
+| `--swarm-grpc-port` *value* | Port for communication between nodes (default: 2377) |
| `--unlock-key` *value* | The unlock key for this swarm-mode cluster, if one exists. |
diff --git a/reference/ucp/3.2/cli/upgrade.md b/reference/ucp/3.2/cli/upgrade.md
index 115eee6ed1..4fcacb0a32 100644
--- a/reference/ucp/3.2/cli/upgrade.md
+++ b/reference/ucp/3.2/cli/upgrade.md
@@ -39,7 +39,7 @@ healthy and that all nodes have been upgraded successfully.
| `--force-minimums` | Force the install/upgrade even if the system does not meet the minimum requirements |
| `--host-address` *value* | Override the previously configured host address with this IP or network interface |
| `--id` | The ID of the UCP instance to upgrade |
-| --manual-worker-upgrade | Whether to manually upgrade worker nodes. Defaults to false |
+| `--manual-worker-upgrade` | Whether to manually upgrade worker nodes. Defaults to false |
| `--pull` | Pull UCP images: `always`, when `missing`, or `never` |
| `--registry-password` *value* | Password to use when pulling images |
| `--registry-username` *value* | Username to use when pulling images |
diff --git a/samples/index.md b/samples/index.md
index 37f9044ad7..5749c4602d 100644
--- a/samples/index.md
+++ b/samples/index.md
@@ -23,27 +23,27 @@ repository]({{ labsbase }}).
| [Docker Security]({{ labsbase }}/security/README.md){: target="_blank"} | How to take advantage of Docker security features. |
| [Building a 12-factor application with Docker]({{ labsbase}}/12factor){: target="_blank"} | Use Docker to create an app that conforms to Heroku's "12 factors for cloud-native applications." |
-## Library references
-
-The following table provides a list of popular official Docker images. For detailed documentation, select the specific image name.
-
-| Image name | Description |
-| ---------- | ----------- |
-{% for page in site.samples %}| [{{ page.title }}](https://hub.docker.com/_/{{ page.title }}) | {{ page.description | strip }} |
-{% endfor %}
-
## Sample applications
Run popular software using Docker.
| Sample | Description |
| ------ | ----------- |
-| [apt-cacher-ng](/engine/examples/apt-cacher-ng) | Run a Dockerized apt-cacher-ng instance. |
-| [.Net Core application](/engine/examples/dotnetcore) | Run a Dockerized ASP.NET Core application. |
-| [ASP.NET Core + SQL Server on Linux](/compose/aspnet-mssql-compose) | Run a Dockerized ASP.NET Core + SQL Server environment. |
-| [CouchDB](/engine/examples/couchdb_data_volumes) | Run a Dockerized CouchDB instance. |
+| [apt-cacher-ng](/engine/examples/apt-cacher-ng/) | Run a Dockerized apt-cacher-ng instance. |
+| [.Net Core application](/engine/examples/dotnetcore/) | Run a Dockerized ASP.NET Core application. |
+| [ASP.NET Core + SQL Server on Linux](/compose/aspnet-mssql-compose/) | Run a Dockerized ASP.NET Core + SQL Server environment. |
+| [CouchDB](/engine/examples/couchdb_data_volumes/) | Run a Dockerized CouchDB instance. |
| [Django + PostgreSQL](/compose/django/) | Run a Dockerized Django + PostgreSQL environment. |
-| [PostgreSQL](/engine/examples/postgresql_service) | Run a Dockerized PostgreSQL instance. |
+| [PostgreSQL](/engine/examples/postgresql_service/) | Run a Dockerized PostgreSQL instance. |
| [Rails + PostgreSQL](/compose/rails/) | Run a Dockerized Rails + PostgreSQL environment. |
-| [Riak](/engine/examples/running_riak_service) | Run a Dockerized Riak instance. |
-| [SSHd](/engine/examples/running_ssh_service) | Run a Dockerized SSHd instance. |
+| [Riak](/engine/examples/running_riak_service/) | Run a Dockerized Riak instance. |
+| [SSHd](/engine/examples/running_ssh_service/) | Run a Dockerized SSHd instance. |
+
+## Library references
+
+The following table provides a list of popular official Docker images. For detailed documentation, select the specific image name.
+
+| Image name | Description |
+| ---------- | ----------- |
+{% for page in site.samples %}| [{{ page.title }}](https://hub.docker.com/_/{{ page.title }}){: target="_blank"} | {{ page.description | strip }} |
+{% endfor %}
diff --git a/storage/storagedriver/device-mapper-driver.md b/storage/storagedriver/device-mapper-driver.md
index 2edf5c832b..c69c78861c 100644
--- a/storage/storagedriver/device-mapper-driver.md
+++ b/storage/storagedriver/device-mapper-driver.md
@@ -687,7 +687,7 @@ the Devicemapper configuration itself and about each image and container layer
that exist. The `devicemapper` storage driver uses snapshots, and this metadata
includes information about those snapshots. These files are in JSON format.
-The `/var/lib/devicemapper/mnt/` directory contains a mount point for each image
+The `/var/lib/docker/devicemapper/mnt/` directory contains a mount point for each image
and container layer that exists. Image layer mount points are empty, but a
container's mount point shows the container's filesystem as it appears from
within the container.
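
For example, you can list the mount points directly on the Docker host (the
layer IDs you see will be specific to your system):

```bash
$ ls /var/lib/docker/devicemapper/mnt/
```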
diff --git a/storage/tmpfs.md b/storage/tmpfs.md
index a7ffe16b90..d104f9ef6b 100644
--- a/storage/tmpfs.md
+++ b/storage/tmpfs.md
@@ -50,7 +50,7 @@ the `--mount` flag was used for swarm services. However, starting with Docker
is mounted in the container. May be specified as `destination`, `dst`,
or `target`.
- The `tmpfs-type` and `tmpfs-mode` options. See
- [tmpfs options](#tmpfs-options).
+ [tmpfs options](#specify-tmpfs-options).
The examples below show both the `--mount` and `--tmpfs` syntax where possible,
and `--mount` is presented first.
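
For instance, a run that sets an explicit mode on the tmpfs mount might look
like this (a sketch; `nginx` is just a stand-in image):

```bash
docker run -d -it --name tmptest \
  --mount type=tmpfs,destination=/app,tmpfs-mode=1770 \
  nginx:latest
```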