diff --git a/_data/toc.yaml b/_data/toc.yaml index cb6b7a25c4..48338c5f2f 100644 --- a/_data/toc.yaml +++ b/_data/toc.yaml @@ -3028,177 +3028,7 @@ manuals: title: Get support - title: Get support path: /ee/get-support/ -- sectiontitle: Docker Cloud - section: - - sectiontitle: Migration - section: - - path: /docker-cloud/migration/ - title: Migration overview - - path: /docker-cloud/migration/cloud-to-swarm/ - title: Migrate to Docker CE - - path: /docker-cloud/migration/cloud-to-kube-aks/ - title: Migration to AKS - - path: /docker-cloud/migration/cloud-to-kube-gke/ - title: Migrate to GKE - - path: /docker-cloud/migration/deregister-swarms/ - title: Deregister swarms - - path: /docker-cloud/migration/kube-primer/ - title: Kubernetes primer - - path: /docker-cloud/ - title: About Docker Cloud - - path: /docker-cloud/dockerid/ - title: Docker Cloud settings and Docker ID - - path: /docker-cloud/orgs/ - title: Organizations and teams - - sectiontitle: Manage builds and images - section: - - path: /docker-cloud/builds/ - title: Builds and images overview - - path: /docker-cloud/builds/repos/ - title: Docker Cloud repositories - - path: /docker-cloud/builds/link-source/ - title: Link to a source code repository - - path: /docker-cloud/builds/push-images/ - title: Push images to Docker Cloud - - path: /docker-cloud/builds/automated-build/ - title: Automated builds - - path: /docker-cloud/builds/automated-testing/ - title: Automated repository tests - - path: /docker-cloud/builds/advanced/ - title: Advanced options for autobuild and autotest - - sectiontitle: Manage swarms (beta swarm mode) - section: - - path: /docker-cloud/cloud-swarm/ - title: Overview - - path: /docker-cloud/cloud-swarm/using-swarm-mode/ - title: Using Swarm mode - - path: /docker-cloud/cloud-swarm/register-swarms/ - title: Register existing swarms - - path: /docker-cloud/cloud-swarm/create-cloud-swarm-aws/ - title: Create a new swarm on Amazon Web Services in Docker Cloud - - path: 
/docker-cloud/cloud-swarm/create-cloud-swarm-azure/ - title: Create a new swarm on Microsoft Azure in Docker Cloud - - path: /docker-cloud/cloud-swarm/connect-to-swarm/ - title: Connect to a swarm through Docker Cloud - - path: /docker-cloud/cloud-swarm/link-aws-swarm/ - title: Link Amazon Web Services to Docker Cloud - - path: /docker-cloud/cloud-swarm/link-azure-swarm/ - title: Link Microsoft Azure Cloud Services to Docker Cloud - - path: /docker-cloud/cloud-swarm/ssh-key-setup/ - title: Set up SSH keys - - sectiontitle: Manage Infrastructure (standard mode) - section: - - path: /docker-cloud/infrastructure/ - title: Infrastructure overview - - path: /docker-cloud/infrastructure/deployment-strategies/ - title: Container distribution strategies - - path: /docker-cloud/infrastructure/link-aws/ - title: Link to Amazon Web Services hosts - - path: /docker-cloud/infrastructure/link-do/ - title: Link to DigitalOcean hosts - - path: /docker-cloud/infrastructure/link-azure/ - title: Link to Microsoft Azure hosts - - path: /docker-cloud/infrastructure/link-packet/ - title: Link to Packet hosts - - path: /docker-cloud/infrastructure/link-softlayer/ - title: Link to SoftLayer hosts - - path: /docker-cloud/infrastructure/ssh-into-a-node/ - title: SSH into a Docker Cloud-managed node - - path: /docker-cloud/infrastructure/docker-upgrade/ - title: Upgrade Docker on a node - - path: /docker-cloud/infrastructure/byoh/ - title: Use the Docker Cloud agent - - path: /docker-cloud/infrastructure/cloud-on-packet.net-faq/ - title: Use Docker Cloud and Packet.net - - path: /docker-cloud/infrastructure/cloud-on-aws-faq/ - title: Use Docker Cloud on AWS - - sectiontitle: Manage nodes and apps (standard mode) - section: - - path: /docker-cloud/standard/ - title: Overview - - sectiontitle: Getting started - section: - - path: /docker-cloud/getting-started/ - title: Getting started with Docker Cloud - - path: /docker-cloud/getting-started/intro_cloud/ - title: Introducing Docker Cloud - - 
path: /docker-cloud/getting-started/connect-infra/ - title: Link to your infrastructure - - path: /docker-cloud/getting-started/your_first_node/ - title: Deploy your first node - - path: /docker-cloud/getting-started/your_first_service/ - title: Deploy your first service - - sectiontitle: Deploy an application - section: - - path: /docker-cloud/getting-started/deploy-app/1_introduction/ - title: Introduction to deploying an app in Docker Cloud - - path: /docker-cloud/getting-started/deploy-app/2_set_up/ - title: Set up your environment - - path: /docker-cloud/getting-started/deploy-app/3_prepare_the_app/ - title: Prepare the application - - path: /docker-cloud/getting-started/deploy-app/4_push_to_cloud_registry/ - title: Push the image to Docker Cloud's Registry - - path: /docker-cloud/getting-started/deploy-app/5_deploy_the_app_as_a_service/ - title: Deploy the app as a Docker Cloud service - - path: /docker-cloud/getting-started/deploy-app/6_define_environment_variables/ - title: Define environment variables - - path: /docker-cloud/getting-started/deploy-app/7_scale_the_service/ - title: Scale the service - - path: /docker-cloud/getting-started/deploy-app/8_view_logs/ - title: View service logs - - path: /docker-cloud/getting-started/deploy-app/9_load-balance_the_service/ - title: Load-balance the service - - path: /docker-cloud/getting-started/deploy-app/10_provision_a_data_backend_for_your_service/ - title: Provision a data backend for the service - - path: /docker-cloud/getting-started/deploy-app/11_service_stacks/ - title: Stackfiles for your service - - path: /docker-cloud/getting-started/deploy-app/12_data_management_with_volumes/ - title: Data management with volumes - - sectiontitle: Manage applications - section: - - path: /docker-cloud/apps/ - title: Applications in Docker Cloud - - path: /docker-cloud/apps/deploy-to-cloud-btn/ - title: Add a deploy to Docker Cloud button - - path: /docker-cloud/apps/auto-destroy/ - title: Automatic container destroy - 
- path: /docker-cloud/apps/autorestart/ - title: Automatic container restart - - path: /docker-cloud/apps/auto-redeploy/ - title: Automatic service redeploy - - path: /docker-cloud/apps/load-balance-hello-world/ - title: Create a proxy or load balancer - - path: /docker-cloud/apps/deploy-tags/ - title: Deployment tags - - path: /docker-cloud/apps/stacks/ - title: Manage service stacks - - path: /docker-cloud/apps/ports/ - title: Publish and expose service or container ports - - path: /docker-cloud/apps/service-redeploy/ - title: Redeploy running services - - path: /docker-cloud/apps/service-scaling/ - title: Scale your service - - path: /docker-cloud/apps/api-roles/ - title: Service API roles - - path: /docker-cloud/apps/service-links/ - title: Service discovery and links - - path: /docker-cloud/apps/triggers/ - title: Use triggers - - path: /docker-cloud/apps/volumes/ - title: Work with data volumes - - path: /docker-cloud/apps/stack-yaml-reference/ - title: Cloud stack file YAML reference - - path: /docker-cloud/slack-integration/ - title: Docker Cloud notifications in Slack - - path: /apidocs/docker-cloud/ - title: Docker Cloud API - nosync: true - - path: /docker-cloud/installing-cli/ - title: The Docker Cloud CLI - - path: /docker-cloud/docker-errors-faq/ - title: Known issues in Docker Cloud - - path: /docker-cloud/release-notes/ - title: Release notes + - sectiontitle: Docker Compose section: - path: /compose/overview/ @@ -3453,48 +3283,62 @@ manuals: title: Migrate from Boot2Docker to Machine - path: /release-notes/docker-machine/ title: Docker Machine release notes -- sectiontitle: Docker Store - section: - - path: /docker-store/ - title: About Docker Store - - sectiontitle: Docker Store FAQs - section: - - path: /docker-store/customer_faq/ - title: Customer FAQs - - path: /docker-store/publisher_faq/ - title: Publisher FAQs - - sectiontitle: For Publishers - section: - - path: /docker-store/publish/ - title: Publish content on Docker Store - - path: 
/docker-store/certify-images/ - title: Certify Docker images - - path: /docker-store/certify-plugins-logging/ - title: Certify Docker logging plugins - - path: /docker-store/trustchain/ - title: Docker Store trust chain - - path: /docker-store/byol/ - title: Bring Your Own License (BYOL) + - sectiontitle: Docker Hub section: - - path: /docker-hub/ - title: Overview of Docker Hub - - path: /docker-hub/accounts/ - title: Use Docker Hub with Docker ID - - path: /docker-hub/orgs/ - title: Teams & organizations - - path: /docker-hub/repos/ - title: Repositories on Docker Hub - - path: /docker-hub/builds/ - title: Automated builds - - path: /docker-hub/webhooks/ - title: Webhooks for automated builds - - path: /docker-hub/bitbucket/ - title: Automated builds with Bitbucket - - path: /docker-hub/github/ - title: Automated builds from GitHub - - path: /docker-hub/official_repos/ - title: Official repositories on Docker Hub + - title: Docker Hub overview + path: /docker-hub/ + - title: Create Docker Hub account + path: /docker-hub/accounts/ + - title: Run Docker CLI commands + path: /docker-hub/commandline/ + - sectiontitle: Discover content + section: + - title: Content overview + path: /docker-hub/discover/ + - title: Official repos + path: /docker-hub/discover/official-repos/ + - sectiontitle: Manage repositories + section: + - title: Repository overview + path: /docker-hub/manage/ + - title: Create and configure repos + path: /docker-hub/manage/repos/ + - title: Create orgs and teams + path: /docker-hub/manage/orgs-teams/ + - title: Push images + path: /docker-hub/manage/push-images/ + - sectiontitle: Autobuild images + section: + - title: Autobuild Docker images + path: /docker-hub/build/ + - title: Autotest repositories + path: /docker-hub/build/autotest/ + - title: Advanced options + path: /docker-hub/build/advanced/ + - title: Build from GitHub + path: /docker-hub/build/github/ + - title: Build from Bitbucket + path: /docker-hub/build/bitbucket/ + - title: Webhooks 
+ path: /docker-hub/build/webhooks/ + - sectiontitle: Publish content + section: + - title: Publish Docker images + path: /docker-hub/publish/ + - title: Certify Docker images + path: /docker-hub/publish/certify-images/ + - title: Certify Docker logging plugins + path: /docker-hub/publish/certify-plugins-logging/ + - title: Docker Hub trust chain + path: /docker-hub/publish/trustchain/ + - title: Bring Your Own License (BYOL) + path: /docker-hub/publish/byol/ + - title: FAQs on publishing center + path: /docker-hub/publish/faq-publisher/ + - title: Customer FAQs + path: /docker-hub/publish/faq-customer/ + - sectiontitle: Open-source projects section: - sectiontitle: Docker Notary @@ -3617,7 +3461,7 @@ manuals: title: Docker Compose nosync: true - path: /docker-cloud/release-notes/ - title: Docker Cloud + title: Docker Cloud nosync: true - path: /docker-for-aws/release-notes/ title: Docker for AWS diff --git a/_includes/docker-hub-cli-commands.md b/_includes/docker-hub-cli-commands.md new file mode 100644 index 0000000000..de50bcb03b --- /dev/null +++ b/_includes/docker-hub-cli-commands.md @@ -0,0 +1,71 @@ +1. Open a terminal and log into Docker Hub with the Docker CLI: + + ``` + $ docker login + + Login with your Docker ID to push and pull images from Docker Hub. If you don't have a Docker ID, head over to https://hub.docker.com to create one. + Username: gordon + Password: + WARNING! Your password will be stored unencrypted in /home/gordon/.docker/config.json. + Configure a credential helper to remove this warning. See + https://docs.docker.com/engine/reference/commandline/login/#credentials-store + ``` + +2. Search for the `busybox` image: + + ``` + $ docker search busybox + + NAME DESCRIPTION STARS OFFICIAL AUTOMATED + busybox Busybox base image. 1268 [OK] + progrium/busybox 66 [OK] + hypriot/rpi-busybox-httpd Raspberry Pi compatible … 41 + radial/busyboxplus Full-chain, Internet enabled, … 19 [OK] + ... + ``` + + > Private repos are not returned at the command line. 
Go to the Docker Hub UI + > to see your allowable repos. + +3. Pull the official busybox image to your machine and list it (to ensure it was + pulled): + + ``` + $ docker pull busybox + + Using default tag: latest + latest: Pulling from library/busybox + 07a152489297: Pull complete + Digest: sha256:141c253bc4c3fd0a201d32dc1f493bcf3fff003b6df416dea4f41046e0f37d47 + Status: Downloaded newer image for busybox:latest + + $ docker image ls + + REPOSITORY TAG IMAGE ID CREATED SIZE + busybox latest 8c811b4aec35 11 days ago 1.15MB + + ``` + +4. Tag the official image (to differentiate it), list it, and push it to your + personal repo: + + ``` + $ docker tag busybox gordon/busybox:test-tag + + $ docker image ls + + REPOSITORY TAG IMAGE ID CREATED SIZE + gordon/busybox test-tag 8c811b4aec35 11 days ago 1.15MB + busybox latest 8c811b4aec35 11 days ago 1.15MB + + $ docker push gordon/busybox:test-tag + ``` + +5. Log out from Docker Hub: + + ``` + $ docker logout + ``` + +6. Log on to the [Docker Hub UI](https://hub.docker.com){: target="_blank" class="_"} and view the image you + pushed. diff --git a/_includes/register-for-docker-id.md b/_includes/register-for-docker-id.md new file mode 100644 index 0000000000..e537ecf4ac --- /dev/null +++ b/_includes/register-for-docker-id.md @@ -0,0 +1,21 @@ +When you register for a Docker ID, your Docker ID is your user namespace +in Docker Hub and your username on the [Docker Forums](https://forums.docker.com/){: target="_blank" class="_"}. + +1. Go to [Docker Hub](https://hub.docker.com/){: target="_blank" class="_"}. + +2. Click **Create Docker ID** (top right). + +3. Fill out the required fields: + + - **Docker ID** (or username): Must be 4 to 30 characters long, only numbers + and lowercase letters. + + - **Email address**: Must be unique and valid. + + - **Password**: Must be 6 to 128 characters long. + +4. Click **Sign Up**. Docker sends a verification email to the address you + provided. + +5. 
Go to your email and click the link to verify your address. You cannot log + in until you verify. diff --git a/docker-cloud/apps/api-roles.md b/docker-cloud/apps/api-roles.md deleted file mode 100644 index 1a8abdc133..0000000000 --- a/docker-cloud/apps/api-roles.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -description: API Roles -keywords: API, Services, roles -redirect_from: -- /docker-cloud/feature-reference/api-roles/ -title: Service API roles -notoc: true ---- - -You can configure a service so that it can access the Docker Cloud API. When you -grant API access to a service, its containers receive a token through an -environment variable, which is used to query the Docker Cloud API. - -Docker Cloud has a "full access" role which when granted allows any operation -to be performed on the API. You can enable this option on the **Environment variables** screen of the Service wizard, or [specify it in your service's stackfile](stack-yaml-reference.md#roles). When enabled, Docker Cloud generates an authorization token for the -service's containers which is stored in an environment variable called -`DOCKERCLOUD_AUTH`. - -Use this variable to set the `Authorization` HTTP header when calling -Docker Cloud's API: - -```bash -$ curl -H "Authorization: $DOCKERCLOUD_AUTH" -H "Accept: application/json" https://cloud.docker.com/api/app/v1/service/ -``` - -You can use this feature with Docker Cloud's [automatic environment variables](service-links.md), to let your application inside a container read and perform operations using Docker Cloud's API. - -```bash -$ curl -H "Authorization: $DOCKERCLOUD_AUTH" -H "Accept: application/json" $WEB_DOCKERCLOUD_API_URL -``` - -For example, you can use information retrieved using the API to read the linked -endpoints, and use them to reconfigure a proxy container. - -See the [API documentation](/apidocs/docker-cloud.md) for more information on the different API operations available. 
diff --git a/docker-cloud/apps/auto-destroy.md b/docker-cloud/apps/auto-destroy.md deleted file mode 100644 index 8e0340d2e9..0000000000 --- a/docker-cloud/apps/auto-destroy.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -description: Autodestroy -keywords: Autodestroy, service, terminate, container -redirect_from: -- /docker-cloud/feature-reference/auto-destroy/ -title: Destroy containers automatically ---- - -When enabled on a service, **Autodestroy** automatically terminates containers -when they stop. **This destroys all data in the container on stop.** This is -useful for one-time actions that store their results in an external system. - -The following Autodestroy options are available: - -- `OFF`: the container remains in the **Stopped** state regardless of exit code, and is not destroyed. -- `ON_SUCCESS`: if the container stops with an exit code of 0 (normal shutdown), Docker Cloud automatically destroys it. If it stops with any other exit code, Docker Cloud leaves it in the **Stopped** state. -- `ALWAYS`: if the container stops, Docker Cloud automatically terminates it regardless of the exit code. - -If **Autorestart** is activated, Docker Cloud evaluates whether to try restarting the container or not before evaluating **Autodestroy**. - -## Launch a service with Autodestroy - -You can enable **Autodestroy** on the **Service configuration** step of the **Launch new service** wizard. - -![](images/autodestroy.png) - -Autodestroy is set to `OFF` (deactivated) by default. - -### Use the API or CLI - -You can enable autodestroy when launching a service through the API or CLI. - -If not provided, it has a default value of `OFF`. Check our [API documentation](/apidocs/docker-cloud.md) for more information. - -#### Launch with autodestroy using the API -``` -POST /api/app/v1/service/ HTTP/1.1 -{ - "autodestroy": "ALWAYS", - [...] -} -``` - -#### Launch with autodestroy using the CLI -``` -$ docker-cloud service run --autodestroy ALWAYS [...] 
-``` - -## Enable autodestroy on an already deployed service - -You can also activate or deactivate the **Autodestroy** setting on a service -after it has been deployed, by editing the service. - -1. Go to the service detail page. -2. Click **Edit**. -3. Select the new autodestroy setting. -4. Click **Save**. - -### Use the API or CLI - -You can set the **Autodestroy** option after the service has been -deployed, using the API or CLI. - -Check our [API documentation](/apidocs/docker-cloud.md) for more information. - -#### Enable autodestroy using the API -``` -PATCH /api/app/v1/service/(uuid)/ HTTP/1.1 -{ - "autodestroy": "ALWAYS" -} -``` - -#### Enable autodestroy using the CLI -``` -$ docker-cloud service set --autodestroy ALWAYS (name or uuid) -``` diff --git a/docker-cloud/apps/auto-redeploy.md b/docker-cloud/apps/auto-redeploy.md deleted file mode 100644 index 3fb1daa33d..0000000000 --- a/docker-cloud/apps/auto-redeploy.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -description: Autoredeploy -keywords: Autoredeploy, image, store, service -redirect_from: -- /docker-cloud/feature-reference/auto-redeploy/ -title: Redeploy services automatically ---- - -[![Automated Deployments with Docker Cloud](images/video-auto-redeploy-docker-cloud.png)](https://www.youtube.com/watch?v=I4depUwfbFc "Automated Deployments with Docker Cloud"){:target="_blank"} - -Docker Cloud's **Autoredeploy** feature allows a service that uses an image -stored in Docker Hub to automatically redeploy whenever a new image is pushed or -built. - -> **Notes**: -> ->* **Autoredeploy** works only for hub images with the _latest_ tag. -> ->* To enable **autoredeploy** on an image stored in a third party registry, -> you need to use [redeploy triggers](triggers.md) instead. - -## Launch a new service with autoredeploy - -You can launch a service with **autoredeploy** enabled by enabling it from the **general settings** section of the **Launch new service** wizard. 
- -![](images/service-wizard-autoredeploy.png) - -By default, autoredeploy is *deactivated*. - -### Use the CLI or API - -You can enable **autoredeploy** when launching a service using the CLI or API. - -By default, autoredeploy is set to `false`. See the [API documentation](/apidocs/docker-cloud.md) for more information. - -#### Enable autoredeploy using the CLI - -``` -$ docker-cloud service run --autoredeploy [...] -``` - -#### Enable autoredeploy using the API - -``` -POST /api/app/v1/service/ HTTP/1.1 -{ - "autoredeploy": true, - [...] -} -``` - -## Enable autoredeploy to an already deployed service - -You can activate or deactivate **autoredeploy** on a service after it has been deployed. - -1. Click into the service detail page. -2. Click **Edit**. -3. Change the **autoredeploy** setting on the form to `true`. -4. Click **Save changes**. - - -### Use the CLI or API - -You can set the **autoredeploy** option after the service has been deployed, -using the CLI or API. - -Check our [API documentation](/apidocs/docker-cloud.md) for more information. - - -#### Enable autoredeploy using the CLI - -```bash -$ docker-cloud service set --autoredeploy (name or uuid) -``` - -### Enable autoredeploy using the API - -``` -PATCH /api/app/v1/service/(uuid)/ HTTP/1.1 -{ - "autoredeploy": true -} -``` diff --git a/docker-cloud/apps/autorestart.md b/docker-cloud/apps/autorestart.md deleted file mode 100644 index fe04542daf..0000000000 --- a/docker-cloud/apps/autorestart.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -description: Automatically restart a container in Docker Cloud -keywords: container, restart, automated -redirect_from: -- /docker-cloud/feature-reference/autorestart/ -title: Restart a container automatically ---- - -**Autorestart** is a service-level setting that can automatically start your -containers if they stop or crash. You can use this setting as an automatic crash -recovery mechanism. - -Autorestart uses Docker's `--autorestart` flag. 
When called, the Docker daemon -attempts to restart the container until it succeeds. If the first restart -attempts fail, the daemon continues to attempt a restart, but uses an -incremental back-off algorithm. - -The following Autorestart options are available: - -- `OFF`: the container does not restart, regardless of the exit code. -- `ON_FAILURE`: the container restarts *only* if it stops with an exit code other than 0. (0 is for normal shutdown.) -- `ALWAYS`: the container restarts automatically, regardless of the exit code. - -> **Note**: If you are using **Autorestart** set to `ALWAYS`, **Autodestroy** must be set to `OFF`. - -If the Docker daemon in a node restarts (because it was upgraded, or because the -underlying node was restarted), the daemon only restarts containers that -have **Autorestart** set to `ALWAYS`. - -## Launching a Service with Autorestart - -You can enable **Autorestart** on the **Service configuration** step of the **Launch new service wizard**. - -![](images/autorestart.png) - -Autorestart is set to `OFF` by default, which means that autorestart is *deactivated*. - -### Using the API and CLI - -You can set the **Autorestart** option when launching a service through the -API and through the CLI. Autorestart is set to `OFF` by default.  - -#### Set autorestart using the API - -``` -POST /api/app/v1/service/ HTTP/1.1 -{ - "autorestart": "ON_FAILURE", - [...] -} -``` - -#### Set autorestart using the CLI - -``` -$ docker-cloud service run --autorestart ON_FAILURE [...] -``` - -See our [API documentation](/apidocs/docker-cloud.md) for more information. - -## Enabling autorestart on an already deployed service - -You can activate or deactivate **Autorestart** on a service after it has been deployed by editing the service. - -1. Go to the service detail page. -2. Click **Edit**. -3. Choose the autorestart option to apply. -4. Click **Save**. 
- -### Using the API and CLI - -You can change the **Autorestart** setting after the service has been deployed using the API or CLI. - -#### Enable autorestart using the API -``` -PATCH /api/app/v1/service/(uuid)/ HTTP/1.1 -{ - "autorestart": "ALWAYS", -} -``` - -#### Enable autorestart using the CLI - -``` -$ docker-cloud service set --autorestart ALWAYS (name or uuid) -``` - -See the [API documentation](/apidocs/docker-cloud.md) for more information. diff --git a/docker-cloud/apps/deploy-tags.md b/docker-cloud/apps/deploy-tags.md deleted file mode 100644 index 9948923f41..0000000000 --- a/docker-cloud/apps/deploy-tags.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -description: Deployment tags -keywords: Deployment, tags, services -redirect_from: -- /docker-cloud/feature-reference/deploy-tags/ -title: Deployment tags ---- - -You can use **Deployment tags** to make sure certain services are deployed only -to specific nodes. Tagged services only deploy to nodes that match **all** of -the tags on that service. Docker Cloud shows an error if no nodes match all of -the service's deployment tags. A node might have extra tags that are not -specified on the service, but these do not prevent the service from deploying. - -You can specify multiple tags on services, on individual nodes, and on node clusters. All nodes that are members of a node cluster inherit the tags specified on the cluster. See [Automatic deployment tags](deploy-tags.md#automatic-deployment-tags) to learn more. - -#### Deployment tags example - -In this example, we have five nodes. One is used for development and testing, and four are used for production. The production nodes are distributed between frontend and backend. 
The table below summarizes their names and tags: - -| Node name | Tags | -| --------- | ---- | -| my-node-dev | `aws` `us-east-1` `development` `test` `frontend` `backend`| -| my-node-prod-1 | `aws` `us-east-1` `production` `frontend` | -| my-node-prod-2 | `aws` `us-east-2` `production` `frontend` | -| my-node-prod-3 | `aws` `us-east-1` `production` `backend` | -| my-node-prod-4 | `aws` `us-east-2` `production` `backend` | - -Imagine that you deploy a service called **my-webapp-dev** with two tags: -`development` and `frontend`. All containers for the service would be deployed -to the node labeled **my-node-dev**, because the node is tagged with both -`development` *and* `frontend`. - -Similarly, if you deploy a production service called **my-webapp-prod** with the -two tags `production` and `frontend`, all containers for that service -would be deployed to the two nodes **my-node-prod-1** and **my-node-prod-2** -because those two nodes are tagged with both `production` *and* `frontend`. - -> **Tip**: Containers are distributed between the two nodes based on the -[deployment strategy](../infrastructure/deployment-strategies.md) selected. - -## Automatic deployment tags - -When you launch a node cluster, four tags are automatically assigned to the -node cluster and all nodes in that cluster: - -* Provider name (for example `digitalocean`, `aws`) -* "[Bring your own node](../infrastructure/byoh.md)" (BYON) status (for example `byon=false` or `byon=true`) -* Region name (for example `us-east-1`, `lon1`) -* Node cluster name (for example `my-node-cluster-dev-1`) - -## Add tags to a node or node cluster at launch - -A single node is considered a node cluster with a size of 1. Because of this, you create a node cluster even if you are only launching a single node. - -1. Click **Node clusters** in the left navigation menu. -2. Click **Create**. -3. In the **Deploy tags** field, enter the tags to assign to the cluster and all -of its member nodes. 
- - ![](images/nodecluster-wizard-tags.png) - - When the node cluster scales up, new nodes automatically inherit the - node cluster's tags, including the [Automatic deployment tags](deploy-tags.md#automatic-deployment-tags) described above. - - You can see a node cluster's tags on the left side of the cluster's detail page. - -4. Click **Launch node cluster**. - -### Update or add tags on a node or node cluster - -To change the tags on an existing node or node cluster: - -1. Go to the node or node cluster's detail page. -2. Click the tags below the node or node cluster status line to edit them. - - ![](images/node-detail-tags.png) - - If there are no tags assigned to the cluster, move your cursor under the deployment status line and click the tag icon that appears. - -3. In the dialog that appears, add or remove tags. - - The individual nodes in a cluster inherit all tags from the cluster, including automatic tags. Each individual node can have extra tags in addition to the tags it inherits as a member of a node cluster. - -4. Click **Save** to save your tag changes to the nodes. - -## Add tags to a service at launch - -To deploy a service to a specific node using tags, you must first specify one or more tags on the service. If you don't add any tags to a service, the service is deployed to all available nodes. - -1. Use the **Create new service** wizard to start a new service. - - ![](images/service-wizard-tags.png) - -2. Select tags from the **deployment constraints** list to add to this service. Only tags that already exist on your nodes appear in the list. - - Tags in a service define which nodes are used on deployment: only nodes that match *all* tags specified in the service are used for deployment. - -### Update or add tags to a service - -You can add or remove tags on a running service from the service's detail view. - -1. From the service detail view, click **Edit**. -2. Select tags from the **deployment constraints** list to add to this service. 
Only tags that already exist on your nodes appear in the list. - - ![](images/service-wizard-tags.png) - -3. Click **Save Changes**. - -**If you update the tags on a service, you must redeploy the service for them to take effect.** To do this you can terminate all containers and relaunch them, or you can scale -your service down to zero nodes and then scale it back up. New containers are -deployed to the nodes that match the new tags. - -## Using deployment tags in the API and CLI - -See the [tags API and CLI documentation](/apidocs/docker-cloud.md#tags) for more information on how to use tags with our API and CLI. diff --git a/docker-cloud/apps/deploy-to-cloud-btn.md b/docker-cloud/apps/deploy-to-cloud-btn.md deleted file mode 100644 index edc8c2df74..0000000000 --- a/docker-cloud/apps/deploy-to-cloud-btn.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -description: Deploy to Docker Cloud -keywords: deploy, docker, cloud -redirect_from: -- /docker-cloud/feature-reference/deploy-to-cloud/ -- /docker-cloud/tutorials/deploy-to-cloud/ -title: Add a "Deploy to Docker Cloud" button ---- - -The **Deploy to Docker Cloud** button allows developers to deploy stacks with -one click in Docker Cloud as long as they are logged in. The button is intended -to be added to `README.md` files in public GitHub repositories, although it can -be used anywhere else. - -> **Note**: You must be _logged in_ to Docker Cloud for the button to work -> Otherwise, the link results in a 404 error. 
- -This is an example button to deploy our [python quickstart](https://github.com/docker/dockercloud-quickstart-python){: target="_blank" class="_"}: - - - -The button redirects the user to the **Launch new Stack** wizard, with the stack -definition already filled with the contents of any of the following files (which -are fetched in the order shown) from the repository (taking into account branch -and relative path): - -* `docker-cloud.yml` -* `docker-compose.yml` -* `fig.yml` - -The user can still modify the stack definition before deployment. - -## Add the 'Deploy to Docker Cloud' button in GitHub - -You can simply add the following snippet to your `README.md` file: - -```md -[![Deploy to Docker Cloud](https://files.cloud.docker.com/images/deploy-to-dockercloud.svg)](https://cloud.docker.com/stack/deploy/) -``` - -Docker Cloud detects the HTTP referer header and deploy the stack file found in the repository, branch and relative path where the source `README.md` file is stored. - - -## Add the 'Deploy to Docker Cloud' button in Docker Hub - -If the button is displayed on the Docker Hub, Docker Cloud cannot automatically detect the source GitHub repository, branch and path. In this case, edit the repository description and add the following code: - -```md -[![Deploy to Docker Cloud](https://files.cloud.docker.com/images/deploy-to-dockercloud.svg)](https://cloud.docker.com/stack/deploy/?repo=) -``` - -where `` is the path to your GitHub repository (see below). - - -## Add the 'Deploy to Docker Cloud' button anywhere else - -If you want to use the button somewhere else, such as from external documentation or a landing site, you just need to create a link to the following URL: - -```html -https://cloud.docker.com/stack/deploy/?repo= -``` - -where `` is the path to your GitHub repository. 
For example: - -* `https://github.com/docker/dockercloud-quickstart-python` -* `https://github.com/docker/dockercloud-quickstart-python/tree/staging` to use branch `staging` instead of the default branch -* `https://github.com/docker/dockercloud-quickstart-python/tree/master/example` to use branch `master` and the relative path `/example` inside the repository - -You can use your own image for the link (or no image). Our **Deploy to Docker Cloud** image is available at the following URL: - -* `https://files.cloud.docker.com/images/deploy-to-dockercloud.svg` diff --git a/docker-cloud/apps/images/autodestroy.png b/docker-cloud/apps/images/autodestroy.png deleted file mode 100644 index b4593df596..0000000000 Binary files a/docker-cloud/apps/images/autodestroy.png and /dev/null differ diff --git a/docker-cloud/apps/images/autorestart.png b/docker-cloud/apps/images/autorestart.png deleted file mode 100644 index 723c88ee34..0000000000 Binary files a/docker-cloud/apps/images/autorestart.png and /dev/null differ diff --git a/docker-cloud/apps/images/data-volumes-wizard.png b/docker-cloud/apps/images/data-volumes-wizard.png deleted file mode 100644 index aefdb35d21..0000000000 Binary files a/docker-cloud/apps/images/data-volumes-wizard.png and /dev/null differ diff --git a/docker-cloud/apps/images/exposing-port.png b/docker-cloud/apps/images/exposing-port.png deleted file mode 100644 index e1adc50157..0000000000 Binary files a/docker-cloud/apps/images/exposing-port.png and /dev/null differ diff --git a/docker-cloud/apps/images/host-volumes-wizard.png b/docker-cloud/apps/images/host-volumes-wizard.png deleted file mode 100644 index 3ea0a15917..0000000000 Binary files a/docker-cloud/apps/images/host-volumes-wizard.png and /dev/null differ diff --git a/docker-cloud/apps/images/lbd-containers-start.png b/docker-cloud/apps/images/lbd-containers-start.png deleted file mode 100644 index f5a8a59401..0000000000 Binary files a/docker-cloud/apps/images/lbd-containers-start.png and 
/dev/null differ diff --git a/docker-cloud/apps/images/lbd-endpoints.png b/docker-cloud/apps/images/lbd-endpoints.png deleted file mode 100644 index b516687fc6..0000000000 Binary files a/docker-cloud/apps/images/lbd-endpoints.png and /dev/null differ diff --git a/docker-cloud/apps/images/lbd-four-nodes.png b/docker-cloud/apps/images/lbd-four-nodes.png deleted file mode 100644 index 67e59152e6..0000000000 Binary files a/docker-cloud/apps/images/lbd-four-nodes.png and /dev/null differ diff --git a/docker-cloud/apps/images/lbd-hello-world-jumpstart.png b/docker-cloud/apps/images/lbd-hello-world-jumpstart.png deleted file mode 100644 index b32b09d354..0000000000 Binary files a/docker-cloud/apps/images/lbd-hello-world-jumpstart.png and /dev/null differ diff --git a/docker-cloud/apps/images/lbd-hostname-1.png b/docker-cloud/apps/images/lbd-hostname-1.png deleted file mode 100644 index 6a9db436bf..0000000000 Binary files a/docker-cloud/apps/images/lbd-hostname-1.png and /dev/null differ diff --git a/docker-cloud/apps/images/lbd-lb-conf.png b/docker-cloud/apps/images/lbd-lb-conf.png deleted file mode 100644 index 0dd024f781..0000000000 Binary files a/docker-cloud/apps/images/lbd-lb-conf.png and /dev/null differ diff --git a/docker-cloud/apps/images/lbd-lb-endpoint.png b/docker-cloud/apps/images/lbd-lb-endpoint.png deleted file mode 100644 index 1a043e2713..0000000000 Binary files a/docker-cloud/apps/images/lbd-lb-endpoint.png and /dev/null differ diff --git a/docker-cloud/apps/images/lbd-lb-envvar.png b/docker-cloud/apps/images/lbd-lb-envvar.png deleted file mode 100644 index 08ddb795a0..0000000000 Binary files a/docker-cloud/apps/images/lbd-lb-envvar.png and /dev/null differ diff --git a/docker-cloud/apps/images/lbd-lb-ports.png b/docker-cloud/apps/images/lbd-lb-ports.png deleted file mode 100644 index 623b34e5e1..0000000000 Binary files a/docker-cloud/apps/images/lbd-lb-ports.png and /dev/null differ diff --git a/docker-cloud/apps/images/lbd-node-wizard.png 
b/docker-cloud/apps/images/lbd-node-wizard.png deleted file mode 100644 index 7a4ee34071..0000000000 Binary files a/docker-cloud/apps/images/lbd-node-wizard.png and /dev/null differ diff --git a/docker-cloud/apps/images/lbd-reload.gif b/docker-cloud/apps/images/lbd-reload.gif deleted file mode 100644 index f2c87c467d..0000000000 Binary files a/docker-cloud/apps/images/lbd-reload.gif and /dev/null differ diff --git a/docker-cloud/apps/images/lbd-web-conf.png b/docker-cloud/apps/images/lbd-web-conf.png deleted file mode 100644 index 004959ed5a..0000000000 Binary files a/docker-cloud/apps/images/lbd-web-conf.png and /dev/null differ diff --git a/docker-cloud/apps/images/new-trigger-created.png b/docker-cloud/apps/images/new-trigger-created.png deleted file mode 100644 index 4fba3c9a9c..0000000000 Binary files a/docker-cloud/apps/images/new-trigger-created.png and /dev/null differ diff --git a/docker-cloud/apps/images/node-detail-tags.png b/docker-cloud/apps/images/node-detail-tags.png deleted file mode 100644 index 58c8b397d1..0000000000 Binary files a/docker-cloud/apps/images/node-detail-tags.png and /dev/null differ diff --git a/docker-cloud/apps/images/nodecluster-wizard-tags.png b/docker-cloud/apps/images/nodecluster-wizard-tags.png deleted file mode 100644 index fa164c19a1..0000000000 Binary files a/docker-cloud/apps/images/nodecluster-wizard-tags.png and /dev/null differ diff --git a/docker-cloud/apps/images/ports-published.png b/docker-cloud/apps/images/ports-published.png deleted file mode 100644 index 531ba0d51f..0000000000 Binary files a/docker-cloud/apps/images/ports-published.png and /dev/null differ diff --git a/docker-cloud/apps/images/publishing-port.png b/docker-cloud/apps/images/publishing-port.png deleted file mode 100644 index 40f6620c5a..0000000000 Binary files a/docker-cloud/apps/images/publishing-port.png and /dev/null differ diff --git a/docker-cloud/apps/images/redeploy-service.png b/docker-cloud/apps/images/redeploy-service.png deleted file 
mode 100644 index 75645aae09..0000000000 Binary files a/docker-cloud/apps/images/redeploy-service.png and /dev/null differ diff --git a/docker-cloud/apps/images/revoke-trigger.png b/docker-cloud/apps/images/revoke-trigger.png deleted file mode 100644 index 42808caaf1..0000000000 Binary files a/docker-cloud/apps/images/revoke-trigger.png and /dev/null differ diff --git a/docker-cloud/apps/images/service-before-scaling.png b/docker-cloud/apps/images/service-before-scaling.png deleted file mode 100644 index ccb8e4568a..0000000000 Binary files a/docker-cloud/apps/images/service-before-scaling.png and /dev/null differ diff --git a/docker-cloud/apps/images/service-during-scaling.png b/docker-cloud/apps/images/service-during-scaling.png deleted file mode 100644 index 1d0b722b2a..0000000000 Binary files a/docker-cloud/apps/images/service-during-scaling.png and /dev/null differ diff --git a/docker-cloud/apps/images/service-links-diagram.png b/docker-cloud/apps/images/service-links-diagram.png deleted file mode 100644 index 8a7ec7acf7..0000000000 Binary files a/docker-cloud/apps/images/service-links-diagram.png and /dev/null differ diff --git a/docker-cloud/apps/images/service-wizard-autoredeploy.png b/docker-cloud/apps/images/service-wizard-autoredeploy.png deleted file mode 100644 index 36622f094a..0000000000 Binary files a/docker-cloud/apps/images/service-wizard-autoredeploy.png and /dev/null differ diff --git a/docker-cloud/apps/images/service-wizard-scale.png b/docker-cloud/apps/images/service-wizard-scale.png deleted file mode 100644 index f38fa245b3..0000000000 Binary files a/docker-cloud/apps/images/service-wizard-scale.png and /dev/null differ diff --git a/docker-cloud/apps/images/service-wizard-sequential-deployment.png b/docker-cloud/apps/images/service-wizard-sequential-deployment.png deleted file mode 100644 index 07e91758aa..0000000000 Binary files a/docker-cloud/apps/images/service-wizard-sequential-deployment.png and /dev/null differ diff --git 
a/docker-cloud/apps/images/service-wizard-tags.png b/docker-cloud/apps/images/service-wizard-tags.png deleted file mode 100644 index f77bc8dd53..0000000000 Binary files a/docker-cloud/apps/images/service-wizard-tags.png and /dev/null differ diff --git a/docker-cloud/apps/images/stack-create.png b/docker-cloud/apps/images/stack-create.png deleted file mode 100644 index fe7a57d946..0000000000 Binary files a/docker-cloud/apps/images/stack-create.png and /dev/null differ diff --git a/docker-cloud/apps/images/stack-edit.png b/docker-cloud/apps/images/stack-edit.png deleted file mode 100644 index e13d67d463..0000000000 Binary files a/docker-cloud/apps/images/stack-edit.png and /dev/null differ diff --git a/docker-cloud/apps/images/triggers-tab-blank.png b/docker-cloud/apps/images/triggers-tab-blank.png deleted file mode 100644 index ef02849e14..0000000000 Binary files a/docker-cloud/apps/images/triggers-tab-blank.png and /dev/null differ diff --git a/docker-cloud/apps/images/video-auto-redeploy-docker-cloud.png b/docker-cloud/apps/images/video-auto-redeploy-docker-cloud.png deleted file mode 100644 index 9c53e8b845..0000000000 Binary files a/docker-cloud/apps/images/video-auto-redeploy-docker-cloud.png and /dev/null differ diff --git a/docker-cloud/apps/images/volumes-from-wizard.png b/docker-cloud/apps/images/volumes-from-wizard.png deleted file mode 100644 index c3557427b1..0000000000 Binary files a/docker-cloud/apps/images/volumes-from-wizard.png and /dev/null differ diff --git a/docker-cloud/apps/index.md b/docker-cloud/apps/index.md deleted file mode 100644 index 7b131a251c..0000000000 --- a/docker-cloud/apps/index.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -description: Manage your Docker Cloud Applications -keywords: applications, reference, Cloud -title: Applications in Docker Cloud -notoc: true ---- - -Applications in Docker Cloud are usually several Services linked together using -the specifications from a [Stackfile](stacks.md) or a Compose file. 
You can also -create individual services using the Docker Cloud Services wizard, and you can -attach [Volumes](volumes.md) to use as long-lived storage for your services. - -If you are using Docker Cloud's autobuild and autotest features, you can also -use [autoredeploy](auto-redeploy.md) to automatically redeploy the application -each time its underlying services are updated. - -* [Deployment tags](deploy-tags.md) -* [Add a Deploy to Docker Cloud button](deploy-to-cloud-btn.md) -* [Manage service stacks](stacks.md) - * [Stack YAML reference](stack-yaml-reference.md) -* [Publish and expose service or container ports](ports.md) -* [Redeploy running services](service-redeploy.md) -* [Scale your service](service-scaling.md) -* [Service API Roles](api-roles.md) -* [Service discovery and links](service-links.md) -* [Work with data volumes](volumes.md) -* [Create a proxy or load balancer](load-balance-hello-world.md) - -### Automate your applications - -Use the following features to automate specific actions on your Docker Cloud applications. - -* [Automatic container destroy](auto-destroy.md) -* [Automatic container restart](autorestart.md) -* [Autoredeploy](auto-redeploy.md) -* [Use triggers](triggers.md) diff --git a/docker-cloud/apps/load-balance-hello-world.md b/docker-cloud/apps/load-balance-hello-world.md deleted file mode 100644 index 3dbaa05a7f..0000000000 --- a/docker-cloud/apps/load-balance-hello-world.md +++ /dev/null @@ -1,199 +0,0 @@ ---- -description: Create a proxy or load balancer -keywords: proxy, load, balancer -redirect_from: -- /docker-cloud/getting-started/intermediate/load-balance-hello-world/ -- /docker-cloud/tutorials/load-balance-hello-world/ -title: Create a proxy or load balancer ---- - -When you deploy a web service to multiple containers, you might want to load -balance between the containers using a proxy or load balancer. 
- -In this tutorial, you use the **dockercloud/hello-world** image as a sample -web service and **dockercloud/haproxy** to load balance traffic to the service. -If you follow this tutorial exactly, your traffic is distributed evenly -between eight containers in a node cluster containing four nodes. - -## Create a Node Cluster - -First, deploy a node cluster of four nodes. - -1. If you have not linked to a host or cloud services provider, do that now. - - You can find instructions on how to link to your own hosts, or to different providers [here](../infrastructure/index.md). - -2. Click **Node Clusters** in the left-hand navigation menu. - -3. Click **Create**. - -4. Enter a name for the node cluster, select the **Provider**, **Region**, and **Type/Size**. - -5. Add a **deployment tag** of `web`. (This is used to make sure the right services are deployed to the correct nodes.) - -5. Drag or increment the **Number of nodes** slider to **4**. - - ![](images/lbd-node-wizard.png) - -4. Click **Launch node cluster**. - - This might take up to 10 minutes while the nodes are provisioned. This a great time to grab a cup of coffee. - -Once the node cluster is deployed and all four nodes are running, we're -ready to continue and launch our web service. - -![](images/lbd-four-nodes.png) - -## Launch the web service - -1. Click **Services** in the left hand menu, and click **Create**. - -3. Click the **rocket icon** at the top of page, and select the **dockercloud/hello-world** image. - - ![](images/lbd-hello-world-jumpstart.png) - -4. On the **Service configuration** screen, configure the service using these values: - - * **image**: Set the tag to `latest` so you get the most recent build of the image. - * **service name**: `web`. This is what we call the service internally. - * **number of containers**: 8 - * **deployment strategy**: `high availability`. Deploy evenly to all nodes. - * **deployment constraints**: `web`. Deploy only to nodes with this tag. 
- - > **Note**: For this tutorial, make sure you change the *deployment strategy* to **High Availability**, and add the *tag* **web** to ensure this service is deployed to the right nodes. - - ![](images/lbd-web-conf.png) - -5. Last, scroll down to the **Ports** section and make sure the **published** box is checked next to port 80. - - We're going to access these containers from the public internet, and - publishing the port makes them available externally. Make sure you leave the - `node port` field unset so that it stays dynamic. - -6. Click **Create and deploy**. - - Docker Cloud switches to the **Service detail** view after you create the - service. - -7. Scroll up to the **Containers** section to see the containers as they deploy. - - The icons for each container change color to indicate what phase of deployment they're in. Once all containers are green (successfully started), continue to the next step. - -![](images/lbd-containers-start.png) - -## Test the web service - -1. Once your containers are all green (running), scroll down to the - **Endpoints** section. - - A list shows all the endpoints available for this service on the public internet. - - ![Available endpoints](images/lbd-endpoints.png) - -2. Click an endpoint URL (it should look something like - `http://web-1.username.cont.dockerapp.io:49154`) to open a new tab in your - browser and view the **dockercloud/hello-world** web page. Note the hostname - for the page that loads. - - ![Endpoint URL details](images/lbd-hostname-1.png) - -3. Click other endpoints and check the hostnames. You see different hostnames - which match the container name (web-2, web-3, and so on). - -## Launch the load balancer - -We verified that the web service is working, so now we can set up the load balancer. - -1. Click **Services** in the left navigation bar, and click **Create** again. 
- - This time we launch a load balancer that listens on port 80 and balances the traffic across the 8 containers that are running the `web` service.  - -3. Click the **rocket icon** if necessary and find the **Proxies** section. - -4. Click the **dockercloud/haproxy** image. - -5. On the next screen, set the **service name** to `lb`. - - Leave the tag, deployment strategy, and number of containers at their default values. - - ![](images/lbd-lb-conf.png) - -6. Locate the **API Roles** field at end of the **General settings** section. - -7. Set the **API Role** to `Full access`. - - When you assign the service an API role, it passes a `DOCKERCLOUD_AUTH` - environment variable to the service's containers, which allows them to query - Docker Cloud's API on your behalf. You can [read more about API Roles here](../apps/api-roles.md). - - The **dockercloud/haproxy** image uses the API to check how many containers - are in the `web` service we launched earlier. **HAproxy** then uses this - information to update its configuration dynamically as the web service - scales.  - -8. Next, scroll down to the **Ports** section. - -9. Click the **Published** checkbox next to the container port 80. - -10. Click the word *dynamic* next to port 80, and enter 80 to set the published -port to also use port 80.  - - ![](images/lbd-lb-ports.png) - -11. Scroll down to the **Links** section. - -12. Select `web` from the drop down list, and click the blue **plus sign** to -add the link. - - This links the load balancing service `lb` with the web service `web`. The - link appears in the table in the Links section. - - ![Links section](images/lbd-lb-envvar.png) - - A new set of `WEB` environment variables appears in the service we're about - to launch. You can read more about - service link environment variables [here](../apps/service-links.md). - -13. Click **Create and deploy** and confirm that the service launches. - -## Test the load-balanced web service - -1. 
On the load balancer service detail page, scroll down to the **endpoints** - section. - - Unlike on the web service, this time the HTTP URL for the load balancer is - mapped to port 80.  - - ![Load balancer mapped to port 80](images/lbd-lb-endpoint.png) - -2. Click the endpoint URL to open it in a new tab. - - The same hello-world webpage you saw earlier is shown. Make note of the - hostname. - -3. Refresh the web page. - - With each refresh, the hostname changes as the requests are load-balanced to - different containers.  - - ![Changing hostname](images/lbd-reload.gif) - - Each container in the web service has a different hostname, which - appears in the webpage as `container_name-#`. When you refresh the - page, the load balancer routes the request to a new host and the displayed hostname changes. - - > **Tip**: If you don't see the hostname change, clear your browser's cache - or load the page from a different web browser.  - -Congratulations! You just deployed a load balanced web service using Docker -Cloud! - -## Further reading: load balancing the load balancer - -What if you had so many `web` containers that you needed more than one `lb` -container? - -Docker Cloud automatically assigns a DNS endpoint to all services. This endpoint -routes to all of the containers of that service. You can use the DNS endpoint to -load balance your load balancer. To learn more, read up on [service -links](service-links.md). 
diff --git a/docker-cloud/apps/ports.md b/docker-cloud/apps/ports.md deleted file mode 100644 index 0e35a3cbb7..0000000000 --- a/docker-cloud/apps/ports.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -description: Publish and expose service or container ports -keywords: publish, expose, ports, containers, services -redirect_from: -- /docker-cloud/feature-reference/ports/ -title: Publish and expose service or container ports ---- - -In Docker Cloud you can **publish** or **expose** ports in services and -containers, just like you can in Docker Engine (as documented -[here](/engine/reference/run.md#expose-incoming-ports)). - -* **Exposed ports** are ports that a container or service is using either to -provide a service, or listen on. By default, exposed ports in Docker Cloud are -only privately accessible. This means only other services that are linked to -the service which is exposing the ports can communicate over the -exposed port. - - *Exposed ports* cannot be accessed publicly over the internet. - -* **Published ports** are exposed ports that are accessible publicly over the internet. Published ports are published to the public-facing network interface in which the container is running on the node (host). - - *Published ports* **can** be accessed publicly over the internet. - -## Launch a Service with an exposed port - -If the image that you are using for your service already exposes any ports, these appear in Docker Cloud in the **Launch new service** wizard. - -1. From the **Launch new service** wizard, select the image to use. -2. Scroll down to the **Ports** section. - - ![](images/exposing-port.png) - -The image in this example screenshot *exposes* port 80. Remember, this means -that the port is only accessible to other services that link this service. It -is not accessible publicly over the internet. - -You can expose more ports from this screen by clicking **Add Port**. 
- -### Using the API/CLI - -See the API and CLI documentation [here](/apidocs/docker-cloud.md#service) for -information on how to launch a service with an exposed port. - -## Launch a Service with a published port - -If the image that you are using for your service already exposes any ports, -these appear in Docker Cloud in the **Launch new service** wizard. You can -choose to publish and map them from the wizard. - -1. From the **Launch new service** wizard, select the image to use. -2. Scroll down to the **Ports** section. - This section displays any ports configured in the image. -4. Click the **Published** checkbox. -5. Optionally, choose which port on the node where you want to make the exposed port available. - - By default, Docker Cloud assigns a published port dynamically. You can also - choose a specific port. For example, you might choose to take a port that is - exposed internally on port 80, and publish it externally on port 8080. - ![](images/publishing-port.png) - -To access the published port over the internet, connect to the port you -specified in the "Node port" section. If you used the default **dynamic** -option, find the published port on the service detail page. - -### Using the API/CLI - -See the API and CLI documentation [here](/apidocs/docker-cloud.md#service) on -how to launch a service with a published port. - - -## Check which ports a service has published - -The **Endpoints** section in the Service view lists the published ports for a service. Ports that are exposed internally are not listed in this section but can be viewed by editing the service configuration. - -* The **Service endpoints** list shows the endpoints that automatically round-robin route to the containers in a service. -* The **Container endpoints** list shows the endpoints for each individual container. Click the blue "link" icon to open the endpoint URL in a new tab. 
- - - -![](images/ports-published.png) - -### Using the API/CLI - -See the API and CLI documentation [here](/apidocs/docker-cloud.md#service) to learn how to list a service's exposed and published ports. - -## Service and container DNS endpoints - -The short word before `dockerapp.io` in an endpoint URL tells you what type of endpoint it is. The three available types are: - -* `node` routes to a specific node or host -* `svc` routes round-robin style to the containers of a service -* `cont` routes to a specific container within a service regardless of which host the container is deployed on - -For example, you might see an endpoint such as `web.quickstart-python.0a0b0c0d.svc.dockerapp.io`. You would know that this is a `service` endpoint, for reaching the `web` service in the `quickstart-python` stack. - -### Container endpoints - -Each container that has one or more published ports is automatically assigned a -DNS endpoint in the format -`container-name[.stack-name].shortuuid.cont.dockerapp.io`. This DNS endpoint -(single A record) resolves to the public IP of the node where the container is -running. If the container is redeployed into another node, the DNS updates -automatically and resolves to the new node or host. - -You can see a list of container endpoints on the stack, service or container -detail views, in the **Endpoints** tab. - -### Service endpoints - -Each service that has at least one port published with a fixed (not dynamic) -host port is assigned a DNS endpoint in the format -`service-name[.stack-name].shortuuid.svc.dockerapp.io`. This DNS endpoint -(multiple A record) resolves to the IPs of the nodes where the containers are -running, in a [round-robin -fashion](https://en.wikipedia.org/wiki/Round-robin_DNS). - -You can see a list of service endpoints on the stack and service detail views, under the **Endpoints** tab. 
diff --git a/docker-cloud/apps/service-links.md b/docker-cloud/apps/service-links.md deleted file mode 100644 index 1f4ad3a1da..0000000000 --- a/docker-cloud/apps/service-links.md +++ /dev/null @@ -1,257 +0,0 @@ ---- -description: Service discovery -keywords: service, discover, links -redirect_from: -- /docker-cloud/feature-reference/service-links/ -title: Service discovery and links ---- - -Docker Cloud creates a per-user overlay network which connects all containers -across all of the user's hosts. This network connects all of your containers on -the `10.7.0.0/16` subnet, and gives every container a local IP. This IP persists -on each container even if the container is redeployed and ends up on a different -host. Every container can reach any other container on any port within the -subnet. - -Docker Cloud gives your containers two ways find other services: - -* Using service and container names directly as **hostnames** - -* Using **service links**, which are based on [Docker Compose links](/compose/compose-file/#links) - -**Service and Container Hostnames** update automatically when a service scales -up or down or redeploys. As a user, you can configure service names, and Docker -Cloud uses these names to find the IP of the services and containers for you. -You can use hostnames in your code to provide abstraction that allows you to -easily swap service containers or components. - -**Service links** create environment variables which allow containers to -communicate with each other within a stack, or with other services outside of a -stack. You can specify service links explicitly when you create a new service -or edit an existing one, or specify them in the stackfile for a service stack. - -### Hostnames vs service links - -When a service is scaled up, a new hostname is created and automatically -resolves to the new IP of the container, and the parent service hostname record -also updates to include the new container's IP. 
However, new service link -environment variables are not created, and existing ones are not removed, when a -service scales up or down. - -## Using service and container names as hostnames - -You can use hostnames to connect any container in your Docker Cloud account to -any other container on your account without having to create service links or -manage environment variables. This is the recommended service discovery method. - -Hostnames always resolve to the correct IP for the service or container, -and update as the service scales up, scales down, or redeploys. The Docker -Cloud automatic DNS service resolves the service name to the correct IP on the -overlay network, even if the container has moved or is now on a different host. - -### Discovering containers on the same service or stack - -A container can always discover other containers on the same stack using just -the **container name** as hostname. This includes containers of the same -service. Similarly, a container can always discover other services on the same -stack using the **service name**. - -For example, a container `webapp-1` in the service `webapp` can connect to the -container `db-1` in the service `db` by using `db-1` as the hostname. It can -also connect to a peer container, `webapp-2`, by using `webapp-2` as the -hostname. - -A container `proxy-1` on the same stack could discover all `webapp` containers -by using the **service name** `webapp` as hostname. Connecting to the service -name resolves as an `A` -[round-robin](http://en.wikipedia.org/wiki/Round-robin_DNS) record, listing all -IPs of all containers on the service `webapp`. - -### Discovering services or containers on another stack - -To find a service or a container on another stack, append `.` to the -service or container name. For example, if `webapp-1` on the stack `production` -needs to access container `db-1` on the stack `common`, it could use the -hostname `db-1.common` which Docker Cloud resolves to the appropriate IP. 
- -### Discovering services or containers not included in a stack - -To find a container or service that is not included in a stack, use the service -or container name as the hostname. - -If the container making the query is part of a stack, and there is a local match -on the same stack, the local match takes precedence over the service or -container that is outside the stack. - -> **Tip**: To work around this, you can rename the local match so that it has a -more specific name. You might also put the external service or container in a -dedicated stack so that you can specify the stack name as part of the namespace. - -## Using service links for service discovery - -Docker Cloud's service linking is modeled on [Docker Compose -links](/compose/compose-file/#links) to provide a basic service discovery -functionality using directional links recorded in environment variables. - -When you link a "client" service to a "server" service, Docker Cloud performs -the following actions on the "client" service: - -1. Creates a group of environment variables that contain information about the exposed ports of the "server" service, including its IP address, port, and protocol. - -2. Copies all of the "server" service environment variables to the "client" service with an `HOSTNAME_ENV_` prefix. - -3. Adds a DNS hostname to the Docker Cloud DNS service that resolves to the "server" service IP address. - -Some environment variables such as the API endpoint are updated when a service -scales up or down. Service links are only updated when a service is deployed or -redeployed, but are not updated during runtime. No new service link environment -variables are created when a service scales up or down. - ->**Tip:** You can specify one of several [container distribution strategies](/docker-cloud/infrastructure/deployment-strategies.md) for -applications deployed to multiple nodes. 
These strategies enable automatic -deployments of containers to nodes, and sometimes auto-linking of containers. -If a service with -[EVERY_NODE](/docker-cloud/infrastructure/deployment-strategies.md#every-node) -strategy is linked to another service with EVERY_NODE strategy, containers are -linked one-to-one on each node. - -### Service link example - -For the explanation of service linking, consider the following application -diagram. - -![](images/service-links-diagram.png) - -Imagine that you are running a web service (`my-web-app`) with 2 containers -(`my-web-app-1` and `my-web-app-2`). You want to add a proxy service -(`my-proxy`) with one container (`my-proxy-1`) to balance HTTP traffic to -each of the containers in your `my-web-app` application, with a link name of -`web`. - -### Service link environment variables - -Several environment variables are set on each container at startup to provide -link details to other containers. The links created are directional. These are -similar to those used by Docker Compose. - -For our example app above, the following environment variables are set in the -proxy containers to provide service links. The example proxy application can use -these environment variables to configure itself on startup, and start balancing -traffic between the two containers of `my-web-app`. 
- -| Name | Value | -|:------------------------|:----------------------| -| WEB_1_PORT | `tcp://172.16.0.5:80` | -| WEB_1_PORT_80_TCP | `tcp://172.16.0.5:80` | -| WEB_1_PORT_80_TCP_ADDR | `172.16.0.5` | -| WEB_1_PORT_80_TCP_PORT | `80` | -| WEB_1_PORT_80_TCP_PROTO | `tcp` | -| WEB_2_PORT | `tcp://172.16.0.6:80` | -| WEB_2_PORT_80_TCP | `tcp://172.16.0.6:80` | -| WEB_2_PORT_80_TCP_ADDR | `172.16.0.6` | -| WEB_2_PORT_80_TCP_PORT | `80` | -| WEB_2_PORT_80_TCP_PROTO | `tcp` | - -To create these service links, you would specify the following in your stackfile: - -```yml -my-proxy: - links: - - my-web-app:web -``` - -This example snippet creates a directional link from `my-proxy` to `my-web-app`, and calls that link `web`. - -### DNS hostnames vs service links - -> **Note**: Hostnames are updated during runtime if the service scales up or down. Environment variables are only set or updated at deploy or redeploy. If your services scale up or down frequently, you should use hostnames rather than service links. - -In the example, the `my-proxy` containers can access the service links using following hostnames: - -| Hostname | Value | -|:---------|:--------------------------| -| `web` | `172.16.0.5 172.16.0.6` | -| `web-1` | `172.16.0.5` | -| `web-2` | `172.16.0.6` | - -The best way for the `my-proxy` service to connect to the `my-web-app` service -containers is using the hostnames, because they are updated during runtime if -`my-web-app` scales up or down. If `my-web-app` scales up, the new hostname -`web-3` automatically resolves to the new IP of the container, and the hostname -`web` is updated to include the new IP in its round-robin record. - -However, the service link environment variables are not added or updated until -the service is redeployed. If `my-web-app` scales up, no new service link -environment variables (such as `WEB_3_PORT`, `WEB_3_PORT_80_TCP`, etc) are added -to the "client" container. 
This means the client does not know how to contact -the new "server" container. - -### Service environment variables - -Environment variables specified in the service definition are instantiated in -each individual container. This ensures that each container has a copy of the -service's defined environment variables, and also allows other connecting -containers to read them. - -These environment variables are prefixed with the `HOSTNAME_ENV_` in each -container. - -In our example, if we launch our `my-web-app` service with an environment -variable of `WEBROOT=/login`, the following environment variables are set and -available in the proxy containers: - -| Name | Value | -|:------------------|:---------| -| WEB_1_ENV_WEBROOT | `/login` | -| WEB_2_ENV_WEBROOT | `/login` | - -In our example, this enables the "client" service (`my-proxy-1`) to read -configuration information such as usernames and passwords, or simple -configuration, from the "server" service containers (`my-web-app-1` and -`my-web-app-2`). - -#### Docker Cloud specific environment variables - -In addition to the standard Docker environment variables, Docker Cloud also sets -special environment variables that enable containers to self-configure. These -environment variables are updated on redeploy. 
- -In the example above, the following environment variables are available in the `my-proxy` containers: - -| Name | Value | -|:-------------------------------|:--------------------------------------------------------------------------------------| -| WEB_DOCKERCLOUD_API_URL | `https://cloud.docker.com/api/app/v1/service/3b5fbc69-151c-4f08-9164-a4ff988689ff/` | -| DOCKERCLOUD_SERVICE_API_URI | `/api/v1/service/651b58c47-479a-4108-b044-aaa274ef6455/` | -| DOCKERCLOUD_SERVICE_API_URL | `https://cloud.docker.com/api/app/v1/service/651b58c47-479a-4108-b044-aaa274ef6455/` | -| DOCKERCLOUD_CONTAINER_API_URI | `/api/v1/container/20ae2cff-44c0-4955-8fbe-ac5841d1286f/` | -| DOCKERCLOUD_CONTAINER_API_URL | `https://cloud.docker.com/api/app/v1/container/20ae2cff-44c0-4955-8fbe-ac5841d1286f/` | -| DOCKERCLOUD_NODE_API_URI | `/api/v1/node/d804d973-c8b8-4f5b-a0a0-558151ffcf02/` | -| DOCKERCLOUD_NODE_API_URL | `https://cloud.docker.com/api/infra/v1/node/d804d973-c8b8-4f5b-a0a0-558151ffcf02/` | -| DOCKERCLOUD_CONTAINER_FQDN | `my-proxy-1.20ae2cff.cont.dockerapp.io` | -| DOCKERCLOUD_CONTAINER_HOSTNAME | `my-proxy-1` | -| DOCKERCLOUD_SERVICE_FQDN | `my-proxy.651b58c47.svc.dockerapp.io` | -| DOCKERCLOUD_SERVICE_HOSTNAME | `my-proxy` | -| DOCKERCLOUD_NODE_FQDN | `d804d973-c8b8-4f5b-a0a0-558151ffcf02.node.dockerapp.io` | -| DOCKERCLOUD_NODE_HOSTNAME | `d804d973-c8b8-4f5b-a0a0-558151ffcf02` | - -Where: - -* `WEB_DOCKERCLOUD_API_URL` is the Docker Cloud API resource URL of the linked service. Because this is a link, the link name is the environment variable prefix. - -* `DOCKERCLOUD_SERVICE_API_URI` and `DOCKERCLOUD_SERVICE_API_URL` are the Docker Cloud API resource URI and URL of the service running in the container. - -* `DOCKERCLOUD_CONTAINER_API_URI` and `DOCKERCLOUD_CONTAINER_API_URL` are the Docker Cloud API resource URI and URL of the container itself. 
- -* `DOCKERCLOUD_NODE_API_URI` and `DOCKERCLOUD_NODE_API_URL` are the Docker Cloud API resource URI and URL of the node where the container is running. - -* `DOCKERCLOUD_CONTAINER_HOSTNAME` and `DOCKERCLOUD_CONTAINER_FQDN` are the external hostname and Fully Qualified Domain Name (FQDN) of the container itself. - -* `DOCKERCLOUD_SERVICE_HOSTNAME` and `DOCKERCLOUD_SERVICE_FQDN` are the external hostname and Fully Qualified Domain Name (FQDN) of the service to which the container belongs. - -* `DOCKERCLOUD_NODE_HOSTNAME` and `DOCKERCLOUD_NODE_FQDN` are the external hostname and Fully Qualified Domain Name (FQDN) of the node where the container is running. - -These environment variables are also copied to linked containers with the `NAME_ENV_` prefix. - -If you provide API access to your service, you can use the generated token -(stored in `DOCKERCLOUD_AUTH`) to access these API URLs to gather information or -automate operations, such as scaling. diff --git a/docker-cloud/apps/service-redeploy.md b/docker-cloud/apps/service-redeploy.md deleted file mode 100644 index a81ee78195..0000000000 --- a/docker-cloud/apps/service-redeploy.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -description: Redeploy running services -keywords: redeploy, running, services -redirect_from: -- /docker-cloud/feature-reference/service-redeploy/ -title: Redeploy a running service ---- - -You can **redeploy** services in Docker Cloud while they are running to -regenerate a service's containers. You might do this when a new version of the -image is pushed to the registry, or to apply changes that you made to -the service's settings. - -When you redeploy a service, Docker Cloud terminates the current service -containers. It then deploys new containers using the most recent service -definition, including service and deployment tags, deployment strategies, port -mappings, and so on. - -> **Note**: Your containers might be redeployed to different nodes during redeployment. 
- -#### Container hostnames - -*Container* **hostnames** change on redeployment, and if your service uses -**dynamic published ports**, new ports might be used on redeployment. - -Container hostnames appear in the following format: -`servicename-1.new-container-short-uuid.cont.dockerapp.io` - -However, containers keep their local IPs after redeployment, even if they end up -in different nodes. This means that linked services do not need to be -redeployed. To learn more, see [Service Links](service-links.md). - -#### Service hostnames - -*Service* hostnames remain the same after redeployment. Service hostnames are only -available for ports that are bound to a specific port on the host. They are -_not_ available if the port is dynamically allocated. - -Service hostnames appear in the following format: -`servicename.service-short-uuid.svc.dockerapp.io` - -#### Redeploy with volumes - -If your containers use volumes, the new containers can **reuse** the -existing volumes. If you chose to reuse the volumes, the containers redeploy to the same nodes to preserve their links to the volumes. - -> **Note**: When you redeploy services with reused volumes, your redeployment can fail if the service's deployment tags no longer allow it to be deployed on the node that the volume resides on. To learn more, see [Deployment Tags](deploy-tags.md). - -## Redeploy a service using the web interface - -1. Click **Services** in the left menu to view a list of services. -2. Click the checkbox to the left of the service or services you want to redeploy. -2. From the **Actions** menu at the top right, choose **Redeploy**. - ![](images/redeploy-service.png) -The service begins redeploying immediately. - - - -## Redeploy a service using the API and CLI - -See the Docker Cloud [API and CLI documentation](/apidocs/docker-cloud.md#redeploy-a-service) for more information -on using our API and CLI to redeploy services. 
- -## Autoredeploy on image push to Docker Hub - -If your service uses an image stored in Docker Hub or Docker Cloud, you can -enable **Autoredeploy** on the service. Autoredeploy triggers a redeployment -whenever a new image is pushed. See the [Autoredeploy documentation](auto-redeploy.md) to learn more. - -## Redeploy a service using webhooks - -You can also use **triggers** to redeploy a service, for example when its image -is pushed or rebuilt in a third-party registry. See the [Triggers documentation](triggers.md) to learn more. diff --git a/docker-cloud/apps/service-scaling.md b/docker-cloud/apps/service-scaling.md deleted file mode 100644 index 9754d3253d..0000000000 --- a/docker-cloud/apps/service-scaling.md +++ /dev/null @@ -1,157 +0,0 @@ ---- -description: Scale your service, spawn new containers -keywords: spawn, container, service, deploy -redirect_from: -- /docker-cloud/feature-reference/service-scaling/ -title: Scale your service ---- - -Docker Cloud makes it easy to spawn new containers of your service to handle -additional load. Two modes are available to allow you to scale services with -different configuration requirements. - -## Deployment and scaling modes - -Any service that handles additional load by increasing the number of containers -of the service is considered "horizontally scalable". - -There are two deployment modes when scaling a service: - -- **Parallel mode** (default): all containers of a service are - deployed at the same time without any links between them. This is - the fastest way to deploy, and is the default. - -- **Sequential mode**: each new container is deployed in the service one at a - time. Each container is linked to all previous containers using service - links. This makes complex configuration possible within the containers - startup logic. This mode is explained in detail in the following sections. - -## When should I use Parallel scaling? 
- - When the containers in a service work independently of each other and do not - need to coordinate between themselves, they can be scaled up in parallel mode. - -Examples include: - -- Stateless web servers and proxies -- “Worker” instances that process jobs from a queue -- “Cron”-style instances that execute periodic tasks - -The default scaling mode is parallel, so no additional configuration is -required to use this mode. - -## When should I use Sequential scaling? - -Some services require coordination between different containers to ensure that -the service functions correctly. Many databases, such as MySQL for example, -require that the containers know about each other at startup time so that -traffic can be routed to them appropriately. When this is the case, you should -use [sequential scaling](service-scaling.md#sequential-deployment-and-scaling). - -To allow peer-aware container startup, you can enable sequential scaling mode. See [Sequential Scaling](service-scaling.md#sequential-deployment-and-scaling) for more information. - -## Set the initial number of containers - -When you configure a service in Docker Cloud, you can specify an initial number of containers for the service before you launch. - -![](images/service-wizard-scale.png) - -Docker Cloud immediately launches as many containers as you specified. - -### Set the initial containers using the API - -You can specify the initial number of containers for a service when deploying it through the API: - -``` -POST /api/app/v1/service/ HTTP/1.1 -{ - "target_num_containers": 2, - [...] -} -``` - -If you don’t specify the number of containers to deploy, this command defaults to `1`. See the [API documentation](/apidocs/docker-cloud.md) for more information. - -### Set the initial containers using the CLI - -You can also specify the initial number of containers for a service when deploying it using the CLI: - -```bash -$ docker-cloud service run -t 2 [...] 
-``` - -If you don’t specify the number of containers to deploy, the CLI uses the default value of `1`. See the [CLI documentation](/apidocs/docker-cloud.md) for more information. - -## Scale an already running service - -If you need to scale a service up or down while it is running, you can change the number of containers from the service detail page: - -![](images/service-before-scaling.png) - -1. Click the slider at the top of the service detail page. -2. Drag the slider to the number of containers you want. -3. Click **Scale**. - - The application starts scaling immediately, whether this means starting new containers, or gracefully shutting down existing ones. - -![](images/service-during-scaling.png) - -### Scale a running service using the API - -You can scale an already running service through the API: - -``` -PATCH /api/app/v1/service/(uuid)/ HTTP/1.1 -{ - "target_num_containers": 2 -} -``` -See the [scale a service API documentation](/apidocs/docker-cloud.md#scale-a-service). - -### Scale a running service using the CLI - -You can also scale an already running service using the CLI: - -```bash -$ docker-cloud service scale (uuid or name) 2 -``` - -See the [scale a service CLI documentation](/apidocs/docker-cloud.md#scale-a-service). - -## Sequential deployment and scaling - -When a service with more than one container is deployed using **sequential deployment** mode, the second and subsequent containers are linked to all the -previous ones using [service links](service-links.md). These links are useful if -your service needs to know about other instances, for example to allow automatic -configuration on startup. - -See the [Service links](service-links.md) topic for a list of environment variables that the links create in your containers. 
- -You can set the **Sequential deployment** setting on the **Service configuration** step of the **Launch new service** wizard: - -![](images/service-wizard-sequential-deployment.png) - -### Set the scaling mode using the API - -You can also set the `sequential_deployment` option when deploying an -application through the API: - -``` -POST /api/app/v1/service/ HTTP/1.1 -{ - "sequential_deployment": true, - [...] -} -``` - -See [create a new service](/apidocs/docker-cloud.md#create-a-new-service) for -more information. - -### Set the scaling mode using the CLI - -You can also set the `sequential_deployment` option when deploying an -application through the CLI:  - -```bash -$ docker-cloud service run --sequential [...]  -``` diff --git a/docker-cloud/apps/stack-yaml-reference.md b/docker-cloud/apps/stack-yaml-reference.md deleted file mode 100644 index 0614e1d6b5..0000000000 --- a/docker-cloud/apps/stack-yaml-reference.md +++ /dev/null @@ -1,329 +0,0 @@ ---- -description: Stack YAML reference for Docker Cloud -keywords: YAML, stack, reference, docker cloud -redirect_from: -- /docker-cloud/feature-reference/stack-yaml-reference/ -title: Docker Cloud stack file YAML reference ---- - -A stack is a collection of services that make up an application in a specific environment. Learn more about stacks for Docker Cloud [here](stacks.md). A **stack file** is a file in YAML format that defines one or more services, similar to a `docker-compose.yml` file for Docker Compose but with a few extensions. The default name for this file is `docker-cloud.yml`. - -**Looking for information on stack files for Swarm?** A good place to start is the [Compose reference file](/compose/compose-file/index.md), particularly the section on `deploy` key and its sub-options, and the reference on [Docker stacks](/compose/bundles.md). Also, the new [Getting Started tutorial](/get-started/index.md) demos use of a stack file to deploy an application to a swarm. 
- -## Stack file example - -Below is an example `docker-cloud.yml`: - -```yml -lb: - image: dockercloud/haproxy - links: - - web - ports: - - "80:80" - roles: - - global -web: - image: dockercloud/quickstart-python - links: - - redis - target_num_containers: 4 -redis: - image: redis -``` - -Each key defined in `docker-cloud.yml` creates a service with that name in Docker Cloud. In the example above, three services are created: `lb`, `web`, and `redis`. Each service is a dictionary whose possible keys are documented below. - -The `image` key is mandatory. Other keys are optional and are analogous to their [Docker Cloud Service API](/apidocs/docker-cloud.md#create-a-new-service) counterparts. - -## image (required) - -The image used to deploy this service. This is the only mandatory key. - -```yml -image: drupal -image: dockercloud/hello-world -image: my.registry.com/redis -``` - -## autodestroy -Whether the containers for this service should be terminated if they stop (default: `no`, possible values: `no`, `on-success`, `always`). - -```yml -autodestroy: always -``` - -## autoredeploy -Whether to redeploy the containers of the service when its image is updated in Docker Cloud registry (default: `false`). - -```yml -autoredeploy: true -``` - -## cap_add, cap_drop -Add or drop container capabilities. See `man 7 capabilities` for a full list. - -```yml -cap_add: - - ALL -cap_drop: - - NET_ADMIN - - SYS_ADMIN -``` - -## cgroup_parent -Specify an optional parent cgroup for the container. - -```yml -cgroup_parent: m-executor-abcd -``` - -## command -Override the default command in the image. - -```yml -command: echo 'Hello World!' -``` - -## deployment_strategy -Container distribution among nodes (default: `emptiest_node`, possible values: `emptiest_node`, `high_availability`, `every_node`). Learn more [here](../infrastructure/deployment-strategies.md). - -```yml -deployment_strategy: high_availability -``` - -## devices -List of device mappings. 
Uses the same format as the `--device` docker client create option. - -```yml -devices: - - "/dev/ttyUSB0:/dev/ttyUSB0" -``` - -## dns -Specify custom DNS servers. Can be a single value or a list. - -```yml -dns: 8.8.8.8 -dns: - - 8.8.8.8 - - 9.9.9.9 -``` - -## dns_search -Specify custom DNS search domains. Can be a single value or a list. - -```yml -dns_search: example.com -dns_search: - - dc1.example.com - - dc2.example.com -``` - -## environment -A list of environment variables to add in the service's containers at launch. The environment variables specified here override any image-defined environment variables. You can use either an array or a dictionary format. - -Dictionary format: -```yml -environment: - PASSWORD: my_password -``` - -Array format: -```yml -environment: - - PASSWORD=my_password -``` - -When you use the Docker Cloud CLI to create a stack, you can use the environment variables defined locally in your shell to define those in the stack. This is useful if you don't want to store passwords or other sensitive information in your stack file: - -```yml -environment: - - PASSWORD -``` - -## expose -Expose ports without publishing them to the host machine - they'll only be accessible from your nodes in Docker Cloud. `udp` ports can be specified with a `/udp` suffix. - -```yml -expose: - - "80" - - "90/udp" -``` - -## extra_hosts -Add hostname mappings. Uses the same values as the docker client `--add-host` parameter. - -```yml -extra_hosts: - - "somehost:162.242.195.82" - - "otherhost:50.31.209.229" -``` - -## labels -Add metadata to containers using Docker Engine labels. You can use either an array or a dictionary. - -We recommend using reverse-DNS notation to prevent your labels from conflicting with those used by other software. 
- -```yml -labels: - com.example.description: "Accounting webapp" - com.example.department: "Finance" - com.example.label-with-empty-value: "" - -labels: - - "com.example.description=Accounting webapp" - - "com.example.department=Finance" - - "com.example.label-with-empty-value" -``` - -## links -Link to another service. - -Either specify both the service unique name and the link alias (`SERVICE:ALIAS`), or just the service unique name (which is also used for the alias). If a service you want to link to is part of a different stack, specify the external stack name too. - -- If the target service belongs to *this* stack, its service unique name is its service name. -- If the target service does not belong to *any* stacks (it is a standalone service), its service unique name is its service name. -- If the target service belongs to another stack, its service unique name is its service name plus the service stack name, separated by a period (`.`). - -```yml -links: - - mysql - - redis:cache - - amqp.staging:amqp -``` - -Environment variables are created for each link that Docker Cloud resolves to the containers IPs of the linked service. More information [here](service-links.md). - -## net -Networking mode. Only "bridge" and "host" options are supported for now. - -```yml -net: host -``` - -## pid -Sets the PID mode to the host PID mode. This turns on sharing between container and the host operating system PID address space. Containers launched with this (optional) flag can access and be accessed by other containers in the namespace belonging to the host running the Docker daemon. - -```yml -pid: "host" -``` - -## ports -Expose ports. Either specify both ports (`HOST:CONTAINER`), or just the container port (an ephemeral host port is chosen). `udp` ports can be specified with a `/udp` suffix. 
- -```yml -ports: - - "80" - - "443:443" - - "500/udp" - - "4500:4500/udp" - - "49022:22" -``` - -## privileged - -Whether to start the containers with Docker Engine's privileged flag set or not (default: `false`). - -```yml -privileged: true -``` - -## restart -Whether the containers for this service should be restarted if they stop (default: `no`, possible values: `no`, `on-failure`, `always`). - -```yml -restart: always -``` - -## roles -A list of Docker Cloud API roles to grant the service. The only supported value is `global`, which creates an environment variable `DOCKERCLOUD_AUTH` used to authenticate against Docker Cloud API. Learn more [here](api-roles.md). - -```yml -roles: - - global -``` - -## security_opt -Override the default labeling scheme for each container. - -```yml -security_opt: - - label:user:USER - - label:role:ROLE -``` - -## sequential_deployment -Whether the containers should be launched and scaled in sequence (default: `false`). Learn more [here](service-scaling.md). - -```yml -sequential_deployment: true -``` - -## tags -Indicates the [deploy tags](deploy-tags.md) to select the nodes where containers are created. - -```yml -tags: - - staging - - web -``` - -## target_num_containers -The number of containers to run for this service (default: `1`). - -```yml -target_num_containers: 3 -``` - -## volumes -Mount paths as volumes, optionally specifying a path on the host machine (`HOST:CONTAINER`), or an access mode (`HOST:CONTAINER:ro`). - -```yml -volumes: - - /etc/mysql - - /sys:/sys - - /etc:/etc:ro -``` - -## volumes_from -Mount all of the volumes from another service by specifying a service unique name. - -- If the target service belongs to this stack, its service unique name is its service name. -- If the target service does not belong to any stack, its service unique name is its service name. -- If the target service belongs to another stack, its service unique name is its service name plus the service stack name, separated by ".". 
Learn more [here](volumes.md). - -```yml -volumes_from: - - database - - mongodb.staging -``` - -## Single value keys analogous to a `docker run` counterpart - -``` -working_dir: /app -entrypoint: /app/entrypoint.sh -user: root -hostname: foo -domainname: foo.com -mac_address: 02:42:ac:11:65:43 -cpu_shares: 512 -cpuset: 0,1 -mem_limit: 100000m -memswap_limit: 200000m -privileged: true -read_only: true -stdin_open: true -tty: true -``` - -## Unsupported Docker-compose keys - -Stack files (`docker-cloud.yml`) were designed with `docker-compose.yml` in mind to maximize compatibility. However the following keys used in Compose are not supported in Docker Cloud stackfiles: - -``` -build -external_links -env_file -``` diff --git a/docker-cloud/apps/stacks.md b/docker-cloud/apps/stacks.md deleted file mode 100644 index 7fdfd48129..0000000000 --- a/docker-cloud/apps/stacks.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -description: Manage service stacks -keywords: service, stack, yaml -redirect_from: -- /docker-cloud/feature-reference/stacks/ -title: Manage service stacks ---- - -A **stack** is a collection of services that make up an application in a specific environment. A **stack file** is a file in YAML format, similar to a `docker-compose.yml` file, that defines one or more services. The YAML reference is documented [here](stack-yaml-reference.md). - -Stacks are a convenient way to automatically deploy multiple services that are linked to each other, without needing to define each one separately. - -Stack files define environment variables, deployment tags, the number of containers, and related environment-specific configuration. Because of this, you should use a separate stack file for development, staging, production, and other environments. 
- -### Stack file example - -Below is an example `docker-cloud.yml`: - -```yml -lb: - image: dockercloud/haproxy - links: - - web - ports: - - "80:80" - roles: - - global -web: - image: dockercloud/quickstart-python - links: - - redis - target_num_containers: 4 -redis: - image: redis -``` - -Each key defined in `docker-cloud.yml` creates a service with that name in Docker Cloud. In the example above, three services are created: `lb`, `web` and `redis`. Each service is a dictionary and its keys are specified below. - -Only the `image` key is mandatory. Other keys are optional and are analogous to their [Docker Cloud Service API](/apidocs/docker-cloud.md#create-a-new-service) counterparts. - -## Create a stack - -Docker Cloud allows you to create stacks from the web interface, as well as via the Docker Cloud API and the `docker-cloud` command line. - -To create a stack from the Docker Cloud web interface: - -1. Log in to Docker Cloud. -2. Click **Stacks**. -3. Click **Create**. -4. Enter a name for the stackfile. -5. Enter or paste the stack file in the **Stackfile** field, or drag a file to the field to upload it. (You can also click in the field to browse for and upload a file on your computer.) - - ![](images/stack-create.png) - -6. Click **Create** or **Create and deploy**. - -### Create a stack using the API - -You can also create a new stack by uploading a stack file directly using the Docker Cloud API. When you use the API, the stack file is in **JSON** format, like the following example: - -```json -POST /api/v1/stack/ HTTP/1.1 -{ - "name": "my-new-stack", - "services": [ - { - "name": "hello-word", - "image": "dockercloud/hello-world", - "target_num_containers": 2 - } - ] -} -``` - -Check our [API documentation](/apidocs/docker-cloud.md#stacks) for more information. 
- -### Create a stack using the CLI - -You can create a stack from a YAML file by executing: - -```bash -$ docker-cloud stack create -f docker-cloud.yml -``` - -Check our [CLI documentation](/apidocs/docker-cloud.md#stacks) for more information. - - -## Update an existing stack - -You can specify an existing stack when you create a service, however you might not always have the stack definition ready at that time, or you might later want to add a service to an existing stack. - -To update a stack from the Docker Cloud web interface: - -1. Navigate to the stack you want to update. -2. Click **Edit**. - - ![](images/stack-edit.png) -3. Edit the stack file, or upload a new one from your computer. -4. Click **Save**. - -### Update an existing stack using the API - -You can also update a stack by uploading the new stack file directly using the Docker Cloud API. When you use the API, the stack file is in **JSON** format, like the following example: - -```json -PATCH /api/app/v1/stack/(uuid)/ HTTP/1.1 -{ - "services": [ - { - "name": "hello-word", - "image": "dockercloud/hello-world", - "target_num_containers": 2 - } - ] -} -``` - -Check our [API documentation](/apidocs/docker-cloud.md#stacks) for more information. - -### Update an existing stack using the CLI - -You can update a stack from a YAML file by executing: - -```bash -docker-cloud stack update -f docker-cloud.yml (uuid or name) -``` - -Check our [CLI documentation](/apidocs/docker-cloud.md#stacks) for more information. \ No newline at end of file diff --git a/docker-cloud/apps/triggers.md b/docker-cloud/apps/triggers.md deleted file mode 100644 index 154f0ae69a..0000000000 --- a/docker-cloud/apps/triggers.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -description: Use triggers -keywords: API, triggers, endpoints -redirect_from: -- /docker-cloud/feature-reference/triggers/ -title: Use triggers ---- - -## What are triggers? 
- -**Triggers** are API endpoints that redeploy or scale a specific service -whenever a `POST` HTTP request is sent to them. You can create one or more -triggers per service. - -Triggers do not require any authentication. This allows third party services -like Docker Hub to call them, however because of this it is important that you -keep their URLs secret. - -The body of the `POST` request is passed in to the new containers as an -environment variable called `DOCKERCLOUD_TRIGGER_BODY`. - -### Trigger types - -Docker Cloud supports two types of triggers: - -* **Redeploy** triggers, which redeploy the service when called -* **Scale up** triggers, which scale the service by one or more containers when called - -## Create a trigger - -1. Click the name of the service you want to create a trigger for. -2. Go to the detail page and scroll down to the **Triggers** section. - - ![](images/triggers-tab-blank.png) - -3. In the **Trigger name** field, enter a name for the trigger. -4. Select a trigger type. -5. Click the **+** (plus sign) icon. - - ![](images/new-trigger-created.png) - -6. Use the POST request URL provided to configure the webhook in your -application or third party service. - -## Revoke triggers - -To stop a trigger from automatically scaling or redeploying, you must revoke it. - -1. Go to the detail page of the service. -2. Scroll down to the **Triggers** section. -3. Click the **trashcan** icon for the trigger you want to revoke. - - ![](images/revoke-trigger.png) - -Once the trigger is revoked, it stops accepting requests. - -## Use triggers in the API and CLI - -See our [API and CLI documentation](/apidocs/docker-cloud.md#triggers) to learn how to use triggers with our API and the CLI. 
diff --git a/docker-cloud/apps/volumes.md b/docker-cloud/apps/volumes.md deleted file mode 100644 index 1d31e3f51b..0000000000 --- a/docker-cloud/apps/volumes.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -description: Work with data volumes -keywords: data, volumes, create, reuse -redirect_from: -- /docker-cloud/tutorials/download-volume-data/ -- /docker-cloud/feature-reference/volumes/ -title: Work with data volumes ---- - -In Docker Cloud, you can define one or more data volumes for a service. -**Volumes** are directories that are stored outside of the container's -filesystem and which hold reusable and shareable data that can persist even when -containers are terminated. This data can be reused by the same service on -redeployment, or shared with other services. - -## Add a data volume to a service - -Data volumes can be either specified in the image's `Dockerfile` using the -[VOLUME instruction](/engine/reference/builder/#volume), or when -creating a service. - -To define a data volume in a service, specify the **container path** where it -should be created in the **Volumes** step of the **Create new service** wizard. -Each container of the service has its own volume. Data volumes are reused -when the service is redeployed (data persists in this case), and deleted if the -service is terminated. - -![](images/data-volumes-wizard.png) - -If you don't define a **host path**, Docker Cloud creates a new empty volume. -Otherwise, the specified **host path** is mounted on the **container path**. -When you specify a host path, you can also specify whether to mount the volume -read-only, or read/write. - -![](images/host-volumes-wizard.png) - - -## Reuse data volumes from another service - -You can reuse data volumes from another service. To do this when creating a service, go through the **Create new service**, and continue to the **Volumes** step. From the **Volumes** page, choose a source service from the **Add volumes from** menu. 
- -![](images/volumes-from-wizard.png) - -All reused data volumes are mounted on the same paths as in the source service. -Containers must be on the same host to share volumes, so the containers -of the new service deploy to the same nodes where the source service -containers are deployed. - -> **Note**: A service with data volumes cannot be terminated until all services that are using its volumes have also been terminated. - -## Back up data volumes - -You might find it helpful to download or back up the data from volumes that are attached to running containers. - -1. Run an SSH service that mounts the volumes of the service you want to back up. - - In the example snippet below, replace `mysql` with the actual service name. - - ``` - $ docker-cloud service run -n downloader -p 22:2222 -e AUTHORIZED_KEYS="$(cat ~/.ssh/id_rsa.pub)" --volumes-from mysql tutum/ubuntu - ``` - -2. Run a `scp` (secure-copy) to download the files to your local machine. - - In the example snippet below, replace `downloader-1.uuid.cont.dockerapp.io` with the container's Fully Qualified Domain Name (FQDN), and replace `/var/lib/mysql` with the path within the container from which you want to download the data. The data is downloaded to the current local folder. - - ``` - $ scp -r -P 2222 root@downloader-1.uuid.cont.dockerapp.io:/var/lib/mysql . - ``` diff --git a/docker-cloud/builds/advanced.md b/docker-cloud/builds/advanced.md deleted file mode 100644 index 28f3ecb1fd..0000000000 --- a/docker-cloud/builds/advanced.md +++ /dev/null @@ -1,133 +0,0 @@ ---- -description: Automated builds -keywords: automated, build, images -title: Advanced options for Autobuild and Autotest ---- - -The following options allow you to customize your automated build and automated test processes. - -## Environment variables for building and testing - -Several utility environment variables are set by the build process, and are -available during automated builds, automated tests, and while executing -hooks. 
- -> **Note**: These environment variables are only available to the build and test -processes and do not affect your service's run environment. - -* `SOURCE_BRANCH`: the name of the branch or the tag that is currently being tested. -* `SOURCE_COMMIT`: the SHA1 hash of the commit being tested. -* `COMMIT_MSG`: the message from the commit being tested and built. -* `DOCKER_REPO`: the name of the Docker repository being built. -* `DOCKERFILE_PATH`: the dockerfile currently being built. -* `CACHE_TAG`: the Docker repository tag being built. -* `IMAGE_NAME`: the name and tag of the Docker repository being built. (This variable is a combination of `DOCKER_REPO`:`CACHE_TAG`.) - -If you are using these build environment variables in a -`docker-compose.test.yml` file for automated testing, declare them in your `sut` -service's environment as shown below. - -```none -sut: - build: . - command: run_tests.sh - environment: - - SOURCE_BRANCH -``` - - -## Override build, test or push commands - -Docker Cloud allows you to override and customize the `build`, `test` and `push` -commands during automated build and test processes using hooks. For example, you -might use a build hook to set build arguments used only during the build -process. (You can also set up [custom build phase hooks](#custom-build-phase-hooks) to perform actions in between these commands.) - -**Use these hooks with caution.** The contents of these hook files replace the -basic `docker` commands, so you must include a similar build, test or push -command in the hook or your automated process does not complete. - -To override these phases, create a folder called `hooks` in your source code -repository at the same directory level as your Dockerfile. Create a file called -`hooks/build`, `hooks/test`, or `hooks/push` and include commands that the -builder process can execute, such as `docker` and `bash` commands (prefixed appropriately with `#!/bin/bash`). 
- -## Custom build phase hooks - -You can run custom commands between phases of the build process by creating -hooks. Hooks allow you to provide extra instructions to the autobuild and -autotest processes. - -Create a folder called `hooks` in your source code repository at the same -directory level as your Dockerfile. Place files that define the hooks in that -folder. Hook files can include both `docker` commands, and `bash` commands as long as they are prefixed appropriately with `#!/bin/bash`. The builder executes the commands in the files before and after each step. - -The following hooks are available: - -* `hooks/post_checkout` -* `hooks/pre_build` -* `hooks/post_build` -* `hooks/pre_test` -* `hooks/post_test` -* `hooks/pre_push` (only used when executing a build rule or [automated build](automated-build.md) ) -* `hooks/post_push` (only used when executing a build rule or [automated build](automated-build.md) ) - -### Build hook examples - -#### Override the "build" phase to set variables - -Docker Cloud allows you to define build environment variables either in the hook files, or from the automated build UI (which you can then reference in hooks). - -In the following example, we define a build hook that uses `docker build` arguments to set the variable `CUSTOM` based on the value of variable we defined using the Docker Cloud build settings. `$DOCKERFILE_PATH` is a variable that we provide with the name of the Dockerfile we wish to build, and `$IMAGE_NAME` is the name of the image being built. - -```none -docker build --build-arg CUSTOM=$VAR -f $DOCKERFILE_PATH -t $IMAGE_NAME . -``` - -> **Caution**: A `hooks/build` file overrides the basic [docker build](/engine/reference/commandline/build.md) command -used by the builder, so you must include a similar build command in the hook or -the automated build fails. 
- -To learn more about Docker build-time variables, see the [docker build documentation](/engine/reference/commandline/build/#set-build-time-variables-build-arg). - -#### Two-phase build - -If your build process requires a component that is not a dependency for your application, you can use a pre-build hook (refers to the `hooks/pre_build` file) to collect and compile required components. In the example below, the hook uses a Docker container to compile a Golang binary that is required before the build. - -```bash -#!/bin/bash -echo "=> Building the binary" -docker run --privileged \ - -v $(pwd):/src \ - -v /var/run/docker.sock:/var/run/docker.sock \ - centurylink/golang-builder -``` - -#### Push to multiple repos - -By default the build process pushes the image only to the repository where the build settings are configured. If you need to push the same image to multiple repositories, you can set up a `post_push` hook to add additional tags and push to more repositories. - -```none -docker tag $IMAGE_NAME $DOCKER_REPO:$SOURCE_COMMIT -docker push $DOCKER_REPO:$SOURCE_COMMIT -``` - -## Source Repository / Branch Clones - -When Docker Cloud pulls a branch from a source code repository, it performs -a shallow clone (only the tip of the specified branch). This has the advantage -of minimizing the amount of data transfer necessary from the repository and -speeding up the build because it pulls only the minimal code necessary. 
- -Because of this, if you need to perform a custom action that relies on a different -branch (such as a `post_push` hook), you can't checkout that branch, unless -you do one of the following: - -* You can get a shallow checkout of the target branch by doing the following: - - git fetch origin branch:mytargetbranch --depth 1 - -* You can also "unshallow" the clone, which fetches the whole Git history (and potentially -takes a long time / moves a lot of data) by using the `--unshallow` flag on the fetch: - - git fetch --unshallow origin diff --git a/docker-cloud/builds/automated-build.md b/docker-cloud/builds/automated-build.md deleted file mode 100644 index e2fcd023de..0000000000 --- a/docker-cloud/builds/automated-build.md +++ /dev/null @@ -1,439 +0,0 @@ ---- -description: Automated builds -keywords: automated, build, images -redirect_from: -- /docker-cloud/feature-reference/automated-build/ -title: Automated builds ---- - -[![Automated Builds with Docker Cloud](images/video-auto-builds-docker-cloud.png)](https://youtu.be/sl2mfyjnkXk "Automated Builds with Docker Cloud"){:target="_blank" class="_"} - -> **Note**: Docker Cloud's Build functionality is in BETA. - -Docker Cloud can automatically build images from source code in an external -repository and automatically push the built image to your Docker -repositories. - -When you set up automated builds (also called autobuilds), you create a list of -branches and tags that you want to build into Docker images. When you push code -to a source code branch (for example in Github) for one of those listed image -tags, the push uses a webhook to trigger a new build, which produces a Docker -image. The built image is then pushed to the Docker Cloud registry or to an -external registry. - -If you have automated tests configured, these run after building but before -pushing to the registry. You can use these tests to create a continuous -integration workflow where a build that fails its tests does not push the built -image. 
Automated tests do not push images to the registry on their own. [Learn more about automated image testing here.](automated-testing.md) - -You can also just use `docker push` to push pre-built images to these -repositories, even if you have automatic builds set up. - -![An automated build dashboard](images/build-dashboard.png) - -## Configure automated build settings - -You can configure repositories in Docker Cloud so that they automatically -build an image each time you push new code to your source provider. If you have -[automated tests](automated-testing.md) configured, the new image is only pushed -when the tests succeed. - -Before you set up automated builds you need to [create a repository](repos.md) to build, and [link to your source code provider](link-source.md). - -1. From the **Repositories** section, click into a repository to view its details. - -2. Click the **Builds** tab. - -3. If you are setting up automated builds for the first time, select -the code repository service where the image's source code is stored. - - Otherwise, if you are editing the build settings for an existing automated - build, click **Configure automated builds**. - -4. Select the **source repository** to build the Docker images from. - - You might need to specify an organization or user (the _namespace_) from the - source code provider. Once you select a namespace, its source code - repositories appear in the **Select repository** dropdown list. - -5. Choose where to run your build processes. - - You can either run the process on your own infrastructure and optionally [set up specific nodes to build on](automated-build.md#set-up-builder-nodes), or select **Build on Docker Cloud’s infrastructure** you can use the hosted build service - offered on Docker Cloud's infrastructure. If you use - Docker's infrastructure, select a builder size to run the build - process on. This hosted build service is free while it is in Beta. 
- - ![Editing build configurations](images/edit-repository-builds.png) - -6. If in the previous step you selected **Build on Docker - Cloud’s infrastructure**, then you are given the option to select the - **Docker Version** used to build this repository. You can choose between - the **Stable** and **Edge** versions of Docker. - - Selecting **Edge** lets you to take advantage of [multi-stage builds](/engine/userguide/eng-image/multistage-build/). For more information and examples, see the topic on how to [use multi-stage builds](/engine/userguide/eng-image/multistage-build/#use-multi-stage-builds). - - You can learn more about **stable** and **edge** channels in the - [Install Docker overview](/install/) - and the [Docker CE Edge](/edge/) topics. - -7. Optionally, enable [autotests](automated-testing.md#enable-automated-tests-on-a-repository). - -8. Review the default **Build Rules**, and optionally click the -**plus sign** to add and configure more build rules. - - _Build rules_ control what Docker Cloud builds into images from the contents - of the source code repository, and how the resulting images are tagged - within the Docker repository. - - A default build rule is set up for you, which you can edit or delete. This - default set builds from the `Branch` in your source code repository called - `master`, and creates a Docker image tagged with `latest`. - -9. For each branch or tag, enable or disable the **Autobuild** toggle. - - Only branches or tags with autobuild enabled are built, tested, *and* have - the resulting image pushed to the repository. Branches with autobuild - disabled are built for test purposes (if enabled at the repository - level), but the built Docker image is not pushed to the repository. - -10. For each branch or tag, enable or disable the **Build Caching** toggle. - - [Build caching](/engine/userguide/eng-image/dockerfile_best-practices/#/build-cache) can save time if you are building a large image frequently or have - many dependencies. 
You might want to leave build caching disabled to - make sure all of your dependencies are resolved at build time, or if - you have a large layer that is quicker to build locally. - -11. Click **Save** to save the settings, or click **Save and build** to save and -run an initial test. - - A webhook is automatically added to your source code repository to notify - Docker Cloud on every push. Only pushes to branches that are listed as the - source for one or more tags trigger a build. - -### Set up build rules - -By default when you set up autobuilds, a basic build rule is created for you. -This default rule watches for changes to the `master` branch in your source code -repository, and builds the `master` branch into a Docker image tagged with -`latest`. You - -In the **Build Rules** section, enter one or more sources to build. - -For each source: - -* Select the **Source type** to build either a **tag** or a - **branch**. This tells the build system what to look for in the source code - repository. - -* Enter the name of the **Source** branch or tag you want to build. - - The first time you configure automated builds, a default build rule is set up - for you. This default set builds from the `Branch` in your source code called - `master`, and creates a Docker image tagged with `latest`. - - You can also use a regex to select which source branches or tags to build. - To learn more, see - [regexes](automated-build.md#regexes-and-automated-builds). - -* Enter the tag to apply to Docker images built from this source. - - If you configured a regex to select the source, you can reference the - capture groups and use its result as part of the tag. To learn more, see - [regexes](automated-build.md#regexes-and-automated-builds). - -* Specify the **Dockerfile location** as a path relative to the root of the source code repository. (If the Dockerfile is at the repository root, leave this path set to `/`.) 
- -> **Note:** When Docker Cloud pulls a branch from a source code repository, it performs -a shallow clone (only the tip of the specified branch). Refer to [Advanced options for Autobuild and Autotest](advanced.md) -for more information. - -### Environment variables for builds - -You can set the values for environment variables used in your build processes -when you configure an automated build. Add your build environment variables by -clicking the plus sign next to the **Build environment variables** section, and -then entering a variable name and the value. - -When you set variable values from the Docker Cloud UI, they can be used by the -commands you set in `hooks` files, but they are stored so that only users who -have `admin` access to the Docker Cloud repository can see their values. This -means you can use them to safely store access tokens or other information that -should remain secret. - -> **Note**: The variables set on the build configuration screen are used during -the build processes _only_ and should not be confused with the environment -values used by your service (for example to create service links). - -## Check your active builds - -A summary of a repository's builds appears both on the repository **General** -tab, and in the **Builds** tab. The **Builds** tab also displays a color coded -bar chart of the build queue times and durations. Both views display the -pending, in progress, successful, and failed builds for any tag of the -repository. - -From either location, you can click a build job to view its build report. The -build report shows information about the build job including the source -repository and branch (or tag), the build duration, creation time and location, -and the user namespace the build occurred in. - -![screen showing a build report](images/build-report.png) - -## Cancel or retry a build - -While a build is queued or running, a **Cancel** icon appears next to its build -report link on the General tab and on the Builds tab. 
You can also click the -**Cancel** button from the build report page, or from the Timeline tab's logs -display for the build. - -![list of builds showing the cancel icon](images/build-cancelicon.png) - -If a build fails, a **Retry** icon appears next to the build report line on the -General and Builds tabs, and the build report page and Timeline logs also -display a **Retry** button. - -![Timeline view showing the retry build button](images/retry-build.png) - -> **Note**: If you are viewing the build details for a repository that belongs -to an Organization, the Cancel and Retry buttons only appear if you have `Read & Write` access to the repository. - - -## Disable an automated build - -Automated builds are enabled per branch or tag, and can be disabled and -re-enabled easily. You might do this when you want to only build manually for -awhile, for example when you are doing major refactoring in your code. Disabling -autobuilds does not disable [autotests](automated-testing.md). - -To disable an automated build: - -1. From the **Repositories** page, click into a repository, and click the **Builds** tab. - -2. Click **Configure automated builds** to edit the repository's build settings. - -3. In the **Build Rules** section, locate the branch or tag you no longer want -to automatically build. - -4. Click the **autobuild** toggle next to the configuration line. - - The toggle turns gray when disabled. - -5. Click **Save** to save your changes. - -## Advanced automated build options - -At the minimum you need a build rule composed of a source branch (or tag) and -destination Docker tag to set up an automated build. You can also change where -the build looks for the Dockerfile, set a path to the files the build use -(the build context), set up multiple static tags or branches to build from, and -use regular expressions (regexes) to dynamically select source code to build and -create dynamic tags. 
- -All of these options are available from the **Build configuration** screen for -each repository. Click **Repositories** from the left navigation, click the name -of the repository you want to edit, click the **Builds** tab, and click -**Configure Automated builds**. - -### Tag and Branch builds - -You can configure your automated builds so that pushes to specific branches or tags triggers a build. - -1. In the **Build Rules** section, click the plus sign to add more sources to build. - -2. Select the **Source type** to build: either a **tag** or a **branch**. - - This tells the build system what type of source to look for in the code - repository. - -3. Enter the name of the **Source** branch or tag you want to build. - - You can enter a name, or use a regex to match which source branch or tag - names to build. To learn more, see - [regexes](automated-build.md#regexes-and-automated-builds). - -4. Enter the tag to apply to Docker images built from this source. - - If you configured a regex to select the source, you can reference the - capture groups and use its result as part of the tag. To learn more, see - [regexes](automated-build.md#regexes-and-automated-builds). - -5. Repeat steps 2 through 4 for each new build rule you set up. - -### Set the build context and Dockerfile location - -Depending on how the files are arranged in your source code repository, the -files required to build your images may not be at the repository root. If that's -the case, you can specify a path where the build looks for the files. - -The _build context_ is the path to the files needed for the build, relative to the root of the repository. Enter the path to these files in the **Build context** field. Enter `/` to set the build context as the root of the source code repository. - -> **Note**: If you delete the default path `/` from the **Build context** field and leave it blank, the build system uses the path to the Dockerfile as the build context. 
However, to avoid confusion we recommend that you specify the complete path. - -You can specify the **Dockerfile location** as a path relative to the build -context. If the Dockerfile is at the root of the build context path, leave the -Dockerfile path set to `/`. (If the build context field is blank, set the path -to the Dockerfile from the root of the source repository.) - -### Regexes and automated builds - -You can specify a regular expression (regex) so that only matching branches or -tags are built. You can also use the results of the regex to create the Docker -tag that is applied to the built image. - -You can use the variable `{sourceref}` to use the branch or tag name that -matched the regex in the Docker tag applied to the resulting built image. (The -variable includes the whole source name, not just the portion that matched the -regex.) You can also use up to nine regular expression capture groups -(expressions enclosed in parentheses) to select a source to build, and reference -these in the Docker Tag field using `{\1}` through `{\9}`. - -**Regex example: build from version number branch and tag with version number** - -You might want to automatically build any branches that end with a number -formatted like a version number, and tag their resulting Docker images using a -name that incorporates that branch name. - -To do this, specify a `branch` build with the regex `/[0-9.]+$/` in the -**Source** field, and use the formula `version-{sourceref}` in the **Docker -tag** field. - - - -### Create multiple Docker tags from a single build - -By default, each build rule builds a source branch or tag into a Docker image, -and then tags that image with a single tag. However, you can also create several -tagged Docker images from a single build rule. - -To create multiple tags from a single build rule, enter a comma-separated list -of tags in the **Docker tag** field in the build rule. 
If an image with that tag -already exists, Docker Cloud overwrites the image when the build completes -successfully. If you have automated tests configured, the build must pass these -tests as well before the image is overwritten. You can use both regex references -and plain text values in this field simultaneously. - -For example if you want to update the image tagged with `latest` at the same -time as you a tag an image for a specific version, you could enter -`{sourceref},latest` in the Docker Tag field. - -If you need to update a tag _in another repository_, use [a post_build hook](advanced.md#push-to-multiple-repos) to push to a second repository. - -## Build repositories with linked private submodules - -Docker Cloud sets up a deploy key in your source code repository that allows it -to clone the repository and build it, however this key only works for a single, -specific code repository. If your source code repository uses private Git -submodules (or requires that you clone other private repositories to build), -Docker Cloud cannot access these additional repos, your build cannot complete, -and an error is logged in your build timeline. - -To work around this, you can set up your automated build using the `SSH_PRIVATE` environment variable to override the deployment key and grant Docker Cloud's build system access to the repositories. - -> **Note**: If you are using autobuild for teams, use [the process below](automated-build.md#service-users-for-team-autobuilds) instead, and configure a service user for your source code provider. You can also do this for an individual account to limit Docker Cloud's access to your source repositories. - -1. Generate a SSH keypair that you use for builds only, and add the public key to your source code provider account. - - This step is optional, but allows you to revoke the build-only keypair without removing other access. - -2. Copy the private half of the keypair to your clipboard. -3. 
In Docker Cloud, navigate to the build page for the repository that has linked private submodules. (If necessary, follow the steps [here](automated-build.md#configure-automated-build-settings) to configure the automated build.) -4. At the bottom of the screen, click the plus sign ( **+** ) next to **Build Environment variables**. -5. Enter `SSH_PRIVATE` as the name for the new environment variable. -6. Paste the private half of the keypair into the **Value** field. -7. Click **Save**, or **Save and Build** to validate that the build now completes. - -> **Note**: You must configure your private git submodules using git clone over SSH (`git@submodule.tld:some-submodule.git`) rather than HTTPS. - -## Autobuild for Teams - -When you create an automated build repository in your own account namespace, you can start, cancel, and retry builds, and edit and delete your own repositories. - -These same actions are also available for team repositories from Docker Hub if -you are a member of the Organization's `Owners` team. If you are a member of a -team with `write` permissions you can start, cancel and retry builds in your -team's repositories, but you cannot edit the team repository settings or delete -the team repositories. If your user account has `read` permission, or if you're -a member of a team with `read` permission, you can view the build configuration -including any testing settings. - -| Action/Permission | read | write | admin | owner | -| --------------------- | ---- | ----- | ----- | ----- | -| view build details | x | x | x | x | -| start, cancel, retry | | x | x | x | -| edit build settings | | | x | x | -| delete build | | | | x | - -### Service users for team autobuilds - -> **Note**: Only members of the `Owners` team can set up automated builds for teams. - -When you set up automated builds for teams, you grant Docker Cloud access to -your source code repositories using OAuth tied to a specific user account. 
This -means that Docker Cloud has access to everything that the linked source provider -account can access. - -For organizations and teams, we recommend creating a dedicated service account -(or "machine user") to grant access to the source provider. This ensures that no -builds break as individual users' access permissions change, and that an -individual user's personal projects are not exposed to an entire organization. - -This service account should have access to any repositories to be built, -and must have administrative access to the source code repositories so it can -manage deploy keys. If needed, you can limit this account to only a specific -set of repositories required for a specific build. - -If you are building repositories with linked private submodules (private -dependencies), you also need to add an override `SSH_PRIVATE` environment -variable to automated builds associated with the account. - -1. Create a service user account on your source provider, and generate SSH keys for it. -2. Create a "build" team in your organization. -3. Ensure that the new "build" team has access to each repository and submodule you need to build. - - Go to the repository's **Settings** page. On Github, add the new "build" team to the list of **Collaborators and Teams**. On Bitbucket, add the "build" team to the list of approved users on the **Access management** screen. - -4. Add the service user to the "build" team on the source provider. - -5. Log in to Docker Cloud as a member of the `Owners` team, switch to the organization, and follow the instructions to [link to source code repository](link-source.md) using the service account. - - > **Note**: You may need to log out of your individual account on the source code provider to create the link to the service account. - -6. 
Optionally, use the SSH keys you generated to set up any builds with private submodules, using the service account and [the instructions above](automated-build.md#build-repositories-with-linked-private-submodules). - -## What's Next? - -### Customize your build process - -Additional advanced options are available for customizing your automated builds, -including utility environment variables, hooks, and build phase overrides. To -learn more see [Advanced options for Autobuild and Autotest](advanced.md). - -### Set up builder nodes - -If you are building on your own infrastructure, you can run the build process on -specific nodes by adding the `builder` label to them. If no builder nodes are -specified, the build containers are deployed using an "emptiest node" strategy. - -You can also limit the number of concurrent builds (including `autotest` builds) -on a specific node by using a `builder=n` tag, where the `n` is the number of -builds to allow. For example a node tagged with `builder=5` only allows up to -five concurrent builds or autotest-builds at the same time. - -### Autoredeploy services on successful build - -You can configure your services to automatically redeploy once the build -succeeds. [Learn more about autoredeploy](../apps/auto-redeploy.md) - -### Add automated tests - -To test your code before the image is pushed, you can use -Docker Cloud's [Autotest](automated-testing.md) feature which -integrates seamlessly with autobuild and autoredeploy. - -> **Note**: While the Autotest feature builds an image for testing purposes, it -does not push the resulting image to Docker Cloud or the external registry. 
diff --git a/docker-cloud/builds/images/build-cancelicon.png b/docker-cloud/builds/images/build-cancelicon.png deleted file mode 100644 index 33fbc9e3cb..0000000000 Binary files a/docker-cloud/builds/images/build-cancelicon.png and /dev/null differ diff --git a/docker-cloud/builds/images/build-dashboard.png b/docker-cloud/builds/images/build-dashboard.png deleted file mode 100644 index 6fba720120..0000000000 Binary files a/docker-cloud/builds/images/build-dashboard.png and /dev/null differ diff --git a/docker-cloud/builds/images/build-report.png b/docker-cloud/builds/images/build-report.png deleted file mode 100644 index e5b271fc2a..0000000000 Binary files a/docker-cloud/builds/images/build-report.png and /dev/null differ diff --git a/docker-cloud/builds/images/cancel-build.png b/docker-cloud/builds/images/cancel-build.png deleted file mode 100644 index 35f3b6918a..0000000000 Binary files a/docker-cloud/builds/images/cancel-build.png and /dev/null differ diff --git a/docker-cloud/builds/images/create-repository.png b/docker-cloud/builds/images/create-repository.png deleted file mode 100644 index 501c949b15..0000000000 Binary files a/docker-cloud/builds/images/create-repository.png and /dev/null differ diff --git a/docker-cloud/builds/images/edit-repository-builds.png b/docker-cloud/builds/images/edit-repository-builds.png deleted file mode 100644 index eeff989cb1..0000000000 Binary files a/docker-cloud/builds/images/edit-repository-builds.png and /dev/null differ diff --git a/docker-cloud/builds/images/edit-repository.png b/docker-cloud/builds/images/edit-repository.png deleted file mode 100644 index 0202076fa4..0000000000 Binary files a/docker-cloud/builds/images/edit-repository.png and /dev/null differ diff --git a/docker-cloud/builds/images/link-source-github-ind-revoke.png b/docker-cloud/builds/images/link-source-github-ind-revoke.png deleted file mode 100644 index f20cb81d07..0000000000 Binary files 
a/docker-cloud/builds/images/link-source-github-ind-revoke.png and /dev/null differ diff --git a/docker-cloud/builds/images/link-source-github-ind.png b/docker-cloud/builds/images/link-source-github-ind.png deleted file mode 100644 index 811de546d6..0000000000 Binary files a/docker-cloud/builds/images/link-source-github-ind.png and /dev/null differ diff --git a/docker-cloud/builds/images/link-source-github-org-lite.png b/docker-cloud/builds/images/link-source-github-org-lite.png deleted file mode 100644 index aa96087b8f..0000000000 Binary files a/docker-cloud/builds/images/link-source-github-org-lite.png and /dev/null differ diff --git a/docker-cloud/builds/images/link-source-github-org-revoke.png b/docker-cloud/builds/images/link-source-github-org-revoke.png deleted file mode 100644 index 97a8c6b61f..0000000000 Binary files a/docker-cloud/builds/images/link-source-github-org-revoke.png and /dev/null differ diff --git a/docker-cloud/builds/images/link-source-github-org.png b/docker-cloud/builds/images/link-source-github-org.png deleted file mode 100644 index d1c1ebf0f5..0000000000 Binary files a/docker-cloud/builds/images/link-source-github-org.png and /dev/null differ diff --git a/docker-cloud/builds/images/link-source-unlink-github.png b/docker-cloud/builds/images/link-source-unlink-github.png deleted file mode 100644 index 096fb05d6d..0000000000 Binary files a/docker-cloud/builds/images/link-source-unlink-github.png and /dev/null differ diff --git a/docker-cloud/builds/images/repo-general.png b/docker-cloud/builds/images/repo-general.png deleted file mode 100644 index f2efc2e2f5..0000000000 Binary files a/docker-cloud/builds/images/repo-general.png and /dev/null differ diff --git a/docker-cloud/builds/images/retry-build.png b/docker-cloud/builds/images/retry-build.png deleted file mode 100644 index cae3d9de70..0000000000 Binary files a/docker-cloud/builds/images/retry-build.png and /dev/null differ diff --git a/docker-cloud/builds/images/source-providers.png 
b/docker-cloud/builds/images/source-providers.png deleted file mode 100644 index 8b0dfe70d3..0000000000 Binary files a/docker-cloud/builds/images/source-providers.png and /dev/null differ diff --git a/docker-cloud/builds/images/third-party-images-modal.png b/docker-cloud/builds/images/third-party-images-modal.png deleted file mode 100644 index 5417dd0922..0000000000 Binary files a/docker-cloud/builds/images/third-party-images-modal.png and /dev/null differ diff --git a/docker-cloud/builds/images/video-auto-builds-docker-cloud.png b/docker-cloud/builds/images/video-auto-builds-docker-cloud.png deleted file mode 100644 index 5f8c588950..0000000000 Binary files a/docker-cloud/builds/images/video-auto-builds-docker-cloud.png and /dev/null differ diff --git a/docker-cloud/builds/images/video-auto-tests-docker-cloud.png b/docker-cloud/builds/images/video-auto-tests-docker-cloud.png deleted file mode 100644 index e694f53dcf..0000000000 Binary files a/docker-cloud/builds/images/video-auto-tests-docker-cloud.png and /dev/null differ diff --git a/docker-cloud/builds/index.md b/docker-cloud/builds/index.md deleted file mode 100644 index 97069aaaac..0000000000 --- a/docker-cloud/builds/index.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -description: Manage Builds and Images in Docker Cloud -keywords: builds, images, Cloud -title: Builds and images overview -notoc: true ---- - -Docker Cloud provides a hosted registry service where you can create -repositories to store your Docker images. You can choose to push images to the -repositories, or link to your source code and build them directly in Docker -Cloud. - -You can build images manually, or set up automated builds to rebuild your Docker -image on each `git push` to the source code. You can also create automated -tests, and when the tests pass use autoredeploy to automatically update your -running services when a build passes its tests. 
- -* [Repositories in Docker Cloud](repos.md) -* [Push images to Docker Cloud](push-images.md) -* [Link to a source code repository](link-source.md) -* [Automated builds](automated-build.md) -* [Automated repository tests](automated-testing.md) -* [Advanced options for Autobuild and Autotest](advanced.md) - -![Docker Cloud repository General view](images/repo-general.png){:width="650px"} diff --git a/docker-cloud/builds/link-source.md b/docker-cloud/builds/link-source.md deleted file mode 100644 index b7ea024254..0000000000 --- a/docker-cloud/builds/link-source.md +++ /dev/null @@ -1,153 +0,0 @@ ---- -description: Link to your source code repository -keywords: sourcecode, github, bitbucket, Cloud -redirect_from: -- /docker-cloud/tutorials/link-source/ -title: Link Docker Cloud to a source code provider ---- - -To automate building and testing of your images, you link to your hosted source -code service to Docker Cloud so that it can access your source code -repositories. You can configure this link for user accounts or -organizations. - -If you only push pre-built images to Docker Cloud's registry, you do not -need to link your source code provider. - -> **Note**: If you are linking a source code provider to create autobuilds for a team, follow the instructions to [create a service account](automated-build.md#service-users-for-team-autobuilds) for the team before linking the account as described below. - -## Link to a GitHub user account - -1. Click **Cloud settings** in the left navigation bar. - -2. Click or scroll down to **Source providers**. - -3. Click the plug icon for the source provider you want to link. - - ![Linking source providers](images/source-providers.png) - -4. Review the settings for the **Docker Cloud Builder** OAuth application. - ![Granting access to GitHub account](images/link-source-github-ind.png) - - >**Note**: If you are the owner of any Github organizations, you might see - options to grant Docker Cloud access to them from this screen. 
You can also - individually edit an organization's Third-party access settings to grant or - revoke Docker Cloud's access. See [Grant access to a GitHub - organization](link-source.md#grant-access-to-a-github-organization) to learn more. - -5. Click **Authorize application** to save the link. - -You are now ready to create a new image! - -### Unlink a GitHub user account - -To revoke Docker Cloud's access to your GitHub account, you must unlink it both -from Docker Cloud, *and* from your GitHub account. - -1. Click **Cloud settings** in the left navigation, and click or scroll to the -**Source providers** section. - -2. Click the plug icon next to the source provider you want to remove. - - The icon turns gray and has a slash through it when the account is disabled - but not revoked. You can use this to _temporarily_ disable a linked source - code provider account. - -4. Go to your GitHub account's **Settings** page. - -5. Click **OAuth applications** in the left navigation bar. - ![Revoking access to GitHub account](images/link-source-github-ind-revoke.png) - -6. Click **Revoke** next to the Docker Cloud Builder application. - -> **Note**: Each repository that is configured as an automated build source -contains a webhook that notifies Docker Cloud of changes in the repository. -This webhook is not automatically removed when you revoke access to a source -code provider. - -## Grant access to a GitHub organization - -If you are the owner of a Github organization you can grant or revoke Docker -Cloud's access to the organization's repositories. Depending on the GitHub -organization's settings, you may need to be an organization owner. - -If the organization has not had specific access granted or revoked before, you -can often grant access at the same time as you link your user account. In this -case, a **Grant access** button appears next to the organization name in the -link accounts screen, as shown below. 
If this button does not appear, you must -manually grant the application's access. - -![Granting access to GitHub organization](images/link-source-github-org-lite.png) - -To manually grant Docker Cloud access to a GitHub organization: - -1. Link your user account using the instructions above. - -2. From your GitHub Account settings, locate the **Organization settings** -section at the lower left. - -3. Click the organization you want to give Docker Cloud access to. - -4. From the Organization Profile menu, click **Third-party access**. - - The page displays a list of third party applications and their access - status. - -5. Click the pencil icon next to Docker Cloud Builder. - -6. Click **Grant access** next to the organization. - ![Granting access to GitHub organization manually](images/link-source-github-org.png) - -### Revoke access to a GitHub organization - -To revoke Docker Cloud's access to an organization's GitHub repositories: - -1. From your GitHub Account settings, locate the **Organization settings** section at the lower left. -2. Click the organization you want to revoke Docker Cloud's access to. -3. From the Organization Profile menu, click **Third-party access**. - The page displays a list of third party applications and their access status. -4. Click the pencil icon next to Docker Cloud Builder. - ![Revoking access to GitHub organization](images/link-source-github-org-revoke.png) -5. On the next page, click **Deny access**. - -## Link to a Bitbucket user account - -1. Log in to Docker Cloud using your Docker ID. - -2. Click the gear icon in the left navigation to go to your **Cloud settings**. - -3. Scroll to the **Source providers** section. - -4. Click the plug icon for the source provider you want to link. - - ![Linking Bitbucket](images/source-providers.png) - -5. If necessary, log in to Bitbucket. - -6. On the page that appears, click **Grant access**. 
- -### Unlink a Bitbucket user account - -To permanently revoke Docker Cloud's access to your Bitbucket account, you must -unlink it both from Docker Cloud, *and* from your Bitbucket account. - -1. From your **Cloud settings** page, click **Source providers** - -2. Click the plug icon next to the source provider you want to remove. - - The icon turns gray and has a slash through it when the account is disabled, - however access may not have been revoked. You can use this to _temporarily_ - disable a linked source code provider account. - -4. Go to your Bitbucket account and click the user menu icon in the top right corner. - -5. Click **Bitbucket settings**. - -6. On the page that appears, click **OAuth**. - -7. Click **Revoke** next to the Docker Cloud line. - -> **Note**: Each repository that is configured as an automated build source -contains a webhook that notifies Docker Cloud of changes in the repository. This -webhook is not automatically removed when you revoke access to a source code -provider. diff --git a/docker-cloud/builds/push-images.md b/docker-cloud/builds/push-images.md deleted file mode 100644 index 2168392ce3..0000000000 --- a/docker-cloud/builds/push-images.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -description: Push images to Docker Cloud -keywords: images, private, registry -redirect_from: -- /docker-cloud/getting-started/intermediate/pushing-images-to-dockercloud/ -- /docker-cloud/tutorials/pushing-images-to-dockercloud/ -title: Push images to Docker Cloud -notoc: true ---- - -Docker Cloud uses Docker Hub as its native registry for storing both public and -private repositories. Once you push your images to Docker Hub, they are -available in Docker Cloud. - -If you don't have Swarm Mode enable, images pushed to Docker Hub automatically appear for you on the **Services/Wizard** page on Docker Cloud. - -> **Note**: You must use Docker Engine 1.6 or later to push to Docker Hub. 
-Follow the [official installation instructions](/install/index.md){: target="_blank" class="_" } depending on your system. - -1. In a terminal window, set the environment variable **DOCKER_ID_USER** as *your username* in Docker Cloud. - - This allows you to copy and paste the commands directly from this tutorial. - - ``` - $ export DOCKER_ID_USER="username" - ``` - - If you don't want to set this environment variable, change the examples in - this tutorial to replace `DOCKER_ID_USER` with your Docker Cloud username. - -2. Log in to Docker Cloud using the `docker login` command. - - ``` - $ docker login - ``` - This logs you in using your Docker ID, which is shared between both Docker Hub and Docker Cloud. - - If you have never logged in to Docker Hub or Docker Cloud and do not have a Docker ID, running this command prompts you to create a Docker ID. - -3. Tag your image using `docker tag`. - - In the example below replace `my_image` with your image's name, and `DOCKER_ID_USER` with your Docker Cloud username if needed. - - ``` - $ docker tag my_image $DOCKER_ID_USER/my_image - ``` - -4. Push your image to Docker Hub using `docker push` (making the same replacements as in the previous step). - - ``` - $ docker push $DOCKER_ID_USER/my_image - ``` - -5. Check that the image you just pushed appears in Docker Cloud. - - Go to Docker Cloud and navigate to the **Repositories** tab and confirm that your image appears in this list. - ->**Note**: If you're a member of any organizations that are using Docker -> Cloud, you might need to switch to the organization account namespace using the -> account menu at the upper right to see other repositories. 
diff --git a/docker-cloud/builds/repos.md b/docker-cloud/builds/repos.md deleted file mode 100644 index 3bf175ac7d..0000000000 --- a/docker-cloud/builds/repos.md +++ /dev/null @@ -1,145 +0,0 @@ ---- -description: Create and edit Docker Cloud repositories -keywords: Docker Cloud repositories, automated, build, images -title: Docker Cloud repositories ---- - -Repositories in Docker Cloud store your Docker images. You can create -repositories and manually [push images](push-images.md) using `docker push`, or -you can link to a source code provider and use [automated builds](automated-build.md) to build the images for you. These repositories -can be either public or private. - -![Docker Cloud repository General view](images/repo-general.png) - -Additionally, you can access your Docker Hub repositories and automated builds -from within Docker Cloud. - -## Create a new repository in Docker Cloud - -To store your images in Docker Cloud, you create a repository. All individual users can create one private repository for free, and can create unlimited public repositories. - -1. Click **Repositories** in the left navigation. -2. Click **Create**. -3. Enter a **name** and an optional **description**. -4. Choose a visibility setting for the repository. -5. Optionally, click a linked source code provider to set up automated builds. - 1. Select a namespace from that source code provider. - 2. From that namespace, select a repository to build. - 3. Optionally, expand the build settings section to set up build rules and enable or disable Autobuilds. - - > **Note**: You do not need to set up automated builds right away, and you can change the build settings at any time after the repository is created. If you choose not to enable automated builds, you can still push images to the repository using the `docker` or `docker-cloud` CLI. -6. Click **Create**. 
- - ![Create repository page](images/create-repository.png) - -### Repositories for Organizations - -Only members of an organization's `Owners` team can create new repositories for -the organization. Members of `Owners` can also change the organization's billing -information, and link the organization to a source code provider to set up -automated builds. - -A member of the `Owners` team must also set up the repository's access -permissions so that other teams within the organization can use it. To learn -more, see the [organizations and teams documentation](../orgs.md#set-team-permissions). - -## Edit an existing repository in Docker Cloud - -You can edit repositories in Docker Cloud to change the description and build configuration. - -From the **General** page, you can edit the repository's short description, or click to edit the version of the ReadMe displayed on the repository page. - -> **Note**: Edits to the Docker Cloud **ReadMe** are not reflected in the source code linked to a repository. - -To run a build, or to set up or change automated build settings, click the **Builds** tab, and click **Configure Automated Builds**. See the documentation on [configuring automated build settings](automated-build.md#configure-automated-build-settings) for more -information. - -## Change repository privacy settings - -Repositories in Docker Cloud can be either public or private. Public -repositories are visible from the Docker Store's Community Content section, and -can also be searched for from Docker Cloud's **Create Service** wizard. Private -repositories are only visible to the user account that created it (unless it -belongs to an Organization, see below). - -> **Note**: These _privacy_ settings are separate from the [repository _access_ permissions](../orgs.md#change-team-permissions-for-an-individual-repository) available for repositories shared among members of an [organization](../orgs.md). 
- -If a private repository belongs to an [Organization](../orgs.md), members of the -`Owners` team configure access. Only members of the `Owners` team can change an -organization's repository privacy settings. - -Each Docker Cloud account comes with one free private repository. Additional -private repositories are available for subscribers on paid plans. - -To change a repository's privacy settings: - -1. Navigate to the repository in Docker Cloud. -2. Click the **Settings** tab. -3. Click the **Make public** or **Make private** button. -4. In the dialog that appears, enter the name of the repository to confirm the change. -5. Click the button to save the change. - -## Delete a repository - -When you delete a repository in Docker Cloud, all of the images in that -repository are also deleted. - -If automated builds are configured for the repository, the build rules and -settings are deleted along with any Docker Security Scan results. However, this -does not affect the code in the linked source code repository, and does not -remove the source code provider link. - -If you are running a service from deleted repository , the service continues -to run, but cannot be scaled up or redeployed. If any builds use the Docker -`FROM` directive and reference a deleted repository, those builds fail. - -To delete a repository: - -1. Navigate to the repository, and click the **Settings** tab. -2. Click **Delete**. -3. Enter the name of the repository to confirm deletion, and click **Delete**. - -External (third-party) repositories cannot be deleted from within Docker Cloud, -however you can remove a link to them using the same process for a repository in -Docker Cloud. The link is removed, but images in the external repository are not -deleted. - -> **Note**: If the repository to be deleted or removed belongs to an [Organization](../orgs.md), only members of the `Owners` team can delete it. 
- - -## Link to a repository from a third party registry - -You can link to repositories hosted on a third party registry. This allows you -to deploy images from the third party registry to nodes in Docker Cloud, and -also allows you to enable automated builds which push built images back to the -registry. - -> **Note**: To link to a repository that you want to share with an organization, contact a member of the organization's `Owners` team. Only the Owners team can import new external registry repositories for an organization. - -1. Click **Repositories** in the side menu. - -2. Click the down arrow menu next to the **Create** button. - -3. Select **Import**. - -4. Enter the name of the repository that you want to add. - - For example, `registry.com/namespace/reponame` where `registry.com` is the - hostname of the registry. - ![Import repository popup](images/third-party-images-modal.png) - -5. Enter credentials for the registry. - - > **Note**: These credentials must have **push** permission to push - built images back to the repository. If you provide **read-only** - credentials, you can run automated tests and deploy from the - repository to your nodes, but you cannot push built images to - it. - -6. Click **Import**. - -7. Confirm that the repository on the third-party registry now appears in your **Repositories** dropdown list. - -## What's next? - -Once you create or link to a repository in Docker Cloud, you can set up [automated testing](automated-testing.md) and [automated builds](automated-build.md). 
diff --git a/docker-cloud/cloud-swarm/connect-to-swarm.md b/docker-cloud/cloud-swarm/connect-to-swarm.md deleted file mode 100644 index 384c841b2a..0000000000 --- a/docker-cloud/cloud-swarm/connect-to-swarm.md +++ /dev/null @@ -1,170 +0,0 @@ ---- -previewflag: cloud-swarm -description: how to register and unregister swarms in Docker Cloud -keywords: swarm mode, swarms, orchestration Cloud, fleet management -title: Connect to a swarm through Docker Cloud ---- - -Docker Cloud allows you to connect your local Docker Engine to any swarm you -have access to in Docker Cloud. There are a couple of different ways to do this, -depending on how you are running Docker on your local system: - -- [Connect to a swarm with a Docker Cloud generated run command](#connect-to-a-swarm-with-a-docker-cloud-generated-run-command) -- [Use Docker for Mac or Docker for Windows (Edge) to connect to swarms](#use-docker-for-mac-or-windows-edge-to-connect-to-swarms) - -## Connect to a swarm with a Docker Cloud generated run command - -On platforms other than Docker for Mac or Docker for Windows (Edge channel), you -can connect to a swarm manually at the command line by running a proxy container -in your local Docker instance, which connects to a manager node on the target -swarm. - -1. Log in to Docker Cloud in your web browser. -2. Click **Swarms** in the top navigation, and click the name of the swarm you want to connect to. -3. Copy the command provided in the dialog that appears. - - ![Connect to swarm popup](images/swarm-connect.png) - -4. In a terminal window connected to your local Docker Engine, paste the command, and press **Enter**. - - You are prompted for your Docker ID and password, then the local Docker Engine downloads a containerized Docker Cloud client tool, and connects to the swarm. 
- - ``` - $ docker run --rm -ti -v /var/run/docker.sock:/var/run/docker.sock -e DOCKER_HOST dockercloud/client orangesnap/vote-swarm - Use your Docker ID credentials to authenticate: - Username: orangesnap - Password: - - => You can now start using the swarm orangesnap/vote-swarm by executing: - export DOCKER_HOST=tcp://127.0.0.1:32770 -``` - -5. To complete the connection process, run the `export DOCKER_HOST` command as provided in the output of the previous command. This connects your local shell to the client proxy. - - Be sure to include the given client connection port in the URL. For our example, the command is: `export DOCKER_HOST=tcp://127.0.0.1:32770`. - - (If you are connecting to your first swarm, the _command:port_ is likely to be `export DOCKER_HOST=tcp://127.0.0.1:32768`.) - -6. Now, you can run `docker node ls` to verify that the swarm is running. - - Here is an example of `docker node ls` output for a swarm running one manager and two workers on **Amazon Web Services**. - - ``` - $ docker node ls - ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS - dhug6p7arwrm3a9j62zh0a0hf ip-172-31-23-167.us-west-1.compute.internal Ready Active - xmbxtffkrzaveqhyuouj0rxso ip-172-31-4-109.us-west-1.compute.internal Ready Active - yha4q9bleg80kvbn9tqgxd69g * ip-172-31-24-61.us-west-1.compute.internal Ready Active Leader - ``` - - Here is an example of `docker node ls` output for a swarm running one manager and two workers on **Microsoft Azure Cloud Services**. - - ``` - $ docker node ls - ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS - 6uotpiv8vyxsjzdtux13nkvj4 swarm-worker000001 Ready Active - qmvk4swo9rdv1viu9t88dw0t3 swarm-worker000000 Ready Active - w7kgzzdkka0k2svssz1dk1fzw * swarm-manager000000 Ready Active Leader - ``` - - From this point on, you can use the - [CLI commands](/engine/swarm/index.md#swarm-mode-cli-commands) - to manage your cloud-hosted [swarm mode](/engine/swarm/) just as you - would a local swarm. - -7. 
Now that your swarm is set up, try out the example to [deploy a service to the swarm](/engine/swarm/swarm-tutorial/deploy-service/), -and other subsequent tasks in the Swarm getting started tutorial. - -### Switch between your swarm and Docker hosts in the same shell - -To switch to Docker hosts: - -* If you are running Docker for Mac or Docker for Windows, and want to -connect to the Docker Engine for those apps, run `docker-machine env -u` -as a preview, then run the unset command: `eval $(docker-machine env -u)`. -For example: - - ``` - $ docker-machine env -u - unset DOCKER_TLS_VERIFY - unset DOCKER_HOST - unset DOCKER_CERT_PATH - unset DOCKER_MACHINE_NAME - # Run this command to configure your shell: - # eval $(docker-machine env -u) - ``` - -* If you are using Docker Machine, and want to switch to one of your local VMs, be sure to unset `DOCKER_TLS_VERIFY`. Best practice is similar to the previous step. Run `docker-machine env -u` as a preview, then run the unset command: `eval $(docker-machine env -u)`. Follow this with `docker-machine ls` to view your current machines, then connect to the one you want with `docker-machine env my-local-machine` and run the given `eval` command. For example: - - ``` - $ docker-machine env my-local-machine - export DOCKER_TLS_VERIFY="1" - export DOCKER_HOST="tcp://192.168.99.100:2376" - export DOCKER_CERT_PATH="/Users/victoriabialas/.docker/machine/machines/my-local-machine" - export DOCKER_MACHINE_NAME="my-local-machine" - # Run this command to configure your shell: - # eval $(docker-machine env my-local-machine) - ``` - -To switch back to the deployed swarm, re-run the `export DOCKER_HOST` command with the connection port for the swarm you want to work with. (For example, `export DOCKER_HOST=tcp://127.0.0.1:32770`) - -To learn more, see [Unset environment variables in the current shell](/machine/get-started/#unset-environment-variables-in-the-current-shell). 
- -## Use Docker for Mac or Windows (Edge) to connect to swarms - -On Docker for Mac and Docker for Windows current Edge releases, -you can access your Docker Cloud account and connect directly to your swarms through those Docker desktop application menus. - -* See [Docker Cloud (Edge feature) in Docker for Mac topics](/docker-for-mac/#docker-cloud-edge-feature) - -* See [Docker Cloud (Edge feature) in Docker for Windows topics](/docker-for-windows/#docker-cloud-edge-feature) - -> **Tip**: This is different from using Docker for Mac or Windows with -Docker Machine as described in previous examples. Here, we are -by-passing Docker Machine, and using the desktop Moby VM directly, so -there is no need to manually set shell environment variables. - -This works the same way on both Docker for Mac and Docker for Windows. - -Here is an example, showing the Docker for Mac UI. - -1. Make sure you are logged in to your Docker Cloud account on the desktop app. - - ![Docker for Mac Cloud login](images/d4mac-cloud-login.png) - -2. Choose the swarm you want from the menu. - - ![Docker for Mac Cloud login](images/d4mac-swarm-connect.png) - -3. A new terminal window opens and connects to the swarm you chose. The swarm name is shown at the prompt. For this example, we connected to `vote-swarm`. - - ```shell - [vote-swarm] ~ - ``` - -4. Now, you can run `docker node ls` to verify that the swarm is running. 
- - ```shell - [vote-swarm] ~ $ docker node ls - ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS - 7ex8inrg8xzgonaunwp35zxfl ip-172-31-6-204.us-west-1.compute.internal Ready Active - ec3kxibdxqhgw5aele7x853er * ip-172-31-0-178.us-west-1.compute.internal Ready Active Leader - z4ngrierv27wdm6oy0z3t9r1z ip-172-31-31-240.us-west-1.compute.internal Ready Active - ``` - -## Reconnect a swarm - -If you accidentally unregister a swarm from Docker Cloud, or decide that you -want to re-register the swarm after it has been removed, you can -[re-register it](register-swarms.md#register-a-swarm) using the same -process as a normal registration. If the swarm is registered to -an organization, its access permissions were deleted when it was -unregistered, and must be recreated. - -> **Note**: You cannot register a new or different swarm under the name of a -swarm that was unregistered. To re-register a swarm, it must have the same swarm -ID as it did when previously registered. - -## Where to go next - -Learn how to [create a new swarm in Docker Cloud](create-cloud-swarm.md). diff --git a/docker-cloud/cloud-swarm/create-cloud-swarm-aws.md b/docker-cloud/cloud-swarm/create-cloud-swarm-aws.md deleted file mode 100644 index 02ef330499..0000000000 --- a/docker-cloud/cloud-swarm/create-cloud-swarm-aws.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -previewflag: cloud-swarm -description: Create new swarms on AWS with Docker Cloud -keywords: swarm mode, swarms, create swarm, Cloud, AWS -title: Create a new swarm on Amazon Web Services in Docker Cloud ---- - -{% include content/cloud-swarm-overview.md %} - -## Link your service provider to Docker Cloud - -To create a swarm, you need to give Docker Cloud permission to deploy swarm -nodes on your behalf in your cloud services provider account. - -If you haven't yet linked Docker Cloud to AWS, follow the steps in [Link Amazon Web Services to Docker Cloud](link-aws-swarm.md). 
Once it's -linked, it shows up on the **Swarms -> Create** page as a connected service -provider. - -![](images/aws-creds-cloud.png) - -## Create a swarm - -1. If necessary, log in to Docker Cloud and switch to Swarm Mode - -2. Click **Swarms** in the top navigation, then click **Create**. - - Alternatively, you can select **+ -> Swarm** from the top navigation to get to the same page. - -3. Enter a name for the new swarm. - - Your Docker ID is pre-populated. In the example, our swarm name - is "vote-swarm". - - ![](images/aws-create-swarm-1-name.png) - - >**Tip:** For Docker Cloud, use all lower case letters for swarm names. No spaces, capitalized letters, or special characters other than `.`, `_`, or `-` are allowed. AWS does not accept underscores in the name `_`. - -4. Select Amazon Web Services as the service provider and select a channel (`Stable` or `Edge`) from the drop-down menu. - - You can learn more about **stable** and **edge** channels in the [Install Docker overview](/install/) and the [Docker CE Edge](/edge/) topics. - - In this example, we use the `Stable` channel. - - ![](images/aws-create-swarm-0.png) - -5. Select a **Region** from the drop-down menu. - - > **Tip:** The SSH keys available to you in the next steps are - filtered by the region you select here. Make sure that you have - appropriate SSH keys available on the region you select. - - Optionally, click **Region Advanced Settings** to configure a - [Virtual Private Cloud(VPC)](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Introduction.html) on which to run this swarm. - - ![](images/aws-create-swarm-3-region.png) - - For guidance on setting up a VPC, see [Recommended VPC and subnet setup](/docker-for-aws/faqs/#can-i-use-my-existing-vpc) in the Docker for AWS topics. - -6. Choose how many swarm managers and swarm worker nodes to deploy. - - Here, we create one manager and two worker nodes. 
(This maps nicely to the [Swarm tutorial setup](/engine/swarm/swarm-tutorial/index.md) and the [voting app sample in Docker Labs](https://github.com/docker/labs/blob/master/beginner/chapters/votingapp.md).) - - ![](images/cloud-create-swarm-4-size.png) - -7. Configure swarm properties. - - ![](images/aws-create-swarm-5-properties.png) - - * Select a public SSH key for Docker Cloud to use to connect to the - nodes on AWS. Public keys from the [key pairs you configured on AWS](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) are provided in the drop-down menu. Only keys associated with the - Region you selected (in step 5) are shown. - - * Choose whether to provide daily resource cleanup. - - Enabling this option helps to avoid charges for resources that you are no longer using. (See also, topics on [resource cleanup](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_CleaningUp.html) in the AWS documentation.) - - * Enable or disable Cloudwatch for container logging. - - When enabled, Docker sends container logs to [Amazon Cloudwatch](https://aws.amazon.com/cloudwatch/), as described in the Docker for AWS topic on [Logging](/docker-for-aws/index.md#logging). - -7. Select the instance sizes for the managers, and for the workers. - - ![](images/aws-create-swarm-6-manager-worker.png) - - In general, the larger your swarm, the larger the instance sizes you should use. See the Docker for AWS topics for more on [resource configuration](/docker-for-aws/index.md#configuration). - -9. Click **Create**. - - Docker for AWS bootstraps all of the recommended infrastructure to - start using Docker on AWS automatically. You don't need to worry - about rolling your own instances, security groups, or load balancers - when using Docker for AWS. (To learn more, see - [Why Docker for AWS](/docker-for-aws/why.md).) - - This takes a few minutes. When the swarm is ready, its indicator on the Swarms page shows steady green. 
- - ![](images/aws-create-swarm-7-list.png) - - > **Note**: At this time, you cannot add nodes to a swarm from - within Docker Cloud. To add new nodes to an existing swarm, - log in to your AWS account, and add nodes manually. (You can - unregister or dissolve swarms directly from Docker Cloud.) - -## Where to go next - -Learn how to [connect to a swarm through Docker Cloud](connect-to-swarm.md). - -Learn how to [register existing swarms](register-swarms.md). - -You can get an overivew of topics on [swarms in Docker Cloud](index.md). - -To find out more about Docker swarm in general, see the Docker engine -[Swarm Mode overview](/engine/swarm/). diff --git a/docker-cloud/cloud-swarm/create-cloud-swarm-azure.md b/docker-cloud/cloud-swarm/create-cloud-swarm-azure.md deleted file mode 100644 index fc54bfd44c..0000000000 --- a/docker-cloud/cloud-swarm/create-cloud-swarm-azure.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -previewflag: cloud-swarm -description: Create new swarms on Azure with Docker Cloud -keywords: swarm mode, swarms, create swarm, Cloud, Azure -title: Create a new swarm on Microsoft Azure in Docker Cloud ---- - -[![Deploying Swarms on Microsoft Azure with Docker Cloud](images/video-azure-docker-cloud.png)](https://www.youtube.com/watch?v=LlpyiGAVBVg "Deploying Swarms on Microsoft Azure with Docker Cloud"){:target="_blank" class="_"} - -{% include content/cloud-swarm-overview.md %} - -## Link Docker Cloud to your service provider - -To create a swarm, you need to give Docker Cloud permission to deploy swarm -nodes on your behalf in your cloud services provider account. - -If you haven't yet linked Docker Cloud to Azure, follow the steps in [Link Microsoft Azure Cloud Services to Docker Cloud](link-azure-swarm/). Once it's -linked, it shows up on the **Swarms -> Create** page as a connected service -provider. 
- -![](images/azure-creds-cloud.png) - -> **Note:** If you are using a Microsoft Azure Visual Studio MSDN -subscription, you need to enable _programmatic deployments_ on the Docker CE -VM Azure Marketplace item. See the Microsoft Azure blog post on [Working with -Marketplace Images on Azure Resource -Manager](https://azure.microsoft.com/en-us/blog/working-with-marketplace-images-on-azure-resource-manager/){: target="_blank" class="_"} for instructions on how to do this. - -## Create a swarm - -1. If necessary, log in to Docker Cloud and switch to Swarm Mode - -2. Click **Swarms** in the top navigation, then click **Create**. - - Alternatively, you can select **+ -> Swarm** from the top navigation to - get to the same page. - -3. Enter a name for the new swarm. - - Your Docker ID is pre-populated. In the example, our swarm name - is "vote_swarm". - - ![](images/azure-create-swarm-1-name.png) - - >**Tip:** Use all lower case letters for swarm names. No spaces, capitalized letters, or special characters other than `.`, `_`, or `-` are allowed. - -4. Select Microsoft Azure as the service provider, select a channel (`Stable` or `Edge`) from the drop-down menu, provide an App name, and select the Azure -Subscription you want to use. - - You can learn more about **stable** and **edge** channels in the [Install Docker overview](install/) and the [Docker CE Edge](/edge/) topics. - - In this example, we use the `Stable` channel, our app name is "voting_app" and we've selected a Pay-As-You-Go subscription. - - ![](images/azure-create-swarm-0.png) - -5. Make sure that **Create new resource group** is selected, provide a name for the group, and select a location from the drop-down menu. - - Our example app is called `swarm_vote_resources`, and it is located in West US. - - ![](images/azure-create-swarm-3-resource-group.png) - - >**Tip:** Be sure to create a new resource group for a swarm. 
If you choose to use an existing group, the swarm fails as Azure does not currently support this. - -6. Choose how many swarm managers and worker nodes to deploy. - - Here, we create one manager and two worker nodes. (This maps nicely to the [Swarm tutorial setup](/engine/swarm/swarm-tutorial/index.md) and the [voting app sample in Docker Labs](https://github.com/docker/labs/blob/master/beginner/chapters/votingapp.md).) - - ![](images/cloud-create-swarm-4-size.png) - -8. Configure swarm properties, SSH key and resource cleanup. - - Copy-paste the public [SSH key](ssh-key-setup.md) you want to use to connect to the nodes. (Provide the one for which you have the private key locally.) - - ![](images/azure-create-swarm-5-properties.png) - - * To list existing SSH keys: `ls -al ~/.ssh` - - * To copy the public SSH key to your clipboard: `pbcopy < ~/.ssh/id_rsa.pub` - - Choose whether to provide daily resource cleanup. (Enabling this - option helps avoid charges for resources that you are no longer - using.) - -7. Select the machine sizes for the managers, and for the workers. - - ![](images/azure-create-swarm-6-manager-worker.png) - - The larger your swarm, the larger the machine size you should use. - To learn more about resource setup, see [configuration options](/docker-for-azure/index.md#configuration) in the Docker - for Azure topics. - - You can find Microsoft Azure Linux Virtual Machine pricing and options [here](https://azure.microsoft.com/en-us/pricing/details/virtual-machines/linux/). - -9. Click **Create**. - - Docker for Azure bootstraps all of the recommended infrastructure to start - using Docker on Azure automatically. You don’t need to worry about rolling - your own instances, security groups, or load balancers when using Docker for - Azure. (To learn more, see [Why Docker for Azure](/docker-for-azure/why.md).) - - This takes a few minutes. When the swarm is ready, its indicator on the Swarms page shows steady green. 
- - ![](images/azure-create-swarm-7-list.png) - - > **Note**: At this time, you cannot add nodes to a swarm from - within Docker Cloud. To add new nodes to an existing swarm, - log in to your Azure account, and add nodes manually. (You can - unregister or dissolve swarms directly from Docker Cloud.) - -## Where to go next - -Learn how to [connect to a swarm through Docker Cloud](connect-to-swarm.md). - -Learn how to [register existing swarms](register-swarms.md). - -You can get an overivew of topics on [swarms in Docker Cloud](index.md). - -To find out more about Docker swarm in general, see the Docker engine -[Swarm Mode overview](/engine/swarm/). diff --git a/docker-cloud/cloud-swarm/images/aws-arn-wizard.png b/docker-cloud/cloud-swarm/images/aws-arn-wizard.png deleted file mode 100644 index 657107207b..0000000000 Binary files a/docker-cloud/cloud-swarm/images/aws-arn-wizard.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/aws-create-swarm-0.png b/docker-cloud/cloud-swarm/images/aws-create-swarm-0.png deleted file mode 100644 index caa0f97b35..0000000000 Binary files a/docker-cloud/cloud-swarm/images/aws-create-swarm-0.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/aws-create-swarm-1-name.png b/docker-cloud/cloud-swarm/images/aws-create-swarm-1-name.png deleted file mode 100644 index 7dbadf9177..0000000000 Binary files a/docker-cloud/cloud-swarm/images/aws-create-swarm-1-name.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/aws-create-swarm-3-region-x.png b/docker-cloud/cloud-swarm/images/aws-create-swarm-3-region-x.png deleted file mode 100644 index 74fba134e9..0000000000 Binary files a/docker-cloud/cloud-swarm/images/aws-create-swarm-3-region-x.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/aws-create-swarm-3-region.png b/docker-cloud/cloud-swarm/images/aws-create-swarm-3-region.png deleted file mode 100644 index 099941b340..0000000000 Binary files 
a/docker-cloud/cloud-swarm/images/aws-create-swarm-3-region.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/aws-create-swarm-4-size.png b/docker-cloud/cloud-swarm/images/aws-create-swarm-4-size.png deleted file mode 100644 index 3780f762d4..0000000000 Binary files a/docker-cloud/cloud-swarm/images/aws-create-swarm-4-size.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/aws-create-swarm-5-properties.png b/docker-cloud/cloud-swarm/images/aws-create-swarm-5-properties.png deleted file mode 100644 index 97c5c172dc..0000000000 Binary files a/docker-cloud/cloud-swarm/images/aws-create-swarm-5-properties.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/aws-create-swarm-6-manager-worker.png b/docker-cloud/cloud-swarm/images/aws-create-swarm-6-manager-worker.png deleted file mode 100644 index 817f3276cb..0000000000 Binary files a/docker-cloud/cloud-swarm/images/aws-create-swarm-6-manager-worker.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/aws-create-swarm-7-list-x.png b/docker-cloud/cloud-swarm/images/aws-create-swarm-7-list-x.png deleted file mode 100644 index 9f99aa6e73..0000000000 Binary files a/docker-cloud/cloud-swarm/images/aws-create-swarm-7-list-x.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/aws-create-swarm-7-list.png b/docker-cloud/cloud-swarm/images/aws-create-swarm-7-list.png deleted file mode 100644 index 59904fe9be..0000000000 Binary files a/docker-cloud/cloud-swarm/images/aws-create-swarm-7-list.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/aws-create-swarm-vpc.png b/docker-cloud/cloud-swarm/images/aws-create-swarm-vpc.png deleted file mode 100644 index 18b7d247f6..0000000000 Binary files a/docker-cloud/cloud-swarm/images/aws-create-swarm-vpc.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/aws-creds-cloud.png b/docker-cloud/cloud-swarm/images/aws-creds-cloud.png deleted file mode 100644 index 
3c111375fa..0000000000 Binary files a/docker-cloud/cloud-swarm/images/aws-creds-cloud.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/aws-patch-snip-it.png b/docker-cloud/cloud-swarm/images/aws-patch-snip-it.png deleted file mode 100644 index 9125315f3c..0000000000 Binary files a/docker-cloud/cloud-swarm/images/aws-patch-snip-it.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/aws-swarm-iam-role-1.png b/docker-cloud/cloud-swarm/images/aws-swarm-iam-role-1.png deleted file mode 100644 index 0e742dab51..0000000000 Binary files a/docker-cloud/cloud-swarm/images/aws-swarm-iam-role-1.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/aws-swarm-iam-role-2.png b/docker-cloud/cloud-swarm/images/aws-swarm-iam-role-2.png deleted file mode 100644 index 24eccfb864..0000000000 Binary files a/docker-cloud/cloud-swarm/images/aws-swarm-iam-role-2.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/aws-swarm-iam-role-3.png b/docker-cloud/cloud-swarm/images/aws-swarm-iam-role-3.png deleted file mode 100644 index 6030be5517..0000000000 Binary files a/docker-cloud/cloud-swarm/images/aws-swarm-iam-role-3.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/aws-swarm-iam-role-4-policy.png b/docker-cloud/cloud-swarm/images/aws-swarm-iam-role-4-policy.png deleted file mode 100644 index 01b8c3b92d..0000000000 Binary files a/docker-cloud/cloud-swarm/images/aws-swarm-iam-role-4-policy.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/aws-swarm-iam-role-orig.png b/docker-cloud/cloud-swarm/images/aws-swarm-iam-role-orig.png deleted file mode 100644 index fba6ee61c8..0000000000 Binary files a/docker-cloud/cloud-swarm/images/aws-swarm-iam-role-orig.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/azure-create-swarm-0.png b/docker-cloud/cloud-swarm/images/azure-create-swarm-0.png deleted file mode 100644 index 4167302eff..0000000000 Binary files 
a/docker-cloud/cloud-swarm/images/azure-create-swarm-0.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/azure-create-swarm-1-name.png b/docker-cloud/cloud-swarm/images/azure-create-swarm-1-name.png deleted file mode 100644 index 4cc613ae47..0000000000 Binary files a/docker-cloud/cloud-swarm/images/azure-create-swarm-1-name.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/azure-create-swarm-2-appname.png b/docker-cloud/cloud-swarm/images/azure-create-swarm-2-appname.png deleted file mode 100644 index a5f715815f..0000000000 Binary files a/docker-cloud/cloud-swarm/images/azure-create-swarm-2-appname.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/azure-create-swarm-3-resource-group.png b/docker-cloud/cloud-swarm/images/azure-create-swarm-3-resource-group.png deleted file mode 100644 index 0ebfcc402e..0000000000 Binary files a/docker-cloud/cloud-swarm/images/azure-create-swarm-3-resource-group.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/azure-create-swarm-5-properties.png b/docker-cloud/cloud-swarm/images/azure-create-swarm-5-properties.png deleted file mode 100644 index 759497d077..0000000000 Binary files a/docker-cloud/cloud-swarm/images/azure-create-swarm-5-properties.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/azure-create-swarm-6-manager-worker.png b/docker-cloud/cloud-swarm/images/azure-create-swarm-6-manager-worker.png deleted file mode 100644 index 0711479e83..0000000000 Binary files a/docker-cloud/cloud-swarm/images/azure-create-swarm-6-manager-worker.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/azure-create-swarm-7-list-x.png b/docker-cloud/cloud-swarm/images/azure-create-swarm-7-list-x.png deleted file mode 100644 index fac47de79c..0000000000 Binary files a/docker-cloud/cloud-swarm/images/azure-create-swarm-7-list-x.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/azure-create-swarm-7-list.png 
b/docker-cloud/cloud-swarm/images/azure-create-swarm-7-list.png deleted file mode 100644 index 9453722d2f..0000000000 Binary files a/docker-cloud/cloud-swarm/images/azure-create-swarm-7-list.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/azure-create-swarm.png b/docker-cloud/cloud-swarm/images/azure-create-swarm.png deleted file mode 100644 index 86baf90e31..0000000000 Binary files a/docker-cloud/cloud-swarm/images/azure-create-swarm.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/azure-creds-cloud.png b/docker-cloud/cloud-swarm/images/azure-creds-cloud.png deleted file mode 100644 index a7766c2278..0000000000 Binary files a/docker-cloud/cloud-swarm/images/azure-creds-cloud.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/azure-docker-app-enabled.png b/docker-cloud/cloud-swarm/images/azure-docker-app-enabled.png deleted file mode 100644 index 11c908e37c..0000000000 Binary files a/docker-cloud/cloud-swarm/images/azure-docker-app-enabled.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/azure-eula-1-marketplace.png b/docker-cloud/cloud-swarm/images/azure-eula-1-marketplace.png deleted file mode 100644 index 3089b329a6..0000000000 Binary files a/docker-cloud/cloud-swarm/images/azure-eula-1-marketplace.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/azure-eula-2-deploy-vm.png b/docker-cloud/cloud-swarm/images/azure-eula-2-deploy-vm.png deleted file mode 100644 index 0c667eacb4..0000000000 Binary files a/docker-cloud/cloud-swarm/images/azure-eula-2-deploy-vm.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/azure-eula-3-enable-subscription.png b/docker-cloud/cloud-swarm/images/azure-eula-3-enable-subscription.png deleted file mode 100644 index 5cff303299..0000000000 Binary files a/docker-cloud/cloud-swarm/images/azure-eula-3-enable-subscription.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/azure-eula-4-verify.png 
b/docker-cloud/cloud-swarm/images/azure-eula-4-verify.png deleted file mode 100644 index ddb9d37e24..0000000000 Binary files a/docker-cloud/cloud-swarm/images/azure-eula-4-verify.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/azure-extra-create-swarm-1.png b/docker-cloud/cloud-swarm/images/azure-extra-create-swarm-1.png deleted file mode 100644 index ff82d3e423..0000000000 Binary files a/docker-cloud/cloud-swarm/images/azure-extra-create-swarm-1.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/azure-extra-create-swarm-2-details.png b/docker-cloud/cloud-swarm/images/azure-extra-create-swarm-2-details.png deleted file mode 100644 index 7278b11d6e..0000000000 Binary files a/docker-cloud/cloud-swarm/images/azure-extra-create-swarm-2-details.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/azure-extra-create-swarm-3-properties.png b/docker-cloud/cloud-swarm/images/azure-extra-create-swarm-3-properties.png deleted file mode 100644 index 7efb06a586..0000000000 Binary files a/docker-cloud/cloud-swarm/images/azure-extra-create-swarm-3-properties.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/azure-global-admin-permissions.png b/docker-cloud/cloud-swarm/images/azure-global-admin-permissions.png deleted file mode 100644 index 9f5c061ba5..0000000000 Binary files a/docker-cloud/cloud-swarm/images/azure-global-admin-permissions.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/azure-global-admin-plugin.png b/docker-cloud/cloud-swarm/images/azure-global-admin-plugin.png deleted file mode 100644 index 327736686d..0000000000 Binary files a/docker-cloud/cloud-swarm/images/azure-global-admin-plugin.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/azure-id-wizard.png b/docker-cloud/cloud-swarm/images/azure-id-wizard.png deleted file mode 100644 index eecde012f1..0000000000 Binary files a/docker-cloud/cloud-swarm/images/azure-id-wizard.png and /dev/null 
differ diff --git a/docker-cloud/cloud-swarm/images/azure-permissions.png b/docker-cloud/cloud-swarm/images/azure-permissions.png deleted file mode 100644 index e45ec29554..0000000000 Binary files a/docker-cloud/cloud-swarm/images/azure-permissions.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/azure-subscription-id.png b/docker-cloud/cloud-swarm/images/azure-subscription-id.png deleted file mode 100644 index bac8c075f1..0000000000 Binary files a/docker-cloud/cloud-swarm/images/azure-subscription-id.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/cloud-create-swarm-4-size.png b/docker-cloud/cloud-swarm/images/cloud-create-swarm-4-size.png deleted file mode 100644 index 3ee7159da8..0000000000 Binary files a/docker-cloud/cloud-swarm/images/cloud-create-swarm-4-size.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/cloud-swarm-home.png b/docker-cloud/cloud-swarm/images/cloud-swarm-home.png deleted file mode 100644 index 9a79204792..0000000000 Binary files a/docker-cloud/cloud-swarm/images/cloud-swarm-home.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/cloud-swarms.png b/docker-cloud/cloud-swarm/images/cloud-swarms.png deleted file mode 100644 index 753922a0d0..0000000000 Binary files a/docker-cloud/cloud-swarm/images/cloud-swarms.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/d4mac-cloud-login.png b/docker-cloud/cloud-swarm/images/d4mac-cloud-login.png deleted file mode 100644 index 911d86435b..0000000000 Binary files a/docker-cloud/cloud-swarm/images/d4mac-cloud-login.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/d4mac-swarm-connect.png b/docker-cloud/cloud-swarm/images/d4mac-swarm-connect.png deleted file mode 100644 index b8241b075d..0000000000 Binary files a/docker-cloud/cloud-swarm/images/d4mac-swarm-connect.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/d4win-swarm-connect.png 
b/docker-cloud/cloud-swarm/images/d4win-swarm-connect.png deleted file mode 100644 index 323f7a5f5c..0000000000 Binary files a/docker-cloud/cloud-swarm/images/d4win-swarm-connect.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/d4win-swarm-connected-shell.png b/docker-cloud/cloud-swarm/images/d4win-swarm-connected-shell.png deleted file mode 100644 index 154ba80e0e..0000000000 Binary files a/docker-cloud/cloud-swarm/images/d4win-swarm-connected-shell.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/swarm-connect.png b/docker-cloud/cloud-swarm/images/swarm-connect.png deleted file mode 100644 index cace323c98..0000000000 Binary files a/docker-cloud/cloud-swarm/images/swarm-connect.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/swarm-toggle.png b/docker-cloud/cloud-swarm/images/swarm-toggle.png deleted file mode 100644 index afe2c03e6f..0000000000 Binary files a/docker-cloud/cloud-swarm/images/swarm-toggle.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/images/video-azure-docker-cloud.png b/docker-cloud/cloud-swarm/images/video-azure-docker-cloud.png deleted file mode 100644 index 17c7d8cc13..0000000000 Binary files a/docker-cloud/cloud-swarm/images/video-azure-docker-cloud.png and /dev/null differ diff --git a/docker-cloud/cloud-swarm/index.md b/docker-cloud/cloud-swarm/index.md deleted file mode 100644 index 16404faa86..0000000000 --- a/docker-cloud/cloud-swarm/index.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -previewflag: cloud-swarm -description: Work with swarms in Docker Cloud -keywords: swarm mode, swarms, orchestration Cloud, fleet management -redirect_from: -- /docker-cloud/cloud-swarm/create-cloud-swarm/ -title: Swarms in Docker Cloud (Beta) -notoc: true ---- - -Docker Cloud now allows you to connect to clusters of Docker Engines in [swarm mode](/engine/swarm/). 
- -With Beta Swarm Mode in Docker Cloud, you can provision swarms to popular cloud -providers, or register existing swarms to Docker Cloud. Use your Docker ID to -authenticate and securely access personal or team swarms. - -* [Using Swarm Mode with Docker Cloud](using-swarm-mode.md) - -* [Swarm Mode and organizations](using-swarm-mode.md#swarm-mode-and-organizations) - -* [Register existing swarms](register-swarms.md) - -* [Create a new swarm on AWS in Docker Cloud](create-cloud-swarm-aws.md) - -* [Create a new swarm on Microsoft Azure in Docker Cloud](create-cloud-swarm-azure.md) - -* [Connect to a swarm through Docker Cloud](connect-to-swarm.md) - -* [Link Amazon Web Services to Docker Cloud](link-aws-swarm.md) - -* [Link Microsoft Azure Cloud Services to Docker Cloud](link-azure-swarm.md) - -* [Set up SSH keys](ssh-key-setup.md) diff --git a/docker-cloud/cloud-swarm/link-aws-swarm.md b/docker-cloud/cloud-swarm/link-aws-swarm.md deleted file mode 100644 index 410001fdd2..0000000000 --- a/docker-cloud/cloud-swarm/link-aws-swarm.md +++ /dev/null @@ -1,148 +0,0 @@ ---- -previewflag: cloud-swarm -description: Link your Amazon Web Services account -keywords: AWS, Cloud, link -title: Link Amazon Web Services to Docker Cloud ---- - -You can create a role with AWS IAM (Identity and Access Management) so that -Docker Cloud can provision and manage swarms on your behalf. - -> **Note**: Your AWS account must support EC2-VPC to deploy swarms, and -you must also have an [SSH key in each AWS region](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) -you deploy swarms in. - -If you used Docker Cloud with AWS to deploy classic Node Clusters, you can add -the new policy to your existing role by following the instructions -[below](#attach-a-policy-for-legacy-aws-links). - -## Create a dockercloud-swarm-role role with an embedded policy - -1. Go to the AWS IAM Role creation panel at https://console.aws.amazon.com/iam/home#roles. Click **Create role**. - -2. 
Select **Another AWS account** to allow your Docker Cloud account to perform actions in this AWS account. - - ![link aws accounts](images/aws-swarm-iam-role-1.png) - -3. In the **Account ID** field, enter the ID for the Docker Cloud service: `689684103426`. - -4. Select **Require external ID (Best practice when a third party will assume this role)**. - - * In the **External ID** field, enter the namespace - to link. - - This is either your Docker Cloud username, - or if you are using Organizations in Docker Cloud, - the organization name. Failure to use the correct - name results in the following error - message: `Invalid AWS credentials or insufficient - EC2 permissions` when attempting to link your - Docker account to your AWS account. - - * Leave **Require MFA** unchecked. - - Click **Next: Permissions**. - -5. On the next screen, do not select a policy (you add the policy in a later step). - - Click **Next: Review**. - - ![review settings](images/aws-swarm-iam-role-3.png) - -6. Give the new role a name, such as `dockercloud-swarm-role`. - - > **Note**: You must use one role per Docker Cloud account - namespace, so if you use a single AWS account for - multiple Docker Cloud accounts, you should add an - identifying namespace to the end of the name. For example, - you might have `dockercloud-swarm-role-moby` and - `dockercloud-swarm-role-teamawesome`. - -7. Click **Create Role**. - - AWS IAM creates the new role and returns you to the **Roles** list. - -8. Click the name of the role you just created to view its details. - -9. On the **Permissions** tab, click **+ Add an inline policy**. - -11. Choose the **JSON** tab. - -12. Copy and paste the policy document found in the [Docker for AWS page](/docker-for-aws/iam-permissions/). - - ![attach a policy](images/aws-swarm-iam-role-4-policy.png) - -13. Click **Review Policy**. The policy validator reports any syntax errors. - Give the policy a name like `dockercloud-swarm-policy` and an optional - description. - -14. 
Click **Create Policy** to save your work. - -15. Back on the role view, click into the new role to view details, and copy the full **Role ARN** string. - - The ARN string should look something like `arn:aws:iam::123456789123:role/dockercloud-swarm-role`. The next step requires the. - - ![Role summary showing Role ARN](images/aws-swarm-iam-role-2.png) - -Now skip down to the topic on how to -[Add your AWS account credentials to Docker Cloud](#add-your-aws-account-credentials-to-docker-cloud). - -## Attach a policy for legacy AWS links - -If you already have your AWS account connected to Docker Cloud and used the -legacy node cluster functionality you need to create and attach a new -policy, and re-link your account. - -1. Go to the AWS IAM Roles list at https://console.aws.amazon.com/iam/home#roles. - -2. Click your existing version of the `dockercloud-role`. - -3. On the **Permissions** tab, click **+ Add an inline policy**. - -5. On the next page, click **Custom Policy** and click **Select**. - -6. On the **Policy Editor** page that appears, give the policy a name like `dockercloud-swarm-policy`. - -7. In the **Policy Document** section, copy and paste the policy document found in the [Docker for AWS page](/docker-for-aws/iam-permissions/). - -8. Click **Validate Policy**. - -9. If the validation succeeds, click **Apply Policy**. - -10. Select and copy the **Role ARN** on the role screen. - It shouldn't have changed, but you need it to re-link your account. - -Because you edited the role's permissions, you need to re-link -to your account. Back in Docker Cloud, click the account menu and -select **Cloud Settings**, and in the **Service providers** section, -click the green plug icon to _unlink_ your AWS account. - -Then, follow the instructions below to re-link your account. 
- -## Add your AWS account credentials to Docker Cloud - -Once you've created the a `dockercloud-swarm-policy`, -added the `dockercloud-swarm-role` inline, and have the role's -Role ARN, go back to Docker Cloud to connect the account. - -1. In Docker Cloud, click the account menu at the upper right and select **Cloud settings**. -2. In the **Service providers** section, click the plug icon next to Amazon Web Services. - - ![Add AWS Credentials popup](images/aws-arn-wizard.png) - -3. Enter the full `Role ARN` for the role you just created. -4. Click **Save**. - - ![Service providers list, showing newly added AWS credentials](images/aws-creds-cloud.png) - -You are now ready to deploy a swarm! - -## Where to go next - -**Ready to create swarms on AWS?** See [Create a new swarm on Amazon Web Services in Docker Cloud](create-cloud-swarm-aws.md). - -You can get an overview of topics on [swarms in Docker Cloud](index.md). - -**Using Standard Mode to managing Docker nodes on AWS?** If you are -setting up nodes on AWS in [Standard Mode](/docker-cloud/standard/), -go back to [What's next in Standard Mode](/docker-cloud/infrastructure/link-aws.md##whats-next). diff --git a/docker-cloud/cloud-swarm/link-azure-swarm.md b/docker-cloud/cloud-swarm/link-azure-swarm.md deleted file mode 100644 index ab6c74a210..0000000000 --- a/docker-cloud/cloud-swarm/link-azure-swarm.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -previewflag: cloud-swarm -description: Link your Microsoft Azure account -keywords: Azure, Cloud, link -title: Link Microsoft Azure Cloud Services to Docker Cloud ---- - -You can link your [Microsoft Azure Cloud Services](https://portal.azure.com/) account so that Docker Cloud can provision and -manage swarms on your behalf. - -For this, you need an SSH key and your Azure subscription ID to authenticate Docker to your service provider. Also, you need to enable your Azure subscription on behalf of Docker Cloud. 
- -## Create or locate the SSH key you want to use - -When you are ready to create and deploy swarms, you must have an [SSH](`/engine/reference/glossary.md#ssh`) key to authenticate Docker Cloud to your Azure account. See the topic [Set up SSH keys](/docker-cloud/cloud-swarm/ssh-key-setup.md) to learn how to check for existing keys or set up a new one, and copy the public key. - -## Find your Azure subscription ID - -You also need your Azure Cloud Services subscription ID to provide to -Docker Cloud. There are a few ways to navigate to it on Azure. - -You can click a resource from the Dashboard and find the subscription ID under -"Essentials" on the resulting display. Alternatively, from the left menu, go to -**Billing -> Subscriptions -> Subscription ID** or simply click -**Subscriptions**, then click a subscription in the list to drill down. - -![Azure subscriptions page](images/azure-subscription-id.png) - -When you are ready to add your subscription ID to Docker Cloud, -copy it from your Azure Dashboard. - -## Add your Azure account credentials to Docker Cloud - -Go to Docker Cloud to connect the account. - -1. In Docker Cloud, click the account menu at the upper right and -select **Cloud settings**. - -2. In the **Service Providers** section, click the plug icon next to -Microsoft Azure. - - ![Enter Azure subscription ID popup](images/azure-id-wizard.png) - - >**Tip:** If you are a member of an Azure Organization, your - administrator must first link to Docker Cloud as described in - [Link an Azure Organization as Global Admin](#link-an-azure-organization-as-global-admin). - -3. Provide your subscription ID and click **Save**. - - This grants Docker Cloud access to your Microsoft Azure account, and links - the two accounts. Your Azure login credentials automatically populate - to Docker Cloud under **Service Providers -> Microsoft Azure**. 
- - ![Microsoft Azure entry in Service providers list](images/azure-creds-cloud.png) - -## Enable your Azure subscription for Docker Cloud - -You need to verify Microsoft Azure terms of use and manually enable your Azure subscription on behalf of Docker Cloud. You need do this only once, before you attempt to deploy a Docker Cloud Microsoft Azure swarm for the first time. - ->**Tip:** If your Azure subscription is not enabled first, the swarm deployment fails. The error shows up on your **Azure portal -> resource groups -> _ResourceGroupForYourSwarm_ -> deployments**. - -1. Go to the [Microsoft Azure Marketplace](https://portal.azure.com/#blade/Microsoft_Azure_Marketplace/GalleryFeaturedMenuItemBlade/selectedMenuItemId/home) and search for **Docker**, or specifically **Docker for Azure CE**. - - ![](images/azure-eula-1-marketplace.png) - -2. Select **Docker for Azure CE** and click the option on the lower right to deploy programmatically. - - ![](images/azure-eula-2-deploy-vm.png) - -3. Read the terms of use, click **Enable** for your subscription, and click **Save**. - - ![](images/azure-eula-3-enable-subscription.png) - -4. Verify that your subscription is enabled. - - Go to **Dashboard -> Subscriptions** to view details on your current subscriptions. Docker for Azure CE should be listed as enabled Programmatic deployment. - - ![](images/azure-eula-4-verify.png) - -You are now ready to deploy a swarm! - -## Link an Azure Organization as Global Admin - -For members of Azure Organizations, your gobal administrator first needs to link -their account and grant permissions to Docker Cloud for the organization as a -whole. Then, you can link your individual user account to Docker Cloud with your -subscription ID. - -The Global Admin steps are as follows: - -1. Go to [Microsoft Azure](https://portal.azure.com/), find your - Subscription ID for Global Admin rights (**Azure -> Billing -> - Subscriptions -> Subscriptions ID**), and copy the ID. - -2. 
On Docker Cloud, go to **Cloud Settings -> Service Providers**. - - ![](images/azure-global-admin-plugin.png) - -3. Click **Global Admin**, paste your Global Admin Subscription ID from -Azure into the field, and click **Save**. - - ![](images/azure-global-admin-permissions.png) - - Azure Organization members can now link their user accounts to - Docker Cloud per the individual user instructions above. - -## Where to go next - -**Ready to create swarms on Azure?** See [Create a new swarm on Microsoft Azure in Docker Cloud](create-cloud-swarm-azure.md). - -You need an SSH key to provide to Docker Cloud during the swarm create -process. If you haven't done so yet, check out how to [Set up SSH -keys](ssh-key-setup.md). - -You can get an overivew of topics on [swarms in Docker Cloud](index.md). - -**Using Standard Mode to managing Docker nodes on Azure?** If you are -setting up nodes on Azure in [Standard Mode](/docker-cloud/standard/), -go back to [What's next in Standard Mode](/docker-cloud/infrastructure/link-azure.md##whats-next). diff --git a/docker-cloud/cloud-swarm/register-swarms.md b/docker-cloud/cloud-swarm/register-swarms.md deleted file mode 100644 index 0597e58de0..0000000000 --- a/docker-cloud/cloud-swarm/register-swarms.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -previewflag: cloud-swarm -description: how to register and unregister swarms in Docker Cloud -keywords: register swarms, unregister swarms -title: Register existing swarms ---- - -With Beta Swarm Mode, you can register existing -swarms with Docker Cloud to easily -manage multiple swarms running anywhere. - -Before you begin, you need the following: - -- a Docker ID -- a Docker swarm composed of v1.13 (or later) Docker Engine nodes -- a terminal session connected to one of the swarm's manager nodes -- incoming port 2376 unblocked on that manager node - -> **Note**: The IP to the manager node for your swarm must be open and publicly accessible so that Docker Cloud can connect and run commands. 
- -## Register a swarm - -To register an existing swarm in Docker Cloud: - -1. Log in to Docker Cloud if necessary. -2. If necessary, click the **Swarm Mode** toggle to activate the Swarm Mode interface. -3. Click **Swarms** in the top navigation. -4. Click **Bring your own swarm**. -5. Select the whole command displayed in the dialog, and copy it to your clipboard. -6. In terminal or another shell, connect to the Docker Engine running in the swarm's manager node using SSH. -7. Paste the command you copied into the terminal session connected to the manager node. -8. When prompted, log in using your Docker ID and password. - - The registration process uses your Docker ID to determine which namespaces you have access to. Once you log in, the CLI lists these namespaces to help you with the next step. - -9. Enter a name, with a namespace before the name if needed, and press Enter. - - If you do not enter a name, the swarm is registered to your Docker ID account using the swarm ID, which is the long string displayed before the shell prompt. For example, the prompt might look like this: - - ```none - Enter a name for the new cluster [mydockerid/5rdshkgzn1sw016zimgckzx3j]: - ``` - - Enter a name at the prompt to prevent Docker Cloud from registering the swarm using the long swarm ID as the name. - - To register a swarm with an organization, prefix the new name with the organization name, for example `myorganization/myteamswarm`. - -The manager node pulls the `dockercloud/registration` container which creates a -global service called `dockercloud-server-proxy`. This service runs on _all_ of -the swarm's manager nodes. - -The swarm then appears in the **Swarms** screen in Docker Cloud. 
- -### Swarm Registration example - -```none -$ docker run -ti --rm -v /var/run/docker.sock:/var/run/docker.sock dockercloud/registration -Use your Docker ID credentials to authenticate: -Username: myusername -Password: - -Available namespaces: -* myorganization -* pacificocean -* sealife -Enter name for the new cluster [myusername/1btbwtge4xwjj0mjpdpr7jutn]: myusername/myswarm -Registering this Docker cluster with Docker Cloud... -Successfully registered the node as myswarm -You can now access this cluster using the following command in any Docker Engine: - docker run --rm -ti -v /var/run/docker.sock:/var/run/docker.sock -e DOCKER_HOST dockercloud/client myswarm -``` - -![List of swarms in Docker Cloud](images/cloud-swarms.png) - -## Swarm states in Docker Cloud - -Swarms that are registered in Docker Cloud appear in the Swarms list. Each line in the list shows the swarm's state. The states are: - -| State | Description | Actions available | -|:---------------|:--------------------------------------------------------|:---------------------------------| -| **DEPLOYING** | Docker Cloud is in the process of provisioning the swarm. | None | -| **DEPLOYED** | The swarm is running, connected, and sending heartbeat pings to Docker Cloud, and Cloud can contact it to run a health check. | All (Edit endpoint, remove) | -| **UNREACHABLE** | The swarm is sending heartbeat pings and Docker Cloud is receiving them, but Cloud cannot connect to the swarm. | Remove | -| **UNAVAILABLE** | Docker Cloud is not receiving heartbeats from the swarm. | Remove | -| **TERMINATING** | Docker Cloud is in the process of destroying this swarm. | None | -| **TERMINATED** | The swarm has been destroyed and is removed from the list in 5 minutes. | None | -| **REMOVED** | The swarm was unregistered from Docker Cloud but not destroyed. The swarm is removed from list in 5 minutes. | None | -| **FAILED** | Provisioning failed. 
| Remove | - -### Understanding and resolving problems - -* If a swarm is UNREACHABLE, it may be behind a firewall or NAT. - -* If a swarm is UNAVAILABLE check the swarm from your infrastructure provider. The manager node(s) may be unresponsive or the server proxy service might not be running. You can SSH into an UNAVAILABLE swarm. - -* Removing a swarm only removes the swarm from the interface in Docker Cloud, effectively - [unregistering](#unregister-a-swarm-from-docker-cloud) it. It does not - change the swarm itself or any processes running on the swarm. - -## Unregister a swarm from Docker Cloud - -Unregistering a swarm from Docker Cloud only removes the swarm from Docker -Cloud, deletes any access rights granted to teams, and disables proxy -connections. Unregistering does not stop the services, containers, or processes on the swarm, and it does not disband the swarm or terminate the nodes. - -To unregister a swarm from Docker Cloud: - -1. Log in to Docker Cloud if necessary. -2. Click **Swarms** in the top navigation. -3. Put your mouse cursor on the swarm you want to unregister. -4. Click the trash can icon that appears. -5. In the confirmation dialog that appears, click **Remove**. - -Docker Cloud marks the swarm as `REMOVED` and removes the swarm from the list in -the next few minutes. - -## Where to go next - -Learn how to [connect to a swarm through Docker Cloud](connect-to-swarm.md). diff --git a/docker-cloud/cloud-swarm/ssh-key-setup.md b/docker-cloud/cloud-swarm/ssh-key-setup.md deleted file mode 100644 index 116df1f5b5..0000000000 --- a/docker-cloud/cloud-swarm/ssh-key-setup.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -previewflag: cloud-swarm -description: How to set up SSH keys -keywords: Cloud, SSH keys, Azure, link -title: Set up SSH keys ---- - -You can link your Cloud and Service providers so that Docker Cloud can provision and manage swarms on your behalf. For this, you need an SSH key to authenticate Docker to your provider. 
- -## About SSH - -{% include content/ssh/ssh-overview.md %} - -## Check for existing SSH keys - -You may not need to generate a new SSH key if you have an existing key that you -want to reuse. - -{% include content/ssh/ssh-find-keys.md %} - -If you find an existing key you want to use, skip to the topic that describes -how to [copy your public key for use with Docker -Cloud](#copy-your-public-key-for-use-with-docker-cloud). - -Otherwise, [create a new SSH -key](#create-a-new-ssh-key-for-use-by-docker-cloud). - -## Create a new SSH key - -{% include content/ssh/ssh-gen-keys.md %} - -## Add your key to the ssh-agent - -{% include content/ssh/ssh-add-keys-to-agent.md %} - -## Copy your public key for use with Docker Cloud - -You need your SSH public key to provide to Docker Cloud. When you are ready -to add it, you can copy the public key as follows. - -{% include content/ssh/ssh-copy-key.md %} - -## Related topics - -* [Swarms in Docker Cloud](index.md) - -* [Link Docker Cloud to Amazon Web Services](link-aws-swarm.md) - -* [Link Docker Cloud to Microsoft Azure Cloud Services](link-azure-swarm.md) - -* [Create a new swarm on Microsoft Azure in Docker Cloud](create-cloud-swarm-azure.md) - -* [Create a new swarm on AWS in Docker Cloud](create-cloud-swarm-aws.md) diff --git a/docker-cloud/cloud-swarm/using-swarm-mode.md b/docker-cloud/cloud-swarm/using-swarm-mode.md deleted file mode 100644 index 9ccfa31aac..0000000000 --- a/docker-cloud/cloud-swarm/using-swarm-mode.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -previewflag: cloud-swarm -description: Work with swarms in Docker Cloud -keywords: swarm mode, swarms, orchestration Cloud, fleet management -title: Using Swarm Mode with Docker Cloud ---- - -Docker Cloud now allows you to connect to clusters of Docker Engines running in v1.13 [swarm mode](/engine/swarm/). - -## Swarm Mode - -The current release of Docker Cloud automatically opts in new users to Beta -Swarm Mode. 
- -> **Note**: All Swarm management features in Docker Cloud are free while in Beta. - -### New to Docker Cloud? - -If you just signed up as a new Docker Cloud user, -Swarm Mode is baked into the standard web -interface. Once you sign in, you are ready to -get started managing and deploying your apps in -Docker Cloud! - -### Already a Docker Cloud user? - -If you are already a Docker Cloud user, you still have access to legacy -features for managing [node clusters](/docker-cloud/infrastructure/). You also -have the option to switch to the Beta Swarm Mode interface. Swarm management -replaces node cluster management features when Swarm Mode is enabled. - -#### Enable Swarm Mode in Docker Cloud - -Click the **Swarm Mode** toggle to enable the Swarm Mode interface. - -![the Swarm Mode toggle](images/swarm-toggle.png) - -You can switch between node cluster and Swarm Mode at any time, and enabling -Swarm Mode does _not_ remove or disconnect existing node clusters. The content, -stacks, and node clusters you already created are available to you and -collaborators. - -## Swarm Mode and organizations - -If you use Docker Cloud in an [organization](/docker-cloud/orgs/), you can -use Swarm Mode to access any Docker swarms available to your organization. -Members of the `owners` team grant each team in an organization access to the -swarms they need. If necessary, you can create new teams to manage beta swarm -access. - -If you use Swarm Mode as a member of a team other than the `owners` team, -you only see the swarms that you have been granted access to. - -Members of the `owners` team must switch to the Swarm Mode Docker Cloud -interface to grant teams access to an organization's swarms. Swarms only appear -in the [resource management](/docker-cloud/orgs/#/set-team-permissions) screens -for teams when in the Swarm Mode interface. - -## Where to go next - -Learn how to [register an existing swarm](register-swarms.md). 
diff --git a/docker-cloud/docker-errors-faq.md b/docker-cloud/docker-errors-faq.md deleted file mode 100644 index f31a547cb8..0000000000 --- a/docker-cloud/docker-errors-faq.md +++ /dev/null @@ -1,163 +0,0 @@ ---- -description: Known Docker Engine issues in Docker Cloud -keywords: Engine, issues, troubleshoot -redirect_from: -- /docker-cloud/faq/docker-errors-faq/ -title: Known issues in Docker Cloud -toc_max: 2 -toc_min: 1 ---- - -This is a list of known issues with current versions of Docker Engine along with -our recommended workaround. You might encounter these errors in Docker Cloud. - -## Errors and messages - ---- - -## Get i/o timeout - - - -*Get https://index.docker.io/v1/repositories/\/images: dial tcp: lookup \ on \:53: read udp \:53: i/o timeout* - -### Description - -The DNS resolver configured on the host cannot resolve the registry's hostname. - -### GitHub link - -N/A - -### Workaround - -Retry the operation, or if the error persists, use another DNS resolver. You can do this by updating your `/etc/resolv.conf` file with these or other DNS servers: - - nameserver 8.8.8.8 - nameserver 8.8.4.4 - ---- - -## 500 Server Error: userland proxy - -*500 Server Error: Internal Server Error ("Cannot start container \: Error starting userland proxy: listen tcp 0.0.0.0:\: bind: address already in use")* - -### Description - -Docker Cloud is trying to deploy a container publishing a port which is already -being used by a process on the host (like the SSH server listening in port -`22`). - -### GitHub link - -N/A - -### Workaround - -Either choose another port, or SSH into the node and manually stop the process -which is using the port that you are trying to use. 
- ---- - -## 500 Server Error: bind failed - -*500 Server Error: Internal Server Error ("Cannot start container \: Bind for 0.0.0.0:\ failed: port is already allocated")* - -### Description - -Docker Cloud is trying to deploy a container publishing a port which is already -used by another container outside of the scope of Docker Cloud. - -### GitHub link - -N/A - -### Workaround - -Either choose another port, or SSH into the node and manually stop the container -which is using the port that you are trying to use. - ---- - -## 500 Server Error: cannot start, executable not found - -*500 Server Error: Internal Server Error ("Cannot start container \: [8] System error: exec: "\": executable file not found in $PATH")* - -### Description - -The `run` command you specified for the container does not exist on the -container. - -### GitHub link - -N/A - -### Workaround - -Edit the service to fix the run command. - ---- - -## Timeout when pulling image from the registry - -*Timeout when pulling image from the registry* - -### Description - -Timeouts occur when pulling the image takes more than 10 minutes. This can -sometimes be caused by the Docker daemon waiting for a nonexistent process while -pulling the required image. - -### GitHub link - - -[docker/docker#12823](https://github.com/moby/moby/issues/12823){: target="_blank" class="_" } - -### Workaround - -Restart the `dockercloud-agent` service (`sudo service dockercloud-agent -restart`) on the node, or restart the node. - ---- - -## Docker Cloud CLI does not currently support Python 3 - -### Description - -The `docker-cloud` command line interface (CLI) does not currently support -Python 3.x. - - -### GitHub link - -[docker/docker-cloud#21](https://github.com/docker/dockercloud-cli/issues/21){: target="_blank" class="_"} - -### Workarounds - -* Use Python 2.x with the Docker Cloud CLI. 
- ---## Problems installing and running Docker Cloud with Python 3 - -### Description - -* Some users have encountered problems installing and/or running -Docker Cloud with Anaconda Python 3.5.2 on a Windows host. - -* Some users running Python on Windows have encountered problems -running `docker-cloud` inside a container using `docker run`. - -### GitHub link - -[docker/for-win#368](https://github.com/docker/for-win/issues/368){: target="_blank" class="_" } - -[docker/dockercloud-cli#45](https://github.com/docker/dockercloud-cli/issues/45){: target="_blank" class="_" } - -### Workarounds - -* If you encounter problems with the installation, use Python 2.x. - -* Before attempting to run `docker-cloud` in a container with `docker run`, -make sure that you [have Linux containers -enabled](/docker-for-windows/index.md#switch-between-windows-and-linux-containers){: target="_blank" class="_" }. diff --git a/docker-cloud/dockerid.md b/docker-cloud/dockerid.md deleted file mode 100644 index 563de95ef8..0000000000 --- a/docker-cloud/dockerid.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -description: Using your DockerID to log in to Docker Cloud -keywords: one, two, three -title: Docker Cloud settings and Docker ID ---- - -Docker Cloud uses your Docker ID for access and access control, and this allows -you to link your Hub and Cloud accounts. - -If you already have an account on Docker Hub, you can use the same credentials to -log in to Docker Cloud. - -If you don't have a [Docker ID](../docker-id/) yet, you can sign up for one from -the Cloud website, or using the `docker login` command in the Docker CLI. The -name you choose for your Docker ID becomes part of your account namespace. - -## Manage cloud services and source providers - -You can link to your own hosts, or to hosted nodes from a Cloud Services -Provider such as Amazon Web Services or Microsoft Azure from your Docker Cloud -account. 
- -You can also link to source code repositories such as GitHub and -Bitbucket from your Docker Cloud account settings. - -## Email addresses - -You can associate multiple email addresses with your Docker ID, and one of these -becomes the primary address for the account. The primary address is used by -Docker to send password reset notifications and other important information, so -be sure to keep it updated. - -To add another email address to your Docker ID: - -1. In Docker Cloud, click the user icon menu at top right, and click **Account Settings**. -2. In the **Emails** section, enter a new email address for the account. -3. Click the **plus sign** icon (**+**) to add the address and send a verification email. - -The new email address is not added to the account until you confirm it by -clicking the link in the verification email. This link is only good for a -limited time. To send a new verification email, click the envelope icon next to -the email address that you want to verify. - -If you have multiple verified email addresses associated with the account, you can click **Set as primary** to change the primary email address. - -## Notifications - -You can configure your account so that you receive email notifications for certain types of events in Docker Cloud. - -You can also connect Slack to your Docker Cloud account so you can get notifications through your chat channels. To learn more, see [Docker Cloud notifications in Slack](slack-integration.md). - -## Paid accounts - -Like free Docker Hub accounts, free Docker Cloud accounts come with one free -private repository. - -If you require more private repositories, visit your **Cloud settings** and -select **Plan** to see the available paid accounts. 
diff --git a/docker-cloud/getting-started/connect-infra.md b/docker-cloud/getting-started/connect-infra.md deleted file mode 100644 index 6cbaa1031c..0000000000 --- a/docker-cloud/getting-started/connect-infra.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -description: How to link Docker Cloud to a hosted cloud services provider or your own hosts -keywords: node, create, understand -redirect_from: -- /docker-cloud/getting-started/use-hosted/ -title: Link to your infrastructure ---- - -To deploy Docker Cloud nodes, you first need to grant Docker Cloud access to your infrastructure. - -This could mean granting access to a cloud services provider such as AWS or Azure, or installing the Docker Cloud Agent on your own hosts. Once this is done, you can provision nodes directly from within Docker Cloud using the Web UI, CLI, or API. - -## Link to a cloud service provider -To link your cloud provider accounts, first go to your [Docker Cloud dashboard](https://cloud.docker.com/). - -Then, use one of the detailed tutorials below to link your account. You should open the detailed linking tutorial in a new tab or window so you can continue the tutorial when you're finished. - - - [Amazon Web Services](../infrastructure/link-aws.md) (uses an Access Key ID + Secret Access Key) - - [DigitalOcean](../infrastructure/link-do.md) (uses OAuth) - - [Microsoft Azure](../infrastructure/link-azure.md) (uses OAuth) - - [IBM SoftLayer](../infrastructure/link-softlayer.md) (uses an API key) - - [Packet.net](../infrastructure/link-packet.md) (uses an API key) - - You can always come back and link more cloud service providers later. - -## Link to your own hosts (Bring Your Own Node - BYON) - -If you are not using a cloud services provider but using your own hosts, install the Docker Cloud Agent on those hosts so that Docker Cloud can communicate with them. Follow the directions at [Bring Your Own Node instructions](../infrastructure/byoh.md). 
Open these instructions in a new window or tab so you can return to this tutorial once you're done linking your hosts. - -## Ready to go? -Once you've linked to your cloud services provider or to your own hosts, [continue the tutorial and deploy your first node](your_first_node.md). diff --git a/docker-cloud/getting-started/deploy-app/10_provision_a_data_backend_for_your_service.md b/docker-cloud/getting-started/deploy-app/10_provision_a_data_backend_for_your_service.md deleted file mode 100644 index 49266ac8ad..0000000000 --- a/docker-cloud/getting-started/deploy-app/10_provision_a_data_backend_for_your_service.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -description: Provision a data backend for the service -keywords: provision, Python, service -redirect_from: -- /docker-cloud/getting-started/python/10_provision_a_data_backend_for_your_service/ -- /docker-cloud/getting-started/golang/10_provision_a_data_backend_for_your_service/ -title: Provision a data backend for your service ---- - -Docker Cloud offers a large number of data stores in the *Jumpstart* library, -including Redis, MongoDB, PostgreSQL, and MySQL. - -You may have noticed that your app has a visit counter that's been disabled up -until now. In this step you add a data backend for your service to use. In -this specific tutorial we use a Redis cache, but most concepts apply to any -data backend. - -## Provision the service - -The first step is to provision the data service itself. Run this command to -create and run the Redis service using the [redis](https://github.com/docker-library/redis/){: target="_blank" class="_"} -image: - -```none -$ docker-cloud service run \ ---env REDIS_PASS="password" \ ---name redis \ -redis -``` -**--env REDIS_PASS="password"** defines an environment variable that sets the password for Redis. Because we are not publishing any ports for this service, only services **linked** to your *Redis service* can connect to it. 
- -Use `docker-cloud service ps` to check if your new redis service is *running*. This might take a minute or two. - -```none -$ docker-cloud service ps -NAME UUID STATUS IMAGE DEPLOYED -redis 89806f93 ▶ Running redis:latest 29 minutes ago -web bf644f91 ▶ Running my-username/python-quickstart:latest 26 minutes ago -lb 2f0d4b38 ▶ Running dockercloud/haproxy:latest 25 minutes ago -``` - -## Link the web service to the redis service - -Next, we set up the link between the `redis` service and the `web` service. - -```bash -$ docker-cloud service set --link redis:redis --redeploy web -``` - -In this command, we're creating a link from the `web` service (specified at the end of the command) to the `redis` service, and naming the link `redis`. - -Next, visit or `curl` the load balanced web endpoint again. The web service now counts the number of visits to the web service. This uses the Redis data backend, and is synchronized between all of the service's containers. - -If you're using curl, you should see the counter incrementing like this: - -```none -$ curl lb-1.$DOCKER_ID_USER.cont.dockerapp.io -Hello World
Hostname: web-1
Counter: 1% -$ curl lb-1.$DOCKER_ID_USER.cont.dockerapp.io -Hello World
Hostname: web-3
Counter: 2% -$ curl lb-1.$DOCKER_ID_USER.cont.dockerapp.io -Hello World
Hostname: web-2
Counter: 3% -$ curl lb-1.$DOCKER_ID_USER.cont.dockerapp.io -Hello World
Hostname: web-5
Counter: 4% -$ curl lb-1.$DOCKER_ID_USER.cont.dockerapp.io -Hello World
Hostname: web-3
Counter: 5% -``` - -## What's Next? - -Next, we look at [Stackfiles for your service](11_service_stacks.md). diff --git a/docker-cloud/getting-started/deploy-app/11_service_stacks.md b/docker-cloud/getting-started/deploy-app/11_service_stacks.md deleted file mode 100644 index 4d15a9b1bc..0000000000 --- a/docker-cloud/getting-started/deploy-app/11_service_stacks.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -description: Stackfiles for your service -keywords: Python, service, stack -redirect_from: -- /docker-cloud/getting-started/python/11_service_stacks/ -title: Stackfiles for your service ---- - -## What are stack files? - -A stack is a logical grouping of related services that are usually deployed -together and require each other to work as intended. If you are familiar with -*fig* or *Docker Compose* then you should feel right at home with **stacks**. -You can learn more about stacks [here](../../apps/stacks.md). - -Stack files are YAML files, and you can learn more about the available syntax -[here](../../apps/stack-yaml-reference.md). You can also interact with stacks -using the [stack commands in our API](/apidocs/docker-cloud.md#stacks). - -## Service definitions in the stack file - -The services that you created in this tutorial form a stack with three services: -the load-balancer, the web application, and the redis cache. - -Look at the file called `docker-cloud.yml` in your quickstart to see the stack -file that defines the three services (lb, web, redis) you created in the -previous steps, including all modifications and environment variables. - -This is what the `docker-cloud.yml` file looks like. (If you are using the -quickstart-go version, you see `quickstart-go` instead of -`quickstart-python`.) 
- -```yml -lb: - image: dockercloud/haproxy - autorestart: always - links: - - web - ports: - - "80:80" - roles: - - global -web: - image: dockercloud/quickstart-python - autorestart: always - links: - - redis - environment: - - NAME=Friendly Users - deployment_strategy: high_availability - target_num_containers: 4 -redis: - image: redis - autorestart: always - environment: - - REDIS_PASS=password - - REDIS_APPENDONLY=yes - - REDIS_APPENDFSYNC=always -``` - -You can use this stack file to quickly deploy this cluster of three services to -another set of nodes. You can also edit the file to change the configuration. - -## Run a stack - -To create the services in a stack file you use the simple `stack up` command. - -You can run this in the path containing your stackfile (docker-cloud.yml), like -so: - -```bash -$ docker-cloud stack up -``` - -Or you can specify the YML file to use and its location: - -```bash -$ docker-cloud up -f /usr/dockercloud/quickstart-python/docker-cloud.yml -``` - -## What's Next? - -Next, we do some [data management with volumes](12_data_management_with_volumes.md). diff --git a/docker-cloud/getting-started/deploy-app/12_data_management_with_volumes.md b/docker-cloud/getting-started/deploy-app/12_data_management_with_volumes.md deleted file mode 100644 index b1309f5d9c..0000000000 --- a/docker-cloud/getting-started/deploy-app/12_data_management_with_volumes.md +++ /dev/null @@ -1,154 +0,0 @@ ---- -description: Data management with Volumes -keywords: Python, data, management -redirect_from: -- /docker-cloud/getting-started/python/12_data_management_with_volumes/ -title: Data management with volumes ---- - -In the previous step, we set up Redis but didn't provide it a way to store the -data it's caching. This means that if you redeployed the redis service, or if -the container crashed, the data would be lost. 
To save the data so it persists -beyond the life of a container, or share data from one container to another, -you need to define a volume. - -## Data persistence - -To persist, data in Docker Cloud must be stored in a volume. The volume -can be defined on the image (for example in the Dockerfile), or specified when -you create a new service in the Docker Cloud web UI. Learn more about volumes in -Docker Cloud [here](/docker-cloud/apps/volumes.md). - -### Test for lack of persistence - -If you `redeploy` the Redis service you created earlier, the counter resets. - -Let's try that. First, redeploy the redis service to reset the counter. - -```bash -$ docker-cloud service redeploy redis --not-reuse-volumes -``` - -Check the container status using the `container ps` command, and wait until the new container is running again. In the example below you can see the original container in the "Terminated" state, and the new container that is "Starting". - -```none -$ docker-cloud container ps --service redis -NAME UUID STATUS IMAGE RUN COMMAND EXIT CODE DEPLOYED PORTS -redis-1 5ddc0d66 ✘ Terminated redis:staging /run.sh 0 15 minutes ago 6379/tcp -redis-1 3eff67a9 ⚙ Starting redis:staging /run.sh -``` - -Once the container is running, get the web endpoint using `container ps`, then try curling or visiting the web endpoint again - -```none -$ curl lb-1.$DOCKER_ID_USER.cont.dockerapp.io:80 -

Hello Friendly Users!

Hostname: web-1
Visits: 1% -``` - -The Redis cache service redeployment caused the counter to reset. - -### Enabling persistence - -The specific Redis image (*redis*) in this tutorial supports data persistence. -This is not a common requirement for a Redis cache and it's not enabled by -default in most images. However to activate this in *our* image, you only need -to set two environment variables. - -Run the following command to create and set these two environment variables. - -```none -$ docker-cloud service set \ --e REDIS_APPENDONLY=yes \ --e REDIS_APPENDFSYNC=always \ -redis --redeploy -``` - -This command defines two new environment variables in the **redis** service and -then redeploys the service so they take effect. You can learn more about our -open source `redis` image [here](https://github.com/docker-library/redis/){: target="_blank" class="_"}. - -With these settings, Redis can create and store its data in a volume. The volume is in `/data`. - -Visit the web endpoint a few more times to make sure that the cache is working -as expected. Then redeploy the Redis service to see if the counter resets, or if -it persists even after the container is terminated and re-created. - -Curl the service to increment the counter: - -```none -$ curl lb-1.$DOCKER_ID_USER.cont.dockerapp.io:80 -

Hello Python users!!

Hostname: web-1
Visits: 1% -$ curl lb-1.$DOCKER_ID_USER.cont.dockerapp.io:80 -

Hello Python users!!

Hostname: web-2
Visits: 2% -$ curl lb-1.$DOCKER_ID_USER.cont.dockerapp.io:80 -

Hello Python users!!

Hostname: web-3
Visits: 3% -``` - -Next, redeploy the service using the `service redeploy` command: - -```none -$ docker-cloud service redeploy redis -``` - -Check the service status: - -```none -$ docker-cloud container ps --service redis -NAME UUID STATUS IMAGE RUN COMMAND EXIT CODE DEPLOYED PORTS -cache-1 8193cc1b ✘ Terminated redis:staging /run.sh 0 10 minutes ago 6379/tcp -cache-1 61f63d97 ▶ Running redis:staging /run.sh 37 seconds ago 6379/tcp -``` - -Once the service is running again, curl the web page again to see what the counter value is. - -```none -$ curl lb-1.$DOCKER_ID_USER.cont.dockerapp.io:80 -

Hello Python users!!

Hostname: web-3
Visits: 4% -``` - -Congratulations! You've set up data persistence in Docker Cloud! - -## Sharing/reusing data volumes between services - -A service's volume can be accessed by another service. To do this you use the `--volumes-from` flag when creating the new service. - -You might use this functionality to share data between two services, or to back -up, restore, or migrate a volume to a local host or a cloud storage provider. - -## Download volume data for backup - -In this next step, you download the `/data` volume from Redis to your local host using SCP (secure copy). - -First, run an SSH service that mounts the volumes of the redis you want to back up: - -```bash -$ docker-cloud service run -n download -p 2222:22 -e AUTHORIZED_KEYS="$(cat ~/.ssh/id_rsa.pub)" --volumes-from redis tutum/ubuntu -``` - -Then run **scp** to download the data volume files in Redis: - -```bash -$ scp -r -P 2222 root@downloader-1.$DOCKER_ID_USER.svc.dockerapp.io:/data . -``` - -You now have a backup copy of the Redis data on your local host machine! - -## What's Next? - -Congratulations! You've completed the tutorials! You can now push an image to -Docker Cloud, deploy an app to your Cloud nodes, set environment variables, -scale the service, view logs, set up a load balancer and a data back end, and -set up a volume to save the data. - -There's lots more to learn about Docker Cloud, so check out [the rest of our documentation](/docker-cloud/), the [API and CLI Documentation](../../../apidocs/docker-cloud.md), and our [Knowledge Hub](https://success.docker.com/Cloud) and [Docker Cloud Forums](https://forums.docker.com/c/docker-cloud). - -You might also want to delete or remove all of your hello world Stacks, Services, and Nodes running in Docker Cloud. To clean up when you're finished with the tutorial: - -- Click **Stacks** in the left navigation, hover over the stack you created and click the selection box that appears, then click **Terminate**. 
-- Once the Stack has terminated, click **Services** in the left navigation, hover over each service you created, click the selection box that appears, then click **Terminate**. -- Click **Node Clusters** in the left navigation, hover over the node cluster you created, click the selection box that appears, then click **Terminate**. - -Objects (Stacks, Services, Node Clusters, and Containers and nodes) still appear -in the list in Docker Cloud for about five minutes after they are terminated. - -Happy Docking! diff --git a/docker-cloud/getting-started/deploy-app/1_introduction.md b/docker-cloud/getting-started/deploy-app/1_introduction.md deleted file mode 100644 index 85ce41acbb..0000000000 --- a/docker-cloud/getting-started/deploy-app/1_introduction.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -description: Deploy an app to Docker Cloud -keywords: deploy, Python, application -redirect_from: -- /docker-cloud/getting-started/python/1_introduction/ -- /docker-cloud/getting-started/golang/1_introduction/ -title: Introduction to deploying an app in Docker Cloud -notoc: true ---- - -In this tutorial you bring an application to Docker Cloud using either Go -or Python. This tutorial is intended for more advanced beginners who have some -experience with web applications, and who want to learn more about -multi-container services in Docker Cloud. - -This tutorial assumes that you have: - -- a free [Docker ID account](https://hub.docker.com/){: target="_blank" class="_"}. -- at least one node running. If you don't have any nodes set up in Docker Cloud yet, [start here](../../getting-started/your_first_node.md) to set these up. -- Docker Engine installed - see the installation guides for [macOS, Windows, and Linux](/engine/installation/){: target="_blank" class="_"}. Use the `docker login` command to connect to your account in Docker Cloud, so you can run `docker-cloud` CLI commands. - -## What's next? - -Let's get started! - -[Set up your environment](2_set_up.md). 
diff --git a/docker-cloud/getting-started/deploy-app/2_set_up.md b/docker-cloud/getting-started/deploy-app/2_set_up.md deleted file mode 100644 index 8d9b938e42..0000000000 --- a/docker-cloud/getting-started/deploy-app/2_set_up.md +++ /dev/null @@ -1,121 +0,0 @@ ---- -description: Set up the application -keywords: Python, application, setup -redirect_from: -- /docker-cloud/getting-started/python/2_set_up/ -- /docker-cloud/getting-started/golang/2_set_up/ -title: Set up your environment ---- - -In this step you install the Docker Cloud CLI to interact with the service using your command shell. This tutorial uses CLI commands to complete actions. - -## Install the Docker Cloud CLI - -Install the docker-cloud CLI using the package manager for your system. - -#### Run the CLI in a Docker container - -If you have Docker Engine installed locally, you can run the following `docker` -command in your shell regardless of which operating system you are using. - -```none -docker run dockercloud/cli -h -``` - -This command runs the `docker-cloud` CLI image in a container for you. Learn -more about how to use this container -[here](https://github.com/docker/dockercloud-cli#docker-image). - -#### Install for Linux or Windows - -You can install the CLI locally using the [pip](https://pip.pypa.io/en/stable/) -package manager, which is a package manager for -[Python](https://www.python.org/) applications. - -* If you already have Python 2.x or Python 3.x installed, you probably have `pip` and -`setuptools`, but need to upgrade per the instructions -[here](https://packaging.python.org/installing/). - - > The Docker Cloud CLI does not currently support Python 3.x. - > - > We recommend using Python 2.x with Docker Cloud. To learn more, - see the Python and CLI issues described in - [Known issues in Docker Cloud](/docker-cloud/docker-errors-faq.md). 
- -* If you do not have Python or `pip` installed, you can either [install -Python](https://wiki.python.org/moin/BeginnersGuide/Download) or use this -[standalone pip -installer](https://pip.pypa.io/en/latest/installing/#installing-with-get-pip-py). You do not need Python for our purposes, just `pip`. - -Now that you have `pip`, open a shell or terminal -window and run the following command to install the docker-cloud CLI: - -```bash -$ pip install docker-cloud -``` - -#### Install on macOS - -We recommend installing the Docker Cloud CLI for macOS using Homebrew. If you don't have `brew` installed, follow the instructions at [http://brew.sh](http://brew.sh){:target="_blank" class="_"}. - -Once Homebrew is installed, open Terminal and run the following command: - -```bash -$ brew install docker-cloud -``` - -> **Note**: You can also use [pip](https://pip.pypa.io/en/stable/) to install on macOS, but we suggest Homebrew since it is a package manager designed for the Mac. - -## Validate the CLI installation -Check that the CLI installed correctly, using the `docker-cloud -v` command. (This command is the same for every platform.) - -```bash -$ docker-cloud -v -docker-cloud 1.0.0 -``` - -You can now use the `docker-cloud` CLI commands from your shell. - -The documentation for the Docker Cloud CLI tool and API is available [here](/apidocs/docker-cloud.md). - - -## Log in - -Use the `login` CLI command to log in to Docker Cloud. Use the username and password you used when creating your Docker ID. If you use Docker Hub, you can use the same username and password you use to log in to Docker Hub. - -``` -$ docker login -Username: my-username -Password: -Login succeeded! -``` - -You must log in to continue this tutorial. - -## Set your username as an environment variable - -For simplicity in this tutorial, we use an environment variable for your Docker Cloud username. If you plan to copy and paste the tutorial commands, set the environment variable using the command below. 
(Change `my-username` to your username.) - -If you don't want to do this, make sure you substitute your username for $DOCKER_ID_USER whenever you see it in the example commands. - -```none -$ export DOCKER_ID_USER=my-username -``` - -**If you are running the tutorial with an organization's resources:** - -By default, the `docker-cloud` CLI uses your default user namespace, meaning the -repositories, nodes, and services associated with your individual Docker ID -account name. To use the CLI to interact with objects that belong to an -[organization](../../orgs.md), prefix these commands with -`DOCKERCLOUD_NAMESPACE=my-organization`, or set this variable as in the example below. - -```none -$ export DOCKERCLOUD_NAMESPACE=my-organization -``` - - - See the [CLI documentation](../../installing-cli.md#use-the-docker-cloud-cli-with-an-organization) for more information. - - -Next up, we [prepare the app](3_prepare_the_app.md). diff --git a/docker-cloud/getting-started/deploy-app/3_prepare_the_app.md b/docker-cloud/getting-started/deploy-app/3_prepare_the_app.md deleted file mode 100644 index 4f465d77a9..0000000000 --- a/docker-cloud/getting-started/deploy-app/3_prepare_the_app.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -description: Prepare the application -keywords: Python, prepare, application -redirect_from: -- /docker-cloud/getting-started/python/3_prepare_the_app/ -- /docker-cloud/getting-started/golang/3_prepare_the_app/ -title: Prepare the application ---- - -In this step, you prepare a simple application that can be deployed. - -## Clone the sample app - -Run the following command to clone the sample application. You can use -either the Python or the Go version of this application, but you don't need to -install Python or Go to follow the tutorial. 
- -### Python quickstart - -```bash -$ git clone https://github.com/docker/dockercloud-quickstart-python.git -$ cd dockercloud-quickstart-python -``` - -### Go quickstart - -```bash -$ git clone https://github.com/docker/dockercloud-quickstart-go.git -$ cd dockercloud-quickstart-go -``` - -## Build the application - -*Skip the following step if you don't have Docker Engine installed locally.* - -Next, we build this application to create an image. Run the following command to build the app. This creates a Docker image and tags it with whatever follows the word `tag`. Tag the image either `quickstart-python` or `quickstart-go` depending on which quickstart you are using. - -### Python quickstart - -```bash -$ docker build --tag quickstart-python . -``` - -### Go quickstart - -```bash -$ docker build --tag quickstart-go . -``` - -## What's next? - -Next, we [Push the Docker image to Docker Cloud's Registry](4_push_to_cloud_registry.md). diff --git a/docker-cloud/getting-started/deploy-app/4_push_to_cloud_registry.md b/docker-cloud/getting-started/deploy-app/4_push_to_cloud_registry.md deleted file mode 100644 index e902995748..0000000000 --- a/docker-cloud/getting-started/deploy-app/4_push_to_cloud_registry.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -description: Push the Docker image to Docker Cloud's Registry -keywords: image, Docker, cloud -redirect_from: -- /docker-cloud/getting-started/python/4_push_to_cloud_registry/ -- /docker-cloud/getting-started/golang/4_push_to_cloud_registry/ -title: Push the image to Docker Cloud's registry ---- - -*Skip this step if you don't have Docker Engine installed locally.* - -## Overview - -In this step you take the image that you built in the previous step, and push it to Docker Cloud. - -In step 2, you set your Docker Cloud username as an environment variable called **DOCKER_ID_USER**. If you skipped this step, change the `$DOCKER_ID_USER` to your Docker ID username before running this command. 
- -> **Note**: By default, the `docker-cloud` CLI uses your default user namespace, -meaning the repositories, nodes, and services associated with your individual -Docker ID account name. To use the CLI to interact with objects that belong to -an [organization](/docker-cloud/orgs.md), prefix these commands with -`DOCKERCLOUD_NAMESPACE=my-organization`. See the [CLI documentation](/docker-cloud/installing-cli.md#use-the-docker-cloud-cli-with-an-organization) for more information. - -## Tag the image - -First tag the image. Tags in this case denote different builds of an image. - -### Python quickstart - -```bash -$ docker tag quickstart-python $DOCKER_ID_USER/quickstart-python -``` - -### Go quickstart - -```bash -$ docker tag quickstart-go $DOCKER_ID_USER/quickstart-go -``` - -## Publish the image - -Next, push the tagged image to the repository. - -### Python quickstart - -``` -$ docker push $DOCKER_ID_USER/quickstart-python -``` - -### Go quickstart - -``` -$ docker push $DOCKER_ID_USER/quickstart-go -``` - -## Verify the image location - -After the push command completes, verify that the image is now in Docker Cloud's -registry. Do this by logging in to [Docker Cloud](https://cloud.docker.com) and -clicking **Repositories** in the left navigation. Your image should appear in -the repository list. - -## What's next? - -[Deploy the app as a Docker Cloud service](5_deploy_the_app_as_a_service.md). 
diff --git a/docker-cloud/getting-started/deploy-app/5_deploy_the_app_as_a_service.md b/docker-cloud/getting-started/deploy-app/5_deploy_the_app_as_a_service.md deleted file mode 100644 index e15716af5d..0000000000 --- a/docker-cloud/getting-started/deploy-app/5_deploy_the_app_as_a_service.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -description: Deploy the app as a Docker Cloud service -keywords: Python, deploy, Cloud -redirect_from: -- /docker-cloud/getting-started/python/5_deploy_the_app_as_a_service/ -- /docker-cloud/getting-started/golang/5_deploy_the_app_as_a_service/ -title: Deploy the app as a Docker Cloud service ---- - -In this step you deploy the app as a Docker Cloud Service. Remember that a service is a group of containers of the same **image:tag**. - -## Deploy app with Docker Engine installed locally - -> **Note**: By default, the `docker-cloud` CLI uses your default user namespace, -meaning the repositories, nodes, and services associated with your individual -Docker ID account name. To use the CLI to interact with objects that belong to -an [organization](../../orgs.md), prefix these commands with -`DOCKERCLOUD_NAMESPACE=my-organization`. See the [CLI documentation](../../installing-cli.md#use-the-docker-cloud-cli-with-an-organization) for more information. - -Start by running the service. - -```bash -$ docker-cloud service run -p 80 --name web $DOCKER_ID_USER/quickstart-python -``` - -or - -```bash -$ docker-cloud service run -p 80 --name web $DOCKER_ID_USER/quickstart-go -``` - -## The run command - -The `run` command **creates and runs** the service using the image you chose. -The **-p 80** flag publishes port 80 in the container so that it is publicly -accessible, and maps it to a dynamically assigned port in the node. - -It might take a minute or two to get your service up and running. Once it -completes the startup process, it is in the *running* state. - -To check the status of your service from the CLI use the `docker-cloud service ps` command. 
- -```none -$ docker-cloud service ps -NAME UUID STATUS IMAGE DEPLOYED -web 68a6fb2c ▶ Running my-username/quickstart-python:latest 1 hour ago -``` - -Make sure that the **STATUS** for your service is **Running**. Next, visit the -app at the URL generated by its service name. Find this URL by running -`docker-cloud container ps --no-trunc`. - -```none -$ docker-cloud container ps --no-trunc -NAME UUID STATUS IMAGE RUN COMMAND EXIT CODE DEPLOYED PORTS -web-1 6c89f20e ▶ Running my-username/quickstart-python:latest python app.py 1 minute ago web-1.my-username.cont.dockerapp.io:49162->80/tcp -``` - -The **PORTS** column contains the URL you can use to see the service running in -a browser. Copy the URL, open a browser, and go to that URL. In the example above, the URL is -`web-1.my-username.cont.dockerapp.io:49162`. - -If you don't want to leave the command line, you can use the `curl` command instead. - -```bash -$ curl web-1.$DOCKER_ID_USER.cont.dockerapp.io:49162 -Hello World!
Hostname: web-1
Counter: Redis Cache not found, counter disabled.% -``` -> **Tip**: Your Docker ID is used as part of the namespace when running containers in Docker Cloud. In the example above, instead of copying the URL entirely, you can see we used the $DOCKER_ID_USER variable. - -**CONGRATULATIONS!** You've deployed your first service using Docker Cloud. - -## What's Next? - -[Define environment variables](6_define_environment_variables.md). diff --git a/docker-cloud/getting-started/deploy-app/6_define_environment_variables.md b/docker-cloud/getting-started/deploy-app/6_define_environment_variables.md deleted file mode 100644 index 291bf1338c..0000000000 --- a/docker-cloud/getting-started/deploy-app/6_define_environment_variables.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -description: Define environment variables -keywords: Python, service, environment, service -redirect_from: -- /docker-cloud/getting-started/python/6_define_environment_variables/ -- /docker-cloud/getting-started/golang/6_define_environment_variables/ -title: Define environment variables ---- - -Docker lets you store data such as configuration settings, encryption keys, and external resource addresses in environment variables. Docker Cloud makes it easy to define, share, and update the environment variables for your services. - -At runtime, environment variables are exposed to the application inside the container. - -## Look inside your deployed app - -Let's look inside the app you just deployed. - -### Python quickstart - -Open the file in `quickstart-python/app.py`, and look at the return statement in the method *hello()*. The code uses **os.getenv('NAME', "world")** to get the environment variable -**NAME**. - -```python -return html.format(name=os.getenv('NAME', "world"), hostname=socket.gethostname(), visits=visits) -``` - -### Go quickstart - -Open the file in `quickstart-go/main.go`, and look at the *fmt.Fprintf* call in the *indexHandler* method. 
The code uses **os.Getenv("NAME")** to get the environment variable **NAME**. - -```go -fmt.Fprintf(w, "

hello, %s

\nHostname: %s
MongoDB Status: %s", os.Getenv("NAME"), hostname, mongostatus) -``` - -## Edit an environment variable - -If you modify the environment variable, the message the app shows when you curl or visit the service webpage changes accordingly. Let's try it! - -Run the following command to change the **NAME** variable, and then redeploy the `web` service. - -```bash -$ docker-cloud service set --env NAME="Friendly Users" --redeploy web -``` - -## Check endpoint status - -Execute `docker-cloud container ps` again to see the container's new endpoint. -You should now see two `web-1` containers, one with a status of **terminated** -(that's the original container) and another one either **starting** or already -**running**. - -```none -$ docker-cloud container ps -NAME UUID STATUS IMAGE RUN COMMAND EXIT CODE DEPLOYED PORTS -web-1 a2ff2247 ✘ Terminated my-username/quickstart-python:latest python app.py 40 minutes ago web-1.my-username.cont.dockerapp.io:49165->80/tcp -web-1 ae20d960 ▶ Running my-username/quickstart-python:latest python app.py 20 seconds ago web-1.my-username.cont.dockerapp.io:49166->80/tcp -``` - -Now curl the new endpoint to see the updated greeting. - -> **Note**: If `docker-cloud container ps` doesn't show an endpoint for the container yet, wait until the container status changes to **running**. - -```none -$ curl web-1.$DOCKER_ID_USER.cont.dockerapp.io:49162 -Hello Friendly Users!
Hostname: e360d05cdb81
Counter: Redis Cache not found, counter disabled.% -``` - -Your service now returns `Hello Friendly Users!`. Great! You've modified your service using environment variables! - -### Environment Variables and the Dockerfile - -Environment variables can also be set in the Dockerfile, and modified at runtime -(like you just did). - -Wondering where the default value for the **NAME** environment variable is set? -Look in the quickstart's Dockerfile. - -```none -# Environment Variables -ENV NAME World -``` - -## What's Next? - -Next, we [scale the service](7_scale_the_service.md). diff --git a/docker-cloud/getting-started/deploy-app/7_scale_the_service.md b/docker-cloud/getting-started/deploy-app/7_scale_the_service.md deleted file mode 100644 index 60528a97ea..0000000000 --- a/docker-cloud/getting-started/deploy-app/7_scale_the_service.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -description: Scale the service -keywords: scale, Python, service -redirect_from: -- /docker-cloud/getting-started/python/7_scale_the_service/ -- /docker-cloud/getting-started/golang/7_scale_the_service/ -title: Scale the service -notoc: true ---- - -Right now, your service is running on a single container. That's great for now. - -You can check how many containers are running using the `docker-cloud container ps` command. - -```none -$ docker-cloud container ps -NAME UUID STATUS IMAGE RUN COMMAND EXIT CODE DEPLOYED PORTS -web-1 6c89f20e ▶ Running my-username/python-quickstart:latest python app.py 1 hour ago web-1.my-username.cont.dockerapp.io:49162->80/tcp -``` - -A single container works just fine for now, but it could be a problem if that container becomes unresponsive. To avoid this, you can scale to more than one container. You do this with the `service scale` command: - -```bash -$ docker-cloud service scale web 2 -``` - -In this example, you can see we're scaling the service called `web` to `2` containers. 
- -Run `service ps` again, and you should now see your service scaling: - -```none -$ docker-cloud service ps -NAME UUID STATUS IMAGE DEPLOYED -web 68a6fb2c ⚙ Scaling my-username/python-quickstart:latest 1 hour ago -``` - -If you run `container ps` you should see multiple containers: - -```none -$ docker-cloud container ps -NAME UUID STATUS IMAGE RUN COMMAND EXIT CODE DEPLOYED PORTS -web-1 6c89f20e ▶ Running my-username/python-quickstart:latest python app.py 1 hour ago web-1.my-username.cont.dockerapp.io:49162->80/tcp -web-2 ab045c42 ⚙ Starting my-username/python-quickstart:latest 80/tcp -``` - -Containers aren't assigned a *PORT* until they are *running*, so you need to wait until the Service status goes from *Scaling* to *Running* to see what port is assigned to them. - -```none -$ docker-cloud container ps -NAME UUID STATUS IMAGE RUN COMMAND EXIT CODE DEPLOYED PORTS -web-1 6c89f20e ▶ Running my-username/python-quickstart:latest python app.py 1 hour ago web-1.my-username.cont.dockerapp.io:49162->80/tcp -web-2 ab045c42 ▶ Running my-username/python-quickstart:latest python app.py 1 minute ago web-2.my-username.cont.dockerapp.io:49156->80/tcp -``` - -Use either of the URLs from the `container ps` command to visit one of your service's containers, either using your browser or curl. - -In the example output above, the URL `web-1.my-username.cont.dockerapp.io:49162` reaches the web app on the first container, and `web-2.my-username.cont.dockerapp.io:49156` reaches the web app on the second container. - -If you use curl to visit the pages, you should see something like this: - -```none -$ curl web-1.$DOCKER_ID_USER.cont.dockerapp.io:49166 -Hello Python Users!
Hostname: web-1
Counter: Redis Cache not found, counter disabled.% -$ curl web-2.$DOCKER_ID_USER.cont.dockerapp.io:49156 -Hello Python Users!
Hostname: web-2
Counter: Redis Cache not found, counter disabled.% -``` - -Congratulations! You now have *two* containers running in your **web** service. - -## What's Next? - -[View service logs](8_view_logs.md) diff --git a/docker-cloud/getting-started/deploy-app/8_view_logs.md b/docker-cloud/getting-started/deploy-app/8_view_logs.md deleted file mode 100644 index 21231b7ffe..0000000000 --- a/docker-cloud/getting-started/deploy-app/8_view_logs.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -description: View service logs -keywords: View, logs, Python -redirect_from: -- /docker-cloud/getting-started/python/8_view_logs/ -- /docker-cloud/getting-started/golang/8_view_logs/ -title: View service logs -notoc: true ---- - -Docker Cloud grants you access to the logs your application writes to `stdout`. -An internal service multiplexes all the logs from all the containers of a -service into a single stream. To see a service's logs run the `docker-cloud -service logs` command with the name of the service. - -If you run `docker-cloud service logs web`, you see logs for both *web-1* and -*web-2*, like the example below. - -```none -$ docker-cloud service logs web -[web-1] 2015-01-13T22:45:37.250431077Z * Running on http://0.0.0.0:80/ -[web-1] 2015-01-07T17:20:19.076174813Z 83.50.33.64 - - [07/Jan/2015 17:20:19] "GET / HTTP/1.1" 200 - -[web-1] 2015-01-07T17:20:34.209098162Z 83.50.33.64 - - [07/Jan/2015 17:20:34] "GET / HTTP/1.1" 200 - -[web-1] 2015-01-07T18:46:07.116759956Z 83.50.33.64 - - [07/Jan/2015 18:46:07] "GET / HTTP/1.1" 200 - -[web-2] 2015-01-07T18:48:24.550419508Z * Running on http://0.0.0.0:5000/ -[web-2] 2015-01-07T18:48:37.116759956Z 83.50.33.64 - - [07/Jan/2015 18:48:37] "GET / HTTP/1.1" 200 - -``` - -To see a specific container's logs, use the `container logs` and the -specific container's name. To learn more about service and container -hostnames, see [Service Discovery](../../apps/service-links.md#using-service-and-container-names-as-hostnames). 
- -```none -$ docker-cloud container logs web-1 -2015-01-07T17:18:24.550419508Z * Running on http://0.0.0.0:80/ -2015-01-07T17:20:19.076174813Z 83.50.33.64 - - [07/Jan/2015 17:20:19] "GET / HTTP/1.1" 200 - -2015-01-07T17:20:34.209098162Z 83.50.33.64 - - [07/Jan/2015 17:20:34] "GET / HTTP/1.1" 200 - -2015-01-07T18:46:07.116759956Z 83.50.33.64 - - [07/Jan/2015 18:46:07] "GET / HTTP/1.1" 200 - -``` - -Visit your application using curl or your browser again. Run the `service logs -web` command again to see another log message for your visit. - -## What's Next? - -Now, let's explore how to -[Load balance the service](9_load-balance_the_service.md). diff --git a/docker-cloud/getting-started/deploy-app/9_load-balance_the_service.md b/docker-cloud/getting-started/deploy-app/9_load-balance_the_service.md deleted file mode 100644 index 6da1e7b4b4..0000000000 --- a/docker-cloud/getting-started/deploy-app/9_load-balance_the_service.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -description: Load-balance the service -keywords: load, balance, Python -redirect_from: -- /docker-cloud/getting-started/python/9_load-balance_the_service/ -- /docker-cloud/getting-started/golang/9_load-balance_the_service/ -title: Load-balance the service -notoc: true ---- - -To load-balance your application, you need to deploy a load-balancing service. -This service distributes incoming requests to all of the available containers in -the application. - -In this example, you need a load balancer that forwards incoming requests to -both container #1 (web-1) and container #2 (web-2). For this tutorial, you'll -use [Docker Cloud's HAProxy image](https://github.com/docker/dockercloud-haproxy){: target="_blank" class="_"} to load balance, but you could also use other custom load balancers. - -You can configure and run the `haproxy` load balancer service from the command line using a command like the example below. (If you are using the Go quickstart, edit the `link-service` value before running the command.) 
- -```none -$ docker-cloud service run \ --p 80:80/tcp \ ---role global \ ---autorestart ALWAYS \ ---link-service web:web \ ---name lb \ -dockercloud/haproxy -``` - -**-p 80:80/tcp** publishes port 80 of the container, and maps it to port 80 of the node. - -**--role global** grants [API access](../../apps/api-roles.md) to this service. You can use this to query the Docker Cloud API from within the service. - -**--autorestart ALWAYS** tells Docker Cloud to always [restart the containers](../../apps/autorestart.md) if they stop. - -**--link-service web:web** links your load balancer service *haproxy* with the *web* service, and names the link *web*. (Learn more about Service Linking [here](../../apps/service-links.md).) - -**--name lb** names the service *lb* (short for *load balancer*). - -**dockercloud/haproxy** specifies the public image that we're using to make this service. - -Run the `service ps` command to check if your service is already running. - -```none -$ docker-cloud service ps -NAME UUID STATUS IMAGE DEPLOYED -web 68a6fb2c ▶ Running my-username/quickstart-python:latest 2 hours ago -lb e81f3815 ▶ Running dockercloud/haproxy:latest 11 minutes ago -``` - -Now let's check the container for this service. Run `docker-cloud container ps`. - -```none -$ docker-cloud container ps -NAME UUID STATUS IMAGE RUN COMMAND EXIT CODE DEPLOYED PORTS -web-1 6c89f20e ▶ Running my-username/quickstart-python:latest python app.py 2 hours ago web-1.my-username.cont.dockerapp.io:49162->80/tcp -web-2 ab045c42 ▶ Running my-username/quickstart-python:latest python app.py 33 minutes ago web-2.my-username.cont.dockerapp.io:49156->80/tcp -lb-1 9793e58b ▶ Running dockercloud/haproxy:latest /run.sh 14 minutes ago 443/tcp, lb-1.my-username.cont.dockerapp.io:80->80/tcp -``` - -You should notice an URL endpoint in the *PORT* column for haproxy-1. In the -example above, this is `lb-1.my-username.cont.dockerapp.io:80`. Open the `lb-1` -URL in your browser or curl from the CLI. 
- -If you refresh or run curl multiple times, you should see requests distributed -between the two containers of the `web` service. You can see which container -responds to your request in the `Hostname` section of the response. - -```none -$ curl lb-1.$DOCKER_ID_USER.cont.dockerapp.io -Hello Friendly Users!
Hostname: web-1
Counter: Redis Cache not found, counter disabled.% -$ curl lb-1.$DOCKER_ID_USER.cont.dockerapp.io -Hello Friendly Users!
Hostname: web-2
Counter: Redis Cache not found, counter disabled.% -``` - -You can learn more about *dockercloud/haproxy*, our free open source HAProxy image here. - -## What's Next? - -[Provision a data backend for your service](10_provision_a_data_backend_for_your_service.md) diff --git a/docker-cloud/getting-started/deploy-app/index.md b/docker-cloud/getting-started/deploy-app/index.md deleted file mode 100644 index 8f50ea9007..0000000000 --- a/docker-cloud/getting-started/deploy-app/index.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -description: Go or Python and Docker Cloud -keywords: Python, Go, Docker, Cloud, application -redirect_from: -- /docker-cloud/getting-started/python/ -- /docker-cloud/getting-started/golang/ -title: Deploy an application ---- - -* [Introduction to Deploying an app to Docker Cloud](1_introduction.md) -* [Set up your environment](2_set_up.md) -* [Prepare the application](3_prepare_the_app.md) -* [Push an image to Docker Cloud's Registry](4_push_to_cloud_registry.md) -* [Deploy the app as a Docker Cloud service](5_deploy_the_app_as_a_service.md) -* [Define environment variables](6_define_environment_variables.md) -* [Scale the service](7_scale_the_service.md) -* [View service logs](8_view_logs.md) -* [Load-balance the service](9_load-balance_the_service.md) -* [Provision a data backend for the service](10_provision_a_data_backend_for_your_service.md) -* [Stackfiles for your service](11_service_stacks.md) -* [Data management with Volumes](12_data_management_with_volumes.md) \ No newline at end of file diff --git a/docker-cloud/getting-started/images/create-first-service.png b/docker-cloud/getting-started/images/create-first-service.png deleted file mode 100644 index afafcc731f..0000000000 Binary files a/docker-cloud/getting-started/images/create-first-service.png and /dev/null differ diff --git a/docker-cloud/getting-started/images/first-service-container-list.png b/docker-cloud/getting-started/images/first-service-container-list.png deleted file mode 
100644 index 694d0039d4..0000000000 Binary files a/docker-cloud/getting-started/images/first-service-container-list.png and /dev/null differ diff --git a/docker-cloud/getting-started/images/first-service-container.png b/docker-cloud/getting-started/images/first-service-container.png deleted file mode 100644 index 293a8ca1eb..0000000000 Binary files a/docker-cloud/getting-started/images/first-service-container.png and /dev/null differ diff --git a/docker-cloud/getting-started/images/first-service-create-and-deploy-button.png b/docker-cloud/getting-started/images/first-service-create-and-deploy-button.png deleted file mode 100644 index a0747af0f7..0000000000 Binary files a/docker-cloud/getting-started/images/first-service-create-and-deploy-button.png and /dev/null differ diff --git a/docker-cloud/getting-started/images/first-service-ports.png b/docker-cloud/getting-started/images/first-service-ports.png deleted file mode 100644 index 1400b1123a..0000000000 Binary files a/docker-cloud/getting-started/images/first-service-ports.png and /dev/null differ diff --git a/docker-cloud/getting-started/images/first-service-timeline.png b/docker-cloud/getting-started/images/first-service-timeline.png deleted file mode 100644 index 870dc058dc..0000000000 Binary files a/docker-cloud/getting-started/images/first-service-timeline.png and /dev/null differ diff --git a/docker-cloud/getting-started/images/first-service-webpage.png b/docker-cloud/getting-started/images/first-service-webpage.png deleted file mode 100644 index ce1a8754fe..0000000000 Binary files a/docker-cloud/getting-started/images/first-service-webpage.png and /dev/null differ diff --git a/docker-cloud/getting-started/images/first-service-wizard.png b/docker-cloud/getting-started/images/first-service-wizard.png deleted file mode 100644 index ee89ee5107..0000000000 Binary files a/docker-cloud/getting-started/images/first-service-wizard.png and /dev/null differ diff --git 
a/docker-cloud/getting-started/images/first_node.png b/docker-cloud/getting-started/images/first_node.png deleted file mode 100644 index 51473705d6..0000000000 Binary files a/docker-cloud/getting-started/images/first_node.png and /dev/null differ diff --git a/docker-cloud/getting-started/index.md b/docker-cloud/getting-started/index.md deleted file mode 100644 index a8604e3927..0000000000 --- a/docker-cloud/getting-started/index.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -description: 'Getting Started with Docker Cloud: Setting up a node using a hosted - cloud provider or your own nodes, deploying a service and clustering.' -keywords: one, two, three -title: Getting started with Docker Cloud -notoc: true ---- - -If you're completely new to Docker Cloud, start here! - -* [Introducing Docker Cloud](intro_cloud.md) Start here! Then, you can either: - * [Link to your Infrastructure](connect-infra.md) Link one of the following providers: - * [Link your Amazon Web Services account](../infrastructure/link-aws.md) - * [Link your Microsoft Azure account](../infrastructure/link-azure.md) - * [Link your Digital Ocean account](../infrastructure/link-do.md) - * [Link your Packet account](../infrastructure/link-packet.md) - * [Link your SoftLayer account](../infrastructure/link-softlayer.md) - * [Set up self-hosted nodes](../infrastructure/byoh.md) ...Or bring your own hosts -* [Deploy your first node](your_first_node.md) Then, deploy your first nodes, -* [Create your first service](your_first_service.md) ...And finish by deploying your first service. - -Ready to deploy your first web service? 
[Deploy a web application in Docker Cloud](deploy-app/index.md) diff --git a/docker-cloud/getting-started/intro_cloud.md b/docker-cloud/getting-started/intro_cloud.md deleted file mode 100644 index 868c0f4862..0000000000 --- a/docker-cloud/getting-started/intro_cloud.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -description: Introducing Docker Cloud concepts and terminology -keywords: node, create, understand -redirect_from: -- /docker-cloud/getting-started/beginner/intro_cloud/ -title: Introducing Docker Cloud ---- - -This page introduces core Docker Cloud concepts and features so you can easily follow along with the tutorial. - -The tutorial goes through the following steps: - -1. Set up your hosts by linking to a cloud service provider or your own Linux hosts. -2. Deploy your first node cluster. -3. Deploy your first service. - -Know all this stuff already? Skip to [Link to your infrastructure](connect-infra.md). - -## What is a node? -A node is an individual Linux host used to deploy and run your applications. Docker Cloud does not provide hosting services, so all of your applications, services, and containers run on your own hosts. Your hosts can come from several different sources, including physical servers, virtual machines or cloud providers. - -## What is a node cluster? -When launching a node from a cloud provider you actually create a node cluster. Node Clusters are groups of nodes of the same type and from the same cloud provider. Node clusters allow you to scale the infrastructure by provisioning more nodes with a drag of a slider. - -### Use cloud service providers -Docker Cloud makes it easy to provision nodes from existing cloud providers. If you already have an account with an infrastructure as a service provider, you can provision new nodes directly from within Docker Cloud. Today we have native support for Amazon Web Services, DigitalOcean, Microsoft Azure, Packet.net, and IBM SoftLayer. 
- -### Use your own hosts ("Bring your own nodes") -You can also provide your own node or nodes. This means you can use any Linux host connected to the Internet as a Docker Cloud node as long as you can install a Cloud agent. The agent registers itself with your Docker account, and allows you to use Docker Cloud to deploy containerized applications. - -## What is a service? -Services are logical groups of containers from the same image. Services make it simple to scale your application across different nodes. In Docker Cloud you drag a slider to increase or decrease the availability, performance, and redundancy of the application. Services can also be linked one to another even if they are deployed on different nodes, regions, or even cloud providers. - -## Let's get started! -Log in to Docker Cloud using your Docker ID. (These are the same credentials you used for Docker Hub if you had an account there.) - -Start here [by linking your infrastructure to Docker Cloud](connect-infra.md). diff --git a/docker-cloud/getting-started/your_first_node.md b/docker-cloud/getting-started/your_first_node.md deleted file mode 100644 index fadb06c5bf..0000000000 --- a/docker-cloud/getting-started/your_first_node.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -description: Deploy your first node on Docker Cloud -keywords: node, create, understand -redirect_from: -- /docker-cloud/getting-started/beginner/your_first_node/ -- /docker-cloud/getting-started/beginner/deploy_first_node/ -title: Deploy your first node -notoc: true ---- - -In this step you create your first node (inside a node cluster) on Docker Cloud. - -After you link your Docker Cloud account with your hosts (either your own hosts or one or more cloud providers), the next step is to launch your first node. - -When launching a node you actually create a _node cluster_. 
Node clusters are groups of nodes of the same type and from the same cloud provider, and they allow you to scale the infrastructure by provisioning more nodes with a drag of a slider. - - To start, go to the **Nodes** section and click **Create**. - -![](images/first_node.png) - -Enter the following: - - - **Name**: name for the node cluster. This can contain alphanumeric characters, dashes, and underscores. - - **Deploy tags**: (optional) these are used to limit what can be deployed on the specific cluster. Read more about deployment tags [here](../apps/deploy-tags.md). - - **Provider**: the cloud provider or host to use. Only providers you have configured appear in this menu. - - **Region**: the region on which to provision the node cluster. - - **Type/size**: the type and size of the nodes in the cluster. - - **Number of nodes**: the number of nodes to create in the node cluster. This can be modified later. - - **Disk size**: the disk size for each node. - -> **Note**: You might see more or different options in this screen depending on which hosts or providers you're using. For example, DigitalOcean nodes have a fixed disk size depending on the type and size of the node you choose. - -Click **Launch node cluster** to provision this node cluster. It may take several minutes for the cluster to launch. To view and follow along with deployment progress, click into the node cluster, click the **Timeline** tab, then expand the **Node Cluster Deploy** item to view the console. - -Once the node cluster is deployed, a Success message appears near the top of the page. - -From the Node cluster detail view you can see the status of your nodes, destroy individual nodes or the whole cluster, upgrade individual nodes, and scale your node cluster from 1 to 10 nodes. You can also click an individual node's hostname to see which containers are running on it. - -## What's next? - -Now that you've got at least one **node** deployed, it's time to deploy your first **service**. 
- -Remember that a service is a group of containers from the same container image. Services make it simple to scale your application across a number of nodes. They can also be linked one to another even if they are deployed on different nodes, regions, or even on different cloud providers. - -[Continue the tutorial and deploy your first service](your_first_service.md). diff --git a/docker-cloud/getting-started/your_first_service.md b/docker-cloud/getting-started/your_first_service.md deleted file mode 100644 index 97f1ffdf8a..0000000000 --- a/docker-cloud/getting-started/your_first_service.md +++ /dev/null @@ -1,130 +0,0 @@ ---- -description: Deploy your first service on Docker Cloud -keywords: service, Cloud, three -redirect_from: -- /docker-cloud/getting-started/beginner/your_first_service/ -- /docker-cloud/getting-started/beginner/deploy_first_service/ -title: Create your first service ---- - -This page describes how to create a service on Docker Cloud. - -## What is a service? - -A service is a group of containers of the same **image:tag**. Services make it simple to scale your application. With Docker Cloud, you simply drag a slider to change the number of containers in a service. - -Before you can deploy a service in Docker Cloud, you must have at least one node deployed. If you haven't done this yet [follow the tutorial to deploy a node](your_first_node.md). - -When you create a service in the Docker Cloud web interface, a wizard walks you through configuring the service in three steps. - -1. **Choose a Container Image** Images can come from Docker Cloud's Jumpstarts library, your personal Docker Hub account or Docker Hub's public index, or from third party registries you connect. -1. **Configure the Service** From here, give the service a name, set the initial number of containers, expose/publish ports, modify the run command or entrypoint, set memory and CPU limits. -1. 
**Set Environment variables** Set the edit environment variables and link your service to other existing services in Docker Cloud. - -> **Note**: In this Quickstart tutorial we don't work with environment variables or connect [data volumes](../apps/volumes.md), but these are also available as optional steps in the wizard. - -## Select a Service Image - -From any page on Docker Cloud, click the **Services** section, then click **Create**. - -![](images/create-first-service.png) - -For the purposes of this tutorial, click the rocket icon and look for the **Miscellaneous** section. - -You see an image called `dockercloud/hello-world`. -![](images/first-service-wizard.png) - -Click the **dockercloud/hello-world** image. This image creates a container that runs NGINX, and shows a simple *hello world* web page. - -## Configure the Service - -In this step Docker Cloud loads all of the Image tags available for the image. In this case our tutorial image **dockercloud/hello-world** only has one image tag called **latest**. - -For the purposes of this tutorial, you don't actually need to enter or change -anything for most of the fields on the Create Service page. - -### Publishing a port - -Since we need to access this container over the Internet, we first need to publish a port. By default, ports are not accessible publicly. To learn more about ports click [here](../apps/ports.md). - -Click the **Ports** table, where it says *Click to override ports defined in image*. This activates that section so you can make changes. Then click the **Published** checkbox. - -![](images/first-service-ports.png) - -For this tutorial leave the Node port set to *dynamic*. This means that **port 80** of the container is mapped to a random available port in the node in which the container is deployed. To force a specific port in the node, click *dynamic* and specify a port. - -> **Note**: Two containers in the same node cannot publish to the same *node port*. 
- -## Create and Deploy - -You don't need to modify anything else in this service for the tutorial, so click **Create and deploy**. Docker Cloud creates, and deploys your new service (just like it says on the tin!) - -![](images/first-service-create-and-deploy-button.png) - -Next, Cloud sends you to the Service's detailed view. The detailed view contains six informational sections: - - - **Containers**: lists the containers that are part of this service and their status. This is also where you'd go to launch more containers to scale a service. - - **Endpoints**: shows a list of available service and container endpoints. - - **Triggers**: allows you to set triggers that perform automatic actions such as scaling a node or redeploying an image when the source updates. - - **Links**: lists the links between services. For this tutorial this section is empty. - - **Volumes**: lists the volumes attached to the service to store data. For this tutorial this section is empty. - - **Environment Variables**: lists the environment variables for the service. - -Two additional tabs of information are available for each service: - - - **Logs**: shows check the recent logs from all the containers in this service. - - **Timeline**: a timeline of all the API calls, and accompanying logs, that were performed against this service. - - -Click the **Timeline** tab to see a log output similar to the one below. It can take a couple of minutes for the container to deploy. - -``` -Deploying... -Creating 1 new containers -Preparing to deploy container f93b1a05-4444-49e5-98b0-9dc3a7618453 -hello-world-1: Choosing best node. 
Deployment strategy: BALANCE -hello-world-1: Deploying in 8468426e-tutorial.node.dockerapp.io -hello-world-1: Pulling image dockercloud/hello-world:latest in 8468426e-tutorial.node.dockerapp.io -hello-world-1: Creating in 8468426e-tutorial.node.dockerapp.io -hello-world-1: Starting with docker id df9525795bef5394e1a33b2ef42e26ba991bdccece4bc4f4f34e1def5c095fe9 in 8468426e-tutorial.node.dockerapp.io -hello-world-1: Inspecting and checking its configuration -hello-world-1: Running in 8468426e-tutorial.node.dockerapp.io -``` - -The web interface looks something like this: - -![](images/first-service-timeline.png) - -The **hello-world** status line updates to **Running** once the container deploys successfully. - -The **Containers** list shows all of the containers in this service. There should just be one for now. - -![](images/first-service-container-list.png) - -Click the container's name to go to the Container's detail view. From -this page you can see additional information about the containers, such as -endpoints, logs, environment variables, volumes, a terminal, and the console -timeline. - -![](images/first-service-container.png) - -The **Endpoints** section lists the endpoints (ports) that this container is publishing. In the screenshot above, there is a single endpoint: **hello-world-66622790-1.9ab56d66.container.docker.io:32768**. The endpoint is composed of both the container's hostname and a port number. - -Click the links icon to the right of the endpoint. This opens a new tab and shows the webpage that the **hello-world** container is hosting. - -![](images/first-service-webpage.png) - -**Congratulations!** You've successfully deployed your first service using Docker Cloud. - -## Optional cleanup - -You probably don't need the `hello-world` container to run for very long after you complete the tutorial. 
To clean up and remove all of the resources you created during this tutorial: - -- Click **Services**, hover over the `hello-world` container and click the selection box that appears, then click the **Terminate** icon to the right of the service information. -- Click **Node Clusters**, select the node cluster you created, and click the **Terminate** icon at the right end of the cluster information. - -Both terminated Services and Node Clusters remain in the UI for about five minutes. After that time, they no longer appear. - -## What's next? - -Learn more about [scaling your service](../apps/service-scaling.md), or check out some of our other [Deploy an app tutorial](deploy-app/index.md). diff --git a/docker-cloud/images/Beta-Swarm-Mode-List-View.png b/docker-cloud/images/Beta-Swarm-Mode-List-View.png deleted file mode 100644 index 698fe1dc77..0000000000 Binary files a/docker-cloud/images/Beta-Swarm-Mode-List-View.png and /dev/null differ diff --git a/docker-cloud/images/Docker-Cloud-Blue.png b/docker-cloud/images/Docker-Cloud-Blue.png deleted file mode 100644 index 94de0f4666..0000000000 Binary files a/docker-cloud/images/Docker-Cloud-Blue.png and /dev/null differ diff --git a/docker-cloud/images/Docker-Cloud-Blue.svg b/docker-cloud/images/Docker-Cloud-Blue.svg deleted file mode 100644 index 56fc7a8b7c..0000000000 --- a/docker-cloud/images/Docker-Cloud-Blue.svg +++ /dev/null @@ -1,49 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docker-cloud/images/Docker-Cloud-white.svg b/docker-cloud/images/Docker-Cloud-white.svg deleted file mode 100644 index 22f38ea865..0000000000 --- a/docker-cloud/images/Docker-Cloud-white.svg +++ /dev/null @@ -1,48 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docker-cloud/images/cloud-build.png b/docker-cloud/images/cloud-build.png deleted file mode 100644 index a9ef1ceb4b..0000000000 Binary files a/docker-cloud/images/cloud-build.png and /dev/null differ diff --git 
a/docker-cloud/images/cloud-clusters.png b/docker-cloud/images/cloud-clusters.png deleted file mode 100644 index 7497b598bb..0000000000 Binary files a/docker-cloud/images/cloud-clusters.png and /dev/null differ diff --git a/docker-cloud/images/cloud-stack.png b/docker-cloud/images/cloud-stack.png deleted file mode 100644 index fe7a57d946..0000000000 Binary files a/docker-cloud/images/cloud-stack.png and /dev/null differ diff --git a/docker-cloud/images/play-button.png b/docker-cloud/images/play-button.png deleted file mode 100644 index a24989917a..0000000000 Binary files a/docker-cloud/images/play-button.png and /dev/null differ diff --git a/docker-cloud/images/play-button.snagproj b/docker-cloud/images/play-button.snagproj deleted file mode 100644 index 8be8239698..0000000000 Binary files a/docker-cloud/images/play-button.snagproj and /dev/null differ diff --git a/docker-cloud/images/slack-notification-updates.png b/docker-cloud/images/slack-notification-updates.png deleted file mode 100644 index f72a02dbed..0000000000 Binary files a/docker-cloud/images/slack-notification-updates.png and /dev/null differ diff --git a/docker-cloud/images/slack-oauth-authorize.png b/docker-cloud/images/slack-oauth-authorize.png deleted file mode 100644 index 3f3a4c6d73..0000000000 Binary files a/docker-cloud/images/slack-oauth-authorize.png and /dev/null differ diff --git a/docker-cloud/index.md b/docker-cloud/index.md deleted file mode 100644 index 50435278a6..0000000000 --- a/docker-cloud/index.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -description: Docker Cloud -keywords: Docker, cloud -notoc: true -title: Welcome to the Docker Cloud docs! -redirect_from: -- /engine/installation/cloud/cloud/ -- /engine/installation/cloud/ -- /engine/installation/cloud/overview/ -- /engine/installation/google/ -- /engine/installation/softlayer/ -- /engine/installation/rackspace/ -- /engine/installation/joyent/ ---- - -
-
Docker Cloud logo
-
- -Docker Cloud provides a hosted [registry service](builds/repos.md) with -[build](builds/automated-build.md) and [testing](builds/automated-testing.md) -facilities for Dockerized application images; tools to help you set up and -[manage host infrastructure](infrastructure/); and [application lifecycle features](apps/) to automate deploying (and redeploying) services created from -images. - -Log in to Docker Cloud using your free [Docker ID](../docker-id/). - - - - - - - - - - - - - - - - - - - - - -
Manage Builds and ImagesManage Swarms (Beta Swarm Mode)

Build and test your code, and build Docker images. Link Cloud repositories to your source code provider to automate building images and pushing them to Cloud.

Provision swarms to popular cloud providers, register existing swarms, and use your Docker ID to authenticate and securely access personal or team swarms.

Manage Infrastructure (Standard Mode)Manage Nodes and Apps (Standard Mode)

Link to your hosts, upgrade the Docker Cloud agent, and manage container distribution. See the AWS FAQ and Packet.net FAQ.

Deploy and manage nodes, services, and applications in Docker Cloud (Standard Mode).

API Docs    ●    Frequently Asked Questions    ●    Release Notes
- -## About Docker Cloud - -### Images, Builds, and Testing - -Docker Cloud uses the hosted Docker Cloud Registry, which allows you to publish -Dockerized images on the internet either publicly or privately. Docker Cloud can -also store pre-built images, or link to your source code so it can build the -code into Docker images, and optionally test the resulting images before pushing -them to a repository. - -![Build configuration of repo](images/cloud-build.png) - -### Swarm Management (Beta Swarm Mode) - -With [Beta Swarm Mode](/docker-cloud/cloud-swarm/index.md), you can create new -swarms from within Docker Cloud, register existing swarms to Docker Cloud, or -provision swarms to your cloud providers. Your Docker ID authenticates and -securely accesses personal or team swarms. Docker Cloud allows you to connect -your local Docker Engine to any swarm you have access to in Docker Cloud. - -![Swarm mode list](images//Beta-Swarm-Mode-List-View.png) - -### Infrastructure management (Standard Mode) - -Before you can do anything with your images, you need somewhere to run them. -Docker Cloud allows you to link to your infrastructure or cloud services -provider so you can provision new nodes automatically. Once you have nodes set -up, you can deploy images directly from Docker Cloud repositories. - -![Node clusters](images/cloud-clusters.png) - -### Services, Stacks, and Applications (Standard Mode) - -Images are just one layer in containerized applications. Once you've built an -image, you can use it to deploy services (which are composed of one or more -containers created from an image), or use Docker Cloud's -[stackfiles](apps/stacks.md) to combine it with other services and -microservices, to form a full application. 
- -![Stacks/Wizard - editing yaml file](images/cloud-stack.png) diff --git a/docker-cloud/infrastructure/byoh.md b/docker-cloud/infrastructure/byoh.md deleted file mode 100644 index be0ab47ed1..0000000000 --- a/docker-cloud/infrastructure/byoh.md +++ /dev/null @@ -1,132 +0,0 @@ ---- -description: Use the Docker Cloud Agent -keywords: agent, Cloud, install -redirect_from: -- /docker-cloud/feature-reference/byoh/ -- /docker-cloud/tutorials/byoh/ -- /docker-cloud/getting-started/use-byon/ -title: Use the Docker Cloud Agent ---- - -Docker Cloud allows you to use any Linux host ("bring your own host") as a node which you can then use to deploy containers. To do this, you must install the **Docker Cloud Agent** on your Linux host so that Docker Cloud can remotely manage it. - -> **Note**: The Docker Cloud Agent only supports x64 architecture at this time. - -The **Docker Cloud Agent** installs its own Docker binary, and automatically removes any prior installation of the Docker Engine packages. See the [Known Limitations](byoh.md#known-limitations) section for more information. - -You can still run `docker` CLI commands on a host that is running the Docker Cloud Agent. If you do this, you might see the Docker Cloud system containers that start with `dockercloud/`. - - -## Install the Docker Cloud Agent - -1. Before you begin, make sure that ports **6783/tcp** and **6783/udp** are open on the target host. Optionally, open port **2375/tcp** too. - - The first two ports allow the node to join the overlay network that allows [service discovery](../apps/service-links.md) among nodes on a Docker Cloud account. Port 2375 allows Docker Cloud to contact the Docker daemon on the host directly using TLS mutual authentication. If this port is not open, Docker Cloud sets up a reverse tunnel from the host to access this port. - -2. Log in to Docker Cloud and go to the **Node dashboard**. - -3. Click **Bring your own node**. 
- - The dialog that appears lists the currently supported distributions of Linux, and provides a command that you can copy. - This command includes a token that allows the agent to talk to Docker Cloud. - - ![](images/node-byoh-wizard-v2.png) - -4. Copy the command to your clipboard. - -5. Execute this command on your Linux host. - - The command downloads a script which installs and configures the Docker Cloud Agent, and registers it with Docker Cloud. - -6. Confirm that the new Linux host appears in the Node dashboard in Docker Cloud. The node is now ready to accept container deployments! - -7. Repeat this process for each host. - -## Install the Docker Cloud Agent using the CLI - -If you prefer not to use the web interface, you can generate the command needed -to install and configure the **Docker Cloud Agent** using the `docker-cloud` -CLI. - -To generate the command with the token, run: - -``` -$ docker-cloud node byo -``` - -The command outputs the following, including the installation command instructions and the associated registration token. - -``` -Docker Cloud lets you use your own servers as nodes to run containers. For -this you have to install our agent. - -Run the following command on your server: - -curl -Ls https://get.cloud.docker.com/ | sudo -H sh -s 63ad1c63ec5d431a9b31133e37e8a614 -``` - -Copy and paste this command, and execute it on your host. The host automatically -appears in the list of nodes once the process completes. 
- -## Uninstall the Docker Cloud Agent - -To uninstall `dockercloud-agent` from your host, execute the following command: - -``` -$ apt-get remove dockercloud-agent -``` - -## Upgrade Docker Cloud Agent - -To upgrade `dockercloud-agent`, execute the following command on the host: - -``` -$ apt-get update && apt-get install -y dockercloud-agent -``` - -## Restart Docker Cloud Agent - -To restart the `dockercloud-agent`, execute the following command from your BYON host: - -``` -$ service dockercloud-agent restart -``` - -## Known limitations - -### Firewall requirements - -The following ports **must** be opened in any firewalls: - -* **6783/tcp** and **6783/udp**: These ports allow the node to join the private overlay network for containers in other nodes. - -You should also open the following ports: - -* **2375/tcp**: This allows Docker Cloud to communicate with the Docker daemon running in the node. If port 2375 is not accessible, Docker Cloud attempts to connect with the node through a secure reverse tunnel. - -You must also open any ports that you plan to publish in your services, however these are not required to install the Docker Cloud Agent. - -### Supported Linux distributions - -The **Docker Cloud Agent** has been tested on: - -- Ubuntu 14.04, 15.04, 15.10 -- Debian 8 -- Centos 7 -- Red Hat Enterprise Linux 7 -- Fedora 21, 22, 23 - -Contact Docker support if you have a different distribution that you would like -to have supported. - -### Install Docker Cloud Agent on a node with Docker already installed - -If you install the Docker Cloud Agent on a node which already has Docker Engine -installed, `apt-get` removes the older `docker` installation, then installs the -`docker` binary that is bundled with `dockercloud-agent`. The installation -script also tries to install the kernel headers required for `AUFS` support. 
- -> **Note**: If you remove, upgrade, or install over the version of Docker Engine bundled with the Docker Cloud Agent, your nodes may not maintain a connection with Docker Cloud correctly. You must reinstall a compatible Engine version (currently version 1.11.2-cs5). Contact Docker Support for more information. - -* If you were already using the `AUFS` storage driver before installing `dockercloud-agent`, your existing containers and images appear automatically once installation finishes. -* If you were using `devicemapper` or any other storage driver, and the `AUFS` driver installs successfully you can't use your existing containers and images. You can check if the installation succeeded by running `docker info | grep Storage`. diff --git a/docker-cloud/infrastructure/cloud-on-aws-faq.md b/docker-cloud/infrastructure/cloud-on-aws-faq.md deleted file mode 100644 index 9cd0e03b38..0000000000 --- a/docker-cloud/infrastructure/cloud-on-aws-faq.md +++ /dev/null @@ -1,175 +0,0 @@ ---- -description: Docker Cloud on AWS -keywords: Cloud, AWS, faq -redirect_from: -- /docker-cloud/faq/cloud-on-aws-faq/ -title: Use Docker Cloud on AWS ---- - -This section answers frequently asked questions about using Docker Cloud with -Amazon Web Services (AWS). - -## I can't get my account to link to Docker Cloud. How do I troubleshoot it? - -To validate your AWS Security Credentials, Docker Cloud tries to dry-run an -instance on every region. Credentials are marked as valid if the operation -succeeds at least in one of the regions. If you get the following message -`Invalid AWS credentials or insufficient EC2 permissions` follow these steps to -troubleshoot it: - -1. [Download AWS CLI](https://aws.amazon.com/cli/){: target="_blank" class="_"}. -2. [Configure the CLI](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html){: target="_blank" class="_"} with your security credentials. -2. 
Run the following command: - - ``` - aws ec2 run-instances --dry-run --image-id ami-4d883350 --instance-type m3.medium - ``` - -This tries to dry-run an Ubuntu 14.04 LTS 64-bit in `sa-east-1` (Sao Paulo, -South America). You can look for the AMI in the region you want to deploy to -[here](http://cloud-images.ubuntu.com/locator/ec2/){: target="_blank" -class="_"}. It should show you the error message. If your configuration is -correct, you see the following message: - -``` -A client error (DryRunOperation) occurred when calling the RunInstances operation: Request would have succeeded, but DryRun flag is set. -``` - -## "AWS returned an error: unauthorized operation" using instance profiles to deploy node clusters - -This error occurs when you are using an instance profile that has more -permissions than the IAM user you are using with Docker Cloud. You can fix this -by adding the `"Action":"iam:PassRole"` permission to the IAM policy for the -`dockercloud` service user. You can read more about this -[here](http://blogs.aws.amazon.com/security/post/Tx3M0IFB5XBOCQX/Granting-Permission-to-Launch-EC2-Instances-with-IAM-Roles-PassRole-Permission){: -target="_blank" class="_"}. - -## What objects does Docker Cloud create in my EC2 account? - -If you decide to let Docker Cloud create elements for you, it creates: - -- A VPC with the tag name `dc-vpc` and CIDR range `10.78.0.0/16`. -- A set of subnets if there are no subnets already created in the VPC. Docker Cloud creates a subnet in every Availability Zone (AZ) possible, and leaves enough CIDR space for the user to create customized subnets. Every subnet created is tagged with `dc-subnet`. -- An internet gateway named `dc-gateway` attached to the VPC. -- A route table named `dc-route-table` in the VPC, associating the subnet with the gateway. - -## How can I customize VPC/IAM elements in Docker Cloud through the AWS dashboard? 
- -Users with AWS EC2-VPC accounts can customize any of the elements explained -above through the AWS API or the dashboard. - -In the launch node cluster view, you can choose: - -- VPC dropdown: - 1. `Auto` - Delegates creation of the VPC to Docker Cloud. - 2. `vpc-XXXX (dc-vpc)` - Docker Cloud's default VPC. This only appears if you have already deployed nodes to that region. You can choose subnets and security groups with the VPC. See "Which objects does Docker Cloud create in my EC2 account" for detailed info. - 3. `vpc-XXXX` - You can select one of the VPCs already created by you. If you tag name them, it is displayed too. -- Subnets dropdown: - 1. `Auto` - Delegates the management of the subnets to Docker Cloud. Creates them if they do not exist or uses the ones tagged with `dc-subnet`. - 2. Multiple selection of existing subnets. See `How does Docker Cloud balance my nodes among different availability zones?` section for detailed info. -- Security groups dropdown: - 1. `Auto` - 2. Multiple selection of existing security groups. -- IAM roles dropdown: - 1. `None` - Docker Cloud does not apply any instance profiles to the node. - 2. `my_instance_role_name` - You can select one of the IAM roles already created by you. - -## How do I customize VPC/IAM elements in Docker Cloud using the API? - -Add the following section to your body parameters: - -```json -"provider_options" = { - "vpc": { # optional - "id": "vpc-xxxxxxxx", # required - "subnets": ["subnet-xxxxxxxx", "subnet-yyyyyyyy"], # optional - "security_groups": ["sg-xxxxxxxx"] # optional - }, - "iam": { # optional - "instance_profile_name": "my_instance_profile_name" # required - } -} -``` - -## How does Docker Cloud balance my nodes among different availability zones? (high availability schema) - -By default, Docker Cloud tries to deploy your node cluster using a high -availability strategy. To do this, it places every instance one by one in the -less populated availability zone for that node cluster. 
We can see this behavior -with some examples: - -### We allow Docker Cloud to manage VPCs and subnets - -Docker Cloud can take over VPC and subnet management for you when you deploy a -node cluster. - -For example, assume this is the first time you're deploying a node cluster. You -delegate deployment management to Docker Cloud in the Sao Paulo (South America, -`sa-east-1`) region. You don't send any `provider_options` using the API, and -you leave the VPC, subnet, security groups and IAM role values set to their -defaults on the dashboard. In this situation: - -1. Docker Cloud looks for a VPC called `dc-vpc`. The VPC does not exist on the first try, so Docker Cloud creates it and a `dc-gateway`, which attaches to the VPC. -2. Docker Cloud retrieves all subnets in the VPC. No subnets exist on the first try. -3. Docker Cloud creates the subnet. -4. For every availability zone (AZ), Docker Cloud splits the VPC CIDR IP space in (# of AZs + 1) blocks and tries to create (# of AZs) subnets. Remember, we left space for custom subnets. -5. For every subnet, Docker Cloud tries to dry-run an instance of the selected type and creates it if the operation succeeds, creating and associating a `dc-route-table` to the subnet. -6. Once all subnets have been created, Docker Cloud deploys every node of the cluster using a round-robin pattern. - -> **Note** If the `dry-run` fails on any of the availability zones, you may see fewer subnets than were originally specified by the number of zones. - -### Scaling a node cluster - -Following the example in the previous section, you have a node cluster deployed and want to scale it up. Docker Cloud: - -1. Looks for `dc-vpc`. Found! -2. Looks for `dc-subnet`s. Found! -3. Counts the nodes in every subnet. -4. Chooses the less populated subnet and deploys the next node there. -4. Repeats until all nodes are deployed. 
- -### We choose where to deploy - -What if you have another VPC for some other purpose, (the components already exist) and you want to deploy a node cluster in that VPC. - -Docker Cloud: - -1. Looks for the selected VPC. Found! -2. Looks for selected subnets. If you do not select any subnets, Docker Cloud tries to create them using the rules previously described. -3. If you selected more than one subnet, Docker Cloud distributes the nodes in the cluster among those subnets. If not, all nodes are placed in the same subnet. - -## What happens if I restart a node in the AWS console? - -After the node boots up, the Docker Cloud Agent tries to contact the Cloud API -and register itself with its new IP. Once it registers, Docker Cloud -automatically updates the DNS of the node and the containers on it to use the -new IP. The node's state changes from `Unreachable` to `Deployed`. - -## Can I use an elastic IP for my nodes? - -Yes. However, you must restart the Docker Cloud Agent (or the host) for the -changes to take effect in Docker Cloud. - -## What happens when I terminate a node from the AWS console? - -If you created the node using Docker Cloud, but you terminate it in the AWS -console, all data in that node is destroyed as the volume attached to it is set -to destroy on node termination. As long as the Docker Cloud IAM user still has -access, Cloud detects the termination and marks the node as `Terminated`. - -If you created the host yourself, added it to Docker Cloud as a "Bring Your Own -Node" and then terminated it, the node stays in the `Unreachable` state until -you manually remove it. - -## How do I SSH into a node? - -Use the instructions [here](ssh-into-a-node.md) to access your nodes over SSH. -If you chose a custom security group, remember to open port 22. - -## How do I back up my Docker container volumes to AWS S3? 
- -Use the [dockercloud/dockup](https://hub.docker.com/r/dockercloud/dockup/){: -target="_blank" class="_"} utility image to back up your volumes. You only need -to run it taking the volumes of the container you want to back up with -`volumes-from` and pass it the environment configuration of the container. You -can find more information in its Github repository. diff --git a/docker-cloud/infrastructure/cloud-on-packet.net-faq.md b/docker-cloud/infrastructure/cloud-on-packet.net-faq.md deleted file mode 100644 index f0fa027f12..0000000000 --- a/docker-cloud/infrastructure/cloud-on-packet.net-faq.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -description: Docker Cloud and Packet.net -keywords: Packet.net, Cloud, drives -redirect_from: -- /docker-cloud/faq/cloud-on-packet.net-faq/ -title: Use Docker Cloud and Packet.net ---- - -This page answers frequently asked questions about using Docker Cloud with Packet.net. - -## What does Docker Cloud create in my Packet.net account? - -Docker Cloud creates a project named "Docker Cloud" which contains all the devices that Docker Cloud deploys, no matter what type of device you chose. - -Device storage is organized as follows: - -- Type 1 devices have a RAID 1 of two SSD drives mounted in `/`. -- Type 3 devices also have a RAID 1 of two SSD drives mounted in `/`, and also offer two NVMe drives without being mounted. Docker Cloud mounts a RAID 1 in `/var/lib/docker`. - -An SSH keypair named `dockercloud-` is created if no key is found in your account. - -## How long does it take to deploy a Packet.net device? - -Docker Cloud deploys Ubuntu 14.04 LTS images on both types. Type 1 takes between -5 and 10 minutes to initialize, while type 3 can take up to 15 minutes. The Packet.net engineering team is working to reduce these deployment times. - -## What happens if I restart a node in the Packet.net portal? - -After the node boots up, the Docker Cloud Agent contacts Docker Cloud using the -API and registers itself with its new IP. 
Cloud then automatically updates the -DNS of the node and the containers on it to use the new IP. The node changes -state from `Unreachable` to `Deployed`. - -## Can I terminate a node from the Packet.net portal? - -If you create a node using Docker Cloud but terminate it from the Packet.net -portal, all data in the node is destroyed. Docker Cloud detects the termination -and marks the node as `Terminated`. - -If you turn off the device, Docker Cloud marks it as `Unreachable` because the -node has not been terminated, but Cloud cannot contact it. - -If you created the host yourself, added it to Docker Cloud as a "Bring Your Own -Node" and then terminated it, the node is marked as `Unreachable` until you -manually remove it. - -## How can I log in to a Packet.net node managed by Docker Cloud? - -Packet.net copies SSH keys into the created device. This means you can upload your own SSH public key to Packet.net's portal and then SSH into the node using the `root` user. You can also log in to the node from Packet's console, or use a container to copy your SSH keys into the node, as explained in [Sshing into a node](../infrastructure/ssh-into-a-node.md). - -## Packet has returned an error, what can I do? - -Here is a list of known errors thrown by Packet.net API: - -- **You have reached the maximum number of projects you can create (number)**. Please contact `help@packet.net` -> Packet.net limits the number of projects that an account can create. Delete projects in the account or contact [Packet.net](https://www.packet.net/) support to increase the limit. -- **There is an error with your Packet.net account**. Please contact `help@packet.net` -> There is something else wrong with your Packet.net account. Contact [Packet.net](https://www.packet.net/) for more details. 
diff --git a/docker-cloud/infrastructure/deployment-strategies.md b/docker-cloud/infrastructure/deployment-strategies.md deleted file mode 100644 index 6419bbc9c3..0000000000 --- a/docker-cloud/infrastructure/deployment-strategies.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -description: Schedule a deployment -keywords: schedule, deployment, container -redirect_from: -- /docker-cloud/feature-reference/deployment-strategies/ -title: Container distribution strategies ---- - -Docker Cloud can use different distribution strategies when deploying containers -to more than one node. You can use different strategies to change how your -service distributes new containers when scaling. - -## Set a deployment distribution strategy - -You can set the deployment strategy when creating a service, either through the -Docker Cloud web UI, or using the API or CLI. You can also specify a -deployment strategy in the [stack file](../apps/stack-yaml-reference.md) used to -define a [service stack](../apps/stacks.md). - -For all methods, the default deployment strategy is "Emptiest node". - -### Emptiest node (default) - -This is the default strategy, and is commonly used to balance the total load of -all services across all nodes. - -A service configured to deploy using the `EMPTIEST_NODE` strategy deploys its -containers to the nodes that match its [deploy tags](../apps/deploy-tags.md) -with the **fewest total containers** at the time of each container's deployment, -regardless of the service. - -### High availability - -This setting is typically used to increase the service availability. - -A service using the `HIGH_AVAILABILITY` strategy deploys its containers to the -node that matches its deploy tags with the **fewest containers of that service** -at the time of each container's deployment. This means that the containers are -spread across all nodes that match the deploy tags for the service. 
- -### Every node - -A service using the `EVERY_NODE` strategy deploys one container **on each node** that matches its deploy tags. - -When a service uses the `EVERY_NODE` strategy: - -* A new container is deployed to every new node that matches the service's deploy tags. -* The service cannot be manually scaled. -* If the service uses volumes, each container on each node has a different volume. -* If an `EVERY_NODE` "client" service is linked to a "server" service that is also using the `EVERY_NODE` strategy, containers are linked one-to-one on each node. The "client" services are *not* automatically linked to "server" services on other nodes. - -> **Note**: Because of how links are configured when using the **every node** -> strategy, you cannot currently switch from **every node** to **high -> availability** or **emptiest node** and vice versa. diff --git a/docker-cloud/infrastructure/docker-upgrade.md b/docker-cloud/infrastructure/docker-upgrade.md deleted file mode 100644 index 68e496f8e5..0000000000 --- a/docker-cloud/infrastructure/docker-upgrade.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -description: Upgrade Docker Engine on a node -keywords: upgrade, engine, node -redirect_from: -- /docker-cloud/feature-reference/docker-upgrade/ -- /docker-cloud/tutorials/docker-upgrade/ -title: Upgrade Docker Engine on a node ---- - -Docker Cloud helps you manage nodes that have Docker Engine installed on them. -You can upgrade the version of Docker Engine on your nodes when new versions are -released. Docker Cloud doesn't *automatically* upgrade your nodes for you -because your containers would need to be restarted to complete the upgrade. -Instead, we allow you to choose when you want to upgrade so you can plan for potential outage time during the restart. - -## Upgrade Docker Engine in a node - -Before you upgrade your nodes, go to the detail page of the node that you want to upgrade. 
On the left side is a **Docker Info** section that lists the currently installed version of Docker Engine. - -If a new version is available, an up-arrow icon and a **New Docker version available** message appear above the current version line. Click the up-arrow -icon to start the upgrade process. - -![](images/upgrade-message.png) - -After upgrading Docker Engine, any containers that was running before the -upgrade is automatically restarted, regardless of the -[autorestart](../apps/autorestart.md) setting. Containers that were stopped are not -restarted. - - -## Upgrade nodes using the API and CLI - -See our [API and CLI documentation](/apidocs/docker-cloud.md#upgrade-docker-daemon34) for more information on how to upgrade nodes with our API, SDKs, and the CLI. diff --git a/docker-cloud/infrastructure/images/aws-iam-role-1.png b/docker-cloud/infrastructure/images/aws-iam-role-1.png deleted file mode 100644 index a480ac26c5..0000000000 Binary files a/docker-cloud/infrastructure/images/aws-iam-role-1.png and /dev/null differ diff --git a/docker-cloud/infrastructure/images/aws-iam-role-2.png b/docker-cloud/infrastructure/images/aws-iam-role-2.png deleted file mode 100644 index e6c2916f06..0000000000 Binary files a/docker-cloud/infrastructure/images/aws-iam-role-2.png and /dev/null differ diff --git a/docker-cloud/infrastructure/images/aws-link-account.png b/docker-cloud/infrastructure/images/aws-link-account.png deleted file mode 100644 index 10223ac096..0000000000 Binary files a/docker-cloud/infrastructure/images/aws-link-account.png and /dev/null differ diff --git a/docker-cloud/infrastructure/images/aws-modal.png b/docker-cloud/infrastructure/images/aws-modal.png deleted file mode 100644 index 861a994394..0000000000 Binary files a/docker-cloud/infrastructure/images/aws-modal.png and /dev/null differ diff --git a/docker-cloud/infrastructure/images/azure-link-account.png b/docker-cloud/infrastructure/images/azure-link-account.png deleted file mode 100644 index 
9dcffca716..0000000000 Binary files a/docker-cloud/infrastructure/images/azure-link-account.png and /dev/null differ diff --git a/docker-cloud/infrastructure/images/azure-link-modal.png b/docker-cloud/infrastructure/images/azure-link-modal.png deleted file mode 100644 index 0247d166e5..0000000000 Binary files a/docker-cloud/infrastructure/images/azure-link-modal.png and /dev/null differ diff --git a/docker-cloud/infrastructure/images/azure-portal-subscriptions.png b/docker-cloud/infrastructure/images/azure-portal-subscriptions.png deleted file mode 100644 index 373f1c4dfc..0000000000 Binary files a/docker-cloud/infrastructure/images/azure-portal-subscriptions.png and /dev/null differ diff --git a/docker-cloud/infrastructure/images/azure-upload-certificate.png b/docker-cloud/infrastructure/images/azure-upload-certificate.png deleted file mode 100644 index 9e80a0b5db..0000000000 Binary files a/docker-cloud/infrastructure/images/azure-upload-certificate.png and /dev/null differ diff --git a/docker-cloud/infrastructure/images/do-approve-access.png b/docker-cloud/infrastructure/images/do-approve-access.png deleted file mode 100644 index 32498d69cd..0000000000 Binary files a/docker-cloud/infrastructure/images/do-approve-access.png and /dev/null differ diff --git a/docker-cloud/infrastructure/images/do-link-account.png b/docker-cloud/infrastructure/images/do-link-account.png deleted file mode 100644 index 529a9ded48..0000000000 Binary files a/docker-cloud/infrastructure/images/do-link-account.png and /dev/null differ diff --git a/docker-cloud/infrastructure/images/do-login-screen.png b/docker-cloud/infrastructure/images/do-login-screen.png deleted file mode 100644 index 7a035f1ddd..0000000000 Binary files a/docker-cloud/infrastructure/images/do-login-screen.png and /dev/null differ diff --git a/docker-cloud/infrastructure/images/get-node-ip.png b/docker-cloud/infrastructure/images/get-node-ip.png deleted file mode 100644 index 2b5fb54927..0000000000 Binary files 
a/docker-cloud/infrastructure/images/get-node-ip.png and /dev/null differ diff --git a/docker-cloud/infrastructure/images/node-byoh-wizard-v2.png b/docker-cloud/infrastructure/images/node-byoh-wizard-v2.png deleted file mode 100644 index df42409f51..0000000000 Binary files a/docker-cloud/infrastructure/images/node-byoh-wizard-v2.png and /dev/null differ diff --git a/docker-cloud/infrastructure/images/packet-add-apikey.png b/docker-cloud/infrastructure/images/packet-add-apikey.png deleted file mode 100644 index 481b28d655..0000000000 Binary files a/docker-cloud/infrastructure/images/packet-add-apikey.png and /dev/null differ diff --git a/docker-cloud/infrastructure/images/packet-link-account.png b/docker-cloud/infrastructure/images/packet-link-account.png deleted file mode 100644 index 8f6751c1a4..0000000000 Binary files a/docker-cloud/infrastructure/images/packet-link-account.png and /dev/null differ diff --git a/docker-cloud/infrastructure/images/softlayer-link-account.png b/docker-cloud/infrastructure/images/softlayer-link-account.png deleted file mode 100644 index 5b175dd8e0..0000000000 Binary files a/docker-cloud/infrastructure/images/softlayer-link-account.png and /dev/null differ diff --git a/docker-cloud/infrastructure/images/softlayer-modal.png b/docker-cloud/infrastructure/images/softlayer-modal.png deleted file mode 100644 index 7fd0eb3ec1..0000000000 Binary files a/docker-cloud/infrastructure/images/softlayer-modal.png and /dev/null differ diff --git a/docker-cloud/infrastructure/images/softlayer-step-1.png b/docker-cloud/infrastructure/images/softlayer-step-1.png deleted file mode 100644 index c29ed0171b..0000000000 Binary files a/docker-cloud/infrastructure/images/softlayer-step-1.png and /dev/null differ diff --git a/docker-cloud/infrastructure/images/softlayer-step-2.png b/docker-cloud/infrastructure/images/softlayer-step-2.png deleted file mode 100644 index ed62e93528..0000000000 Binary files a/docker-cloud/infrastructure/images/softlayer-step-2.png 
and /dev/null differ diff --git a/docker-cloud/infrastructure/images/softlayer-step-6.png b/docker-cloud/infrastructure/images/softlayer-step-6.png deleted file mode 100644 index c7435a160e..0000000000 Binary files a/docker-cloud/infrastructure/images/softlayer-step-6.png and /dev/null differ diff --git a/docker-cloud/infrastructure/images/softlayer-step-7.png b/docker-cloud/infrastructure/images/softlayer-step-7.png deleted file mode 100644 index b153be5420..0000000000 Binary files a/docker-cloud/infrastructure/images/softlayer-step-7.png and /dev/null differ diff --git a/docker-cloud/infrastructure/images/upgrade-message.png b/docker-cloud/infrastructure/images/upgrade-message.png deleted file mode 100644 index 8bf177089e..0000000000 Binary files a/docker-cloud/infrastructure/images/upgrade-message.png and /dev/null differ diff --git a/docker-cloud/infrastructure/index.md b/docker-cloud/infrastructure/index.md deleted file mode 100644 index f8946447ac..0000000000 --- a/docker-cloud/infrastructure/index.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -description: Manage network in Docker Cloud -keywords: nodes, hosts, infrastructure, Cloud -title: Docker Cloud infrastructure overview (Standard Mode) ---- - -Docker Cloud uses an agent and system containers to deploy and manage nodes (hosts) on your behalf. All nodes accessible to your account are connected by an overlay or mesh network, regardless of host or cloud service provider. - -## Deploy nodes from Docker Cloud - -When you use Docker Cloud to deploy nodes on a hosted provider, the service stores your cloud provider credentials and then deploys nodes for you using the services' API to perform actions on your behalf. 
- -## Bring your own host - -If you are using [Bring Your Own Host](byoh.md), Docker Cloud provides a script that: - -- installs the Docker Cloud Agent on the host -- downloads and installs the latest Docker CS Engine version and the AUFS storage driver -- sets up TLS certificates and the Docker security configuration -- registers the host with Docker Cloud under your user account - -Once this connection is established, the Docker Cloud Agent manages the node and performs updates when the user requests them, and can also create and maintain a reverse tunnel to Docker Cloud if firewall restrictions prevent a direct connection. - -## Internal networking - -Docker Cloud communicates with the Docker daemon running in the node using the following IPs, on port **2375/tcp**. - -- 52.204.126.235/32 -- 52.6.30.174/32 -- 52.205.192.142/32 -- 52.205.2.114/32 - -If the port is not accessible, Docker Cloud creates a secure reverse tunnel from the nodes to Docker Cloud. - -When you add a node on Docker Cloud, the node joins the Weave private overlay network for containers in other nodes by connecting on ports **6783/tcp** and **6783/udp**. (You should make sure these ports are open.) - -## Node management - -Nodes managed by Docker Cloud are connected to any other nodes owned by the user or organization, regardless of the host or service provider. - -Docker Cloud uses system containers to do the following: - -- Set up a secure overlay network between all nodes using Weave -- Create a stream of Docker events from nodes to Docker Cloud -- Synchronize node clocks -- Rotate container logs when they exceed 10 MB -- Remove `Terminated` images (images not used by a container for 30 minutes) - - > **Note**: If this is not sufficient for your needs, you can add a logging container to your services. - -## Internal overlay network - -Docker Cloud creates a per-user overlay network which connects all containers across all of the user's hosts. 
This network connects all of your containers on the `10.7.0.0/16` subnet, and gives every container a local IP. This IP persists on each container even if the container is redeployed and ends up on a different host. Every container can reach any other container on any port within the subnet. - -## External access - -The easiest way to access nodes is to ensure that your public ssh key is available to them. You can quickly copy your public key to all of the nodes in your Docker Cloud account by running the **authorizedkeys** container. See [SSHing into a node](ssh-into-a-node.md) for more information. - -## What's in this section? -The pages in this section explain how to link Docker Cloud to your infrastructure providers or your own hosts, and how to manage your nodes from within Docker Cloud. - -* [SSH into a Docker Cloud-managed node](ssh-into-a-node.md) -* Read more about [Deployment strategies](deployment-strategies.md) -* Learn how to [Upgrade Docker Engine on a node](docker-upgrade.md) -* [Use the Docker Cloud Agent to Bring your Own Host](byoh.md) -* [Link to Amazon Web Services hosts](link-aws.md) - * [Using Docker Cloud on AWS FAQ](cloud-on-aws-faq.md) -* [Link to DigitalOcean hosts](link-do.md) -* [Link to Microsoft Azure hosts](link-azure.md) -* [Link to Packet hosts](link-packet.md) - * [Using Docker Cloud and Packet FAQ](cloud-on-packet.net-faq.md) -* [Link to SoftLayer hosts](link-softlayer.md) diff --git a/docker-cloud/infrastructure/link-aws.md b/docker-cloud/infrastructure/link-aws.md deleted file mode 100644 index 4acaee038e..0000000000 --- a/docker-cloud/infrastructure/link-aws.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -description: Link your Amazon Web Services account -keywords: AWS, Cloud, link -redirect_from: -- /docker-cloud/getting-started/beginner/link-aws/ -- /docker-cloud/getting-started/link-aws/ -title: Link an Amazon Web Services account ---- - -You can create a role with AWS IAM (Identity and Access Management) so that -Docker Cloud can 
provision and manage swarms on your behalf. - -## How to create the link - -For instructions on how to link your AWS account to Docker Cloud, -see [the AWS instructions that enable swarm -mode](/docker-cloud/cloud-swarm/link-aws-swarm.md). - -> **Note**: The procedure for linking these accounts is the same, regardless -of whether you are using Docker Cloud in -[Swarm Mode](/docker-cloud/index.md) or not. If you are -using standard mode, come back to these topics after you have linked -your AWS account. - -## What's next? - -You're ready to start using AWS as the infrastructure provider for Docker Cloud! -If you came here from the tutorial, [continue the tutorial and deploy your first -node](/docker-cloud/getting-started/your_first_node.md). diff --git a/docker-cloud/infrastructure/link-azure.md b/docker-cloud/infrastructure/link-azure.md deleted file mode 100644 index f29cf20c4f..0000000000 --- a/docker-cloud/infrastructure/link-azure.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -description: Link your Microsoft Azure account -keywords: Microsoft, Azure, account -redirect_from: -- /docker-cloud/getting-started/beginner/link-azure/ -- /docker-cloud/getting-started/link-azure/ -title: Link a Microsoft Azure account ---- - -You can link your Microsoft Azure account to your Docker Cloud account to deploy -**nodes** and **node clusters** using Docker Cloud's Dashboard, API, or CLI. You -must link your Azure account so that Docker Cloud can interact with Azure on -your behalf to create and manage your **nodes** (virtual machines). - -## How to create the link - -For instructions on how to link your Microsoft Azure account to Docker Cloud, -see [the Azure instructions that enable swarm -mode](/docker-cloud/cloud-swarm/link-azure-swarm.md). - -> **Note**: The procedure for linking these accounts is the same, regardless -of whether you are using Docker Cloud in -[Swarm Mode](/docker-cloud/index.md) or not. 
If you are -using standard mode, come back to these topics after you have linked -your Microsoft Azure account. - -## What's next? - -You're ready to start using Microsoft Azure as the infrastructure provider for -Docker Cloud! If you came here from the tutorial, click here to [continue the -tutorial and deploy your first -node](/docker-cloud/getting-started/your_first_node.md). diff --git a/docker-cloud/infrastructure/link-do.md b/docker-cloud/infrastructure/link-do.md deleted file mode 100644 index d4322e26b4..0000000000 --- a/docker-cloud/infrastructure/link-do.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -description: Link your DigitalOcean account -keywords: link, DigitalOcean, account -redirect_from: -- /docker-cloud/getting-started/beginner/link-do/ -- /docker-cloud/getting-started/link-do/ -- /engine/installation/cloud/cloud-ex-machine-ocean/ -title: Link a DigitalOcean account ---- - -You can link your DigitalOcean account to your Docker Cloud account to deploy -**nodes** and **node clusters** using Docker Cloud's Dashboard, API, and CLI. -You must link your DigitalOcean account so that Docker Cloud can interact with -DigitalOcean on your behalf to create and manage your **nodes** (droplets). - -If you don't have a **DigitalOcean** account, you can sign up with this link to -get a $10 credit: -[https://www.digitalocean.com/?refcode=bc0c34035aa5](https://www.digitalocean.com/?refcode=bc0c34035aa5) - -## Link your DigitalOcean Account - -To link your DigitalOcean account so you can launch **nodes** using Docker -Cloud, navigate to **Account info \> Cloud Providers**. A list of all -the providers that you can link to Docker Cloud is shown. Click **Link account** next to -DigitalOcean. - -![](images/do-link-account.png) - -You're redirected to a DigitalOcean login screen. Use your **DigitalOcean** -credentials to log in to your account. - -![](images/do-login-screen.png) - -Once you log in, a message appears prompting you to confirm the link. 
- -![](images/do-approve-access.png) - -## What's next? - -You're ready to start using DigitalOcean as the infrastructure provider for -Docker Cloud! If you came here from the tutorial, click here to [continue the -tutorial and deploy your first node](../getting-started/your_first_node.md). diff --git a/docker-cloud/infrastructure/link-packet.md b/docker-cloud/infrastructure/link-packet.md deleted file mode 100644 index 079558f29f..0000000000 --- a/docker-cloud/infrastructure/link-packet.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -description: Link your Packet account -keywords: Packet, link, Cloud -redirect_from: -- /docker-cloud/getting-started/beginner/link-packet/ -- /docker-cloud/getting-started/link-packet/ -title: Link a Packet account -notoc: true ---- - -You can register your [Packet](https://www.packet.net/promo/docker-cloud/) -account credentials in your Docker Cloud account to deploy **nodes** and **node -clusters** using Docker Cloud's Dashboard, API, and CLI. Your Packet API key is -required so Docker Cloud can interact with Packet on your behalf to create and -manage your **nodes** (Packet devices). - -To link your Packet account so you can launch **nodes** from Docker Cloud, -navigate to **Account info \> Cloud Providers**. Click **Add credentials**. - -![](images/packet-link-account.png) - -If you already have an API key to use with Docker Cloud, enter it in the -`Authentication token`. - -Otherwise, open a new tab and log into your Packet account. Click **API Keys** -from the left menu. Then, click the **+** button at the bottom right corner, -enter a description for your new API key, and click **Generate**. - -![](images/packet-add-apikey.png) - -Copy the **Token** of the new API key. - -Go back to the Docker Cloud tab and paste the new API key in the `Authentication -token` field of the `Packet credentials` dialog. - -## What's next? - -You're ready to start using Packet as the infrastructure provider for Docker -Cloud! 
If you came here from the tutorial, click here to [continue the tutorial -and deploy your first node](../getting-started/your_first_node.md). diff --git a/docker-cloud/infrastructure/link-softlayer.md b/docker-cloud/infrastructure/link-softlayer.md deleted file mode 100644 index b318945ffb..0000000000 --- a/docker-cloud/infrastructure/link-softlayer.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -description: Link your SoftLayer account -keywords: SoftLayer, link, cloud -redirect_from: -- /docker-cloud/getting-started/beginner/link-softlayer/ -- /docker-cloud/getting-started/link-softlayer/ -title: Link a SoftLayer account ---- - -You can register your SoftLayer account credentials in your Docker Cloud account -to deploy **nodes** using Docker Cloud's Dashboard, API, or CLI. Docker Cloud -uses your SoftLayer username and API key to interact with SoftLayer on your -behalf to create and manage your **nodes** (virtual servers). - -Although any SoftLayer account with the right privileges works, we recommend -creating a new **dockercloud-user** user. If you have already created a service -user, or do not wish to do so, continue to [Add SoftLayer Account -Credentials](link-softlayer.md#add-softlayer-account-credentials). - -### Create dockercloud-user in SoftLayer - -Go to the **Users** section in SoftLayer (using the following URL): -[https://control.softlayer.com/account/users](https://control.softlayer.com/account/users) - -Click **Add User**: - -![](images/softlayer-step-1.png) - -Fill out the **Add User - Profile** form, and enter `dockercloud-user` in the -username field. Docker Cloud uses the service user's API key, so the password -you set is less important. Click **Add User**. 
- -![](images/softlayer-step-2.png) - -In the next step, **Permissions**, select the following permissions: - -* Support - * View Tickets - * Add Tickets - * Edit Tickets -* Devices - * View Virtual Server Details -* Services - * Manage SSH keys -* Account - * Cancel Server - * Cancel Services - * Add Server - -Click **Add Portal Permissions**. - -Go back to the **Users** list, and click **Generate** under the **API Key** column: - -![](images/softlayer-step-6.png) - -Once generated, click the **View** link under the **API Key** column, and copy the generated API Key. - -![](images/softlayer-step-7.png) - -Once you create the new user `dockercloud-user`, have its -credentials, and set its permissions, go back to Docker Cloud. - -## Add SoftLayer account credentials - -To link your SoftLayer account so you can launch **nodes** from Docker Cloud, -navigate to **Account info \> Cloud providers**. Click **Add -credentials**. - -![](images/softlayer-link-account.png) - -Copy and paste the `username` and the `API Key` you received from SoftLayer into their corresponding fields in dialog that appears. - -![](images/softlayer-modal.png) - -## What's next? - -You're ready to start using SoftLayer as the infrastructure provider -for Docker Cloud! If you came here from the tutorial, click here to [continue the tutorial and deploy your first node](../getting-started/your_first_node.md). 
diff --git a/docker-cloud/infrastructure/ssh-into-a-node.md b/docker-cloud/infrastructure/ssh-into-a-node.md deleted file mode 100644 index 285c561487..0000000000 --- a/docker-cloud/infrastructure/ssh-into-a-node.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -description: SSHing into a Docker Cloud-managed node -keywords: ssh, Cloud, node -redirect_from: -- /docker-cloud/getting-started/intermediate/ssh-into-a-node/ -- /docker-cloud/tutorials/ssh-into-a-node/ -- /docker-cloud/faq/how-ssh-nodes/ -title: SSH into a Docker Cloud-managed node ---- - -You can add a public SSH key to the `authorized_keys` file in each of your Linux -nodes, so that you can log into the nodes using SSH without providing a password. - -The quickest way to do this is to create the SSH keys, then run our -[dockercloud/authorizednodes](https://hub.docker.com/r/dockercloud/authorizedkeys){:target="_blank" class="_"} -utility image. Follow the instructions at that link to add the public SSH key to -each node. - -Afterward, from a machine which has the private key available, you can SSH into -any of the nodes without providing a password. \ No newline at end of file diff --git a/docker-cloud/installing-cli.md b/docker-cloud/installing-cli.md deleted file mode 100644 index 8d06f2559a..0000000000 --- a/docker-cloud/installing-cli.md +++ /dev/null @@ -1,169 +0,0 @@ ---- -description: Using the Docker Cloud CLI on Linux, Windows, and macOS, installing, updating, uninstall -keywords: cloud, command-line, CLI -redirect_from: -- /docker-cloud/getting-started/intermediate/installing-cli/ -- /docker-cloud/getting-started/installing-cli/ -- /docker-cloud/tutorials/installing-cli/ -title: The Docker Cloud CLI ---- - -Docker Cloud maintains a Command Line Interface (CLI) tool that you can use -to interact with the service. We highly recommend installing the CLI, as it -allows you to script and automate actions in Docker Cloud without using the web -interface. 
If you only ever use the web interface, this is not necessary. - -## Install - -Install the docker-cloud CLI either by running a Docker container, or by using the package manager for your system. - -#### Run the CLI in a Docker container - -If you have Docker Engine installed locally, you can run the following `docker` -command in your shell regardless of which operating system you are using. - -```none -$ docker run dockercloud/cli -h -``` - -This command runs the `docker-cloud` CLI image in a container for you. Learn -more about how to use this container -[here](https://github.com/docker/dockercloud-cli#docker-image). - -#### Install for Linux or Windows - -You can install the CLI locally using the [pip](https://pip.pypa.io/en/stable/) -package manager, which is a package manager for -[Python](https://www.python.org/) applications. - -* If you already have Python 2.x or 3.x installed, you probably have `pip` and -`setuptools`, but need to upgrade per the instructions -[here](https://packaging.python.org/installing/). - -> The Docker Cloud CLI does not currently support Python 3.x. -> -> we recommend using Python 2.x. To learn more, -see the Python and CLI issues described in -[Known issues in Docker Cloud](/docker-cloud/docker-errors-faq.md). - -* If you do not have Python or `pip` installed, you can either [install -Python](https://wiki.python.org/moin/BeginnersGuide/Download) or use this -[standalone pip -installer](https://pip.pypa.io/en/latest/installing/#installing-with-get-pip-py). You do not need Python for our purposes, just `pip`. - -Now that you have `pip`, open a shell or terminal -window and run the following command to install the docker-cloud CLI: - -```bash -$ pip install docker-cloud -``` - -If you encounter errors on Linux machines, make sure that `python-dev` is -installed. 
For example, on Ubuntu, run the following command: - -``` -$ apt-get install python-dev -``` - -#### Install on macOS - -We recommend installing Docker CLI for macOS using Homebrew. If you don't have -`brew` installed, follow the instructions here: [http://brew.sh](http://brew.sh){: target="_blank" class="_"} - -Once Homebrew is installed, open Terminal and run the following command: - -```bash -$ brew install docker-cloud -``` - -> **Note**: You can also use [pip](https://pip.pypa.io/en/stable/) to install on macOS, but we suggest Homebrew since it is a package manager designed for the -Mac. - -#### Validate the installation - -Check that the CLI installed correctly: - -```bash -$ docker-cloud -v -docker-cloud 1.0.0 -``` - -## Getting Started - -First, you should log in using the `docker` CLI and the `docker login` command. -Your Docker ID, which you also use to log in to Docker Hub, is also used for -logging in to Docker Cloud. - -```none -$ docker login -Username: user -Password: -Email: user@example.org -Login succeeded! -``` - -#### What's next? - -See the [Developer documentation](/apidocs/docker-cloud.md) for more information on using the CLI and our APIs. - - -## Use the docker-cloud CLI with an organization - -When you use the docker-cloud CLI, it authenticates against the Docker Cloud -service with the user credentials saved by the `docker login` command. To use -the CLI to interact with objects belonging to an [Organization](orgs.md), you -must override the `DOCKERCLOUD_NAMESPACE` environment variable that sets this -user. - -For example: - -```none -$ export DOCKERCLOUD_NAMESPACE=myorganization -``` - -You can also set the `DOCKERCLOUD_NAMESPACE` variable before each CLI command. -For example: - -```none -$ DOCKERCLOUD_NAMESPACE=myteam docker container ps -``` - -To learn more, see the [Docker Cloud CLI README](https://github.com/docker/dockercloud-cli#namespace). 
- - -## Upgrade the docker-cloud CLI - -Periodically, Docker adds new features and fixes bugs in the existing CLI. To use these new features, you must upgrade the CLI. - -#### Upgrade the docker-cloud CLI on Linux or Windows - -```none -$ pip install -U docker-cloud -``` - -#### Upgrade the docker-cloud CLI on macOS - -```none -$ brew update && brew upgrade docker-cloud -``` - -## Uninstall the docker-cloud CLI - -If you are having trouble using the docker-cloud CLI, or find that it conflicts -with other applications on your system, you may want to uninstall and reinstall. - -#### Uninstall on Linux or Windows - -Open your terminal or command shell and execute the following command: - -```none -$ pip uninstall docker-cloud -``` - -#### Uninstall on macOS - -Open your Terminal application and execute the following command: - -```none -$ brew uninstall docker-cloud -``` diff --git a/docker-cloud/migration/cloud-to-kube-aks.md b/docker-cloud/migration/cloud-to-kube-aks.md deleted file mode 100644 index f72cf595be..0000000000 --- a/docker-cloud/migration/cloud-to-kube-aks.md +++ /dev/null @@ -1,789 +0,0 @@ ---- -description: How to migrate apps from Docker Cloud to AKS -keywords: cloud, migration, kubernetes, azure, aks -title: Migrate Docker Cloud stacks to Azure Container Service ---- - -## AKS Kubernetes - -This page explains how to prepare your applications for migration from Docker Cloud to [Azure Container Service (AKS)](https://azure.microsoft.com/en-us/free/){: target="_blank" class="_"} clusters. AKS is a hosted Kubernetes service on Microsoft Azure. It exposes standard Kubernetes APIs so that standard Kubernetes tools and apps run on it without needing to be reconfigured. - -At a high level, migrating your Docker Cloud applications requires that you: - -- **Build** a target environment (Kubernetes cluster on AKS). -- **Convert** your Docker Cloud YAML stackfiles. -- **Test** the converted YAML stackfiles in the new environment. 
-- **Point** your application CNAMES to new service endpoints. -- **Migrate** your applications from Docker Cloud to the new environment. - -To demonstrate, we **build** a target environment of AKS nodes, **convert** the Docker Cloud stackfile for [example-voting-app](https://github.com/dockersamples/example-voting-app){: target="_blank" class="_"} to a Kubernetes manifest, and **test** the manifest in the new environment to ensure that it is safe to migrate. - -> The actual process of migrating -- switching customers from your Docker Cloud applications to AKS applications -- will vary by application and environment. - -## Voting-app example - -The Docker Cloud stack of our example voting application is defined in [dockercloud.yml](https://raw.githubusercontent.com/dockersamples/example-voting-app/master/dockercloud.yml){: target="_blank" class="_"}. This document explains how `dockercloud.yml` is converted to a Kubernetes YAML manifest file so that you have the tools to do the same for your applications. - -In the [dockercloud.yml](https://raw.githubusercontent.com/dockersamples/example-voting-app/master/dockercloud.yml){: target="_blank" class="_"}, the voting app is defined as a stack of six microservices: - -- **vote**: Web front-end that displays voting options -- **redis**: In-memory k/v store that collects votes -- **worker**: Stores votes in database -- **db**: Persistent store for votes -- **result**: Web server that pulls and displays results from database -- **lb**: Container-based load balancer - -Votes are accepted with the `vote` service and stored in persistent backend database (`db`) with the help of services, `redis`, `worker`, and `lb`. The vote tally is displayed with the `result` service. - -![image of voting app arch](images/votingapp-arch.png){:width="500px"} - -## Migration prerequisites - -To complete the migration from Docker Cloud to Kubernetes on AKS, you need: - -- An active Azure subscription with billing enabled. 
- -## Build target environment - -Azure Container Service (AKS) is a managed Kubernetes service. Azure takes care of all of the Kubernetes control plane management (the master nodes) -- delivering the control plane APIs, managing control plane HA, managing control plane upgrades, etc. You only need to look after worker nodes -- how many, the size and spec, where to deploy them, etc. - -High-level steps to build a working AKS cluster are: - -1. Generate credentials to register AKS with Azure AD. -2. Deploy an AKS cluster (and register with Azure AD). -3. Connect to the AKS cluster. - -### Generate AD registration credentials - -Currently, AKS needs to be manually registered with Azure Active Directory (AD) so that it can receive security tokens and integrate with secure sign-on and authorization. - -> _When you register an [Azure AD "application"](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-application-objects){: target="_blank" class="_"}_ _in the Azure portal, two objects are created in your Azure AD tenant: an application object, and a service principal object._ - -The following steps create the registration and output the credentials required to register AKS when deploying a cluster. - -1. Log in to the [Azure portal](https://portal.azure.com){: target="_blank" class="_"}. -2. Click **Azure Active Directory** > **App registrations** > **New application registration**. -3. Assign a **Name**, select application type **Web app/API**, and enter a **Sign-on URL**. The sign-on URL needs to be a valid DNS name but does not need to be resolvable. An example might be `https://k8s-vote.com`. -4. Click **Create**. -5. Copy and save the **Application ID** (this is your **Service principal client ID**). -6. Click **Settings** > **Keys** and set a description and duration. -7. Click **Save**. -8. Copy and save the **Value** (this your **Service principal client secret**, and also the only time you will see it, so don't lose it!). 
- -You now have the credentials required to register AKS as part of the next section. - -### Deploy an AKS cluster - -In this section, we build a three-node cluster; your cluster should probably be based on the configuration of your Docker Cloud node cluster. - -Whereas Docker Cloud deploys work to all nodes in a cluster (managers and workers), _Kubernetes only deploys work to worker nodes_. This affects how you should size your cluster. If your Docker Cloud node cluster was working well with three managers and two workers of a particular size, you should probably size your AKS cluster to have five nodes of a similar size. - -> In Docker Cloud, to see the configuration of each of your clusters, select **Node Clusters** > _your_cluster_. - -Before continuing, ensure you know: - -- Your **Azure subscription credentials** -- **Azure region** to which you want to deploy your AKS cluster -- **SSH public key** to use when connecting to AKS nodes -- **Service principal client ID** and **Service principal client secret** (from the previous section) -- **Number, size, and spec** of the worker nodes you want. - -To deploy a cluster of AKS nodes: - -1. Select **+Create a resource** from the left-hand panel of the Azure portal dashboard. - -2. Select **Containers** > **Azure Container Service - AKS (preview)**. _Do not select the other ACS option._ - -3. Fill out the required fields and click **OK**: - - - **Cluster name**: Set any name for the cluster. - - **Kubernetes version**: Select one of the 1.8.x versions. - - **Subscription**: Select the subscription to pay for the cluster. - - **Resource group**: Create a new resource group or choose one from your existing list. - - **Location**: Select the Azure region to which to deploy the cluster. AKS may not be available in all Azure regions. - -4. Configure additional AKS cluster parameters and click **OK**: - - - **User name**: The default option should be fine. 
- - **SSH public key**: The public key (certificate) of a key-pair that you own and that can be used for SSH. If you need to generate a new set, you can use tools such as `ssh-keygen` or PuTTY. The key should be a minimum of 2048 bits of type `ssh-rsa`. - - **Service principal client ID**: The application ID that you copied in an earlier step. - - **Service principal client secret**: The password value that you copied in a previous step. - - **Node count**: The number of _worker_ nodes that you want in the cluster. It should probably match the _total_ number of nodes in your existing Docker Cloud node cluster (managers + workers). - - **Node virtual machine size**: The size and specification of each AKS _worker_ node. It should probably match the configuration of your existing Docker Cloud node cluster. - -5. Review the configuration on the Summary screen and click **OK** to deploy the cluster. It can take a few minutes. - -### Connect to the AKS cluster - -You can connect to your AKS cluster from the web-based [Azure Cloud Shell](https://docs.microsoft.com/en-us/azure/cloud-shell/overview){: target="_blank" class="_"}; but to do so from your laptop, or other local terminal, you must: - -- Install the Azure CLI tool (`az`). -- Install the Kubernetes CLI (`kubectl`) -- Configure `kubectl` to connect to your AKS cluster. - -To connect to your AKS cluster from a local terminal: - -1. Download and install the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest){: target="_blank" class="_"} for your Operating System. - -2. With the Azure CLI, install the Kubernetes CLI, `kubectl`. - - ``` - > az aks install-cli - Downloading client to C:\Program Files (x86)\kubectl.exe from... - ``` - - You can install `kubectl` with or without `az`. If you have `kubectl` already installed, ensure that the current context is correct: - - ``` - > kubectl config get-context - > kubectl config use-context - ``` - -3. 
Start the Azure login process: - - ``` - > az login - To sign in, use a web browser to open the page https://aka.ms/devicelogin and enter... - ``` - -4. Open the "devicelogin" page in a browser and paste the authentication code. When complete, the CLI returns some JSON. - -5. Get the credentials and use them to configure `kubectl`: - - The values for `--resource-group` and `--name` are the Resource Group and Cluster Name that you set in the previous steps. Substitute the values below with the values for your environment. - - ``` - > az aks get-credentials --resource-group=k8s-vote --name=k8s-vote - Merged "k8s-vote" as current context in C:\Users\nigel\.kube\config - ``` - -6. Test that `kubectl` can connect to your cluster. - - ``` - > kubectl get nodes - NAME STATUS ROLES AGE VERSION - aks-agentpool-29046111-0 Ready agent 3m v1.8.1 - aks-agentpool-29046111-1 Ready agent 2m v1.8.1 - aks-agentpool-29046111-2 Ready agent 2m v1.8.1 - ``` - - If the values returned match your AKS cluster (number of nodes, age, and version), then you have successfully configured `kubectl` to manage your AKS cluster. - -You now have an AKS cluster and have configured `kubectl` to manage it. Let's look at how to convert your Docker Cloud app into a Kubernetes app. - -## Convert Docker Cloud stackfile - -**In the following sections, we discuss each service definition separately, but you should group them into one stackfile with the `.yml` extension, for example, [k8s-vote.yml](#combined-manifest-k8s-vote.yml){: target="_blank" class="_"}.** - -To prepare your applications for migration from Docker Cloud to Kubernetes, you must recreate your Docker Cloud stackfiles as Kubernetes _manifests_. Once you have each application converted, you can test and deploy. Like Docker Cloud stackfiles, Kubernetes manifests are YAML files but usually longer and more complex. 
- -> In Docker Cloud, to find the stackfiles for your existing applications, you can either: (1) Select **Stacks** > _your_stack_ > **Edit**, or (2) Select **Stacks** > _your_stack_ and scroll down. - -In the Docker Cloud stackfile, the six Docker _services_ in our `example-voting-app` stack are defined as **top-level keys**: - -``` -db: -redis: -result: -lb: -vote: -worker: -``` - -Kubernetes applications are built from objects (such as [Pods](https://kubernetes.io/docs/concepts/workloads/pods/pod/){: target="_blank" class="_"}) -and object abstractions (such as [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/){: target="_blank" class="_"} -and [Services](https://kubernetes.io/docs/concepts/services-networking/service/){: target="_blank" class="_"}). For each _Docker service_ in our voting app stack, we create one Kubernetes Deployment and one _Kubernetes Service_. Each Kubernetes Deployment spawns Pods. A Pod represents one or more containers (usually one) and is the smallest unit of work in Kubernetes. - -> A [Docker serivce](https://docs.docker.com/engine/swarm/how-swarm-mode-works/services/){: target="_blank" class="_"} is one component of an application that is generated from one image. -> A [Kubernetes service](https://kubernetes.io/docs/concepts/services-networking/service/){: target="_blank" class="_"} is a networking construct that load balances Pods behind a proxy. - -A Kubernetes Deployment defines the application "service" -- which Docker image to use and the runtime instructions (which container ports to map and the container restart policy). The Deployment is also where you define rolling updates, rollbacks, and other advanced features. - -A Kubernetes Service object is an abstraction that provides stable networking for a set of Pods. A Service is where you can register a cluster-wide DNS name and virtual IP (VIP) for accessing the Pods, and also create cloud-native load balancers. 
- -This diagram shows four Pods deployed as part of a single Deployment. Each Pod is labeled as “app=vote”. The Deployment has a label selector, “app=vote”, and this combination of labels and label selector is what allows the Deployment object to manage Pods (create, terminate, scale, update, roll back, and so on). Likewise, the Service object selects Pods on the same label (“app-vote”) which allows the service to provide a stable network abstraction (IP and DNS name) for the Pods. - -![Voting app vote Kube pods](images/votingapp-kube-pods-vote.png){:width="500px"} - -### db service - -> Consider using a hosted database service for production databases. This is something that, ideally, should not change as part of your migration away from Docker Cloud stacks. - -**Docker Cloud stackfile**: The Docker Cloud stackfile defines an image and a restart policy for the `db` service. - -``` -db: - image: 'postgres:9.4' - restart: always -``` - -**Kubernetes manifest**: The Kubernetes translation defines two object types or "kinds": a _Deployment_ and a _Service_ (separated by three dashes `---`). Each object includes an API version, metadata (labels and name), and a `spec` field for object configuration (that is, the Deployment Pods and the Service). - -``` -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - name: db - labels: - app: db -spec: - selector: - matchLabels: - app: db - template: - metadata: - labels: - app: db - spec: - containers: - - image: postgres:9.4 - name: db - restartPolicy: Always ---- -apiVersion: v1 -kind: Service -metadata: - name: db -spec: - clusterIP: None - ports: - - port: 55555 - targetPort: 0 - selector: - app: db -``` - -About the Kubernetes fields in general: - -- `apiVersion` sets the schema version for Kubernetes to use when managing the object. The versions set here are supported on AKS (1.7.7 and 1.8.1). -- `kind` defines the object type. In this example, we only define Deployments and Services but there are many others. 
-- `metadata` assigns a name and set of labels to the object. -- `spec` is where we configure the object. In a Deployment, `spec` defines the Pods to deploy. - -It is important that **Pod labels** (`Deployment.spec.template.metadata.labels`) match both the Deployment label selector (`Deployment.spec.selector.matchLabels`) and the Service label selector (`Service.spec.selector`). This is how the Deployment object knows which Pods to manage and how the Service object knows which Pods to provide networking for. - -> Deployment and Service label selectors have different fields in the YAML file because Deployments use [set-based selectors](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#set-based-requirement){: target="_blank" class="_"} -and Services use [equality-based selectors](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#equality-based-requirement){: target="_blank" class="_"}. - -For the `db` Deployment, we define a container called `db` based on the `postgres:9.4` Docker image, and define a restart policy. All Pods created by this Deployment have the label, `app=db` and the Deployment selects on them. - -The `db` Service is a “headless” service (`clusterIP: None`). Headless services are useful when you want a stable DNS name but do not need the cluster-wide VIP. They create a stable DNS record, but instead of creating a VIP, they map the DNS name to multiple -[A records](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#a-records){: target="_blank" class="_"} -- one for each Pod associated with the Service. - -The Service’s label selector (`Service.spec.selector`) has the value, "app=db". This means the Service provides stable networking and load balancing for all Pods on the cluster labeled as “app=db”. Pods defined in the Deployment section are all labelled as "app-db". 
It is this mapping between the Service label selector and the Pod labels that tells the Service object which Pods for which to provide networking. - -### redis service - -**Docker Cloud stackfile**: - -``` -redis: - image: 'redis:latest' - restart: always -``` - -**Kubernetes manifest**: - -``` -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - labels: - app: redis - name: redis -spec: - selector: - matchLabels: - app: redis - template: - metadata: - labels: - app: redis - spec: - containers: - - image: redis:alpine - name: redis - ports: - - containerPort: 6379 - restartPolicy: Always ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app: redis - name: redis -spec: - ports: - - port: 6379 - targetPort: 6379 - selector: - app: redis -``` - -Here, the Deployment object deploys a Pod from the `redis:alpine` image and sets the container port to `6379`. It also sets the `labels` for the Pods to the same value ("app=redis") as the Deployment’s label selector to tie the two together. - -The Service object defines a cluster-wide DNS mapping for the name "redis" on port 6379. This means that traffic for `tcp://redis:6379` is routed to this Service and is load balanced across all Pods on the cluster with the "app=redis" label. The Service is accessed on the cluster-wide `port` and forwards to the Pods on the `targetPort`. Again, the label-selector for the Service and the labels for the Pods are what tie the two together. - -The diagram shows traffic intended for `tcp://redis:6379` being sent to the redis Service and then load balanced across all Pods that match the Service label selector. - -![Voting app redis Kube pods](images/votingapp-kube-pods-redis.png){:width="500px"} - -### lb service - -The Docker Cloud stackfile defines an `lb` service to balance traffic to the vote service. On AKS, this is not necessary because Kubernetes lets you define a Service object with `type=balancer`, which creates a native Azure load balancer to do this job. 
We demonstrate in the `vote` section. - -### vote service - -The Docker Cloud stackfile for the `vote` service defines an image, a restart policy, and a specific number of Pods (replicas: 5). It also enables the Docker Cloud `autoredeploy` feature. We can tell that it listens on port 80 because the Docker Cloud `lb` service forwards traffic to it on port 80; we can also inspect its image. - -> **Autoredeploy options**: Autoredeploy is a Docker Cloud feature that automatically updates running applications every time you push an image. It is not native to Docker CE, AKS or GKE, but you may be able to regain it with Docker Cloud auto-builds, using web-hooks from the Docker Cloud repository for your image back to the CI/CD pipeline in your dev/staging/production environment. - -**Docker Cloud stackfile**: - -``` -vote: - autoredeploy: true - image: 'docker/example-voting-app-vote:latest' - restart: always - target_num_containers: 5 -``` - -**Kubernetes manifest**: - -``` -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - labels: - app: vote - name: vote -spec: - selector: - matchLabels: - app: vote - replicas: 5 - template: - metadata: - labels: - app: vote - spec: - containers: - - image: docker/example-voting-app-vote:latest - name: vote - ports: - - containerPort: 80 - restartPolicy: Always ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app: vote - name: vote -spec: - type: LoadBalancer - ports: - - port: 80 - selector: - app: vote -``` - -Again, we ensure that both Deployment and Service objects can find the Pods with matching labels ("app=vote"). We also set the number of Pod replicas to five (`Deployment.spec.replicas`) so that it matches the `target_num_containers` from the Docker Cloud stackfile. - -We define the Service as "type=loadbalancer". This creates a native Azure load balancer with a stable, publicly routable IP for the service. 
It also maps port 80 so that traffic hitting port 80 is load balanced across all five Pod replicas in the cluster. (This is why the `lb` service from the Docker Cloud app is not needed.) - -### worker service - -Like the `vote` service, the `worker` service defines an image, a restart policy, and a specific number of Pods (replicas: 5). It also defines the Docker Cloud `autoredeploy` policy (which is not supported in AKS). - -> **Autoredeploy options**: Autoredeploy is a Docker Cloud feature that automatically updates running applications every time you push an image. It is not native to Docker CE, AKS or GKE, but you may be able to regain it with Docker Cloud auto-builds, using web-hooks from the Docker Cloud repository for your image back to the CI/CD pipeline in your dev/staging/production environment. - -**Docker Cloud stackfile**: - -``` -worker: - autoredeploy: true - image: 'docker/example-voting-app-worker:latest' - restart: always - target_num_containers: 3 -``` - -**Kubernetes manifest**: - -``` -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - labels: - app: worker - name: worker -spec: - selector: - matchLabels: - app: worker - replicas: 3 - template: - metadata: - labels: - app: worker - spec: - containers: - - image: docker/example-voting-app-worker:latest - name: worker - restartPolicy: Always ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app: worker - name: worker -spec: - clusterIP: None - ports: - - port: 55555 - targetPort: 0 - selector: - app: worker -``` - -Again, we ensure that both Deployment and Service objects can find the Pods with matching labels ("app=worker"). 
- -The `worker` Service (like `db`) is another ["headless" service](https://kubernetes.io/docs/concepts/services-networking/service/#headless-services){: target="_blank" class="_"} where a DNS name is created and mapped to individual -[A records](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#a-records){: target="_blank" class="_"} for each Pod rather than a cluster-wide VIP. - -### result service - -**Docker Cloud stackfile**: - -``` -result: - autoredeploy: true - image: 'docker/example-voting-app-result:latest' - ports: - - '80:80' - restart: always -``` - -**Kubernetes manifest**: - -``` -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - labels: - app: result - name: result -spec: - selector: - matchLabels: - app: result - template: - metadata: - labels: - app: result - spec: - containers: - - image: docker/example-voting-app-result:latest - name: result - ports: - - containerPort: 80 - restartPolicy: Always ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app: result - name: result -spec: - type: LoadBalancer - ports: - - port: 80 - selector: - app: result -``` - -The Deployment section defines the usual names, labels and container spec. The `result` Service (like the `vote` Service) defines a native Azure load balancer to distribute external traffic to the cluster on port 80. - -### Combined manifest k8s-vote.yml - -You can combine all Deployments and Services in a single YAML file, or have individual YAML files per Docker Cloud service. The choice is yours, but it's usually easier to deploy and manage one file. - -> You should manage your Kubernetes manifest files the way you manage your application code -- checking them in and out of version control repositories etc. - -Here, we combine all the Kubernetes definitions explained above into one YAML file that we call, `k8s-vote.yml`. 
- -``` -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - name: db - labels: - app: db -spec: - selector: - matchLabels: - app: db - template: - metadata: - labels: - app: db - spec: - containers: - - image: postgres:9.4 - name: db - restartPolicy: Always ---- -apiVersion: v1 -kind: Service -metadata: - name: db -spec: - clusterIP: None - ports: - - port: 55555 - targetPort: 0 - selector: - app: db ---- -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - labels: - app: redis - name: redis -spec: - selector: - matchLabels: - app: redis - template: - metadata: - labels: - app: redis - spec: - containers: - - image: redis:alpine - name: redis - ports: - - containerPort: 6379 - restartPolicy: Always ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app: redis - name: redis -spec: - ports: - - port: 6379 - targetPort: 6379 - selector: - app: redis ---- -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - labels: - app: vote - name: vote -spec: - selector: - matchLabels: - app: vote - replicas: 5 - template: - metadata: - labels: - app: vote - spec: - containers: - - image: docker/example-voting-app-vote:latest - name: vote - ports: - - containerPort: 80 - restartPolicy: Always ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app: vote - name: vote -spec: - type: LoadBalancer - ports: - - port: 80 - selector: - app: vote ---- -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - labels: - app: worker - name: worker -spec: - selector: - matchLabels: - app: worker - replicas: 3 - template: - metadata: - labels: - app: worker - spec: - containers: - - image: docker/example-voting-app-worker:latest - name: worker - restartPolicy: Always ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app: worker - name: worker -spec: - clusterIP: None - ports: - - port: 55555 - targetPort: 0 - selector: - app: worker ---- -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - labels: - app: result - name: result -spec: - selector: - 
matchLabels: - app: result - template: - metadata: - labels: - app: result - spec: - containers: - - image: docker/example-voting-app-result:latest - name: result - ports: - - containerPort: 80 - restartPolicy: Always ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app: result - name: result -spec: - type: LoadBalancer - ports: - - port: 80 - selector: - app: result -``` - -Save the Kubernetes manifest file (as `k8s-vote.yml`) and check it into version control. - -## Test the app on AKS - -Before migrating, you should thoroughly test each new Kubernetes manifest on a AKS cluster. Healthy testing includes _deploying_ the application with the new manifest file, performing _scaling_ operations, increasing _load_, running _failure_ scenarios, and doing _updates_ and _rollbacks_. These tests are specific to each of your applications. You should also manage your manifest files in a version control system. - -The following steps explain how to deploy your app from the Kubernetes manifest file and verify that it is running. The steps are based on the sample application used throughout this guide, but the general commands should work for any app. - -> Run from an [Azure Cloud Shell](https://shell.azure.com/){: target="_blank" class="_"} or local terminal with `kubectl` configured to talk to your AKS cluster. - -1. Verify that your shell/terminal is configured to talk to your AKS cluster. The output should match your cluster. - - ``` - > kubectl get nodes - NAME STATUS ROLES AGE VERSION - aks-agentpool-29046111-0 Ready agent 6h v1.8.1 - aks-agentpool-29046111-1 Ready agent 6h v1.8.1 - aks-agentpool-29046111-2 Ready agent 6h v1.8.1 - ``` - -2. Deploy your Kubernetes application to your cluster. - - The Kubernetes manifest here is `ks8-vote.yml` and lives in the system PATH. To use a different manifest, substitute `ks8-vote.yml` with the name of your manifest file. 
- - ``` - > kubectl create -f k8s-vote.yml - - deployment "db" created - service "db" created - deployment "redis" created - service "redis" created - deployment "vote" created - service "vote" created - deployment "worker" created - service "worker" created - deployment "result" created - service "result" created - ``` - -3. Check the status of the app (both Deployments and Services): - - ``` - > kubectl get deployments - NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE - db 1 1 1 1 43s - redis 1 1 1 1 43s - result 1 1 1 1 43s - vote 5 5 5 5 43s - worker 3 3 3 3 43s - - > kubectl get services - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - db ClusterIP None 55555/TCP 48s - kubernetes ClusterIP 10.0.0.1 443/TCP 6h - redis ClusterIP 10.0.168.188 6379/TCP 48s - result LoadBalancer 10.0.76.157 80:31033/TCP 47s - vote LoadBalancer 10.0.244.254 80:31330/TCP 48s - worker ClusterIP None 55555/TCP 48s - ``` - - Both `LoadBalancer` Services are `pending` because it takes a minute or two to provision an Azure load balancer. You can run `kubectl get svc --watch` to see when they are ready. Once provisioned, the output looks like this (with different external IPs): - - ``` - > kubectl get services - - result LoadBalancer 10.0.76.157 52.174.195.232 80:31033/TCP 7m - vote LoadBalancer 10.0.244.254 52.174.196.199 80:31330/TCP 8m - ``` - -4. Test that the application works in your new environment. - - For example, the voting app exposes two web front-ends -- one for casting votes and the other for viewing results: - - - Copy/paste the `EXTERNAL-IP` value for the `vote` service into a browser and cast a vote. - - Copy/paste the `EXTERNAL-IP` value for the `result` service into a browser and ensure your vote registered. - -If you had a CI/CD pipeline with automated tests and deployments for your Docker Cloud stacks, you should build, test, and implement one for each application on AKS. 
- -> You can extend your Kubernetes manifest file with advanced features to perform rolling updates and simple rollbacks. But you should not do this until you have confirmed your application is working with the simple manifest file. - -## Migrate apps from Docker Cloud - -> Remember to point your application CNAMES to new service endpoints. - -How you migrate your applications is unique to your environment and applications. - -- Plan with all developers and operations teams. -- Plan with customers. -- Plan with owners of other applications that interact with your Docker Cloud app. -- Plan a rollback strategy if problems occur. - -Once your migration is in process, check that the everything is working as expected. Ensure that users are hitting the new application on the Docker CE infrastructure and getting expected results. - -> Think before you terminate stacks and clusters -> -> Do not terminate your Docker Cloud stacks or node clusters until some time after the migration has been signed off as successful. If there are problems, you may need to roll back and try again. -{: .warning} diff --git a/docker-cloud/migration/cloud-to-kube-gke.md b/docker-cloud/migration/cloud-to-kube-gke.md deleted file mode 100644 index 2f4ae2665c..0000000000 --- a/docker-cloud/migration/cloud-to-kube-gke.md +++ /dev/null @@ -1,787 +0,0 @@ ---- -description: How to migrate apps from Docker Cloud to GKE -keywords: cloud, migration, kubernetes, google, gke -title: Migrate Docker Cloud stacks to Google Kubernetes Engine ---- - -## GKE Kubernetes - -This page explains how to prepare your applications for migration from Docker Cloud to [Google Kubernetes Engine (GKE)](https://cloud.google.com/free/){: target="_blank" class="_"} clusters. GKE is a hosted Kubernetes service on Google Cloud Platform (GCP). It exposes standard Kubernetes APIs so that standard Kubernetes tools and apps run on it without needing to be reconfigured. 
- -At a high level, migrating your Docker Cloud applications requires that you: - -- **Build** a target environment (Kubernetes cluster on GKE). -- **Convert** your Docker Cloud YAML stackfiles. -- **Test** the converted YAML stackfiles in the new environment. -- **Point** your application CNAMES to new service endpoints. -- **Migrate** your applications from Docker Cloud to the new environment. - -To demonstrate, we **build** a target environment of GKE nodes, **convert** the Docker Cloud stackfile for [example-voting-app](https://github.com/dockersamples/example-voting-app){: target="_blank" class="_"} to a Kubernetes manifest, and **test** the manifest in the new environment to ensure that it is safe to migrate. - -> The actual process of migrating -- switching customers from your Docker Cloud applications to GKE applications -- will vary by application and environment. - -## Voting-app example - -The Docker Cloud stack of our example voting application is defined in [dockercloud.yml](https://raw.githubusercontent.com/dockersamples/example-voting-app/master/dockercloud.yml){: target="_blank" class="_"}. This document explains how `dockercloud.yml` is converted to a Kubernetes YAML manifest file so that you have the tools to do the same for your applications. - -In the [dockercloud.yml](https://raw.githubusercontent.com/dockersamples/example-voting-app/master/dockercloud.yml){: target="_blank" class="_"}, the voting app is defined as a stack of six microservices: - -- **vote**: Web front-end that displays voting options -- **redis**: In-memory k/v store that collects votes -- **worker**: Stores votes in database -- **db**: Persistent store for votes -- **result**: Web server that pulls and displays results from database -- **lb**: Container-based load balancer - -Votes are accepted with the `vote` service and stored in persistent backend database (`db`) with the help of services, `redis`, `worker`, and `lb`. The vote tally is displayed with the `result` service. 
- -![image of voting app arch](images/votingapp-arch.png){:width="500px"} - -## Migration prerequisites - -To complete the migration from Docker Cloud to Kubernetes on GKE, you need: - -- An active Google Cloud subscription with billing enabled. - -## Build target environment - -Google Kubernetes Engine (GKE) is a managed Kubernetes service on the Google Cloud Platform (GCP). It takes care of all of the Kubernetes control plane management (the master nodes) -- delivering the control plane APIs, managing control plane HA, managing control plane upgrades, etc. You only need to look after worker nodes -- how many, the size and spec, where to deploy them, etc. - -High-level steps to build a working GKE cluster are: - -1. Create a new GKE project. -2. Create a GKE cluster. -3. Connect to the GKE cluster. - -### Create a new GKE project - -Everything in the Google Cloud Platform has to sit inside of a _project_. Let's create one. - -1. Log in to the [Google Cloud Platform Console](https://console.cloud.google.com){: target="_blank" class="_"}. -2. Create a new project. Either: - - - Select **Create an empty project** from the home screen, or ... - - Open **Select a project** from the top of the screen and click **+**. - -3. Name the project and click **Create**. It may take a minute. - - > The examples in this document assume a project named, `proj-k8s-vote`. - -### Create a GKE cluster - -In this section, we build a three-node cluster; your cluster should probably be based on the configuration of your Docker Cloud node cluster. - -Whereas Docker Cloud deploys work to all nodes in a cluster (managers and workers), _Kubernetes only deploys work to worker nodes_. This affects how you should size your cluster. If your Docker Cloud node cluster was working well with three managers and two workers of a particular size, you should probably size your GKE cluster to have five nodes of a similar size. 
- -> In Docker Cloud, to see the configuration of each of your clusters, select **Node Clusters** > _your_cluster_. - -Before continuing, ensure you know: - -- **Region and zone** in which you want to deploy your GKE cluster -- **Number, size, and spec** of the worker nodes you want. - -To build: - -1. Log into the [GCP Console](https://console.cloud.google.com){: target="_blank" class="_"}. - -2. Select your project from **Select a project** at the top of the Console screen. - -3. Click **Kubernetes Engine** from the left-hand menu. It may take a minute to start. - -4. Click **Create Cluster**. - -5. Configure the required cluster options: - - - **Name:** An arbitrary name for the cluster. - - **Description:** An arbitrary description for the cluster. - - **Location:** Determines if the Kubernetes control plane nodes (masters) are in a single availability zone or spread across availability zones within a GCP Region. - - **Zone/Region:** The zone or region in which to deploy the cluster. - - **Cluster version:** The Kubernetes version. You should probably use a 1.8.x or 1.9.x version. - - **Machine type:** The type of GKE VM for the worker nodes. This should probably match your Docker Cloud node cluster. - - **Node image:** The OS to run on each Kubernetes worker node. Use Ubuntu if you require NFS, glusterfs, Sysdig, or Debian packages, otherwise use a [COS (container-optimized OS)](https://cloud.google.com/container-optimized-os/). - - **Size:** The number of _worker_ nodes that you want in the GKE cluster. It should probably match the _total_ number of nodes in your existing Docker Cloud node cluster (managers + workers). - - You should carefully consider the other configuration options; but most deployments should be OK with default values. - -6. Click **Create**. It takes a minute or two for the cluster to create. - -Once the cluster is created, you can click its name to see more details. 
- -### Connect to the GKE cluster - -You can connect to your GKE cluster from the web-based [Google Cloud Shell](https://cloud.google.com/shell/){: target="_blank" class="_"}; but to do so from your laptop, or other local terminal, you must: - -- Install and configure the `gcloud` CLI tool. -- Install the Kubernetes CLI (`kubectl`) -- Configure `kubectl` to connect to your cluster. - -The `gcloud` tool is the command-line tool for interacting with the Google Cloud Platform. It is installed as part of the Google Cloud SDK. - -1. Download and install the [Cloud SDK](https://cloud.google.com/sdk/){: target="_blank" class="_"} for your operating system. - -2. Configure `gcloud` and follow all the prompts: - - ``` - $ gcloud init --console-only - ``` - - > Follow _all_ prompts, including the one to open a web browser and approve the requested authorizations. As part of the procedure you must copy and paste a code into the terminal window to authorize `gcloud`. - -3. Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl): - - ``` - $ gcloud components list - $ gcloud components install kubectl - ``` - - You can install `kubectl` with or without `gcloud`. If you have `kubectl` already installed, ensure that the current context is correct: - - ``` - $ kubectl config get-contexts - $ kubectl config use-context - ``` - -4. Configure `kubectl` to talk to your GKE cluster. - - - In GKE, click the **Connect** button at the end of the line representing your cluster. - - Copy the long command and paste to your local terminal window. Your command may differ. - - ``` - $ gcloud container clusters get-credentials clus-k8s-vote --zone europe-west2-c --project proj-k8s-vote - - Fetching cluster endpoint and auth data. - kubeconfig entry generated for clus-k8s-vote. - ``` - -5. 
Test the `kubectl` configuration: - - ``` - $ kubectl get nodes - NAME STATUS ROLES AGE VERSION - gke-clus-k8s-vote-default-pool-81bd226c-2jtp Ready 1h v1.9.2-gke.1 - gke-clus-k8s-vote-default-pool-81bd226c-mn4k Ready 1h v1.9.2-gke.1 - gke-clus-k8s-vote-default-pool-81bd226c-qjm2 Ready 1h v1.9.2-gke.1 - ``` - - If the values returned match your GKE cluster (number of nodes, age, and version), then you have successfully configured `kubectl` to manage your GKE cluster. - -You now have a GKE cluster and have configured `kubectl` to manage it. Let's look at how to convert your Docker Cloud app into a Kubernetes app. - -## Convert Docker Cloud stackfile - -**In the following sections, we discuss each service definition separately, but you should group them into one stackfile with the `.yml` extension, for example, [k8s-vote.yml](#combined-manifest-k8s-vote.yml){: target="_blank" class="_"}.** - -To prepare your applications for migration from Docker Cloud to Kubernetes, you must recreate your Docker Cloud stackfiles as Kubernetes _manifests_. Once you have each application converted, you can test and deploy. Like Docker Cloud stackfiles, Kubernetes manifests are YAML files but usually longer and more complex. - -> In Docker Cloud, to find the stackfiles for your existing applications, you can either: (1) Select **Stacks** > _your_stack_ > **Edit**, or (2) Select **Stacks** > _your_stack_ and scroll down. 
- -In the Docker Cloud stackfile, the six Docker _services_ in our `example-voting-app` stack are defined as **top-level keys**: - -``` -db: -redis: -result: -lb: -vote: -worker: -``` - -Kubernetes applications are built from objects (such as [Pods](https://kubernetes.io/docs/concepts/workloads/pods/pod/){: target="_blank" class="_"}) -and object abstractions (such as [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/){: target="_blank" class="_"} -and [Services](https://kubernetes.io/docs/concepts/services-networking/service/){: target="_blank" class="_"}). For each _Docker service_ in our voting app stack, we create one Kubernetes Deployment and one _Kubernetes Service_. Each Kubernetes Deployment spawns Pods. A Pod is a set of containers and also the smallest unit of work in Kubernetes. - -> A [Docker service](https://docs.docker.com/engine/swarm/how-swarm-mode-works/services/){: target="_blank" class="_"} is one component of an application that is generated from one image. -> A [Kubernetes service](https://kubernetes.io/docs/concepts/services-networking/service/){: target="_blank" class="_"} is a networking construct that load balances Pods behind a proxy. - -A Kubernetes Deployment defines the application "service" -- which Docker image to use and the runtime instructions (which container ports to map and the container restart policy). The Deployment is also where you define rolling updates, rollbacks, and other advanced features. - -A Kubernetes Service object is an abstraction that provides stable networking for a set of Pods. A Service is where you can register a cluster-wide DNS name and virtual IP (VIP) for accessing the Pods, and also create cloud-native load balancers. - -This diagram shows four Pods deployed as part of a single Deployment. Each Pod is labeled as “app=vote”. 
The Deployment has a label selector, “app=vote”, and this combination of labels and label selector is what allows the Deployment object to manage Pods (create, terminate, scale, update, roll back, and so on). Likewise, the Service object selects Pods on the same label (“app=vote”) which allows the service to provide a stable network abstraction (IP and DNS name) for the Pods. - -![Voting app vote Kube pods](images/votingapp-kube-pods-vote.png){:width="500px"} - -### db service - -> Consider using a hosted database service for production databases. This is something that, ideally, should not change as part of your migration away from Docker Cloud stacks. - -**Docker Cloud stackfile**: The Docker Cloud stackfile defines an image and a restart policy for the `db` service. - -``` -db: - image: 'postgres:9.4' - restart: always -``` - -**Kubernetes manifest**: The Kubernetes translation defines two object types or "kinds": a _Deployment_ and a _Service_ (separated by three dashes `---`). Each object includes an API version, metadata (labels and name), and a `spec` field for object configuration (that is, the Deployment Pods and the Service). - -``` -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - name: db - labels: - app: db -spec: - selector: - matchLabels: - app: db - template: - metadata: - labels: - app: db - spec: - containers: - - image: postgres:9.4 - name: db - restartPolicy: Always ---- -apiVersion: v1 -kind: Service -metadata: - name: db -spec: - clusterIP: None - ports: - - port: 55555 - targetPort: 0 - selector: - app: db -``` - -About the Kubernetes fields in general: - -- `apiVersion` sets the schema version for Kubernetes to use when managing the object. -- `kind` defines the object type. In this example, we only define Deployments and Services but there are many others. -- `metadata` assigns a name and set of labels to the object. -- `spec` is where we configure the object. In a Deployment, `spec` defines the Pods to deploy. 
- -It is important that **Pod labels** (`Deployment.spec.template.metadata.labels`) match both the Deployment label selector (`Deployment.spec.selector.matchLabels`) and the Service label selector (`Service.spec.selector`). This is how the Deployment object knows which Pods to manage and how the Service object knows which Pods to provide networking for. - -> Deployment and Service label selectors have different fields in the YAML file because Deployments use [set-based selectors](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#set-based-requirement){: target="_blank" class="_"} -and Services use [equality-based selectors](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#equality-based-requirement){: target="_blank" class="_"}. - -For the `db` Deployment, we define a container called `db` based on the `postgres:9.4` Docker image, and define a restart policy. All Pods created by this Deployment have the label, `app=db` and the Deployment selects on them. - -The `db` Service is a “headless” service (`clusterIP: None`). Headless services are useful when you want a stable DNS name but do not need the cluster-wide VIP. They create a stable DNS record, but instead of creating a VIP, they map the DNS name to multiple -[A records](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#a-records){: target="_blank" class="_"} -- one for each Pod associated with the Service. - -The Service’s label selector (`Service.spec.selector`) has the value, "app=db". This means the Service provides stable networking and load balancing for all Pods on the cluster labeled as “app=db”. Pods defined in the Deployment section are all labelled as "app=db". It is this mapping between the Service label selector and the Pod labels that tells the Service object which Pods to provide networking for. 
- -### redis service - -**Docker Cloud stackfile**: - -``` -redis: - image: 'redis:latest' - restart: always -``` - -**Kubernetes manifest**: - -``` -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - labels: - app: redis - name: redis -spec: - selector: - matchLabels: - app: redis - template: - metadata: - labels: - app: redis - spec: - containers: - - image: redis:alpine - name: redis - ports: - - containerPort: 6379 - restartPolicy: Always ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app: redis - name: redis -spec: - ports: - - port: 6379 - targetPort: 6379 - selector: - app: redis -``` - -Here, the Deployment object deploys a Pod from the `redis:alpine` image and sets the container port to `6379`. It also sets the `labels` for the Pods to the same value ("app=redis") as the Deployment’s label selector to tie the two together. - -The Service object defines a cluster-wide DNS mapping for the name "redis" on port 6379. This means that traffic for `tcp://redis:6379` is routed to this Service and is load balanced across all Pods on the cluster with the "app=redis" label. The Service is accessed on the cluster-wide `port` and forwards to the Pods on the `targetPort`. Again, the label-selector for the Service and the labels for the Pods are what tie the two together. - -The diagram shows traffic intended for `tcp://redis:6379` being sent to the redis Service and then load balanced across all Pods that match the Service label selector. - -![Voting app redis Kube pods](images/votingapp-kube-pods-redis.png){:width="500px"} - -### lb service - -The Docker Cloud stackfile defines an `lb` service to balance traffic to the vote service. On GKE, this is not necessary because Kubernetes lets you define a Service object with `type=LoadBalancer`, which creates a native GCP balancer to do this job. We demonstrate in the `vote` section. 
- -### vote service - -The Docker Cloud stackfile for the `vote` service defines an image, a restart policy, and a specific number of Pods (replicas: 5). It also enables the Docker Cloud `autoredeploy` feature. We can tell that it listens on port 80 because the Docker Cloud `lb` service forwards traffic to it on port 80; we can also inspect its image. - -> **Autoredeploy options**: Autoredeploy is a Docker Cloud feature that automatically updates running applications every time you push an image. It is not native to Docker CE, AKS or GKE, but you may be able to regain it with Docker Cloud auto-builds, using web-hooks from the Docker Cloud repository for your image back to the CI/CD pipeline in your dev/staging/production environment. - -**Docker Cloud stackfile**: - -``` -vote: - autoredeploy: true - image: 'docker/example-voting-app-vote:latest' - restart: always - target_num_containers: 5 -``` - -**Kubernetes manifest**: - -``` -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - labels: - app: vote - name: vote -spec: - selector: - matchLabels: - app: vote - replicas: 5 - template: - metadata: - labels: - app: vote - spec: - containers: - - image: docker/example-voting-app-vote:latest - name: vote - ports: - - containerPort: 80 - restartPolicy: Always ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app: vote - name: vote -spec: - type: LoadBalancer - ports: - - port: 80 - selector: - app: vote -``` - -Again, we ensure that both Deployment and Service objects can find the Pods with matching labels ("app=vote"). We also set the number of Pod replicas to five (`Deployment.spec.replicas`) so that it matches the `target_num_containers` from the Docker Cloud stackfile. - -We define the Service as "type=loadbalancer". This creates a native GCP load balancer with a stable, publicly routable IP for the service. It also maps port 80 so that traffic hitting port 80 is load balanced across all five Pod replicas in the cluster. 
(This is why the `lb` service from the Docker Cloud app is not needed.) - -### worker service - -Like the `vote` service, the `worker` service defines an image, a restart policy, and a specific number of Pods (replicas: 3). It also defines the Docker Cloud `autoredeploy` policy (which is not supported in GKE). - -> **Autoredeploy options**: Autoredeploy is a Docker Cloud feature that automatically updates running applications every time you push an image. It is not native to Docker CE, AKS or GKE, but you may be able to regain it with Docker Cloud auto-builds, using web-hooks from the Docker Cloud repository for your image back to the CI/CD pipeline in your dev/staging/production environment. - -**Docker Cloud stackfile**: - -``` -worker: - autoredeploy: true - image: 'docker/example-voting-app-worker:latest' - restart: always - target_num_containers: 3 -``` - -**Kubernetes manifest**: - -``` -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - labels: - app: worker - name: worker -spec: - selector: - matchLabels: - app: worker - replicas: 3 - template: - metadata: - labels: - app: worker - spec: - containers: - - image: docker/example-voting-app-worker:latest - name: worker - restartPolicy: Always ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app: worker - name: worker -spec: - clusterIP: None - ports: - - port: 55555 - targetPort: 0 - selector: - app: worker -``` - -Again, we ensure that both Deployment and Service objects can find the Pods with matching labels ("app=worker"). - -The `worker` Service (like `db`) is another ["headless" service](https://kubernetes.io/docs/concepts/services-networking/service/#headless-services){: target="_blank" class="_"} where a DNS name is created and mapped to individual -[A records](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#a-records){: target="_blank" class="_"} for each Pod rather than a cluster-wide VIP. 
- -### result service - -**Docker Cloud stackfile**: - -``` -result: - autoredeploy: true - image: 'docker/example-voting-app-result:latest' - ports: - - '80:80' - restart: always -``` - -**Kubernetes manifest**: - -``` -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - labels: - app: result - name: result -spec: - selector: - matchLabels: - app: result - template: - metadata: - labels: - app: result - spec: - containers: - - image: docker/example-voting-app-result:latest - name: result - ports: - - containerPort: 80 - restartPolicy: Always ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app: result - name: result -spec: - type: LoadBalancer - ports: - - port: 80 - selector: - app: result -``` - -The Deployment section defines the usual names, labels and container spec. The `result` Service (like the `vote` Service) defines a GCP-native load balancer to distribute external traffic to the cluster on port 80. - -### Combined manifest k8s-vote.yml - -You can combine all Deployments and Services in a single YAML file, or have individual YAML files per Docker Cloud service. The choice is yours, but it's usually easier to deploy and manage one file. - -> You should manage your Kubernetes manifest files the way you manage your application code -- checking them in and out of version control repositories etc. - -Here, we combine all the Kubernetes definitions explained above into one YAML file that we call, `k8s-vote.yml`. 
- -``` -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - name: db - labels: - app: db -spec: - selector: - matchLabels: - app: db - template: - metadata: - labels: - app: db - spec: - containers: - - image: postgres:9.4 - name: db - restartPolicy: Always ---- -apiVersion: v1 -kind: Service -metadata: - name: db -spec: - clusterIP: None - ports: - - port: 55555 - targetPort: 0 - selector: - app: db ---- -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - labels: - app: redis - name: redis -spec: - selector: - matchLabels: - app: redis - template: - metadata: - labels: - app: redis - spec: - containers: - - image: redis:alpine - name: redis - ports: - - containerPort: 6379 - restartPolicy: Always ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app: redis - name: redis -spec: - ports: - - port: 6379 - targetPort: 6379 - selector: - app: redis ---- -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - labels: - app: vote - name: vote -spec: - selector: - matchLabels: - app: vote - replicas: 5 - template: - metadata: - labels: - app: vote - spec: - containers: - - image: docker/example-voting-app-vote:latest - name: vote - ports: - - containerPort: 80 - restartPolicy: Always ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app: vote - name: vote -spec: - type: LoadBalancer - ports: - - port: 80 - selector: - app: vote ---- -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - labels: - app: worker - name: worker -spec: - selector: - matchLabels: - app: worker - replicas: 3 - template: - metadata: - labels: - app: worker - spec: - containers: - - image: docker/example-voting-app-worker:latest - name: worker - restartPolicy: Always ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app: worker - name: worker -spec: - clusterIP: None - ports: - - port: 55555 - targetPort: 0 - selector: - app: worker ---- -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - labels: - app: result - name: result -spec: - selector: - 
matchLabels: - app: result - template: - metadata: - labels: - app: result - spec: - containers: - - image: docker/example-voting-app-result:latest - name: result - ports: - - containerPort: 80 - restartPolicy: Always ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app: result - name: result -spec: - type: LoadBalancer - ports: - - port: 80 - selector: - app: result -``` - -Save the Kubernetes manifest file (as `k8s-vote.yml`) and check it into version control. - -## Test the app on GKE - -Before migrating, you should thoroughly test each new Kubernetes manifest on a GKE cluster. Healthy testing includes _deploying_ the application with the new manifest file, performing _scaling_ operations, increasing _load_, running _failure_ scenarios, and doing _updates_ and _rollbacks_. These tests are specific to each of your applications. You should also manage your manifest files in a version control system. - -The following steps explain how to deploy your app from the Kubernetes manifest file and verify that it is running. The steps are based on the sample application used throughout this guide, but the general commands should work for any app. - -> Run from a [Google Cloud Shell](https://cloud.google.com/shell/){: target="_blank" class="_"} - or local terminal with `kubectl` configured to talk to your GKE cluster. - -1. Verify that your shell/terminal is configured to talk to your GKE cluster. If the output matches your cluster, you're ready to proceed with the next steps. - - ``` - $ kubectl get nodes - NAME                                          STATUS    ROLES     AGE       VERSION - gke-clus-k8s-vote-default-pool-81bd226c-2jtp  Ready     1h        v1.9.2-gke.1 - gke-clus-k8s-vote-default-pool-81bd226c-mn4k  Ready     1h        v1.9.2-gke.1 - gke-clus-k8s-vote-default-pool-81bd226c-qjm2  Ready     1h        v1.9.2-gke.1 - - ``` - -2. Deploy your Kubernetes application to your cluster. - - The Kubernetes manifest here is `k8s-vote.yml` and lives in the system PATH. 
To use a different manifest, substitute `k8s-vote.yml` with the name of your manifest file. - - ``` - $ kubectl create -f k8s-vote.yml - - deployment "db" created - service "db" created - deployment "redis" created - service "redis" created - deployment "vote" created - service "vote" created - deployment "worker" created - service "worker" created - deployment "result" created - service "result" created - ``` - -3. Check the status of the app (both Deployments and Services): - - ``` - $ kubectl get deployments - NAME      DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE - db        1         1         1            1           43s - redis     1         1         1            1           43s - result    1         1         1            1           43s - vote      5         5         5            5           43s - worker    3         3         3            3           43s - - $ kubectl get services - NAME         TYPE           CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE - db           ClusterIP      None                         55555/TCP      48s - kubernetes   ClusterIP      10.0.0.1                     443/TCP        6h - redis        ClusterIP      10.0.168.188                 6379/TCP       48s - result       LoadBalancer   10.0.76.157                  80:31033/TCP   47s - vote         LoadBalancer   10.0.244.254                 80:31330/TCP   48s - worker       ClusterIP      None                         55555/TCP      48s - ``` - - Both `LoadBalancer` Services are `pending` because it takes a minute or two to provision a GCP load balancer. You can run `kubectl get svc --watch` to see when they are ready. Once provisioned, the output looks like this (with different external IPs): - - ``` - $ kubectl get services - - result    LoadBalancer   10.0.76.157    52.174.195.232   80:31033/TCP   7m - vote      LoadBalancer   10.0.244.254   52.174.196.199   80:31330/TCP   8m - ``` - -4. Test that the application works in your new environment. - - For example, the voting app exposes two web front-ends -- one for casting votes and the other for viewing results: - - - Copy/paste the `EXTERNAL-IP` value for the `vote` service into a browser and cast a vote. - - Copy/paste the `EXTERNAL-IP` value for the `result` service into a browser and ensure your vote registered. - -If you had a CI/CD pipeline with automated tests and deployments for your Docker Cloud stacks, you should build, test, and implement one for each application on GKE. 
- -> You can extend your Kubernetes manifest file with advanced features to perform rolling updates and simple rollbacks. But you should not do this until you have confirmed your application is working with the simple manifest file. - -## Migrate apps from Docker Cloud - -> Remember to point your application CNAMES to new service endpoints. - -How you migrate your applications is unique to your environment and applications. - -- Plan with all developers and operations teams. -- Plan with customers. -- Plan with owners of other applications that interact with your Docker Cloud app. -- Plan a rollback strategy if problems occur. - -Once your migration is in process, check that everything is working as expected. Ensure that users are hitting the new application on the GKE infrastructure and getting expected results. - -> Think before you terminate stacks and clusters -> -> Do not terminate your Docker Cloud stacks or node clusters until some time after the migration has been signed off as successful. If there are problems, you may need to roll back and try again. -{: .warning} diff --git a/docker-cloud/migration/cloud-to-swarm.md b/docker-cloud/migration/cloud-to-swarm.md deleted file mode 100644 index 32411e2e3f..0000000000 --- a/docker-cloud/migration/cloud-to-swarm.md +++ /dev/null @@ -1,504 +0,0 @@ ---- -description: How to migrate apps from Docker Cloud to Docker CE -keywords: cloud, migration, swarm, community -title: Migrate Docker Cloud stacks to Docker CE swarm ---- - -## Docker CE in swarm mode - -This page explains how to prepare your applications for migration from Docker Cloud to applications running as _service stacks_ on clusters of Docker Community Edition (CE) nodes in swarm mode. You can also use [Docker Enterprise Edition](https://www.docker.com/enterprise-edition){: target="_blank" class="_"} (Docker EE) for your target environment. 
- -At a high level, migrating your Docker Cloud applications requires that you: - -- **Build** a target environment (Docker CE in swarm mode). -- **Convert** your Docker Cloud YAML stackfiles. -- **Test** the converted YAML stackfiles in the new environment. -- **Point** your application CNAMES to new service endpoints. -- **Migrate** your applications from Docker Cloud to the new environment. - -To demonstrate, we **build** a Docker CE swarm cluster, **convert** the Docker Cloud stackfile for [example-voting-app](https://github.com/dockersamples/example-voting-app){: target="_blank" class="_"} to a service stack format, and **test** the service stack file in swarm mode to ensure that it is safe to migrate. - -> The actual process of migrating -- switching customers from your Docker Cloud applications to Docker CE applications -- will vary by application and environment. - -## Voting-app example - -The Docker Cloud stack of our example voting application is defined in [dockercloud.yml](https://raw.githubusercontent.com/dockersamples/example-voting-app/master/dockercloud.yml){: target="_blank" class="_"}. The Docker CE service stack (for our target environment) is defined in -[docker-stack.yml](https://raw.githubusercontent.com/dockersamples/example-voting-app/master/docker-stack.yml){: target="_blank" class="_"}. This document explains how `dockercloud.yml` is converted to `docker-stack.yml` so that you have the tools to do the same for your applications. 
- -In the [dockercloud.yml](https://raw.githubusercontent.com/dockersamples/example-voting-app/master/dockercloud.yml){: target="_blank" class="_"}, the voting app is defined as a stack of six microservices: - -- **vote**: Web front-end that displays voting options -- **redis**: In-memory k/v store that collects votes -- **worker**: Stores votes in database -- **db**: Persistent store for votes -- **result**: Web server that pulls and displays results from database -- **lb**: Container-based load balancer - -Votes are accepted with the `vote` service and stored in persistent backend database (`db`) with the help of services, `redis`, `worker`, and `lb`. The vote tally is displayed with the `result` service. - -![image of voting app arch](images/votingapp-arch.png){:width="500px"} - -## Migration prerequisites - -To complete the migration from Docker Cloud to Docker CE in swarm mode, you need: - -- **Docker CE nodes** (in a public cloud or on-premises) organized as a swarm cluster -- **SSH access** to the nodes in the swarm cluster - -You _may_ also need the following application-specific things: - -- **Permanent public IP addresses and hostnames** for nodes -- **External load balancers** configured to direct traffic to Docker CE nodes - -## Build target environment - -Our target environment is a cluster of Docker CE nodes configured in swarm mode. A swarm cluster comprises one or more manager and worker nodes. - -To ensure high availability (HA) of the swarm control plane in production, you should include an odd number (3+) of manager nodes, usually no more than seven. They should be spread across availability zones and connected by high-speed reliable networks. For information on building a secure HA swarm cluster for production, see [Swarm mode overview](https://docs.docker.com/engine/swarm/){: target="_blank" class="_"}. 
- -### Plan Docker CE nodes - -How you plan and build your nodes will depend on your application requirements, but you should expect to: - -- Choose a **platform** (cloud or on-premises) to host your Docker CE nodes. -- Estimate **node size and spec** (your Docker Cloud nodes can be a guide). -- Calculate the **number of nodes** for managers and workers (manager HA requires 3/5/7 managers). -- Decide **node distribution** across availability zones for high availability (HA). -- Ensure **nodes can communicate** over the network and have stable resolvable hostnames. -- Configure **load balancers**. - -Your swarm cluster of Docker CE nodes should probably resemble your existing Docker Cloud node cluster. For example, if you currently have nodes of a particular size and spec, in hosted availability zones, your target swarm cluster should probably match that. - -> In Docker Cloud, to see the configuration of each of your clusters, select **Node Clusters** > _your_cluster_. - -This diagram shows a six-node swarm cluster spread across two availability zones: - -![Swarm cluster](images/swarm-cluster.png){:width="600px"} - -### Configure swarm cluster - -Configuring a swarm cluster of Docker CE nodes involves the following high-level steps: - -1. Deploy nodes and install Docker CE. -2. Initialize swarm mode (which creates one manager). -3. _[optional] Add manager nodes (for HA)._ -4. Add worker nodes. - -In this demo, we build a swarm cluster with six nodes (3 managers/3 workers), but you can use more (or fewer, for example, 1 manager/2 workers). For manager HA, create a minimum of three manager nodes. You can add as many workers as you like. - -1. Deploy six nodes and install the latest version of [Docker CE](https://docs.docker.com/install/){: target="_blank" class="_"} on each. - -2. 
Initialize a swarm cluster from one node (that automatically becomes the first manager in the swarm): - - ``` - $ docker swarm init - ``` - - > Our swarm cluster uses self-signed certificates. To use an [external CA](https://docs.docker.com/engine/reference/commandline/swarm_init/#--external-ca){: target="_blank" class="_"}, initialize with the option, `--external-ca`. You should also build your nodes in appropriate availability zones. - - > You can use the flag, `--advertise-addr`, to define the IP and port that other nodes should use to connect to this manager. You can even specify an IP that does not exist on the node, such as one for a load balancer. See [docker swarm init](https://docs.docker.com/engine/reference/commandline/swarm_init/#--advertise-addr){: target="_blank" class="_"}. - -3. Extract and **safely store** the manager _join-token_ required to add manager nodes. - - ``` - $ docker swarm join-token manager - ``` - -4. Extract and **safely store** the worker _join-token_ required to add worker nodes. - - ``` - $ docker swarm join-token worker - ``` - - > Keep your join tokens safe and secure as bad people can join managers with them! - -5. **[optional]** If you deployed six nodes, you can add two manager nodes with the _manager_ join token. Run the command on each node designated as a manager. The join token and network details will differ in your environment. - - ``` - $ docker swarm join --token <manager-join-token> <manager-ip>:2377 - ``` - -6. Add two or more worker nodes with the _worker_ join token. Run the command on each node designated as a worker. The join token and network details will differ in your environment. - - ``` - $ docker swarm join --token <worker-join-token> <manager-ip>:2377 - ``` - -7. List the nodes from one of the managers (if you have more than one) to verify the status of the swarm. In the `MANAGER STATUS` column, manager nodes are either "Leader" or "Reachable". Worker nodes are blank. 
- - ``` - $ docker node ls - ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS - vrx...vr1 * node1 Ready Active Leader - f4b...fbd node2 Ready Active Reachable - f2v...sdo node3 Ready Active Reachable - bvb...l55 node4 Ready Active - hf2...kvc node5 Ready Active - p49...aav node6 Ready Active - ``` - -With your target environment configured, let us look at the application and convert the Docker Cloud stackfile to a service stack. - -## Convert Docker Cloud stackfile - -**In the following sections, we discuss each service definition separately, but you should group them into one stackfile with the `.yml` extension, for example, [docker-stack.yml](https://raw.githubusercontent.com/dockersamples/example-voting-app/master/docker-stack.yml){: target="_blank" class="_"}.** - -To prepare your applications for migration from Docker Cloud to Docker CE in swarm mode, you must recreate your Docker Cloud stackfiles (**source** files) as _service stack_ stackfiles (**target** files). Once you have each application defined as a service stack, you can test and deploy. - -> In Docker Cloud, to find the stackfiles for your existing applications, you can either: (1) Select **Stacks** > _your_stack_ > **Edit**, or (2) Select **Stacks** > _your_stack_ and scroll down. - -In the sections below, we step through each service in [example-voting-app](https://github.com/dockersamples/example-voting-app){: target="_blank" class="_"} and explain how the Docker Cloud source file -([dockercloud.yml](https://raw.githubusercontent.com/dockersamples/example-voting-app/master/dockercloud.yml){: target="_blank" class="_"}) is converted to the service stack target file - ([docker-stack.yml](https://raw.githubusercontent.com/dockersamples/example-voting-app/master/docker-stack.yml){: target="_blank" class="_"}). 
We provide a simple version of each service definition (one that does a like-for-like conversion with no added bells and whistles), and an extended version that demonstrates more features in swarm mode. - - - **Simple example:** Only includes the necessary features for _this_ migration to work. - - **Extended example:** Includes some advanced features that improves application management. - -> This is not a best practice guide -> -> This document shows you how to convert a Docker Cloud application to a Docker CE application and run it in a swarm. Along the way it introduces some of the advanced features offered by service stacks. It is not intended to be a best practice guide, but more of a "what's possible guide". - -### Top- and sub-level keys - -In the Docker Cloud stackfile, the six services are defined as top-level keys, whereas in the _service stack_ stackfile, they are sub-level keys. - -**Cloud source**: Services are **top-level keys**: - -``` -db: -redis: -result: -lb: -vote: -worker: -``` - -**Swarm target**: Services are **sub-level keys** (below the top-level key, `services`), and the Compose file format version is defined at the top (and is required). - -``` -version: "3.5" -services: - db: - redis: - result: - vote: - worker: -``` - -Notice that we removed the `lb` service -- this is because it is not needed in swarm mode. In Docker Cloud, the `lb` service accepts incoming traffic on port 80 and load balances across all replicas in the `vote` front-end service. In swarm mode, load balancing is built-in with a native transport-layer routing mesh called the [swarm mode service mesh](/../../engine/swarm/ingress/){: target="_blank" class="_"}. - -### db service - -> Consider using a hosted database service for production databases. This is something that, ideally, should not change as part of your migration away from Docker Cloud stacks. 
- -**Cloud source**: The Docker Cloud `db` service defines an image and a restart policy: - -``` -db: - image: 'postgres:9.4' - restart: always -``` - -**Swarm target**: This can be translated into a service stack service as follows: - -``` -db: - image: postgres:9.4 - deploy: - restart_policy: - condition: any -``` - -**Swarm target (extended)**: You can also add best practices, documentation, and advanced features, to improve application management: - -``` -db: - image: postgres:9.4 - volumes: - - db-data:/var/lib/postgresql/data - networks: - - backend - deploy: - placement: - constraints: [node.role == manager] - restart_policy: - condition: any -``` - -Let's step through some fields: - -- `volumes` places the Postgres database on a named volume called **db-data** and mounts it into the service replica at `/var/lib/postgresql/data`. This ensures that the data written by the application persists in the event that the Postgres container fails. -- `networks` adds security by putting the service on a backend network. -- `deploy.placement.constraints` forces the service to run on manager nodes. In a single-manager swarm, this ensures that the service always starts on the same node and has access to the same volume. -- `deploy.restart_policy.condition` tells Docker to restart any service replica that has stopped (no matter the exit code). - -### redis service - -**Cloud source**: The Docker Cloud `redis` service defines an image and a restart policy. - -``` -redis: - image: 'redis:latest' - restart: always -``` - -**Swarm target**: This can be translated into a service stack service as follows. - -``` -redis: - image: redis:latest - deploy: - restart_policy: - condition: any -``` - -**Swarm target (extended)**: - -``` -redis: - image: redis:alpine - ports: - - "6379" - networks: - - frontend - deploy: - replicas: 1 - restart_policy: - condition: any -``` - -Let's step through each field. - -- `image` defines the exact same image as the Docker Cloud stackfile. 
-- `ports` defines the network port that the service should operate on -- this can actually be omitted as it's the default port for redis. -- `networks` deploys the service on a network called `frontend`. -- `deploy.replicas` ensures there is always one instance (one replica) of the service running. -- `deploy.restart_policy.condition` tells Docker to restart any service replica that has stopped (no matter the exit code). - -### result service - -**Cloud source**: - -``` -result: - autoredeploy: true - image: 'docker/example-voting-app-result:latest' - ports: - - '80:80' - restart: always -``` - -**Swarm target**: - -``` -result: - image: docker/example-voting-app-result:latest - ports: - - 5001:80 - deploy: - restart_policy: - condition: any -``` - -Notice the different port mappings in the two stackfiles. The Docker Cloud application makes two services available on port 80 (using different nodes). The `result` service is published directly on port 80, and the `vote` service is published indirectly on port 80 using the `lb` service. - -In the _service stack_ stackfile, we publish these two services on different ports -- `vote` on port 5000 and `result` service on port 5001. If this is a problem for your users or application, you may be able to: - -- Publish this service on port 80 and any other service on a different port. -- Use host mode and publish both services on port 80 by using placement constraints to run them on different nodes. -- Use a frontend service, such as HAProxy, and route the traffic based on a virtual host. - -**Swarm target (extended)** - -``` -result: - image: dockersamples/examplevotingapp_result:latest - ports: - - 5001:80 - networks: - - backend - depends_on: - - db - deploy: - replicas: 1 - restart_policy: - condition: any -``` - -The extended version adds the following: - -- `networks` places all service replicas on a network called `backend`. -- `depends_on` tells Docker to start the `db` service before starting this one. 
-- `deploy.replicas` tells Docker to create a single replica for this service. -- `deploy.restart_policy.condition` tells Docker to restart any service replica that has stopped (no matter the exit code). - -### lb service - -In Docker Cloud, the `lb` service was used to proxy connections on port 80 to the `vote` service. We do not need to migrate the `lb` service because Docker CE in swarm mode has native load balancing built into its service mesh. - -If your applications are running load balancers, such as `dockercloud/haproxy`, you _may_ no longer need them when migrating to stacks on Docker CE. Be sure to test your application and consult with your Docker technical account manager for further details. - -### vote service - -The Docker Cloud `vote` service defines an image, a restart policy, service replicas. It also defines an `autoredeploy` policy which is not supported natively in service stacks. - -> **Autoredeploy options**: Autoredeploy is a Docker Cloud feature that automatically updates running applications every time you push an image. It is not native to Docker CE, AKS or GKE, but you may be able to regain it with Docker Cloud auto-builds, using web-hooks from the Docker Cloud repository for your image back to the CI/CD pipeline in your dev/staging/production environment. - -**Cloud source**: - -``` -vote: - autoredeploy: true - image: 'docker/example-voting-app-vote:latest' - restart: always - target_num_containers: 5 -``` - -**Swarm target**: - -``` -vote: - image: dockersamples/examplevotingapp_vote:latest - ports: - - 5000:80 - deploy: - replicas: 5 - restart_policy: - condition: any -``` - -Again, the Docker Cloud version of the voting application publishes both the `result` and `vote` services on port 80 (where the `vote` service is made available on port 80 with the `lb` service). 
- -Docker Swarm only allows a single service to be published on a swarm-wide port (because in this example, we are in swarm mode and using the routing mesh option for network configuration). To get around this, we publish the `vote` service on port 5000 (as we did with the `result` service on port 5001). - -> For the difference between swarm mode (with ingress networking) and host mode, see [Use swarm mode routing mesh](/../../engine/swarm/ingress/). - -**Swarm target (extended)**: - -``` -vote: - image: dockersamples/examplevotingapp_vote:latest - ports: - - 5000:80 - networks: - - frontend - depends_on: - - redis - deploy: - replicas: 5 - update_config: - parallelism: 2 - delay: 10s - restart_policy: - condition: any -``` - -About some fields: - -- `networks` places all service replicas on a network called `frontend`. -- `depends_on` tells Docker to start the `redis` service before starting the `vote` service. -- `deploy.replicas` tells Docker to create 5 replicas for the `vote` service (and we need at least 3 for the parallelism setting). -- `deploy.update_config` tells Docker how to perform rolling updates on the service. While not strictly needed, `update_config` settings are extremely helpful when doing application updates. Here, `parallelism: 2` tells swarm to update two instances of the service at a time, and `delay: 10s` tells it to wait for 10 seconds in between each set of two. -- `deploy.restart_policy.condition` tells Docker to restart any service replica that has stopped (no matter the exit code). - -### worker service - -**Cloud source**: The Docker Cloud `worker` service defines an image, a restart policy, and a number of service replicas. It also defines an `autoredeploy` policy which is not supported natively in service stacks. 
- -``` -worker: - autoredeploy: true - image: 'docker/example-voting-app-worker:latest' - restart: always - target_num_containers: 3 -``` - -**Swarm target**: - -``` -worker: - image: dockersamples/examplevotingapp_worker - deploy: - replicas: 3 - restart_policy: - condition: any -``` - -**Swarm target (extended)**: - -``` -worker: - image: dockersamples/examplevotingapp_worker - networks: - - frontend - - backend - deploy: - mode: replicated - replicas: 3 - labels: [APP=VOTING] - restart_policy: - condition: any - delay: 10s - max_attempts: 3 - window: 120s - placement: - constraints: [node.role == manager] -``` - -All of the settings mentioned here are application specific and may not be needed in your application. - -- `networks` tells Docker to attach replicas to two networks (named "frontend" and "backend") allowing them to communicate with services on either one. -- `deploy.placement.constraints` ensures that replicas for this service always start on a manager node. -- `deploy.restart_policy.condition` tells Docker to restart any service replica that has stopped (no matter the exit code). It makes 3 attempts to restart, gives each restart attempt 120 seconds to complete, and waits 10 seconds before trying again. - -## Test converted stackfile - -Before migrating, you should thoroughly test each new stackfile in a Docker CE cluster in swarm mode. Test the simple stackfile first -- that is, the stackfile that most literally mimics what you have in Docker Cloud. Once that works, start testing some of the more robust features in the extended examples. - -Healthy testing includes _deploying_ the application with the new stackfile, performing _scaling_ operations, increasing _load_, running _failure_ scenarios, and doing _updates_ and _rollbacks_. These tests are specific to each of your applications. You should also manage your manifest files in a version control system. 
- -The following steps explain how to deploy your app from the **target** Docker Swarm stackfile and verify that it is running. Perform the following from a manager node in your swarm cluster. - -1. Deploy the app from the _service stack_ stackfile you created. - - ``` - $ docker stack deploy -c example-stack.yaml example-stack - ``` - - The format of the command is `docker stack deploy -c <stackfile> <stackname>` where the name of the stack is arbitrary but should probably be meaningful. - -2. Test that the stack is running. - - ``` - $ docker stack ls - NAME SERVICES - example-stack 5 - ``` - -3. Get more details about the stack and the services running as part of it. - -4. Test that the application works in your new environment. - - For example, the voting app exposes two web front-ends -- one for casting votes and the other for viewing results. We exposed the `vote` service on port 5000, and the `result` service on port 5001. To connect to either of them, open a web browser and point it to the public IP or public hostname of any swarm node on the required port: - - - Go to <node-ip>:5000 and cast a vote. - - Go to <node-ip>:5001 and view the result of your vote. - -If you had a CI/CD pipeline with automated tests and deployments for your Docker Cloud stacks, you should build, test, and implement one for each application on Docker CE. - -## Migrate apps from Docker Cloud - -> Remember to point your application CNAMES to new service endpoints. - -How you migrate your applications is unique to your environment and applications. - -- Plan with all developers and operations teams. -- Plan with customers. -- Plan with owners of other applications that interact with your Docker Cloud app. -- Plan a rollback strategy if problems occur. - -Once your migration is in process, check that everything is working as expected. Ensure that users are hitting the new application on the Docker CE infrastructure and getting expected results. 
- -> Think before you terminate stacks and clusters -> -> Do not terminate your Docker Cloud stacks or node clusters until some time after the migration has been signed off as successful. If there are problems, you may need to roll back and try again. -{: .warning} diff --git a/docker-cloud/migration/deregister-swarms.md b/docker-cloud/migration/deregister-swarms.md deleted file mode 100644 index bc28601da2..0000000000 --- a/docker-cloud/migration/deregister-swarms.md +++ /dev/null @@ -1,131 +0,0 @@ ---- -description: How to deregister swarms on Docker Cloud -keywords: cloud, swarm, migration -title: Deregister Swarms on Docker Cloud ---- - -## Introduction - -This page explains how to deregister a Swarm cluster from Docker Cloud so that it can be managed independently. We explain how to deregister on both Amazon Web Services (AWS) and Microsoft Azure (because Docker Cloud swarms run on either AWS or Azure behind the scenes). - -You do not need to migrate or reconfigure your applications as part of this procedure. The only thing that changes is that your Swarm cluster no longer integrates with Docker services (such as Docker Cloud, Docker for Mac, or Docker for Windows). - -### Prerequisites - -To complete this procedure you need: - -- An AWS or Azure account that lets you inspect resources such as instances. - -### High-level steps - -- Verify that you can SSH to your Swarm nodes (on AWS and Azure). -- Deregister your Swarm from Docker Cloud. -- Clean up old Docker Cloud resources. - -## SSH to your Swarm - -It is vital that you can SSH to your Docker Cloud Swarm before you deregister it from Docker Cloud. - -Your Docker Cloud Swarm runs on either AWS or Azure, so to SSH to your Swarm nodes, you must know the public IP addresses or public DNS names of your nodes. The simplest way to find this information is with the native AWS or Azure tools. - -### How to SSH to AWS nodes - -1. 
Log on to the AWS console and open the **EC2 Dashboard** for the **region** that hosts your Swarm nodes. - -2. Locate your instances and note their DNS names and IPs. - - By default, AWS labels your Swarm nodes as _swarm-name_-worker or _swarm-name_-manager. For example, a Swarm called "prod-equus" in Docker Cloud, has manager and worker nodes in AWS labelled, "prod-equus-manager" and "prod-equus-worker" respectively. - - You will also have a load balancer (type=classic) that includes the name of the Swarm. It accepts Docker commands on port 2376 and balances them to the manager nodes in the Swarm (as the server proxy is only deployed on the managers). - -3. Open an SSH session to each node in the cluster. - - This example opens an SSH session to a Swarm node with: - - - Private key = “awskey.pem” - - Username = “docker” - - Public DNS name = “ec2-34-244-56-42.eu-west-1.compute.amazonaws.com” - - ``` - $ ssh -i ./awskey.pem docker@ec2-34-244-56-42.eu-west-1.compute.amazonaws.com - ``` - -Once you are certain that you are able to SSH to _all nodes_ in your Swarm, you can [deregister from Docker Cloud](#deregister-swarm-from-docker-cloud). - -> If you do not have the keys required to SSH on to your nodes, you can deploy new public keys to your nodes using [this procedure](https://github.com/docker/dockercloud-authorizedkeys/blob/master/README.md){: target="_blank" class="_"}. You should perform this operation before deregistering your Swarm from Docker Cloud. - -### How to SSH to Azure nodes - -In Azure, you can only SSH to manager nodes because worker nodes do not get public IPs and public DNS names. If you need to log on to worker nodes, you can use your manager nodes as jump hosts. - -1. Log on to the Azure portal and click **Resource groups**. - -2. Click on the resource group that contains your Swarm. The `DEPLOYMENT NAME` should match the name of your Swarm. - -3. Click into the deployment with the name of your Swarm and verify the values. 
For example, the `DOCKERCLOUDCLUSTERNAME` value under **Inputs** should exactly match the name of your Swarm as shown in Docker Cloud. - -4. Copy the value from `SSH TARGETS` under **Outputs** and paste it into a new browser tab. - - This takes you to the inbound NAT Rules for the external load balancer that provides SSH access to your Swarm. It displays a list of all of the **Swarm managers** (not workers) including public IP address (`DESTINATION`) and port (`SERVICE`) that you can use to gain SSH access. - -5. Open an SSH session to each manager in the cluster. Use public IP and port to connect. - - This example creates an SSH session with user `docker` to a swarm manager at `51.140.229.154` on port `50000` with the `azkey.pem` private key in the current directory. - - ``` - ssh -i ./azkey.pem -p 50000 docker@51.140.229.154 - ``` - - > If you do not know which private key to use, you can see the public key under `SSHPUBLICKEY` in the **Outputs** section of the Deployment. You can compare this value to the contents of public keys you have on file. - -6. Log on to your worker nodes by using your manager nodes as jump hosts. With - [SSH agent forwarding enabled](https://docs.docker.com/docker-for-azure/deploy/#connecting-to-your-linux-worker-nodes-using-ssh), SSH from the manager nodes to the workers nodes over the private network. - -Once you are certain that you are able to SSH to the manager nodes in your Swarm you can [deregister from Docker Cloud](#deregister-swarm-from-docker-cloud). - -> If you do not have the keys required to SSH on to your nodes, you can deploy new public keys to your nodes using [this procedure](https://github.com/docker/dockercloud-authorizedkeys/blob/master/README.md){: target="_blank" class="_"}. You should perform this operation before deregistering your Swarm from Docker Cloud. 
- -## Deregister swarm from Docker Cloud - -> Proceed with caution -> -> Only deregister if you know the details of your Swarm nodes (cloud provider, public DNS names, public IP address, etc.) and you have verified that you can SSH to each node with your private key. -{: .warning} - -1. Open the Docker Cloud web UI and click **Swarms**. - -2. Click the three dots to the right of the Swarm you want to deregister and select **Unregister**. - -3. Confirm the deregistration process. - -The Swarm is now deregistered from the Docker Cloud web UI and no longer is visible in other products such as Docker for Mac and Docker for Windows. - -## Clean up Docker Cloud resources - -The final step is to clean up old Docker cloud resources such as the service, network and secret. - -Docker Cloud deployed a service on your Swarm called `dockercloud-server-proxy` to proxy and load balance incoming Docker commands on port 2376 across all manager nodes. It has a network called `dockercloud-server-proxy-network` and a secret called `dockercloud-server-proxy-secret`. - -All of these should be removed: - -1. Open an SSH session to a Swarm manager _for the correct swarm!_ - -2. Remove the service: - - ``` - $ docker service rm dockercloud-server-proxy - ``` - -3. Remove the network: - - ``` - $ docker network rm dockercloud-server-proxy-network - ``` - -4. Remove the secret: - - ``` - $ docker secret rm dockercloud-server-proxy-secret - ``` - -Your Docker Swarm cluster is now deregistered from Docker Cloud and you can manage it independently. 
diff --git a/docker-cloud/migration/images/kube-cluster.png b/docker-cloud/migration/images/kube-cluster.png deleted file mode 100644 index 702f5de5ad..0000000000 Binary files a/docker-cloud/migration/images/kube-cluster.png and /dev/null differ diff --git a/docker-cloud/migration/images/kube-manifest.png b/docker-cloud/migration/images/kube-manifest.png deleted file mode 100644 index 7ac1b4ccb5..0000000000 Binary files a/docker-cloud/migration/images/kube-manifest.png and /dev/null differ diff --git a/docker-cloud/migration/images/swarm-cluster.png b/docker-cloud/migration/images/swarm-cluster.png deleted file mode 100644 index 789a103fd1..0000000000 Binary files a/docker-cloud/migration/images/swarm-cluster.png and /dev/null differ diff --git a/docker-cloud/migration/images/votingapp-arch.png b/docker-cloud/migration/images/votingapp-arch.png deleted file mode 100644 index 51f1519029..0000000000 Binary files a/docker-cloud/migration/images/votingapp-arch.png and /dev/null differ diff --git a/docker-cloud/migration/images/votingapp-architecture.png b/docker-cloud/migration/images/votingapp-architecture.png deleted file mode 100644 index b081369ccc..0000000000 Binary files a/docker-cloud/migration/images/votingapp-architecture.png and /dev/null differ diff --git a/docker-cloud/migration/images/votingapp-kube-pods-redis.png b/docker-cloud/migration/images/votingapp-kube-pods-redis.png deleted file mode 100644 index d846f159f4..0000000000 Binary files a/docker-cloud/migration/images/votingapp-kube-pods-redis.png and /dev/null differ diff --git a/docker-cloud/migration/images/votingapp-kube-pods-vote.png b/docker-cloud/migration/images/votingapp-kube-pods-vote.png deleted file mode 100644 index e7fb3d3c74..0000000000 Binary files a/docker-cloud/migration/images/votingapp-kube-pods-vote.png and /dev/null differ diff --git a/docker-cloud/migration/index.md b/docker-cloud/migration/index.md deleted file mode 100644 index 6d14fc6f8d..0000000000 --- 
a/docker-cloud/migration/index.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -description: Migrating from Docker Cloud -keywords: cloud, migration -title: Migration overview ---- - -## Introduction - -Important **Cluster and application management services in Docker Cloud are shutting down on May 21. You must migrate your applications from Docker Cloud to another platform and deregister your Swarms.** - -The Docker Cloud runtime is being discontinued. This means that you will no longer be able to manage your nodes, swarm clusters, and the applications that run on them in Docker Cloud. To protect your applications, you must migrate them to another platform, and if applicable, deregister your Swarms from Docker Cloud. The documents in this section explain how. - -- [Migrate Docker Cloud stacks to Docker CE swarm](cloud-to-swarm){: target="_blank" class="_"} -- [Migrate Docker Cloud stacks to Azure Container Service](cloud-to-kube-aks){: target="_blank" class="_"} -- [Migrate Docker Cloud stacks to Google Kubernetes Engine](cloud-to-kube-gke){: target="_blank" class="_"} -- [Deregister Swarms on Docker Cloud](deregister-swarms){: target="_blank" class="_"} -- [Kubernetes primer](kube-primer){: target="_blank" class="_"} - -## What stays the same - -**How users and external systems interact with your Docker applications**. Your Docker images, autobuilds, automated tests, and overall application functionality remain the same. For example, if your application uses a Docker image called `myorg/webfe:v3`, and publishes container port `80` to external port `80`, none of this changes. - -Docker Cloud SaaS features stay! We are _not_ removing automated builds and registry storage services. - -## What changes - -**How you manage your applications**. We are removing cluster management and the ability to deploy and manage Docker Cloud stacks. As part of the migration, you will no longer be able to: - -- Manage your nodes and clusters in Docker Cloud. 
-- Deploy and manage applications from the Docker Cloud web UI. -- Autoredeploy your applications. -- Integrate users with other parts of the Docker platform with their Docker ID. - -> **Autoredeploy options**: Autoredeploy is a Docker Cloud feature that automatically updates running applications every time you push an image. It is not native to Docker CE, AKS or GKE, but you may be able to regain it with Docker Cloud auto-builds, using web-hooks from the Docker Cloud repository for your image back to the CI/CD pipeline in your dev/staging/production environment. diff --git a/docker-cloud/migration/kube-primer.md b/docker-cloud/migration/kube-primer.md deleted file mode 100644 index 3532cb4505..0000000000 --- a/docker-cloud/migration/kube-primer.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -description: Kubernetes orchestration primer -keywords: cloud, migration, kubernetes, primer -title: Kubernetes primer ---- - -## Introduction - -Like Docker Cloud applications, Kubernetes applications are defined in YAML files and can run on public cloud infrastructure. Important Kubernetes concepts are: - -- The Kubernetes cluster -- The Kubernetes application - -## Kubernetes cluster - -A Kubernetes cluster is made up of _masters_ and _nodes_. These can be cloud instances or VMs in your data center. - -The diagram below shows a Kubernetes cluster with three masters and three nodes. - -![Kubernetes cluster](images/kube-cluster.png){:width="400px"} - -### Masters - -**Masters** run the control plane services and also issue work to nodes. They are the equivalent of _managers_ in a Docker Cloud or Docker Swarm cluster. They handle: - -- Exposing the main Kubernetes API -- The cluster store -- The scheduler -- All of the _controllers_ (such as Deployments) -- Assigning jobs to nodes - -### Nodes - -**Nodes** receive and execute work assigned by masters. They are equivalent to _workers_ in a Docker Cloud or Docker Swarm cluster. 
- -You should run all of your work on nodes and _not_ on masters. This may differ from Docker Cloud where you may have run some work on manager nodes. - -### Hosted services - -You can run a Kubernetes cluster on-premises where you manage everything yourself -- masters (control plane) and nodes. But Control plane high availability (HA) can be difficult to configure. - -Cloud providers such as [Microsoft Azure](https://azure.microsoft.com/en-us/free/){: target="_blank" class="_"}, -[Google Cloud Platform (GCP)](https://cloud.google.com/free/){: target="_blank" class="_"}, and -[Amazon Web Services (AWS)](https://aws.amazon.com/free/){: target="_blank" class="_"}, provide hosted Kubernetes services: - -- Azure Container Service (AKS) -- Google Kubernetes Engine (GKE) -- Amazon Elastic Container Service for Kubernetes (EKS) - -Each provides the Kubernetes control plane as a managed service, meaning the platform takes care of things such as control plane high availability (HA) and control plane upgrades. In fact, you have no access to the control plane (masters). - - -> The managed control plane service is usually free but worker nodes are not. - -## Kubernetes application - -A Kubernetes app is any containerized application defined in a Kubernetes manifest file. - -### Manifest - -The manifest file (usually written in YAML) tells Kubernetes everything it needs to know about the application, as well as how to deploy and manage it. For example: - -- Images and containers to run -- Network ports to publish -- How to scale the app (up or down as demand requires) -- How to perform rolling updates -- How to perform rollbacks - -### Pods and Services - -In the Docker world, the atomic unit of deployment is the _Docker container_. In the Kubernetes world, it is the _Pod_. If you already understand containers, you can think of a **[Pod](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/){: target="_blank" class="_"}** as one or more related containers. 
For the most part, Pods have a single container and are almost analogous to a container. - -A Kubernetes **[Service](https://kubernetes.io/docs/concepts/services-networking/service/){: target="_blank" class="_"}** is an object abstraction that sits in front of a set of Pods and provides a static virtual IP (VIP) address and DNS name. The main purpose of a Kubernetes Service is to provide stable networking for groups of Pods. - -Kubernetes Services can also be used to provision cloud-native load balancers and provide load balancing of requests coming in to the cluster from external sources. Examples include integration with native load balancers on AWS, Azure, and GCP. - -### Deployments - -Docker has a higher level construct called a _Docker service_ (different from a Kubernetes Service) that wraps around a container and adds things such as scalability and rolling updates. Kubernetes also has a higher level construct called a _Deployment_. A Kubernetes **[Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/){: target="_blank" class="_"}** is a "controller" that wraps around a set of Pods and adds things such as scalability, rolling updates, and simple rollbacks. - -The diagram below shows a Service object providing a DNS name and stable IP for a Deployment of 4 Pods. - -![Voting app redis Kube pods](images/votingapp-kube-pods-redis.png){:width="500px"} - -## Managing Kubernetes apps - -Docker apps are usually managed with the `docker` command line utility. Docker Cloud apps can be managed with the Docker Cloud CLI. Kubernetes apps are managed with the `kubectl` command line utility. 
- -### Common commands - -This command deploys a Docker application, named `test-app`, from a YAML configuration file called `app1.yml`: - -``` -$ docker stack deploy -c app1.yml test-app -``` - -This command deploys a Kubernetes application from a YAML manifest file called `k8s-app1.yml`: - -``` -$ kubectl create -f k8s-app.yml -``` - -Some other useful `kubectl` commands include: - -- `kubectl get` prints a short description about an object. For Deployments, run: `kubectl get deploy`. -- `kubectl describe` prints detailed information about an object. For a Deployment named "app1", run: `kubectl describe deploy app1` -- `kubectl delete` deletes a resource on the cluster. To delete a Deployment created with the `app1.yml` manifest file, run: `kubectl delete -f app1.yml`. - -### Sample manifest - -Below is a simple Kubernetes manifest file containing a Deployment and a Service. - -- The Deployment lists everything about the app, including how many Pod replicas to deploy, and the spec of the Pods to be deployed. -- The Service defines an external load balancer that listens on port 80 and load-balances traffic across all ports with the "app=vote" label. - -Everything in Kubernetes is loosely connected with labels. The three blue boxes show the **[labels and label selectors](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/){: target="_blank" class="_"}** that connect the service to the Pods, and the Pods to the Deployment. - -> Indentation is important in Kubernetes manifests, and you should indent with two spaces. 
- -![Kubernetes YAML manifest](images/kube-manifest.png){:width="650px"} diff --git a/docker-cloud/orgs.md b/docker-cloud/orgs.md deleted file mode 100644 index 408771adc7..0000000000 --- a/docker-cloud/orgs.md +++ /dev/null @@ -1,295 +0,0 @@ ---- -description: Docker Cloud for Organizations and Teams -keywords: organizations, teams, Docker Cloud, resources, permissions -title: Organizations and Teams in Docker Cloud ---- - -You can create Organizations in Docker Cloud to share repositories, and infrastructure and applications with coworkers and collaborators. - -Members of an organization can see only the teams to which they belong, and -their membership. Members of the `Owners` team can see and edit -all of the teams and all of the team membership lists. Docker Cloud users -outside an organization cannot see the Organizations or teams another user -belongs to. - -## Create an Organization - -An Organization in Docker Cloud contains Teams, and each Team contains users. -You cannot add users directly to an Organization. Organizations can also have -repositories, applications (services and containers), and infrastructure (nodes -and node clusters) associated with them. Paid features such as private -repositories and extra nodes are paid for using the billing information -associated with the Organization. - -To create an organization: - -1. Log in to Docker Cloud. -2. Select **Create Organization** from the user icon menu at the top right. -2. In the dialog that appears, enter a name for your organization. -3. Enter billing information for the organization. - - This is used for paid features used by the Organization account, including private repositories and additional nodes. - -4. Click **Save**. - - The Docker Cloud interface switches you to the new organization view. You - can return to your individual user account from the menu at the top right - corner. 
- -When you create an Organization, your user account is automatically added to the -Organization's `Owners` team, which allows you to manage the Organization. This -team must always have at least one member, and you can add other members to it -at any time. - -### Convert a user to an Organization - -Individual user accounts can be converted to organizations if needed. You can -no longer log in to the account; email addresses, linked source -repositories and collaborators are removed. Automated builds are -migrated. **Account conversion cannot be undone.** - -You need another valid Docker ID (not the account you are converting) for -the user who is the first member of the `Owners` team. All existing -automated builds are migrated to this user, and they can configure -the newly converted organization's settings to grant access to other users. - -1. Log in to Docker Cloud using the user account that you want to convert. -2. Click **Settings** in the user account menu in the top right corner. -3. Scroll down and click `Convert to organization`. -4. Read through the list of warnings and actions. -5. Enter the Docker ID of the user to be the first member of the Owners team. -6. Click **Save and Continue**. - -The UI refreshes. Log in from the Docker ID you specified as the first Owner, and then continue on to configure the organization as described below. - -#### What's next? - -Once you've created an organization: - -* Add users to [the Owners team](orgs.md#configure-the-owners-team) to help you manage the organization -* [Create teams](orgs.md#create-teams) -* [Set team permissions](orgs.md#set-team-permissions) -* Set up [linked providers](orgs.md#link-a-service-provider-to-an-organization), and [manage resources](orgs.md#manage-resources-for-an-organization) for the organization - -## Configure the Owners team - -Each organization has an `Owners` team which contains the users who manage the -organization's settings. 
If you created the organization, you are automatically -added to the `Owners` team. You can add new users to the `Owners` team and then -leave the team if you want to transfer ownership. There must always be at least -one member of the `Owners` team. - -Owners team members can: - -* create, change, and delete teams -* set and change team access permissions -* manage the organization's billing information -* configure the organization's settings (including linked services such as AWS and Github) -* view, change, create and delete repositories, services, and node clusters associated with the organization - -> **Note**: You cannot change the Owners team permission settings. Only add users to the Owners team who you are comfortable granting this level of access. - -1. While logged in to Docker Cloud, use the menu in the top right corner to switch to the organization you want to work on. -2. Click **Teams** in the lower left corner. -3. Click **owners**. -4. Click **Add user**. -5. Enter the Docker ID of a user to add. -6. Click **Create**. -6. Repeat for each user who you want to add. - -To transfer ownership of an organization, add the new owner to the `Owners` -team, then go to your Teams list and click **Leave** on the `Owners` team line. - -> **Note**: At this time, only members of the `Owners` team receive email -notifications for events (such as builds and container redeploys) in the -organization's resources. The email "notification level" setting for the -organization affects only the `Owners` team. - -## Create teams - -You can create Teams within an Organization to add users and manage access to infrastructure, applications, and repositories. - -Every organization contains an `Owners` team for users who manage the team -settings. You should create at least one team separate from the owners team so -that you can add members to your organization without giving them this level of -access. - -1. 
While logged in to Docker Cloud, switch to the organization you want to work on from the menu in the upper right corner. -2. Click **Teams** in the lower left corner of the navigation bar. -3. Click **Create** to create a new team. -4. Give the new team a name and description, and click **Create**. -4. On the screen that appears, click **Add User**. -5. Enter the Docker ID of the user and click **Create**. -6. Repeat this process for each user you want to add. - -## Set team permissions - -You can give Teams within an organization different levels of access to -resources that the organization owns. You can then assign individual users to a -Team to grant them that level of access. Team permissions are set by members of -the `Owners` team. - -> **Note**: If a user is a member of multiple teams, their access settings are conjunctive (sometimes called inclusive or additive). For example, if a user is a member of Team A that grants them `No access` to repositories, and they're also a member of Team B that grants them `Read and Write` access to repositories, the user has `Read and Write` access. - -To set or edit Team permissions: - -1. From the Team detail view, click **Permissions**. -2. Select an access level for `Runtime` resources. - Runtime resources include both infrastructure and applications. - -3. Optionally, grant the team access to one or more repositories in the **Repositories** section. - 1. Enter the name of the repository. - 2. Select an access level. - 3. Click the plus sign (`+`) icon. The change is saved immediately. - 4. Repeat this for each repository that the team needs access to. - - > **Note**: An organization can have public repositories which are visible to **all** users (including those outside the organization). Team members can view public repositories even if you have not given them `View` permission. You can use team permissions to grant write and admin access to public repositories. 
- -### Change team permissions for an individual repository - -You can also grant teams access to a repository from the repository's -**Permissions** page rather than from each team's permissions settings. You -might do this if you create repositories after you have already configured your -teams, and want to grant access to several teams at the same time. - -If the organization's repository is private, you must explicitly grant any access that your team members require. If the repository is public, all users are granted read-only access by default. - -Members of the organization's `Owners` team, and members of any team with `admin` access to the repository can change the repository's access permissions. - -To grant a team access to an organization's repository: - -1. Navigate to the organization's repository. -2. Click the **Permissions** tab. -3. Select the name of the team you want to add from the drop down menu. -5. Choose the access level the team should have. -6. Click the **plus sign** to add the selected team and permission setting. - - Your choice is saved immediately. - -7. Repeat this process for each team to which you want to grant access. - -To edit a team's permission level, select a new setting in the **Permission** drop down menu. - -To remove a team's access to the repository, click the **trashcan** icon next to the team's access permission line. - -> **Note**: If the organization's repository is _public_, team members without explicit access permissions still have read-only access to the repository. If the repository is _private_, removing a team's access completely prevents the team members from seeing the repository. - -### Docker Cloud team permission reference - -**General access levels**: - -* **No access**: no access at all. The resource is not visible to members of this team. -* **Read only**: users can view the resource and its configuration, but cannot perform actions on the resource. 
-* **Read and Write**: users can view *and change* the resource and its configuration. -* **Admin**: users can view, and edit the resource and its configuration, and can create or delete new instances of the resource. - -> **Note**: Only users who are members of the `Owners` team can create _new_ repositories. - -| Permission level | Access | -| ------------- | ------------- | -| **Swarms** (Beta)| | -| Admin | View swarms, manage swarms, add users | -| **Repositories** | | -| Read | Pull | -| Read/Write | Pull, push | -| Admin | All of the above, plus update description, create and delete | -| **Build** | | -| Read | View basic build settings and Timeline | -| Read/write | All of the above plus start, retry, or cancel build | -| Admin | All of the above, plus view and change build configuration, change build source, create and delete | -| **Nodes** | | -| Read | View | -| Read/write | View, scale, check node health | -| Admin | All of the above plus terminate, upgrade daemon, get certificate, create BYON token, update, deploy, and create | -| **Applications** | | -| Read | View, get logs, export stackfile | -| Read/write | All of the above, plus start, stop, redeploy, and scale | -| Admin | All of the above plus, open a terminal window, terminate, update, and create | - -## Machine user accounts in organizations - -Your organization might find it useful to have a dedicated account that is used for programmatic or scripted access to your organization's resources using the [Docker Cloud APIs](/apidocs/docker-cloud/). - -> **Note**: While these accounts are sometimes called "robot" accounts or "bots", these users may not be _created_ using scripts. - -To create a "robot" or machine account for your organization: - -1. Create a new Docker ID for the machine user. Verify the email address associated with the user. -2. If necessary, create a new Team for the machine user, and grant that team access to the required resources. 
- - This method is recommended because it makes it easier for administrators to - understand the machine user's access, and modify it without affecting other - users' access. - -3. Add the machine user to the new Team. - -## Modify a team - -To modify an existing team, log in to Docker Cloud and switch to your -organization, click **Teams** in the left navigation menu, then click the team -you want to modify. - -You can manage team membership from the first page that appears when you select the team. - -To change the team name or description, click **Settings**. - -To manage team permissions for runtime resources (nodes and applications) and -repositories click **Permissions**. - -## Manage resources for an Organization - -An organization can have its own resources including repositories, nodes and -node clusters, containers, services, and service stacks, just as if it was a -normal user account. - -If you're a member of the `Owners` team, you can create these resources when -logged in as the Organization, and manage which Teams can view, edit, and create -and delete each resource. - -#### Link a service provider to an Organization - -1. Log in to Docker Cloud as a member of the `Owners` team. - -2. Switch to the Organization account by selecting it from the user icon menu at the top right. - -3. Click **Cloud Settings** in the left navigation. - - From the Organization's Cloud settings page, you can [link to the organization's source code repositories](builds/link-source.md), [link to infrastructure hosts](infrastructure/index.md) such as a cloud service providers. - - The steps are the same as when you perform these actions as an individual user. - -#### Create repositories - -When a member of the `Owners` team creates a repository for an organization, -they can configure which teams within the organization can access the -repository. No access controls are configured by default on repository creation. 
-If the repository is _private_, this leaves it accessible only to members of the -`Owners` team until other teams are granted access. - -> **Tip**: Members of the `Owners` team can configure this default from the **Default privacy** section of the organization's **Cloud Settings** page. - -1. Log in to Docker Cloud as a member of the `Owners` team. - -2. Switch to the Organization account by selecting it from the user icon menu at the top right. - -3. [Create the repository](builds/repos.md#create-a-new-repository-in-docker-cloud) as usual. - -4. Once the repository has been created, navigate to it and click **Permissions**. - -5. [Grant access](#change-team-permissions-for-an-individual-repository) to any teams that require access to the repository. - -#### Manage organization settings - -From the Organization's **Cloud Settings** page you can also manage the -Organization's Plan and billing account information, notifications, and API -keys. - -#### Create organization resources - -To create resources for an Organization such as services and node clusters, log -in to Docker Cloud and switch to the Organization account. Create the -repositories, services, stacks, or node clusters as you would for any other -account. diff --git a/docker-cloud/release-notes.md b/docker-cloud/release-notes.md deleted file mode 100644 index 2a81a522ea..0000000000 --- a/docker-cloud/release-notes.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -description: Docker Cloud -keywords: Docker, cloud, release, notes -title: Docker Cloud release notes ---- - -Did you know we also have a [Release notes category](https://forums.docker.com/c/docker-cloud/release-notes) on the Docker Cloud Product forums? Now you do! - -## Docker Cloud June 2016 release notes - -In the last month we've made many small improvements to the new Docker Cloud UI, made team and organization repos from Hub visible in Docker Cloud, and enabled linking to BitBucket for automated builds. 
- -We've also added significant new features to the [automated builds](builds/automated-build.md) system, including: - -- Branch and tag selection -- Dynamic build rules (AKA regex build rules) -- Different hosted builder node sizes - -For more details, find the June post in the [release notes category](https://forums.docker.com/c/docker-cloud/release-notes), and as always, we welcome your feedback on the [Docker Cloud Product Forums](https://forums.docker.com/c/docker-cloud). - -## Docker Cloud May 2016 release notes - -In our May 2016 release, we introduced a new user interface for Docker Cloud. Try it out and share your feedback in the [Docker Cloud Product Forums](https://forums.docker.com/c/docker-cloud)! - -### Added - -**Docker Cloud Security Scanning** is now available as a beta add-on service for private repositories. - -### Fixed - -- **API docs now say CLI instead of bash** in the languages tab. You pointed out that this was confusing, so we fixed it. -- **Removed old references to Tutum** in the documentation. - -### Known issues - -- **Documentation screen captures** in some cases still reflect the Docker Cloud 1.0 user interface. This will be updated as soon as possible. - -Additional [Known issues here](docker-errors-faq.md) - -## Docker Cloud 1.0 release notes -**Tutum is now Docker Cloud**. Docker Cloud is a new service by Docker that implements all features previously offered by Tutum plus integration with Docker Hub Registry service and the common Docker ID credentials. - -The following release notes document changes since [Tutum v0.19.5](https://support.tutum.co/support/solutions/articles/5000694910-tutum-0-19-5). - - -### Added - -- **Docker Cloud is Generally Available**: all features of Docker Cloud are Generally Available with the exception of the build features which remain in beta. -- **Docker Hub Registry Integration**: all of your Docker Hub image repositories are available and accessible when you login to Docker Cloud. 
Changes you make to your repositories are reflected in both Docker Hub and Docker Cloud. -- **Autoredeploy from Docker Hub**: services that use a repository stored in the Docker Hub now have the [**autoredeploy** option](apps/auto-redeploy.md) available, which allows automatic redeployments on push without setting up webhooks. -- **Environment variable substitution on CLI**: the `docker-cloud` CLI now substitutes environment variables in stack files, [the same way Docker Compose does it](/compose/compose-file/#variable-substitution:91de898b5f5cdb090642a917d3dedf68). - - -### Changed - -- **Tutum is now Docker Cloud**: Docker Cloud is a new service by Docker that implements all features previously offered by Tutum. -- **Docker ID**: your Docker ID (formerly known as "Docker Hub account") is used to log into Docker Cloud. -- **Environment variables**: the environment variables that are automatically injected into containers that started with `TUTUM_` now start with `DOCKERCLOUD_`. -- **CLI renaming**: the `tutum` CLI has been deprecated and the new Docker Cloud CLI is now called `docker-cloud`. Login credentials are now shared between the `docker` and `docker-cli` CLIs and stored in `~/.docker/config.json`. -- **API domain**: the API domain is now `https://cloud.docker.com` for REST endpoints, and `wss://ws.cloud.docker.com` for websocket endpoints. -- **API endpoints**: the API endpoints have been relocated to a different URI scheme. [Click here for full documentation about the new endpoints](/apidocs/docker-cloud.md). -- **New Python and Go SDKs**: the new **[python-dockercloud](https://github.com/docker/python-dockercloud)** and **[go-dockercloud](https://github.com/docker/go-dockercloud)** SDKs are available to work with the new Docker Cloud APIs. -- **New HAproxy image**: the new `dockercloud/haproxy` repository can be used as a proxy/load balancer for user's applications and will automatically reconfigure itself if configured with API access via API role. 
-- **Docker Registry**: the Docker registry at `tutum.co` has been deprecated and replaced by the Docker Hub. It requires Docker Engine 1.6 or higher. Repositories are now shared between Docker Cloud and Docker Hub and will appear in both sites. -- **Agent renamed**: the `tutum-agent` has been renamed to `dockercloud-agent`. The installation script is now at `https://get.cloud.docker.com`. Its configuration file is now at `/etc/dockercloud/agent/` and logs are stored at `/var/log/dockercloud/`. -- **Deploy to Docker Cloud button**: the "Deploy to Tutum" button has been renamed to **Deploy to Docker Cloud**. [Click here to learn more](apps/deploy-to-cloud-btn.md). -- **AWS object names**: the names of the objects created by default in AWS have changed: the VPC is now called `dc-vpc` and has a CIDR of `10.78.0.0/16`, the subnets are called `dc-subnet`, the security group is now called `dc-vpc-default`, the internet gateway is now called `dc-gateway` and the route table is now called `dc-route-table`. -- **User endpoints**: the new domain used by node, service and container endpoints is now `dockerapp.io`. Endpoints now do not include the username and use short UUIDs to ensure uniqueness. -- **Community Forums**: the [Docker Cloud forums](https://forums.docker.com/c/docker-cloud) are now the recommended place to get in touch with the community. - - -### Fixed - -- **Overlay network**: we have fixed a memory limit issue on the overlay network containers that was causing containers to not attach to the overlay network under certain circumstances. -- **Scale up trigger**: we have fixed an issue where sometimes containers created by using a "scale up" trigger didn't inherit the service configuration and marked all other containers in the service with the "redeployment needed" flag. - -### Known issues - -- **Documentation screen captures** in most cases still reflect the Tutum interface and branding. We will update these and refresh the documentation as we go. 
-- **References to Tutum remain** in the documentation. We will update these and refresh the documentation as we go. diff --git a/docker-cloud/slack-integration.md b/docker-cloud/slack-integration.md deleted file mode 100644 index 9fb3f7b3a0..0000000000 --- a/docker-cloud/slack-integration.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -description: Integrate with Slack -keywords: Slack, integrate, notifications -redirect_from: -- /docker-cloud/tutorials/slack-integration/ -title: Set up Docker Cloud notifications in Slack ---- - -Docker Cloud can integrate with your **Slack** team to provide notifications about stacks, services, containers, and nodes. - -## Set up a Slack integration - -Before you begin, make sure that you are signed into the Slack team that you want to show notifications in. - -1. Log in to the Docker account that owns the builds or nodes that you want to receive notifications about. - - > **Note**: If you are setting up notifications for an organization, log in as a member of the organization's `Owners` team, then switch to the organization account to change the settings. - -2. Click **Cloud Settings** in the left hand navigation, and scroll down to the **Notifications** section. - -3. Click the plug icon next to **Slack**. - - The Docker Cloud page refreshes to show a Slack authorization screen. - -4. On the page that appears, double check that you're signed in to the correct Slack team. (If necessary sign in to the correct one.) -5. Select the channel that should receive notifications. -6. Click **Authorize**. - - Once you click **Authorize**, you should see a message in the Slack channel notifying you of the new integration. - - ![](images/slack-oauth-authorize.png) - - -Once configured, choose a notification level: - -* **Off** Do not receive any notifications. -* **Only failures** Only receive notifications about failed actions, containers that stop with a failed exit code, and nodes that become unreachable. 
-* **Everything** Receive all of the above, plus notifications about successful actions. - ![](images/slack-notification-updates.png) - -Enjoy your new Slack channel integration! - -## Edit a Slack integration - -* Click **Cloud Settings** in the lower left, scroll down to **Notifications**, and locate the **Slack** section. From here you can choose a new notification level, or remove the integration. - -* From the Slack **Notifications** section you can also change the channel that the integration posts to. Click the reload icon (two arrows) next to the Slack integration to reopen the OAuth channel selector. - -* Alternately, go to the Slack App Management page and search for "Docker Cloud". Click the result to see all of the Docker Cloud notification channels set for the Slack team. diff --git a/docker-cloud/standard/index.md b/docker-cloud/standard/index.md deleted file mode 100644 index f311891566..0000000000 --- a/docker-cloud/standard/index.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -description: non-Swarm mode Docker Cloud topics -keywords: Docker, cloud -title: Manage Nodes and Apps (Standard Mode) -notoc: true ---- - -These topics cover the traditional, pre-Swarm model for deploying and managing -nodes, services, and applications in Docker Cloud. - -* [Getting started with Docker Cloud](/docker-cloud/getting-started/index.md) - -* [Applications in Docker Cloud](/docker-cloud/apps/index.md) - -* [Cloud stack file YAML reference](/docker-cloud/apps/stack-yaml-reference.md) - -> **Note**: These workflows and references do not apply -to [Swarm Mode](/docker-cloud/cloud-swarm/index.md) Beta. diff --git a/docker-for-windows/index.md b/docker-for-windows/index.md index 4afe09d087..f1893689e7 100644 --- a/docker-for-windows/index.md +++ b/docker-for-windows/index.md @@ -541,23 +541,14 @@ See [How do I add custom CA certificates?](faqs.md#how-do-i-add-custom-ca-certif and [How do I add client certificates?](faqs.md#how-do-i-add-client-certificates) in the FAQs. 
-## Docker Store +## Docker Hub -Select **Docker Store** from the Docker for Windows menu to access the [Docker store](https://store.docker.com/) website. From there, you can log on to Docker Store and download apps. - -Docker Store is a component of the next-generation [Docker Hub](https://hub.docker.com) and the best place to find compliant, trusted +Select **Docker Hub** from the Docker for Windows menu to access the [Docker +Hub](https://hub.docker.com){: target="_blank" class="_" } website where you can +download apps. Docker Hub is the best place to find compliant, trusted commercial and free software distributed as Docker Images. -Refer to the [Docker Store documentation](/docker-store/index.md){: target="_blank" class="_" } - -## Docker Cloud - -Select **Sign in /Create Docker ID** from the Docker for Windows menu to access your [Docker Cloud](https://cloud.docker.com/){: target="_blank" clas="_" } account. Once logged in, you can access your Docker Cloud repositories directly from the Docker for Windows menu. 
- -See these [Docker Cloud topics](/docker-cloud/index.md){: target="_blank" class="_" } to learn more: - -* [Organizations and Teams in Docker Cloud](/docker-cloud/orgs/index.md){: target="_blank" class="_" } -* [Builds and Images](/docker-cloud/builds/index.md){: target="_blank" class="_" } +Refer to the [Docker Hub documentation](/docker-hub/){: target="_blank" class="_" } ## Where to go next diff --git a/docker-hub/accounts.md b/docker-hub/accounts.md index 39cbf1a8aa..11003c6f9e 100644 --- a/docker-hub/accounts.md +++ b/docker-hub/accounts.md @@ -1,27 +1,56 @@ --- -description: Using Docker Hub with your Docker ID account -keywords: Docker, docker, trusted, sign-up, registry, accounts, plans, Dockerfile, Docker Hub, docs, documentation -title: Use Docker Hub with Docker ID +title: Create a Docker Hub account +description: Creating a Docker Hub account registers you for a Docker ID +keywords: Docker Hub, registry, account, docker id, forums, success-center, support-center --- -Docker Hub uses your free [Docker ID](../docker-id/) to save your account -settings, and as your account namespace. If you don't yet have a Docker ID, you -can [register for one](../docker-id/#/register-for-a-docker-id). +**Creating an account in Docker Hub is the same as registering for a Docker ID.** +If you already have a Docker ID, you can log in to Docker Hub. -You can `search` Docker Hub and `pull` images without an account and -without signing in. However, to `push` images, leave comments, or to *star* a -repository, you need to log in using a Docker ID. +{% include register-for-docker-id.md %} -Once you have a personal Docker ID, you can also create or join -Docker Hub [Organizations and Teams](orgs.md). +Docker Hub uses your free Docker ID as your account namespace and to save your +account settings. You can also create or join Docker Hub +[organizations and teams](manage/orgs-teams/){: target="_blank" class="_"}. 
-## Upgrade your account - -Free Docker Hub accounts include one private registry. If you need more private -registries, you can [upgrade your account](https://hub.docker.com/account/billing-plans/) to a paid plan directly -from the Hub. +> Log in benefits +> +> Without a Docker ID, and without signing in, you can `search` Docker Hub and +> `pull` images. However, you must log in to `push` images, leave comments, or +> to star ![star](images/star.png){: .inline} a repository. ## Password reset process If you forget your password, or can't access your account for some reason, you -can reset your password from the [*Password Reset*](https://hub.docker.com/reset-password/) page. +can reset your password from the Settings page. + +## Email addresses + +You can associate multiple email addresses with your Docker ID, and one of these +becomes the primary address for the account. The primary address is used by +Docker to send password reset notifications and other important information, so +be sure to keep it updated. + +To add another email address to your Docker ID: + +1. In Docker Hub, click **Settings** under your username. + +2. Enter an email address in the "New Email" field. + +3. Click **Add Email** to send a verification email. + +4. Click the link in the email to verify your address. + +The new email address is not added to the account until you confirm it by +clicking the link in the verification email. This link is only good for a +limited time. To send a new verification email, click the envelope icon next to +the email address that you want to verify. + +If you have multiple verified email addresses associated with the account, you +can click **Set as primary** to change the primary email address. + +## Upgrade your account + +Free Docker Hub accounts include one private registry. 
If you need more private +registries, you can [upgrade your account](https://hub.docker.com/account/billing-plans/){: target="_blank" class="_"} +to a paid plan directly from the Hub. diff --git a/docker-hub/bitbucket.md b/docker-hub/bitbucket.md deleted file mode 100644 index af76d41e55..0000000000 --- a/docker-hub/bitbucket.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -description: Docker Hub Automated Builds using Bitbucket -keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub, docs, documentation, trusted, builds, trusted builds, automated builds, bitbucket -title: Configure automated builds with Bitbucket ---- - -If you've previously linked Docker Hub to your Bitbucket account, skip to -[Creating an Automated Build](bitbucket.md#creating-an-automated-build). - -## Link to your Bitbucket account - -To set up an Automated Build of a repository on Bitbucket, you need to -link your [Docker Hub](https://hub.docker.com/account/authorized-services/) -account to a Bitbucket account. This allows the registry to see your -Bitbucket repositories. - -To add, remove, or view your linked account, go to the **Linked Accounts & -Services** section of your Hub profile **Settings**. - -![authorized-services](images/authorized-services.png) - -Then follow the onscreen instructions to authorize and link your Bitbucket -account to Docker Hub. Once it is linked, you can create a Docker Hub -repository from which to create the Automatic Build. - -## Create an Automated Build - -You can [create an Automated Build]( -https://hub.docker.com/add/automated-build/bitbucket/) from any of your public -or private Bitbucket repositories with a `Dockerfile`. - -To get started, log in to Docker Hub and click the "Create ▼" menu item -at the top right of the screen. Then select [Create Automated -Build](https://hub.docker.com/add/automated-build/bitbucket/). - -Select the linked Bitbucket account, and then choose a repository to set up -an Automated Build for. 
- -## The Bitbucket webhook - -When you create an Automated Build in Docker Hub, a webhook is added to your -Bitbucket repository automatically. - -You can also manually add a webhook from your repository's **Settings** page. -Set the URL to `https://registry.hub.docker.com/hooks/bitbucket`, to be -triggered for repository pushes. - -![bitbucket-hooks](images/bitbucket-hook.png) diff --git a/docker-hub/build/advanced.md b/docker-hub/build/advanced.md new file mode 100644 index 0000000000..fa2e62d939 --- /dev/null +++ b/docker-hub/build/advanced.md @@ -0,0 +1,157 @@ +--- +title: Advanced options for autobuild and autotest +description: Automated builds +keywords: Docker Hub, automated, build, images +redirect_from: +- /docker-cloud/builds/advanced/ +--- + +This page explains how to customize your automated build and test processes. + +## Environment variables for building and testing + +Several utility environment variables are set by the build process and are +available during automated builds, automated tests, and while executing hooks. + +> These environment variables are only available to build and test processes. + +| Env variable | Description | +|:------------------|:--------------------------------------------------------------------| +| `SOURCE_BRANCH` | Name of the branch or the tag that is currently being tested | +| `SOURCE_COMMIT` | SHA1 hash of the commit being tested | +| `COMMIT_MSG` | Message from the commit being tested and built | +| `DOCKER_REPO` | Name of the Docker repository being built | +| `DOCKERFILE_PATH` | Dockerfile currently being built | +| `CACHE_TAG` | Tag of the Docker repository being built | +| `IMAGE_NAME` | Name and tag of Docker repo being built (`DOCKER_REPO`:`CACHE_TAG`) | + +If you are using these build environment variables in a +`docker-compose.test.yml` file for automated testing, declare them in your `sut` +service environment as shown below. + +```none +sut: + build: . 
+ command: run_tests.sh + environment: + - SOURCE_BRANCH +``` + +## Override build, test or push commands + +Docker Hub allows you to override and customize the `build`, `test` and `push` +commands during automated build and test processes using hooks. For example, you +might use a build hook to set build arguments used only during the build +process. (You can also set up [custom build phase hooks](#custom-build-phase-hooks) +to perform actions in between these commands.) + +> Use hooks with caution +> +> The contents of hook files replace the basic `docker` commands, so you must +> include a similar build, test, or push command in the hook, or your automated +> process does not complete. +{: .warning} + +To override these phases, create a folder called `hooks` in your source code +repository at the same directory level as your Dockerfile. Create a file called +`hooks/build`, `hooks/test`, or `hooks/push` and include commands that the +builder process can execute, such as `docker` and `bash` commands (prefixed +appropriately with `#!/bin/bash`). + +## Custom build phase hooks + +You can run custom commands between phases of the build process by creating +hooks. Hooks allow you to provide extra instructions to the autobuild and +autotest processes. + +Create a folder called `hooks` in your source code repository at the same +directory level as your Dockerfile. Place files that define the hooks in that +folder. Hook files can include both `docker` commands, and `bash` commands as +long as they are prefixed appropriately with `#!/bin/bash`. The builder executes +the commands in the files before and after each step. 
+ +The following hooks are available: + +* `hooks/post_checkout` +* `hooks/pre_build` +* `hooks/post_build` +* `hooks/pre_test` +* `hooks/post_test` +* `hooks/pre_push` (only used when executing a build rule or automated build) +* `hooks/post_push` (only used when executing a build rule or automated build) + +### Build hook examples + +#### Override the "build" phase to set variables + +Docker Hub allows you to define build environment variables either in the hook +files, or from the automated build UI (which you can then reference in hooks). + +In the following example, we define a build hook that uses `docker build` +arguments to set the variable `CUSTOM` based on the value of variable we defined +using the Docker Hub build settings. `$DOCKERFILE_PATH` is a variable that we +provide with the name of the Dockerfile we wish to build, and `$IMAGE_NAME` is +the name of the image being built. + +```none +docker build --build-arg CUSTOM=$VAR -f $DOCKERFILE_PATH -t $IMAGE_NAME . +``` + +> Again ... +> +> A `hooks/build` file overrides the basic [docker build](/engine/reference/commandline/build.md){: target="_blank" class="_"} +> command used by the builder, so you must include a similar build command in +> the hook or the automated build fails. + +To learn more about Docker build-time variables, see the +[docker build documentation](/engine/reference/commandline/build/#set-build-time-variables-build-arg){: target="_blank" class="_"}. + +#### Two-phase build + +If your build process requires a component that is not a dependency for your +application, you can use a pre-build hook (refers to the `hooks/pre_build` file) +to collect and compile required components. In the example below, the hook uses +a Docker container to compile a Golang binary that is required before the build. 
+ +```bash +#!/bin/bash +echo "=> Building the binary" +docker run --privileged \ + -v $(pwd):/src \ + -v /var/run/docker.sock:/var/run/docker.sock \ + centurylink/golang-builder +``` + +#### Push to multiple repos + +By default the build process pushes the image only to the repository where the build settings are configured. If you need to push the same image to multiple repositories, you can set up a `post_push` hook to add additional tags and push to more repositories. + +```none +docker tag $IMAGE_NAME $DOCKER_REPO:$SOURCE_COMMIT +docker push $DOCKER_REPO:$SOURCE_COMMIT +``` + +## Source repository and branch clones + +When Docker Hub pulls a branch from a source code repository, it performs a +shallow clone (only the tip of the specified branch). This has the advantage of +minimizing the amount of data transfer necessary from the repository and +speeding up the build because it pulls only the minimal code necessary. + +Because of this, if you need to perform a custom action that relies on a +different branch (such as a `post_push` hook), you cannot checkout that branch, +unless you do one of the following: + +* You can get a shallow checkout of the target branch by doing the following: + + ``` + git fetch origin branch:mytargetbranch --depth 1 + ``` + +* You can also "unshallow" the clone, which fetches the whole Git history (and + potentially takes a long time / moves a lot of data) by using the + `--unshallow` flag on the fetch: + + ``` + git fetch --unshallow origin + ``` diff --git a/docker-cloud/builds/automated-testing.md b/docker-hub/build/autotest.md similarity index 51% rename from docker-cloud/builds/automated-testing.md rename to docker-hub/build/autotest.md index 72bee906fe..dab23b9e90 100644 --- a/docker-cloud/builds/automated-testing.md +++ b/docker-hub/build/autotest.md @@ -1,26 +1,46 @@ --- +title: Automated repository tests description: Automated tests -keywords: Automated, testing, repository +keywords: Docker Hub, automated, testing, 
repository redirect_from: - /docker-cloud/feature-reference/automated-testing/ -title: Automated repository tests +- /docker-cloud/builds/automated-testing/ --- -[![Automated Tests with Docker Cloud](images/video-auto-tests-docker-cloud.png)](https://www.youtube.com/watch?v=KX6PD2MANRI "Automated Tests with Docker Cloud"){:target="_blank" class="_"} - -Docker Cloud can automatically test changes to your source code repositories -using containers. You can enable `Autotest` on [any Docker Cloud repository](repos.md) to run tests on each pull request to the source code -repository to create a continuous integration testing service. +Docker Hub can automatically test changes to your source code repositories using +containers. You can enable `Autotest` on any +[Docker Hub repository](../manage/repos) to run tests on each pull request to +the source code repository to create a continuous integration testing service. Enabling `Autotest` builds an image for testing purposes, but does **not** automatically push the built image to the Docker repository. If you want to push -built images to your Docker Cloud repository, enable [Automated Builds](automated-build.md). +built images to your Docker Hub repository, enable [automated builds](index). ## Set up automated test files To set up your automated tests, create a `docker-compose.test.yml` file which -defines a `sut` service that lists the tests to be run. This file has a structure -similar to the [docker-cloud.yml](/docker-cloud/apps/stack-yaml-reference/). +defines a `sut` service that lists the tests to be run. + +This file has a structure something like this: + +``` +lb: + image: dockerhub/haproxy + links: + - web + ports: + - "80:80" + roles: + - global +web: + image: dockerhub/quickstart-python + links: + - redis + target_num_containers: 4 +redis: + image: redis +``` + The `docker-compose.test.yml` file should be located in the same directory that contains the Dockerfile used to build the image. 
@@ -39,40 +59,41 @@ You can define any number of linked services in this file. The only requirement is that `sut` is defined. Its return code determines if tests passed or not. Tests **pass** if the `sut` service returns `0`, and **fail** otherwise. -> **Note**: Only the `sut` service and all other services listed in `depends_on` -are started. For instance, if you have services that poll for changes in other -services, be sure to include the polling services in the `depends_on` list to -make sure all of your services start. +> `sut` service +> +> Only the `sut` service and all other services listed in `depends_on` are +> started. For instance, if you have services that poll for changes in other +> services, be sure to include the polling services in the `depends_on` list to +> make sure all of your services start. You can define more than one `docker-compose.test.yml` file if needed. Any file that ends in `.test.yml` is used for testing, and the tests run sequentially. -You can also use [custom build -hooks](advanced.md#override-build-test-or-push-commands) to further customize +You can also use +[custom build hooks](advanced.md#override-build-test-or-push-commands) to further customize your test behavior. -> **Note**: If you enable Automated builds, they also run any tests defined -in the `test.yml` files. +> Enabling automated builds runs any tests defined in the `test.yml` files. ## Enable automated tests on a repository To enable testing on a source code repository, you must first create an -associated build-repository in Docker Cloud. Your `Autotest` settings are -configured on the same page as [automated builds](automated-build.md), however -you do not need to enable Autobuilds to use `Autotest`. Autobuild is enabled per -branch or tag, and you do not need to enable it at all. +associated build-repository in Docker Hub. 
Your `Autotest` settings are +configured on the same page as [automated builds](index), however you do not +need to enable autobuilds to use `Autotest`. Autobuild is enabled per branch or +tag, and you do not need to enable it at all. Only branches that are configured to use **Autobuild** push images to the Docker repository, regardless of the Autotest settings. -1. Log in to Docker Cloud and select **Repositories** in the left navigation. +1. Log in to Docker Hub and select **Repositories** in the left navigation. -3. Select the repository you want to enable `Autotest` on. +2. Select the repository you want to enable `Autotest` on. -4. From the repository view, click the **Builds** tab. +3. From the repository view, click the **Builds** tab. -4. Click **Configure automated builds**. +4. Click **Configure automated builds**. -5. Configure the automated build settings as explained in [Automated Builds](automated-build.md). +5. Configure the automated build settings as explained in [Automated Builds](index). At minimum you must configure: @@ -80,7 +101,7 @@ Docker repository, regardless of the Autotest settings. * the build location * at least one build rule -8. Choose your **Autotest** option. +6. Choose your **Autotest** option. The following options are available: @@ -95,12 +116,12 @@ Docker repository, regardless of the Autotest settings. pull requests to branches that match a build rule, including when the pull request originated in an external source repository. - > **Note**: For security purposes, autotest on _external pull requests_ is - limited on public repositories. Private images are not pulled and - environment variables defined in Docker Cloud ware not - available. Automated builds continue to work as usual. + > For security purposes, autotest on _external pull requests_ is limited on + > public repositories. Private images are not pulled and environment + > variables defined in Docker Hub ware not available. 
Automated builds + > continue to work as usual. -9. Click **Save** to save the settings, or click **Save and build** to save and +7. Click **Save** to save the settings, or click **Save and build** to save and run an initial test. ## Check your test results diff --git a/docker-hub/build/bitbucket.md b/docker-hub/build/bitbucket.md new file mode 100644 index 0000000000..277da71cfd --- /dev/null +++ b/docker-hub/build/bitbucket.md @@ -0,0 +1,76 @@ +--- +title: Configure automated builds with Bitbucket +description: Docker Hub Automated Builds using Bitbucket +keywords: Docker Hub, registry, builds, trusted builds, automated builds, bitbucket +redirect_from: +- /docker-hub/bitbucket/ +--- + +If you have previously linked Docker Hub to your Bitbucket account, skip to +[Build Docker images automatically](index). + +## Link to a Bitbucket user account + +1. Log in to Docker Hub with your Docker ID. + +2. In Docker Hub, select **Settings** > **Source providers**. + +3. Scroll to the **Source providers** section. + +4. Click the plug icon for the source provider you want to link. + +5. If necessary, log in to Bitbucket. + +6. On the page that appears, click **Grant access**. + +### Unlink a Bitbucket user account + +To revoke Docker Hub access to your Bitbucket account, unlink it both from Docker +Hub _and_ from your Bitbucket account. + +1. Log in to Docker Hub with your Docker ID. + +2. In Docker Hub, select **Settings** > **Source providers**. + +3. Click the plug icon next to the source provider you want to remove. + + The icon turns gray and has a slash through it when the account is disabled, + however access may not have been revoked. You can use this to _temporarily_ + disable a linked source code provider account. + +4. Go to your Bitbucket account and click the user menu icon in the top right corner. + +5. Click **Bitbucket settings**. + +6. On the page that appears, click **OAuth**. + +7. Click **Revoke** next to the Docker Hub line. 
+ +> Webhooks not automatically removed +> +> Each repository that is configured as an automated build source contains a +> webhook that notifies Docker Hub of changes in the repository. This webhook is +> not automatically removed when you revoke access to a source code provider. + +## Create an automated build + +You can [create an Automated Build](https://hub.docker.com/add/automated-build/bitbucket/){: target="_blank" class="_"} +from any of your public or private Bitbucket repositories with a `Dockerfile`. + +To get started, log in to Docker Hub and click the "Create ▼" menu item +at the top right of the screen. Then select +[Create Automated Build](https://hub.docker.com/add/automated-build/bitbucket/){: target="_blank" class="_"}. + +Select the linked Bitbucket account, and then choose a repository to set up +an Automated Build for. + +## The Bitbucket webhook + +When you create an Automated Build in Docker Hub, a webhook is added to your +Bitbucket repository automatically. + +You can also manually add a webhook from your repository's **Settings** page. +Set the URL to `https://registry.hub.docker.com/hooks/bitbucket`, to be +triggered for repository pushes. + +![bitbucket-hooks](images/bitbucket-hook.png) diff --git a/docker-hub/build/github.md b/docker-hub/build/github.md new file mode 100644 index 0000000000..a2d5c551ec --- /dev/null +++ b/docker-hub/build/github.md @@ -0,0 +1,247 @@ +--- +title: Configure automated builds from GitHub +description: Docker Hub Automated Builds with GitHub +keywords: Docker Hub, registry, builds, trusted builds, automated builds, GitHub +redirect_from: +- /docker-hub/github/ +- /docker-cloud/builds/link-source/ +- /docker-cloud/tutorials/link-source/ +--- + +If you have previously linked Docker Hub to your GitHub account, skip to +[Build Docker images automatically](index). + +## Link to a GitHub user account + +1. Log in to Docker Hub with your Docker ID. + +2. In Docker Hub, select **Settings** > **Source providers**. 
+ +3. Click the plug icon for the source provider you want to link. + +4. Review the settings for the **Docker Hub Builder** OAuth application. + + > GitHub organization owners + > + > If you are the owner of a Github organization, you might see options to + grant Docker Hub access to them from this screen. You can also individually + edit third-party access settings to grant or revoke Docker Hub access. See + [Grant access to a GitHub organization](link-source.md#grant-access-to-a-github-organization). + +5. Click **Authorize application** to save the link. + +You are now ready to create a new image! + +### Unlink a GitHub user account + +To revoke Docker Hub access to your GitHub account, unlink it both from Docker +Hub _and_ from your GitHub account. + +1. Log in to Docker Hub with your Docker ID. + +2. In Docker Hub, select **Settings** > **Source providers**. + +3. Click the plug icon next to the source provider you want to remove. + + The icon turns gray and has a slash through it when the account is disabled + but not revoked. You can use this to _temporarily_ disable a linked source + code provider account. + +4. Go to your GitHub account **Settings** page. + +5. Click **OAuth applications**. + +6. Click **Revoke** next to the Docker Hub Builder application. + +> Webhooks not automatically removed +> +> Each repository that is configured as an automated build source contains a +> webhook that notifies Docker Hub of changes in the repository. This webhook is +> not automatically removed when you revoke access to a source code provider. + +## Grant or revoke access to a GitHub organization + +If you are the owner of a Github organization you can grant or revoke Docker Hub +access to the organization's repositories. Depending on the GitHub organization +settings, you may need to be an organization owner. + +If the organization has not had specific access granted or revoked before, you +can often grant access at the same time as you link your user account. 
In this +case, a **Grant access** button appears next to the organization name in the +link accounts screen, as shown below. If this button does not appear, you must +manually grant the application's access. + +To manually grant or revoke Docker Hub access to a GitHub organization: + +1. [Link to your GitHub user account](#link-to-a-github-user-account). + +2. From your GitHub account settings, locate the **Organization settings** + section at the lower left. + +3. Click the organization to which you want to give Docker Hub access. + +4. From the Organization Profile menu, click **Third-party access**. + +5. Click the pencil icon next to Docker Hub Builder. + +6. Click **Grant access** next to the organization. + + To revoke access, click **Deny access**. + +## Auto builds and limited linked GitHub accounts. + +If you selected to link your GitHub account with only a "Limited Access" link, +then after creating your automated build, you need to either manually trigger a +Docker Hub build using the "Start a Build" button, or add the GitHub webhook +manually, as described in [GitHub Service Hooks](#github-service-hooks). This +only works for repositories under the user account, and adding an automated +build to a public GitHub organization using a "Limited Access" link is not +possible. + +## Change the GitHub user link + +If you want to remove, or change the level of linking between your GitHub +account and the Docker Hub, you need to do this in two places: + +- Remove the "Linked Account" from your Docker Hub "Settings". + +- Go to your GitHub account's Personal settings, and in the "Applications" section, "Revoke access". + +You can now re-link your account at any time. + +> Deleting GitHub account linkage +> +> If you delete the GitHub account linkage to an automated build repo, the +> previously built images are still available. 
If you later re-link to that +> GitHub account, the automated build can be started with the "Start Build" +> button; or if the webhook on the GitHub repository still exists, it is +> triggered by any subsequent commits. + +## GitHub organizations + +GitHub organizations and private repositories forked from organizations are +made available to auto build using the "Docker Hub Registry" application, which +needs to be added to the organization - and then applies to all users. + +To check, or request access, go to your GitHub user's "Setting" page, select the +"Applications" section from the left side bar, then click the "View" button for +"Docker Hub Registry". + +![Check User access to GitHub](images/gh-check-user-org-dh-app-access.png) + +The organization's administrators may need to go to the Organization's "Third +party access" screen in "Settings" to grant or deny access to the Docker Hub +Registry application. This change applies to all organization members. + +![Check Docker Hub application access to Organization](images/gh-check-admin-org-dh-app-access.png) + +More detailed access controls to specific users and GitHub repositories can be +managed using the GitHub "People and Teams" interfaces. + +## Create an automated build with GitHub + +You can [create an Automated Build](https://hub.docker.com/add/automated-build/github/){: target="_blank" class="_"} +from any of your public or private GitHub repositories that have a `Dockerfile`. + +Once you've selected the source repository, you can then configure: + +- Hub user/org namespace the repository is built to (with Docker ID or Hub organization name) +- Docker repository name the image is built to. +- Description of the repository. +- Accessibility: If you add a Private repository to a Hub user namespace, then + you can only add other users as collaborators, and those users can view and + pull all images in that repository. 
To configure more granular access + permissions, such as using teams of users or allow different users access to + different image tags, then you need to add the Private repository to a Hub + organization for which your user has Administrator privileges. +- Enable or disable rebuilding the Docker image when a commit is pushed to the + GitHub repository. + +You can also select one or more: + +- Git branch/tag. +- Repository sub-directory to use as the context. +- Docker image tag name. + +You can modify the description for the repository by clicking the "Description" +section of the repository view. The "Full Description" is over-written by the +README.md file when the next build is triggered. + +## GitHub private submodules + +If your GitHub repository contains links to private submodules, your build fails. + +Normally, the Docker Hub sets up a deploy key in your GitHub repository. +Unfortunately, GitHub only allows a repository deploy key to access a single +repository. + +To work around this, you can create a dedicated user account in GitHub and +attach the automated build's deploy key that account. This dedicated build +account can be limited to read-only access to just the repositories required to +build. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
StepScreenshotDescription
1.First, create the new account in GitHub. It should be given read-only + access to the main repository and all submodules that are needed.
2.This can be accomplished by adding the account to a read-only team in + the organization(s) where the main GitHub repository and all submodule + repositories are kept.
3.Next, remove the deploy key from the main GitHub repository. This can be done in the GitHub repository's "Deploy keys" Settings section.
4.Your automated build's deploy key is in the "Build Details" menu + under "Deploy keys".
5.In your dedicated GitHub User account, add the deploy key from your + Docker Hub Automated Build.
+ +## GitHub service hooks + +A GitHub Service hook allows GitHub to notify the Docker Hub when something has +been committed to a given git repository. + +When you create an Automated Build from a GitHub user that has full "Public and +Private" linking, a Service Hook should get automatically added to your GitHub +repository. + +If your GitHub account link to the Docker Hub is "Limited Access", then you +need to add the Service Hook manually. + +To add, confirm, or modify the service hook, log in to GitHub, then navigate to +the repository, click "Settings" (the gear), then select "Webhooks & Services". +You must have Administrator privileges on the repository to view or modify +this setting. + +The image below shows the "Docker" Service Hook. + +![github-hooks](images/github-side-hook.png) + +If you add the "Docker" service manually, make sure the "Active" checkbox is +selected and click the "Update service" button to save your changes. diff --git a/docker-hub/images/bitbucket-hook.png b/docker-hub/build/images/bitbucket-hook.png similarity index 100% rename from docker-hub/images/bitbucket-hook.png rename to docker-hub/build/images/bitbucket-hook.png diff --git a/docker-hub/images/gh-check-admin-org-dh-app-access.png b/docker-hub/build/images/gh-check-admin-org-dh-app-access.png similarity index 100% rename from docker-hub/images/gh-check-admin-org-dh-app-access.png rename to docker-hub/build/images/gh-check-admin-org-dh-app-access.png diff --git a/docker-hub/images/gh-check-user-org-dh-app-access.png b/docker-hub/build/images/gh-check-user-org-dh-app-access.png similarity index 100% rename from docker-hub/images/gh-check-user-org-dh-app-access.png rename to docker-hub/build/images/gh-check-user-org-dh-app-access.png diff --git a/docker-hub/images/gh_add_ssh_user_key.png b/docker-hub/build/images/gh_add_ssh_user_key.png similarity index 100% rename from docker-hub/images/gh_add_ssh_user_key.png rename to docker-hub/build/images/gh_add_ssh_user_key.png diff --git 
a/docker-hub/images/gh_docker-service.png b/docker-hub/build/images/gh_docker-service.png similarity index 100% rename from docker-hub/images/gh_docker-service.png rename to docker-hub/build/images/gh_docker-service.png diff --git a/docker-hub/images/gh_menu.png b/docker-hub/build/images/gh_menu.png similarity index 100% rename from docker-hub/images/gh_menu.png rename to docker-hub/build/images/gh_menu.png diff --git a/docker-hub/images/gh_org_members.png b/docker-hub/build/images/gh_org_members.png similarity index 100% rename from docker-hub/images/gh_org_members.png rename to docker-hub/build/images/gh_org_members.png diff --git a/docker-hub/images/gh_repo_deploy_key.png b/docker-hub/build/images/gh_repo_deploy_key.png similarity index 100% rename from docker-hub/images/gh_repo_deploy_key.png rename to docker-hub/build/images/gh_repo_deploy_key.png diff --git a/docker-hub/images/gh_service_hook.png b/docker-hub/build/images/gh_service_hook.png similarity index 100% rename from docker-hub/images/gh_service_hook.png rename to docker-hub/build/images/gh_service_hook.png diff --git a/docker-hub/images/gh_settings.png b/docker-hub/build/images/gh_settings.png similarity index 100% rename from docker-hub/images/gh_settings.png rename to docker-hub/build/images/gh_settings.png diff --git a/docker-hub/images/gh_team_members.png b/docker-hub/build/images/gh_team_members.png similarity index 100% rename from docker-hub/images/gh_team_members.png rename to docker-hub/build/images/gh_team_members.png diff --git a/docker-hub/images/github-side-hook.png b/docker-hub/build/images/github-side-hook.png similarity index 100% rename from docker-hub/images/github-side-hook.png rename to docker-hub/build/images/github-side-hook.png diff --git a/docker-hub/images/webhooks.png b/docker-hub/build/images/webhooks.png similarity index 100% rename from docker-hub/images/webhooks.png rename to docker-hub/build/images/webhooks.png diff --git a/docker-hub/builds.md 
b/docker-hub/build/index.md similarity index 54% rename from docker-hub/builds.md rename to docker-hub/build/index.md index 73cdcf6d8e..b6074d47f1 100644 --- a/docker-hub/builds.md +++ b/docker-hub/build/index.md @@ -1,43 +1,45 @@ --- -description: Docker Hub Automated Builds -keywords: Dockerfile, Hub, builds, trusted builds, automated builds -title: Configure automated builds on Docker Hub +title: Build Docker images automatically +description: Build images automatically from source code repos +keywords: Docker Hub, registry, builds, trusted builds, automated builds +redirect_from: +- /docker-cloud/builds/index/ +- /docker-cloud/builds/automated-build/ +- /docker-hub/builds/ --- -You can build your images automatically from a build context stored in a -repository. A *build context* is a Dockerfile and any files at a specific -location. For an automated build, the build context is a repository containing a -Dockerfile. +You can build images automatically from a build context stored in a repository. +The build context is a _repository_ with a Dockerfile and other necessary files +for the app. Automated Builds have several advantages: - * Images built in this way are built exactly as specified. - * The `Dockerfile` is available to anyone with access to your Docker Hub repository. - * Your repository is kept up-to-date with code changes automatically. +* Images built in this way are built exactly as specified. +* The `Dockerfile` is available to anyone with access to your Docker Hub repository. +* Your repository is kept up-to-date with code changes automatically. Automated Builds are supported for both public and private repositories on both -[GitHub](http://github.com) and [Bitbucket](https://bitbucket.org/). This +[GitHub](http://github.com){: target="_blank" class="_"} and +[Bitbucket](https://bitbucket.org/){: target="_blank" class="_"}. This document guides you through the process of working with automated builds. 
## Prerequisites -To use automated builds, you must have an [account on Docker Hub](accounts.md) +To use automated builds, you must have an [account on Docker Hub](../accounts) and on the hosted repository provider (GitHub or Bitbucket). If you have previously linked your Github or Bitbucket account, you must have chosen the Public and Private connection type. -To view your current connection settings, log in to Docker Hub and choose -**Profile > Settings > Linked Accounts & Services**. +To view your current connection settings, log in to Docker Hub and select +**Profile > Hub Settings > Source code providers**. ## Limitations -- Currently Docker Hub does not support Git LFS (Large File Storage). If you have - binaries in your build context that are managed by Git LFS, only the pointer - file is present in the clone made during the automated build, which is not - what you want. - - Subscribe to the [GitHub issue](https://github.com/docker/hub-feedback/issues/500) - tracking this limitation. +- Currently Docker Hub does not support + [Git LFS (Large File Storage)](https://github.com/docker/hub-feedback/issues/500){: target="_blank" class="_"}. + If you have binaries in your build context that are managed by Git LFS, only + the pointer file is present in the clone made during the automated build, + which is not what you want. - Building Windows containers is not supported. @@ -45,67 +47,43 @@ To view your current connection settings, log in to Docker Hub and choose 1. Log into Docker Hub. -2. Navigate to **Profile > Settings > Linked Accounts & Services**. +2. Navigate to **Profile > Hub Settings > Source code providers**. -3. Click the service you want to link. +3. Click the service you want to link: [GitHub](github/){: target="_blank" class="_"} + or [BitBucket](bitbucket/){: target="_blank" class="_"}. - The system prompts you to choose between Public and Private and Limited Access. 
The Public and Private connection type is required if you want to use the Automated Builds. - -4. Press **Select** under Public and Private connection type. - - The system prompts you to enter your service credentials (Bitbucket or GitHub) to login. For example, Bitbucket's prompt looks like this: - - ![Bitbucket](images/bitbucket_creds.png) - - After you grant access to your code repository, the system returns you to Docker Hub and the link is complete. - - ![Linked account](images/linked-acct.png) +4. Press **Select** under Public and Private connection type. The system prompts + you to enter your service credentials (Bitbucket or GitHub) to login. ## Create an automated build Automated build repositories rely on the integration with your code repository -To build. However, you can also push already-built images to these -repositories using the `docker push` command. - -1. Select **Create** > **Create Automated Build** (from the drop down, upper right) on [Docker Hub](https://hub.docker.com/). +to build. +1. Select **Create** > **Create Automated Build** on [Docker Hub](https://hub.docker.com/){: target="_blank" class="_"}. The system prompts you with a list of User/Organizations and code repositories. -2. Select from the User/Organizations. +2. Select from the User/Organizations. -3. Optionally, type to filter the repository list. +3. Optionally, type to filter the repository list. -4. Pick the project to build. - - The system displays the **Create Automated Build** dialog. - - ![Create dialog](images/create-dialog1.png) +4. Pick the project to build. The system displays the **Create Automated Build** dialog. The dialog assumes some defaults which you can customize. By default, Docker builds images for each branch in your repository. It assumes the Dockerfile lives at the root of your source. When it builds an image, Docker tags it with the branch name. -6. Customize the automated build by pressing the **Click here to customize** this behavior link. 
- - ![Create dialog](images/create-dialog.png) +5. Customize the automated build by pressing the **Click here to customize** this behavior link. Specify which code branches or tags to build from. You can add new configurations by clicking the + (plus sign). The dialog accepts regular expressions. - ![Create dialog](images/regex-help.png) - -9. Click **Create**. - - The system displays the home page for your AUTOMATED BUILD. - - ![Home page](images/home-page.png) +6. Click **Create**. The system displays the home page for your AUTOMATED BUILD. Within GitHub, a Docker integration appears in your repositories Settings > Webhooks & services page. - ![GitHub](images/docker-integration.png) - A similar page appears in Bitbucket if you use that code repository. Be careful to leave the Docker integration in place. Removing it causes your automated builds to stop. @@ -116,8 +94,6 @@ The first time you create a new automated build, Docker Hub builds your image. In a few minutes, you should see your new build on the image dashboard. The Build Details page shows a log of your build systems: -![Pending](images/first_pending.png) - During the build process, Docker copies the contents of your `Dockerfile` to Docker Hub. The Docker community (for public repositories) or approved team members/orgs (for private repositories) can then view the Dockerfile on your @@ -139,22 +115,19 @@ You can view the status of the builds for a specific repository by looking at the Build Details screen. If you have builds that are queued or in progress, you can click **Cancel** to end them. -![Build statuses](images/build-states-ex.png) -The statuses are: - -* **Queued**: You're in line for your image to be built. Queue time varies depending on number of concurrent builds available to you. -* **Building**: The image is building. -* **Success**: The image has been built with no issues. -* **Error**: There was an issue with your image. Click the row to go to the Builds Details screen. 
The banner at the top of the page displays the last sentence of the log file, which indicates what the error was. If you need more information, scroll to the bottom of the screen to the logs section. +| Build status | Description | +|:--------------|:--------------------------------------------------------------------------------------| +| **Queued** | The image is in line to be built. Queue time varies on number of concurrent builds. | +| **Building** | The image is building. | +| **Success** | The image has been built with no issues. | +| **Error** | There was an issue with your image. Click the row to go to the Builds Details screen. | ## Use the Build Settings page The Build Settings page allows you to manage your existing automated build configurations and add new ones. By default, when new code is merged into your -source repository, it triggers a build of your DockerHub image. - -![Default checkbox](images/merge_builds.png) +source repository, it triggers a build of your Docker Hub image. Clear the checkbox to turn this behavior off. You can use the other settings on the page to configure and build images. @@ -164,34 +137,31 @@ the page to configure and build images. At the top of the Build Dialog is a list of configured builds. You can build from a code branch or by build tag. -![Build or tag](images/build-by.png) - Docker builds everything listed whenever a push is made to the code repository. If you specify a branch or tag, you can manually build that image by pressing the Trigger. If you use a regular expression syntax (regex) to define your build branch or tag, Docker does not give you the option to manually build. To add a new build: -1. Press the + (plus sign). +1. Press the + (plus sign). -2. Choose the Type. +2. Choose the Type. You can build by a code branch or by an image tag. -3. Enter the Name of the branch or tag. +3. Enter the Name of the branch or tag. You can enter a specific value or use a regex to select multiple values. 
To see examples of regex, press the Show More link on the right of the page. - ![Regexhelp](images/regex-help.png) +4. Enter a Dockerfile location. -4. Enter a Dockerfile location. +5. Specify a Tag Name. -5. Specify a Tag Name. +6. Press **Save Changes**. -6. Press **Save Changes**. - -If you make a mistake or want to delete a build, press the - (minus sign) and then **Save Changes**. +If you make a mistake or want to delete a build, press the minus sign (`-`) and +then **Save Changes**. ## Repository links @@ -203,16 +173,14 @@ sides causes an endless build loop. To add a link: -1. Go to the Build Settings for an automated build repository. +1. Go to the Build Settings for an automated build repository. -2. In the Repository Links section, enter an image repository name. +2. In the Repository Links section, enter an image repository name. - A remote repository name should be either an official repository name such as `ubuntu` or a public repository name `namespace/repoName`. - -3. Press **Add**. - - ![Links](images/repo_links.png) + A remote repository name should be either an official repository name such + as `ubuntu` or a public repository name `namespace/repoName`. +3. Press **Add**. ## Remote Build triggers @@ -221,8 +189,6 @@ trigger in another application such as GitHub or Bitbucket. When you Activate the build trigger for an Automated Build, it supplies you with a Token and a URL. 
-![Build trigger screen](images/build-trigger.png) - You can use `curl` to trigger a build: ```bash diff --git a/docker-hub/webhooks.md b/docker-hub/build/webhooks.md similarity index 82% rename from docker-hub/webhooks.md rename to docker-hub/build/webhooks.md index 40848cf745..4b0925eb45 100644 --- a/docker-hub/webhooks.md +++ b/docker-hub/build/webhooks.md @@ -1,13 +1,18 @@ --- -description: Docker Hub Automated Builds -keywords: Docker, webhookds, hub, builds title: Webhooks for automated builds +description: Docker Hub Automated Builds +keywords: Docker Hub, webhooks, builds +redirect_from: +- /docker-hub/webhooks/ --- If you have an automated build repository in Docker Hub, you can use Webhooks to cause an action in another application in response to an event in the -repository. Webhook is a POST request sent to a defined URL which provides the service. Docker Hub webhooks fire when an image is built in, or a new tag -is added to, your automated build repository. +repository. + +Webhook is a POST request sent to a defined URL which provides the service. +Docker Hub webhooks fire when an image is built in, or a new tag is added to, +your automated build repository. Configure webhooks on `https://hub.docker.com/r///~/settings/webhooks/`. @@ -48,7 +53,3 @@ with the following payload: } } ``` - ->**Note**: If you want to test your webhook, we recommend using a tool like ->[requestb.in](http://requestb.in/). Also note, the Docker Hub server can't be ->filtered by IP address. 
diff --git a/docker-hub/cloud-store-redirects.md b/docker-hub/cloud-store-redirects.md new file mode 100644 index 0000000000..ab0c4eabe6 --- /dev/null +++ b/docker-hub/cloud-store-redirects.md @@ -0,0 +1,80 @@ +--- +title: Message about Docker Cloud shutdown +description: Docker Cloud runtime was shutdown on 21 May 2018 +keywords: Docker Cloud, runtime +notoc: true +redirect_from: +- /docker-cloud/apps/api-roles/ +- /docker-cloud/apps/auto-destroy/ +- /docker-cloud/apps/auto-redeploy/ +- /docker-cloud/apps/autorestart/ +- /docker-cloud/apps/deploy-tags/ +- /docker-cloud/apps/deploy-to-cloud-btn/ +- /docker-cloud/apps/index/ +- /docker-cloud/apps/load-balance-hello-world/ +- /docker-cloud/apps/ports/ +- /docker-cloud/apps/service-links/ +- /docker-cloud/apps/service-redeploy/ +- /docker-cloud/apps/service-scaling/ +- /docker-cloud/apps/stacks/ +- /docker-cloud/apps/stack-yaml-reference/ +- /docker-cloud/apps/triggers/ +- /docker-cloud/apps/volumes/ +- /docker-cloud/cloud-swarm/connect-to-swarm/ +- /docker-cloud/cloud-swarm/create-cloud-swarm-aws/ +- /docker-cloud/cloud-swarm/create-cloud-swarm-azure/ +- /docker-cloud/cloud-swarm/index/ +- /docker-cloud/cloud-swarm/link-aws-swarm/ +- /docker-cloud/cloud-swarm/link-azure-swarm/ +- /docker-cloud/cloud-swarm/register-swarms/ +- /docker-cloud/cloud-swarm/ssh-key-setup/ +- /docker-cloud/cloud-swarm/using-swarm-mode/ +- /docker-cloud/getting-started/connect-infra/ +- /docker-cloud/getting-started/deploy-app +- /docker-cloud/getting-started/deploy-app/10_provision_a_data_backend_for_your_service/ +- /docker-cloud/getting-started/deploy-app/11_service_stacks/ +- /docker-cloud/getting-started/deploy-app/12_data_management_with_volumes/ +- /docker-cloud/getting-started/deploy-app/1_introduction/ +- /docker-cloud/getting-started/deploy-app/2_set_up/ +- /docker-cloud/getting-started/deploy-app/3_prepare_the_app/ +- /docker-cloud/getting-started/deploy-app/4_push_to_cloud_registry/ +- 
/docker-cloud/getting-started/deploy-app/5_deploy_the_app_as_a_service/ +- /docker-cloud/getting-started/deploy-app/6_define_environment_variables/ +- /docker-cloud/getting-started/deploy-app/7_scale_the_service/ +- /docker-cloud/getting-started/deploy-app/8_view_logs/ +- /docker-cloud/getting-started/deploy-app/9_load-balance_the_service/ +- /docker-cloud/getting-started/deploy-app/index/ +- /docker-cloud/getting-started/index/ +- /docker-cloud/getting-started/intro_cloud/ +- /docker-cloud/getting-started/your_first_node/ +- /docker-cloud/getting-started/your_first_service/ +- /docker-cloud/infrastructure/byoh/ +- /docker-cloud/infrastructure/cloud-on-aws-faq/ +- /docker-cloud/infrastructure/cloud-on-packet.net-faq/ +- /docker-cloud/infrastructure/deployment-strategies/ +- /docker-cloud/infrastructure/docker-upgrade/ +- /docker-cloud/infrastructure/index/ +- /docker-cloud/infrastructure/link-aws/ +- /docker-cloud/infrastructure/link-azure/ +- /docker-cloud/infrastructure/link-do/ +- /docker-cloud/infrastructure/link-packet/ +- /docker-cloud/infrastructure/link-softlayer/ +- /docker-cloud/infrastructure/ssh-into-a-node/ +- /docker-cloud/installing-cli/ +- /docker-cloud/migration/cloud-to-kube-aks/ +- /docker-cloud/migration/cloud-to-kube-gke/ +- /docker-cloud/migration/cloud-to-swarm/ +- /docker-cloud/migration/deregister-swarms/ +- /docker-cloud/migration/index/ +- /docker-cloud/migration/kube-primer/ +- /docker-cloud/release-notes/ +- /docker-cloud/standard +- /docker-cloud/standard/index/ +--- + + +**Docker Cloud and Docker Store have been consolidated into Docker Hub.** You +can find all Docker Cloud and Docker Store services in the new and improved +[Docker Hub](index/). 
+ +![Docker Hub](images/docker-hub.png) diff --git a/docker-hub/commandline.md b/docker-hub/commandline.md new file mode 100644 index 0000000000..11933ca1b9 --- /dev/null +++ b/docker-hub/commandline.md @@ -0,0 +1,18 @@ +--- +title: Docker Hub from the commandline +description: Access Docker Hub from the commandline +keywords: Docker Hub, commands, cli, commandline, login, logout, pull, push, search +--- + +The Docker CLI provides access to Docker Hub services with these commands: + +- [docker login](/engine/reference/commandline/login.md) +- [docker logout](/engine/reference/commandline/logout.md) +- [docker pull](/engine/reference/commandline/pull.md) +- [docker push](/engine/reference/commandline/push.md) +- [docker search](/engine/reference/commandline/search.md) + + +Test the CLI commands with this command-line scenario: + +{% include docker-hub-cli-commands.md %} diff --git a/docker-hub/discover/index.md b/docker-hub/discover/index.md new file mode 100644 index 0000000000..f8030a7bb8 --- /dev/null +++ b/docker-hub/discover/index.md @@ -0,0 +1,38 @@ +--- +title: Discover and pull content +description: Search for images, discover, and pull +keywords: Docker Hub, search, discover, pull, content, images +--- + +[Docker Hub](https://hub.docker.com){: target="_blank" class="_"} users have +access to a content catalog that includes community images as well as +high-quality content from approved publishers. Explore trial, free, and +commercial products from Independent software vendors (ISV). + +## Content types + +There are four types of content in Docker Hub: + +- **Community images**: Public images published by any Docker Hub user. + +- **Official images**: Content curated by Docker that promotes best `Dockerfile` + practices and is designed for common use cases. + +- **Partner content**: Verified commercial products from publishers that includes + free products, trials, and paid products. 
+ +- **Certified content**: Docker EE compatible content that is scanned and tested and + that comes with support from the publisher and Docker. + +> Features of partner and certified content +> +> - Images are versioned. +> - Images you are using will not change under the hood. +> - Updates are sent when new versions and patches come out. +> - You can easily roll back to the previous version if necessary. + + +## Next steps + +- [Official repositories on Docker Hub](official-repos) +- [Publish content on Docker Hub](../publish) diff --git a/docker-hub/discover/official-repos.md b/docker-hub/discover/official-repos.md new file mode 100644 index 0000000000..89fde2aca7 --- /dev/null +++ b/docker-hub/discover/official-repos.md @@ -0,0 +1,116 @@ +--- +title: Official repositories on Docker Hub +description: Guidelines for official repositories on Docker Hub +keywords: Docker Hub, registry, repos, official-repos +redirect_from: +- /docker-hub/official_repos/ +--- + +Docker [official repositories](https://hub.docker.com/official/){: target="_blank" class="_"} are a curated +set of repositories hosted on Docker Hub. They are designed to: + +* Provide essential base OS repositories (for example, + [ubuntu](https://hub.docker.com/_/ubuntu/){: target="_blank" class="_"}, + [centos](https://hub.docker.com/_/centos/){: target="_blank" class="_"}) that serve as the + starting point for the majority of users. + +* Provide drop-in solutions for popular programming language runtimes, data + stores, and other services, similar to what a Platform-as-a-Service (PAAS) + would offer. + +* Exemplify [`Dockerfile` best practices](/develop/develop-images/dockerfile_best-practices/){: target="_blank" class="_"} + and provide clear documentation to serve as a reference for other `Dockerfile` + authors. + +* Ensure that security updates are applied in a timely manner. This is + particularly important as many official repositories are some of the most + popular on Docker Hub. 
+ +Docker sponsors a dedicated team that is responsible for reviewing and +publishing all content in the official repositories. This team works in +collaboration with upstream software maintainers, security experts, and the +broader Docker community. + +While it is preferable to have upstream software authors maintaining their +corresponding official repositories, this is not a strict requirement. Creating +and maintaining images for official repositories is a public process. It takes +place openly on GitHub where participation is encouraged. Anyone can provide +feedback, contribute code, suggest process changes, or even propose a new +official repository. + +## Should I use official repositories? + +New Docker users are encouraged to use official repositories in their projects. +These repositories have clear documentation, promote best practices, and are +designed for the most common use cases. Advanced users are encouraged to review +the official repositories as part of their `Dockerfile` learning process. + +A common rationale for diverging from official repositories is to optimize for +image size. For instance, many programming language stack images contain a +complete build toolchain to support installation of modules that depend on +optimized code. An advanced user could build a custom image with just the +necessary pre-compiled libraries to save space. + +A number of language stacks such as +[python](https://hub.docker.com/_/python/){: target="_blank" class="_"} and +[ruby](https://hub.docker.com/_/ruby/){: target="_blank" class="_"} have `-slim` tag variants +designed to fill the need for optimization. Even when these "slim" variants are +insufficient, it is still recommended to inherit from an official repository +base OS image to leverage the ongoing maintenance work, rather than duplicating +these efforts. + +## How do I know the official repositories are secure? 
+ +Each of the images in the official repositories are scanned using the Docker +Security Scanning service. The results of these security scans provide valuable +information about which images contain security vulnerabilities, and allow you +to choose images that align with your security standards. + +To view the Docker Security Scanning results: + +1. Log in to Docker Hub. +2. Navigate to the official repository whose security scan you want to view. +3. Click the `Tags` tab to see a list of tags and their security scan summaries. + +You can click into a tag's detail page to see more information about which +layers in the image and which components within the layer are vulnerable. +Details including a link to the official CVE report for the vulnerability appear +when you click an individual vulnerable component. + +## How can I get involved? + +All official repositories contain a **User Feedback** section in their +documentation which covers the details for that specific repository. In most +cases, the GitHub repository which contains the Dockerfiles for an Official +Repository also has an active issue tracker. General feedback and support +questions should be directed to `#docker-library` on Freenode IRC. + +## How do I create a new official repository? + +From a high level, an official repository starts out as a proposal in the form +of a set of GitHub pull requests. Detailed and objective proposal +requirements are documented in the following GitHub repositories: + +* [docker-library/official-images](https://github.com/docker-library/official-images){: target="_blank" class="_"} + +* [docker-library/docs](https://github.com/docker-library/docs){: target="_blank" class="_"} + +The official repositories team, with help from community contributors, formally +review each proposal and provide feedback to the author. This initial review +process may require a bit of back and forth before the proposal is accepted. 
+ +There are also subjective considerations during the review process. These +subjective concerns boil down to the basic question: "is this image generally +useful?" For example, the [python](https://hub.docker.com/_/python/){: target="_blank" class="_"} official +repository is "generally useful" to the large Python developer community, +whereas an obscure text adventure game written in Python last week is not. + +Once a new proposal is accepted, the author is responsible for keeping their +images up-to-date and responding to user feedback. The official repositories +team becomes responsible for publishing the images and documentation on Docker +Hub. + +Updates to the official repository follow the same pull request process, +though with less review. The official repositories team ultimately acts as a +gatekeeper for all changes, which helps mitigate the risk of quality and +security issues from being introduced. diff --git a/docker-hub/github.md b/docker-hub/github.md deleted file mode 100644 index 7f9c3674fc..0000000000 --- a/docker-hub/github.md +++ /dev/null @@ -1,203 +0,0 @@ ---- -description: Docker Hub Automated Builds with GitHub -keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub, docs, documentation, trusted, builds, trusted builds, automated builds, GitHub -title: Configure automated builds from GitHub ---- - -If you've previously linked Docker Hub to your GitHub account, -skip to [Creating an Automated Build](github.md#creating-an-automated-build). - -## Linking Docker Hub to a GitHub account - -> *Note:* -> Automated Builds currently require *read* and *write* access since -> [Docker Hub](https://hub.docker.com) needs to set up a GitHub service -> hook. We have no choice here, this is how GitHub manages permissions. -> We do guarantee nothing else is touched in your account. 
- -To set up an Automated Build of a repository on GitHub, you need to -link [Docker Hub](https://hub.docker.com/account/authorized-services/) to your -GitHub account. This allows the registry to see your GitHub repositories. - -To add, remove or view your linked account, go to the "Linked Accounts & -Services" section of your Hub profile "Settings". - -![authorized-services](images/authorized-services.png) - -When linking to GitHub, select either "Public and Private", -or "Limited Access" linking. - -![add-authorized-github-service.png](images/add-authorized-github-service.png) - -The "Public and Private" option is the easiest to use, as it grants the Docker -Hub full access to all of your repositories. GitHub also allows you to grant -access to repositories belonging to your GitHub organizations. - -If you choose "Limited Access", Docker Hub only gets permission to access your -public data and public repositories. - -Follow the onscreen instructions to authorize and link your GitHub account to -Docker Hub. Once it is linked, you can choose a source repository from -which to create the Automatic Build. - -You can review and revoke Docker Hub's access by visiting the -[GitHub User's Applications settings](https://github.com/settings/applications). - -> **Note**: If you delete the GitHub account linkage that is used for one of your -> automated build repositories, the previously built images are still available. -> If you re-link to that GitHub account later, the automated build can be started -> using the "Start Build" button on the Hub, or if the webhook on the GitHub repository -> still exists, it is triggered by any subsequent commits. - -## Auto builds and limited linked GitHub accounts. 
- -If you selected to link your GitHub account with only a "Limited Access" link, -then after creating your automated build, you need to either manually -trigger a Docker Hub build using the "Start a Build" button, or add the GitHub -webhook manually, as described in [GitHub Service -Hooks](github.md#github-service-hooks). This only works for repositories -under the user account, and adding an automated build to a public GitHub -organization using a "Limited Access" link is not possible. - -## Changing the GitHub user link - -If you want to remove, or change the level of linking between your GitHub -account and the Docker Hub, you need to do this in two places. - -First, remove the "Linked Account" from your Docker Hub "Settings". Then go to -your GitHub account's Personal settings, and in the "Applications" section, -"Revoke access". - -You can now re-link your account at any time. - -## GitHub organizations - -GitHub organizations and private repositories forked from organizations are -made available to auto build using the "Docker Hub Registry" application, which -needs to be added to the organization - and then applies to all users. - -To check, or request access, go to your GitHub user's "Setting" page, select the -"Applications" section from the left side bar, then click the "View" button for -"Docker Hub Registry". - -![Check User access to GitHub](images/gh-check-user-org-dh-app-access.png) - -The organization's administrators may need to go to the Organization's "Third -party access" screen in "Settings" to grant or deny access to the Docker Hub -Registry application. This change applies to all organization members. - -![Check Docker Hub application access to Organization](images/gh-check-admin-org-dh-app-access.png) - -More detailed access controls to specific users and GitHub repositories can be -managed using the GitHub "People and Teams" interfaces. 
- -## Creating an Automated Build - -You can [create an Automated Build]( -https://hub.docker.com/add/automated-build/github/) from any of your -public or private GitHub repositories that have a `Dockerfile`. - -Once you've selected the source repository, you can then configure: - -- The Hub user/org namespace the repository is built to - either your Docker ID name, or the name of any Hub organizations your account is in -- The Docker repository name the image is built to -- The description of the repository -- If the visibility of the Docker repository: "Public" or "Private" - You can change the accessibility options after the repository has been created. - If you add a Private repository to a Hub user namespace, then you can only add other users - as collaborators, and those users can view and pull all images in that - repository. To configure more granular access permissions, such as using teams of - users or allow different users access to different image tags, then you need - to add the Private repository to a Hub organization for which your user has Administrator - privileges. -- Enable or disable rebuilding the Docker image when a commit is pushed to the - GitHub repository. - -You can also select one or more: -- The git branch/tag, -- A repository sub-directory to use as the context, -- The Docker image tag name - -You can modify the description for the repository by clicking the "Description" section -of the repository view. -The "Full Description" is over-written by the README.md file when the -next build is triggered. - -## GitHub private submodules - -If your GitHub repository contains links to private submodules, your build fails. - -Normally, the Docker Hub sets up a deploy key in your GitHub repository. -Unfortunately, GitHub only allows a repository deploy key to access a single -repository. - -To work around this, you can create a dedicated user account in GitHub and -attach the automated build's deploy key that account. 
This dedicated build -account can be limited to read-only access to just the repositories required to -build. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
StepScreenshotDescription
1.First, create the new account in GitHub. It should be given read-only - access to the main repository and all submodules that are needed.
2.This can be accomplished by adding the account to a read-only team in - the organization(s) where the main GitHub repository and all submodule - repositories are kept.
3.Next, remove the deploy key from the main GitHub repository. This can be done in the GitHub repository's "Deploy keys" Settings section.
4.Your automated build's deploy key is in the "Build Details" menu - under "Deploy keys".
5.In your dedicated GitHub User account, add the deploy key from your - Docker Hub Automated Build.
- -## GitHub service hooks - -A GitHub Service hook allows GitHub to notify the Docker Hub when something has -been committed to a given git repository. - -When you create an Automated Build from a GitHub user that has full "Public and -Private" linking, a Service Hook should get automatically added to your GitHub -repository. - -If your GitHub account link to the Docker Hub is "Limited Access", then you -need to add the Service Hook manually. - -To add, confirm, or modify the service hook, log in to GitHub, then navigate to -the repository, click "Settings" (the gear), then select "Webhooks & Services". -You must have Administrator privileges on the repository to view or modify -this setting. - -The image below shows the "Docker" Service Hook. - -![bitbucket-hooks](images/github-side-hook.png) - -If you add the "Docker" service manually, make sure the "Active" checkbox is -selected and click the "Update service" button to save your changes. diff --git a/docker-hub/images/add-authorized-github-service.png b/docker-hub/images/add-authorized-github-service.png deleted file mode 100644 index 581ab8fb1e..0000000000 Binary files a/docker-hub/images/add-authorized-github-service.png and /dev/null differ diff --git a/docker-hub/images/authorized-services.png b/docker-hub/images/authorized-services.png deleted file mode 100644 index 4e2df0b454..0000000000 Binary files a/docker-hub/images/authorized-services.png and /dev/null differ diff --git a/docker-hub/images/bitbucket_creds.png b/docker-hub/images/bitbucket_creds.png deleted file mode 100644 index 96f9ea566b..0000000000 Binary files a/docker-hub/images/bitbucket_creds.png and /dev/null differ diff --git a/docker-hub/images/build-by.png b/docker-hub/images/build-by.png deleted file mode 100644 index d2d7f43f02..0000000000 Binary files a/docker-hub/images/build-by.png and /dev/null differ diff --git a/docker-hub/images/build-states-ex.png b/docker-hub/images/build-states-ex.png deleted file mode 100644 index 
3d1eea2b6f..0000000000 Binary files a/docker-hub/images/build-states-ex.png and /dev/null differ diff --git a/docker-hub/images/build-trigger.png b/docker-hub/images/build-trigger.png deleted file mode 100644 index 7e6a2cf747..0000000000 Binary files a/docker-hub/images/build-trigger.png and /dev/null differ diff --git a/docker-hub/images/busybox-image-tags.png b/docker-hub/images/busybox-image-tags.png deleted file mode 100644 index c70c17ccd1..0000000000 Binary files a/docker-hub/images/busybox-image-tags.png and /dev/null differ diff --git a/docker-hub/images/create-dialog.png b/docker-hub/images/create-dialog.png deleted file mode 100644 index b565af0b56..0000000000 Binary files a/docker-hub/images/create-dialog.png and /dev/null differ diff --git a/docker-hub/images/create-dialog1.png b/docker-hub/images/create-dialog1.png deleted file mode 100644 index 551e1085ce..0000000000 Binary files a/docker-hub/images/create-dialog1.png and /dev/null differ diff --git a/docker-hub/images/dashboard.png b/docker-hub/images/dashboard.png deleted file mode 100644 index eb7fdc6661..0000000000 Binary files a/docker-hub/images/dashboard.png and /dev/null differ diff --git a/docker-hub/images/deploy_key.png b/docker-hub/images/deploy_key.png deleted file mode 100644 index 57b02a5915..0000000000 Binary files a/docker-hub/images/deploy_key.png and /dev/null differ diff --git a/docker-hub/images/docker-hub.png b/docker-hub/images/docker-hub.png new file mode 100644 index 0000000000..c36518a450 Binary files /dev/null and b/docker-hub/images/docker-hub.png differ diff --git a/docker-hub/images/docker-integration.png b/docker-hub/images/docker-integration.png deleted file mode 100644 index e8991d48d8..0000000000 Binary files a/docker-hub/images/docker-integration.png and /dev/null differ diff --git a/docker-hub/images/first_pending.png b/docker-hub/images/first_pending.png deleted file mode 100644 index e85e933292..0000000000 Binary files a/docker-hub/images/first_pending.png and 
/dev/null differ diff --git a/docker-hub/images/getting-started.png b/docker-hub/images/getting-started.png deleted file mode 100644 index 3675c7beb1..0000000000 Binary files a/docker-hub/images/getting-started.png and /dev/null differ diff --git a/docker-hub/images/groups.png b/docker-hub/images/groups.png deleted file mode 100644 index 066703f84b..0000000000 Binary files a/docker-hub/images/groups.png and /dev/null differ diff --git a/docker-hub/images/home-page.png b/docker-hub/images/home-page.png deleted file mode 100644 index 3ead3a8bf2..0000000000 Binary files a/docker-hub/images/home-page.png and /dev/null differ diff --git a/docker-hub/images/hub.png b/docker-hub/images/hub.png deleted file mode 100644 index da50dcf51a..0000000000 Binary files a/docker-hub/images/hub.png and /dev/null differ diff --git a/docker-hub/images/invite.png b/docker-hub/images/invite.png deleted file mode 100644 index 1438bddb1f..0000000000 Binary files a/docker-hub/images/invite.png and /dev/null differ diff --git a/docker-hub/images/linked-acct.png b/docker-hub/images/linked-acct.png deleted file mode 100644 index 2cffac3430..0000000000 Binary files a/docker-hub/images/linked-acct.png and /dev/null differ diff --git a/docker-hub/images/login-web.png b/docker-hub/images/login-web.png deleted file mode 100644 index 518a9bbb9a..0000000000 Binary files a/docker-hub/images/login-web.png and /dev/null differ diff --git a/docker-hub/images/merge_builds.png b/docker-hub/images/merge_builds.png deleted file mode 100644 index e4e98dcbd4..0000000000 Binary files a/docker-hub/images/merge_builds.png and /dev/null differ diff --git a/docker-hub/images/org-repo-collaborators.png b/docker-hub/images/org-repo-collaborators.png deleted file mode 100644 index 2dc8b82b8d..0000000000 Binary files a/docker-hub/images/org-repo-collaborators.png and /dev/null differ diff --git a/docker-hub/images/orgs.png b/docker-hub/images/orgs.png deleted file mode 100644 index ab6c6b00a7..0000000000 Binary files 
a/docker-hub/images/orgs.png and /dev/null differ diff --git a/docker-hub/images/plus-carrot.png b/docker-hub/images/plus-carrot.png deleted file mode 100644 index c78cd06e93..0000000000 Binary files a/docker-hub/images/plus-carrot.png and /dev/null differ diff --git a/docker-hub/images/prompt.png b/docker-hub/images/prompt.png deleted file mode 100644 index 63ed7d7c9f..0000000000 Binary files a/docker-hub/images/prompt.png and /dev/null differ diff --git a/docker-hub/images/regex-help.png b/docker-hub/images/regex-help.png deleted file mode 100644 index cf005e85d2..0000000000 Binary files a/docker-hub/images/regex-help.png and /dev/null differ diff --git a/docker-hub/images/register-web.png b/docker-hub/images/register-web.png deleted file mode 100644 index 1a4022f1df..0000000000 Binary files a/docker-hub/images/register-web.png and /dev/null differ diff --git a/docker-hub/images/repo_links.png b/docker-hub/images/repo_links.png deleted file mode 100644 index 7f14de06e4..0000000000 Binary files a/docker-hub/images/repo_links.png and /dev/null differ diff --git a/docker-hub/images/repos.png b/docker-hub/images/repos.png deleted file mode 100644 index da50dcf51a..0000000000 Binary files a/docker-hub/images/repos.png and /dev/null differ diff --git a/docker-hub/images/scan-drilldown.gif b/docker-hub/images/scan-drilldown.gif deleted file mode 100644 index e74acc162e..0000000000 Binary files a/docker-hub/images/scan-drilldown.gif and /dev/null differ diff --git a/docker-hub/images/scan-results.png b/docker-hub/images/scan-results.png deleted file mode 100644 index db4825da8b..0000000000 Binary files a/docker-hub/images/scan-results.png and /dev/null differ diff --git a/docker-hub/images/scan-tags.png b/docker-hub/images/scan-tags.png deleted file mode 100644 index 175ff194c6..0000000000 Binary files a/docker-hub/images/scan-tags.png and /dev/null differ diff --git a/docker-hub/images/star.png b/docker-hub/images/star.png new file mode 100644 index 
0000000000..7228949969 Binary files /dev/null and b/docker-hub/images/star.png differ diff --git a/docker-hub/index.md b/docker-hub/index.md index c447623ca0..e72d304a94 100644 --- a/docker-hub/index.md +++ b/docker-hub/index.md @@ -1,107 +1,58 @@ --- +title: Docker Hub overview description: Docker Hub overview -keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub, docs, documentation, accounts, organizations, repositories, groups, teams +keywords: Docker Hub, docker id, autobuilds, builds, content, discovery, groups, images, organizations, publish, registry, repos, repositories, saas, teams, partners, vendors redirect_from: -- /docker-hub/overview/ -title: Overview of Docker Hub +- /docker-cloud/ +- /docker-store/ --- -[Docker Hub](https://hub.docker.com) is a cloud-based registry service which -allows you to link to code repositories, build your images and test them, stores -manually pushed images, and links to [Docker Cloud](/docker-cloud/) so you can -deploy images to your hosts. It provides a centralized resource for container -image discovery, distribution and change management, -[user and team collaboration](/docker-hub/orgs.md), and workflow automation -throughout the development pipeline. +[Docker Hub](https://hub.docker.com){: target="_blank" class="_"} is a hosted +registry service where you can create repositories and store Docker images that +you build and push, or by linking to your source code and building in Docker Hub +itself. -Log in to Docker Hub and Docker Cloud using -[your free Docker ID](/docker-hub/accounts.md). +Docker Hub features include: -![Getting started with Docker Hub](/docker-hub/images/getting-started.png) +- **Discover images**: Search, explore, and pull community, official, and partner content. +- **Manage repositories**: Create repos and control access with user orgs and teams. +- **Build images**: Automatically build from source code and configure webhook triggers. 
+- **Publish images**: Push and publish your community, official, and partner content. -Docker Hub provides the following major features: +Log in to Docker Hub with [your free Docker ID](accounts){: target="_blank" class="_"}. -* [Image Repositories](/docker-hub/repos.md): Find and pull images from - community and official libraries, and manage, push to, and pull from private - image libraries to which you have access. -* [Automated Builds](/docker-hub/builds.md): Automatically create new images - when you make changes to a source code repository. -* [Webhooks](/docker-hub/webhooks.md): A feature of Automated Builds, Webhooks - let you trigger actions after a successful push to a repository. -* [Organizations](/docker-hub/orgs.md): Create work groups to manage access to - image repositories. -* GitHub and Bitbucket Integration: Add the Hub and your Docker Images to your - current workflows. +![Docker Hub](images/docker-hub.png){:width="600px"}. +## Discover and pull content -## Create a Docker ID +Explore and pull Docker images from community and official repositories. You can +search the Docker Hub website or run `docker search` at the commandline. -To explore Docker Hub, you need to create an account by following the -directions in [Your Docker ID](/docker-hub/accounts.md). +For details see, [Discover and pull content](discover). -> **Note**: You can search for and pull Docker images from Hub without logging -> in, however to push images you must log in. +## Manage teams and organizations -Your Docker ID gives you one private Docker Hub repository for free. If you need -more private repositories, you can upgrade from your free account to a paid -plan. To learn more, log in to Docker Hub and go to [Billing & -Plans](https://hub.docker.com/account/billing-plans/), in the Settings menu. +Configure teams and organizations to share (and limit) access to your image +repositories. 
-### Explore repositories +You can create public repositories which can be accessed by all Hub users, +or you can create private repositories with limited access that you control. -You can find public repositories and images from Docker Hub in two ways. You can -"Search" from the Docker Hub website, or you can use the Docker command line -tool to run the `docker search` command. For example if you were looking for an -ubuntu image, you might run the following command line search: +For details see, [Manage repositories](manage). -``` - $ docker search ubuntu -``` +## Build images automatically -Both methods list the available public repositories on Docker Hub which match -the search term. +Create new images automatically when you make changes to a source code +repository. Use webhooks to trigger actions after a successful push to a +repository. -Private repositories do not appear in the repository search results. To see all -the repositories you can access and their status, view your "Dashboard" page on -[Docker Hub](https://hub.docker.com). +For details see, [Build images automatically](build). -### Use Official Repositories +## Push and publish content -Docker Hub contains a number of [Official -Repositories](http://hub.docker.com/explore/). These are public, certified -repositories from vendors and contributors to Docker. They contain Docker images -from vendors like Canonical, Oracle, and Red Hat that you can use as the basis -to build your applications and services. +Push your Docker images to Docker Hub and publish from your public repositories +for the community to use. Independent software vendors (ISV) can distribute and +sell their Docker certified content by joining the Docker vendor partner +program. -With Official Repositories you know you're using an optimized and -up-to-date image that was built by experts to power your applications. 
- -> **Note**: If you would like to contribute an Official Repository for your -> organization or product, see the documentation on -> [Official Repositories on Docker Hub](/docker-hub/official_repos.md) for more -> information. - - -## Work with Docker Hub image repositories - -Docker Hub provides a place for you and your team to build and ship Docker -images. - -You can configure Docker Hub repositories in two ways: - -* [Repositories](/docker-hub/repos.md), which allow you to push images from a - local Docker daemon to Docker Hub, and -* [Automated Builds](/docker-hub/builds.md), which link to a source code - repository and trigger an image rebuild process on Docker Hub when changes are - detected in the source code. - -You can create public repositories which can be accessed by any other Hub user, -or you can create private repositories with limited access you control. - -### Docker commands and Docker Hub - -Docker itself provides access to Docker Hub services via the -[`docker search`](/engine/reference/commandline/search.md), -[`pull`](/engine/reference/commandline/pull.md), -[`login`](/engine/reference/commandline/login.md), and -[`push`](/engine/reference/commandline/push.md) commands. +For details see, [Publish content on Docker Hub](publish). diff --git a/docker-hub/manage/index.md b/docker-hub/manage/index.md new file mode 100644 index 0000000000..57ef922343 --- /dev/null +++ b/docker-hub/manage/index.md @@ -0,0 +1,18 @@ +--- +title: Manage Docker Hub repositories +description: Manage user access to repos and content +keywords: Docker Hub, registry, teams, organizations, repositories, access, images +--- + +Docker Hub provides a hosted registry service where you can create repositories +to store your Docker images. + +You can push images that you build locally to your repos, or you can link to +your source code and do [autobuilds](../build) within Docker Hub itself. You +can also control access to repos by creating organizations and teams. 
+ +## Next steps + +- [Create repositories](repos) to store your images +- [Push images](push-images) to your repo (or build them within Docker Hub) +- [Control access](orgs-teams) to your repositories with organizations and teams diff --git a/docker-hub/manage/orgs-teams.md b/docker-hub/manage/orgs-teams.md new file mode 100644 index 0000000000..8d1095faac --- /dev/null +++ b/docker-hub/manage/orgs-teams.md @@ -0,0 +1,341 @@ +--- +title: Create organizations and teams +description: Control access to repos with user organizations and teams +keywords: Docker Hub, registry, organizations, teams, resources, permissions +redirect_from: +- /docker-hub/orgs/ +- /docker-cloud/orgs/ +--- + +To share and restrict access to repositories in Docker Hub, create user +organizations and teams. + +Members of an organization can only see the teams to which they belong and the +membership of those teams. Members of the `Owners` team can see and edit all +teams and all team membership lists. + +Docker Hub users outside of an organization cannot see the organizations or +teams of other users. + +## Create an organization + +An organization is a group of teams, and a team is a group of users. You cannot +add users directly to an organization. + +Organizations can have repositories and images associated with them. Paid +features such as private repositories are purchased with the billing information +associated with the organization. + +To create an organization: + +1. Log in to Docker Hub. + +2. Select **Create Organization** from the user icon menu at the top right. + +3. Enter a name for your organization in the dialog. + +4. Enter billing information for the organization (for paid features, such as + private repositories). + +5. Click **Save**. + + The Docker Hub interface changes to the new organization view. Use the menu + at the top right to return to your individual user account. 
+ +When you create an organization, your user account is automatically added to the +`Owners` team of that organization, allowing you to manage the organization. + +The `Owners` team must always have at least one member, and you can add other +members to it at any time. + +### Convert user account to organization + +Individual user accounts can be converted to organizations if needed -- but they +cannot be converted back so be careful. Also, create a new Docker ID before +converting. + +> Account conversion cannot be undone +> +> Account conversion cannot be undone! Also, after converting, you cannot log in +> to the _original account_. Email addresses, linked source repositories, and +> collaborators are removed. Automated builds are migrated. +{: .warning} + +All existing automated builds are migrated to the first member of the `Owners` +team of the new organization (which you specify in the procedure below). This +person can configure the newly converted organization settings to grant access +to other users. + +1. Log in to Docker Hub using the user account that you want to convert. + +2. Click **Settings** in the user account menu in the top right corner. + +3. Scroll down and click `Convert to organization`. + +4. Read through the list of warnings and actions. + +5. Enter the Docker ID of the user to be the first member of the Owners team. + +6. Click **Save and Continue**. + +The UI refreshes. Log in from the Docker ID you specified as the first Owner, +and then continue on to configure the organization as described below. + +## Configure the Owners team + +Each organization has an `Owners` team with members who manage the settings of +the organization. There must always be at least one member of the `Owners` team. + +If you created the organization, you are automatically added to the `Owners` +team. You can add new members and also leave the team if you want to transfer +ownership. + +Owners team members can: + +- Create, edit, and delete teams. 
+* Configure and edit team access permissions. +* Manage billing information for the organization. +* Configure the organization settings (including linked services such as AWS and Github). +* Create, edit, and delete repositories associated with the organization. + +> You cannot change the Owners team permission settings. Only add users to the +> Owners team who you are comfortable granting this level of access. + +1. Select an organization from the menu in the top right corner of the UI. + +2. Click **Teams** in the left navigation panel. + +3. Click **owners**. + +4. Click **Add user**. + +5. Enter the Docker ID of a user to add. + +6. Click **Create**. + +7. Repeat for each user who you want to add. + +To transfer ownership of an organization, add the new owner to the `Owners` +team, then go to your Teams list and click **Leave** on the `Owners` team line. + +> Email notifications for Owners +> +> Only members of the `Owners` team receive email notifications for events (such +> as automated builds) in the organization's resources. The email "notification +> level" setting for the organization affects only the `Owners` team. + +## Create teams + +You can create teams within an Organization to add users and manage access to +repositories. + +Every organization contains an `Owners` team for users who manage the team +settings. You should create at least one team separate from the owners team so +that you can add members to your organization without giving them this level of +access. + +1. Select an organization from the menu in the top right corner of the UI. + +2. Click **Teams** in the left navigation panel. + +3. Click **Create** to create a new team. + +4. Give the new team a name and description, and click **Create**. + +5. On the screen that appears, click **Add User**. + +6. Enter the Docker ID of the user and click **Create**. + +7. Repeat this process for each user you want to add. 
+ +## Configure team permissions + +You can give Teams within an organization different levels of access to +resources that the organization owns. + +You can then assign individual users to a Team to grant them that level of +access. Team permissions are set by members of the `Owners` team. + +> Additive permissions +> +> If a user is a member of multiple teams, access is conjunctive (inclusive or +> additive). For example, if Team A grants Alice `No access` to repositories, +> and Team B grants her `Read and Write` access, she has `Read and Write` access. + +To set or edit Team permissions: + +1. From the Team detail view, click **Permissions**. + + +2. Grant the team access to one or more repositories in the **Repositories** + section. + + a. Enter the name of the repository. + + b. Select an access level. + + c. Click the plus sign (`+`) icon. The change is saved immediately. + + d. Repeat this for each repository that the team needs access to. + + > Repo visibility + > + > An organization can have public repositories which are visible to **all** + > users (including those outside the organization). Team members can view + > public repositories even if you have not given them `View` permission. You + > can use team permissions to grant write and admin access to public + > repositories. + +### Edit permissions for individual repos + +You can also grant teams access to a repository from the repository's +**Permissions** page rather than from each team's permissions settings. You +might do this if you create repositories after you have already configured your +teams, and want to grant access to several teams at the same time. + +If the organization's repository is private, you must explicitly grant any +access that your team members require. If the repository is public, all users +are granted read-only access by default. 
+ +Members of the organization's `Owners` team, and members of any team with +`admin` access to the repository can change the repository's access permissions. + +To grant a team access to an organization's repository: + +1. Navigate to the organization's repository. + +2. Click the **Permissions** tab. + +3. Select the name of the team you want to add from the drop down menu. + +4. Choose the access level the team should have. + +5. Click the **plus sign** to add the selected team and permission setting. Your choice is saved immediately. + +6. Repeat this process for each team to which you want to grant access. + +To edit a team's permission level, select a new setting in the **Permission** +drop down menu. + +To remove a team's access to the repository, click the **trashcan** icon next to +the team's access permission line. + +> Public vs private +> +> If the organization's repository is _public_, team members without explicit +> access permissions still have read-only access to the repository. If the +> repository is _private_, removing a team's access completely prevents the team +> members from seeing the repository. + +### Permissions reference for teams + +**Team access levels**: + +* **No access**: no access at all. The resource is not visible to members of this team. +* **Read only**: users can view the resource and its configuration, but cannot perform actions on the resource. +* **Read and Write**: users can view _and change_ the resource and its configuration. +* **Admin**: users can view, and edit the resource and its configuration, and can create or delete new instances of the resource. + +> Only users who are members of the `Owners` team can create _new_ repositories. 
+ +| Permission level | Access | +| ----------------- | ---------------------------------------------------------- | +| **Repositories** | | +| Read | Pull | +| Read/Write | Pull, push | +| Admin | Pull, push, update description, create and delete | +| | | +| **Build** | | +| Read | View build settings and timeline | +| Read/write | View build settings and timeline, start/retry/cancel build | +| Admin | View build settings and timeline, start/retry/cancel/change build configuration and source, create and delete | + +## Machine user accounts in organizations + +Your organization might find it useful to have a dedicated account for +programmatic or scripted access to your organization's resources using the +[Docker Hub APIs](/apidocs/docker-Hub/). + +> These users may not be _created_ using scripts (even though these accounts are +> referred to as "robot" accounts or "bots"). + +To create a "robot" or machine account for your organization: + +1. Create a new Docker ID for the machine user. Verify the email address associated with the user. + +2. If necessary, create a new Team for the machine user, and grant that team access to the required resources. + + This method is recommended because it makes it easier for administrators to + understand the machine user's access, and modify it without affecting other + users' access. + +3. Add the machine user to the new Team. + +## Modify a team + +To modify an existing team, log in to Docker Hub and switch to your +organization, click **Teams** in the left navigation menu, then click the team +you want to modify. + +You can manage team membership from the first page that appears when you select the team. + +To change the team name or description, click **Settings**. + +To manage team permissions for runtime resources (nodes and applications) and +repositories click **Permissions**. 
+ +## Manage resources for an organization + +An organization can have its own resources including repositories, nodes and +node clusters, containers, services, and service stacks, just as if it was a +normal user account. + +If you're a member of the `Owners` team, you can create these resources when +logged in as the Organization, and manage which Teams can view, edit, and create +and delete each resource. + +### Link a service provider to an organization + +1. Log in to Docker Hub as a member of the `Owners` team. + +2. Switch to the Organization account by selecting it from the user icon menu at the top right. + +3. Click **Hub Settings** in the left navigation. + + From the Organization's Hub settings page, you can link to the organization source code repositories in [GitHub](../build/github/) or [Bitbucket](../build/bitbucket/). + + The steps are the same as when you perform these actions as an individual user. + +### Create repositories + +When a member of the `Owners` team creates a repository for an organization, +they can configure which teams within the organization can access the +repository. No access controls are configured by default on repository creation. +If the repository is _private_, this leaves it accessible only to members of the +`Owners` team until other teams are granted access. + +> **Tip**: +> +> Members of the `Owners` team can configure this default from the +> **Default privacy** section of the organization's **Hub Settings** page. + +See [Create new user repository](../manage/repos#create-new-user-repository) + +1. Log in to Docker Hub as a member of the `Owners` team. + +2. Switch to the Organization account by selecting it from the user icon menu at the top right. + +3. [Create the repository](../manage/repos#create-new-user-repository) as usual. + +4. Once the repository has been created, navigate to it and click **Permissions**. + +5. [Grant access](#configure-team-permissions) to any teams that require access to the repository. 
+ +### Manage organization settings + +From the Organization's **Hub Settings** page you can also manage the +organization's Plan and billing account information, notifications, and API +keys. diff --git a/docker-hub/manage/push-images.md b/docker-hub/manage/push-images.md new file mode 100644 index 0000000000..c6feaf8a82 --- /dev/null +++ b/docker-hub/manage/push-images.md @@ -0,0 +1,19 @@ +--- +title: Push images to Docker Hub +description: Push images that you build locally to Docker Hub +keywords: Docker Hub, images, repos, private, registry +redirect_from: +- /docker-cloud/getting-started/intermediate/pushing-images-to-dockercloud/ +- /docker-cloud/tutorials/pushing-images-to-dockercloud/ +- /docker-cloud/builds/push-images/ +--- + +Docker Hub provides a hosted registry service where you can create repositories +to store your Docker images. + +> Pushing to Docker Hub is supported in Docker Engine 1.6 and higher. + +Test pulling from an official Docker Hub repository and pushing to your personal +one: + +{% include docker-hub-cli-commands.md %} diff --git a/docker-hub/manage/repos.md b/docker-hub/manage/repos.md new file mode 100644 index 0000000000..ac0299547b --- /dev/null +++ b/docker-hub/manage/repos.md @@ -0,0 +1,161 @@ +--- +title: Docker Hub repositories +description: Create and edit Docker Hub repositories +keywords: Docker Hub, repositories, repos +redirect_from: +- /docker-hub/repos/ +- /docker-cloud/builds/repos/ +--- + +Repositories in Docker Hub store your Docker images. You can create repositories +and manually [push images](push-images) using `docker push`, or you can link +to a source code provider and use [automated builds](../build/) to +build the images for you. These repositories can be either public or private. + +## Create new user repository + +All individual users can create one private repository for free, and can create +unlimited public repositories. + +1. Click **Repositories** in the left navigation. + +2. Click **Create**. + +3. 
Enter a **name** and an optional **description**. + +4. Choose a visibility setting for the repository. + +5. Optionally, click a linked source code provider to set up [automated builds](../build/). + + a. Select a namespace from that source code provider. + + b. From that namespace, select a repository to build. + + c. Optionally, expand the build settings section to set up build rules and enable or disable Autobuilds. + + > You can set up autobuilds later + > + > Repos are configurable and you can change build settings at any time after + > the repository is created. If you choose not to enable automated builds, + > you can still push images to the repository. + +6. Click **Create**. + +### Repositories for organizations + +Every organization has an `Owners` team. Members of the `Owners` team can: + +- Create new repositories for that organization. +- Configure repo access permissions for other teams in the organization. +- Change the organization billing information. +- Link the organization to a source code provider to set up automated builds. + +To learn more, see the [organizations and teams documentation](orgs-teams/). + +## Edit an existing repository + +You can edit repositories in Docker Hub to change the description and build +configuration. + +From the **General** page, edit the repository short description, or click to +edit the version of the ReadMe displayed on the repository page. + +> Edits to the Docker Hub **ReadMe** are not reflected in the source code linked to a repository. + +## Change repository privacy settings + +Repositories in Docker Hub can be either public or private. + +Public repositories are visible in ..., and can be searched in ... + +Private repositories are only visible to the user account that created them +(unless they belong to an organization, see below). 
+ +> Privacy settings vs access permissions +> +> _Privacy_ settings for an individual repo differ from +> [_access_ permissions](orgs-teams#edit-permissions-for-individual-repos) +> of a repo shared among members of an [organization](orgs-teams/). + +If a private repository belongs to an organization, members of the `Owners` team +can configure access. Only members of the `Owners` team can change an organization's +repository privacy settings. + +Each Docker Hub account comes with one free private repository. Additional +private repositories are available for subscribers on paid plans. + +To change a repository's privacy settings: + +1. Navigate to the repository in Docker Hub. + +2. Click the **Settings** tab. + +3. Click the **Make public** or **Make private** button. + +4. In the dialog that appears, enter the name of the repository to confirm the change. + +5. Click the button to save the change. + +## Link to repo from third party registry + +You can link to repositories hosted on a third party registry. This allows you +to enable automated builds and push built images back to the registry. + +To link to a repository that you want to share with an organization, contact a +member of the organization's `Owners` team. Only the Owners team can import new +external registry repositories for an organization. + +1. Click **Repositories** in the side menu. + +2. Click the down arrow menu next to the **Create** button. + +3. Select **Import**. + +4. Enter the name of the repository that you want to add. + + For example, `registry.com/namespace/reponame` where `registry.com` is the + hostname of the registry. + +5. Enter credentials for the registry. + + > Push vs read-only permissions + > + > Credentials must have **push** permissions to push built images back to + > the repository. If you provide **read-only** credentials, you can run + > automated tests but you cannot push built images to it. + +6. Click **Import**. + +7. 
External (third-party) repositories cannot be deleted from within Docker Hub, but
They are -designed to: - -* Provide essential base OS repositories (for example, - [ubuntu](https://hub.docker.com/_/ubuntu/), - [centos](https://hub.docker.com/_/centos/)) that serve as the - starting point for the majority of users. - -* Provide drop-in solutions for popular programming language runtimes, data - stores, and other services, similar to what a Platform-as-a-Service (PAAS) - would offer. - -* Exemplify [`Dockerfile` best practices](/engine/userguide/eng-image/dockerfile_best-practices/) - and provide clear documentation to serve as a reference for other `Dockerfile` - authors. - -* Ensure that security updates are applied in a timely manner. This is - particularly important as many Official Repositories are some of the most - popular on Docker Hub. - -Docker, Inc. sponsors a dedicated team that is responsible for reviewing and -publishing all content in the official repositories. This team works in -collaboration with upstream software maintainers, security experts, and the -broader Docker community. - -While it is preferable to have upstream software authors maintaining their -corresponding Official Repositories, this is not a strict requirement. Creating -and maintaining images for Official Repositories is a public process. It takes -place openly on GitHub where participation is encouraged. Anyone can provide -feedback, contribute code, suggest process changes, or even propose a new -Official Repository. - -## Should I use Official Repositories? - -New Docker users are encouraged to use the Official Repositories in their -projects. These repositories have clear documentation, promote best practices, -and are designed for the most common use cases. Advanced users are encouraged to -review the Official Repositories as part of their `Dockerfile` learning process. - -A common rationale for diverging from Official Repositories is to optimize for -image size. 
For instance, many of the programming language stack images contain -a complete build toolchain to support installation of modules that depend on -optimized code. An advanced user could build a custom image with just the -necessary pre-compiled libraries to save space. - -A number of language stacks such as -[python](https://hub.docker.com/_/python/) and -[ruby](https://hub.docker.com/_/ruby/) have `-slim` tag variants -designed to fill the need for optimization. Even when these "slim" variants are -insufficient, it is still recommended to inherit from an Official Repository -base OS image to leverage the ongoing maintenance work, rather than duplicating -these efforts. - -## How do I know the Official Repositories are secure? - -Each of the images in the Official Repositories is scanned using Docker Cloud's -[Security Scanning service](/docker-cloud/builds/image-scan/). The results of -these security scans provide valuable information about which images contain -security vulnerabilities, and allow you to choose images that align with your -security standards. - -To view the Docker Security Scanning results: - -1. Make sure you're logged in to Docker Hub. - You can view Official Images even while logged out, however the scan results are only available once you log in. -2. Navigate to the official repository whose security scan you want to view. -3. Click the `Tags` tab to see a list of tags and their security scan summaries. - ![Viewing CVEs from Docker images](images/scan-drilldown.gif) - -You can click into a tag's detail page to see more information about which -layers in the image and which components within the layer are vulnerable. -Details including a link to the official CVE report for the vulnerability appear -when you click an individual vulnerable component. - -## How can I get involved? - -All Official Repositories contain a **User Feedback** section in their -documentation which covers the details for that specific repository. 
In most -cases, the GitHub repository which contains the Dockerfiles for an Official -Repository also has an active issue tracker. General feedback and support -questions should be directed to `#docker-library` on Freenode IRC. - -## How do I create a new Official Repository? - -From a high level, an Official Repository starts out as a proposal in the form -of a set of GitHub pull requests. Detailed and objective proposal -requirements are documented in the following GitHub repositories: - -* [docker-library/official-images](https://github.com/docker-library/official-images) - -* [docker-library/docs](https://github.com/docker-library/docs) - -The Official Repositories team, with help from community contributors, formally -review each proposal and provide feedback to the author. This initial review -process may require a bit of back and forth before the proposal is accepted. - -There are also subjective considerations during the review process. These -subjective concerns boil down to the basic question: "is this image generally -useful?" For example, the [python](https://hub.docker.com/_/python/) -Official Repository is "generally useful" to the large Python developer -community, whereas an obscure text adventure game written in Python last week is -not. - -Once a new proposal is accepted, the author is responsible for keeping -their images up-to-date and responding to user feedback. The Official -Repositories team becomes responsible for publishing the images and -documentation on Docker Hub. Updates to the Official Repository follow the same -pull request process, though with less review. The Official Repositories team -ultimately acts as a gatekeeper for all changes, which helps mitigate the risk -of quality and security issues from being introduced. 
diff --git a/docker-hub/orgs.md b/docker-hub/orgs.md deleted file mode 100644 index 4115d78f03..0000000000 --- a/docker-hub/orgs.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -description: Docker Hub Teams and Organizations -keywords: Docker, docker, registry, teams, organizations, plans, Dockerfile, Docker Hub, docs, documentation -title: Organizations and teams in Docker Hub ---- - -Docker Hub [organizations](https://hub.docker.com/organizations/) let you create -teams so you can give colleagues access to shared image repositories. A Docker -Hub organization can contain public and private repositories just like a user -account. Access to push or pull for these repositories is allocated by defining -teams of users and then assigning team rights to specific repositories. -Repository creation is limited to users in the organization owner's group. This -allows you to distribute limited access Docker images, and to select which -Docker Hub users can publish new images. - -### Create and view organizations - -You can see which organizations you belong to and add new organizations by -clicking **Organizations** in the top nav bar. - -![organizations](images/orgs.png) - -### Organization teams - -Users in the "Owners" team of an organization can create and modify the -membership of all teams. - -Other users can only see teams they belong to. - -![teams](images/groups.png) - -### Repository team permissions - -Use teams to manage who can interact with your repositories. - -You need to be a member of the organization's "Owners" team to create a new -team, Hub repository, or automated build. As an "Owner", you then delegate the -following repository access rights to a team using the "Collaborators" section -of the repository view. - -Permissions are cumulative. For example, if you have Write permissions, you -automatically have Read permissions: - -- `Read` access allows users to view, search, and pull a private repository in the same way as they can a public repository. 
To see and access the BYOL product listing of an Independent Software Vendor
"Subscribed users only" (in the Pull Requirements drop-down) is selected.
@@ -16,7 +18,7 @@ This page explains how publishers can successfully test their **Docker images**. ## Certify your Docker images -You must use the tool, `inspectDockerimage`, to certify your content for publication on Docker Store by ensuring that your images conform to best practices. Download the tool [here](#syntax). +You must use the tool, `inspectDockerimage`, to certify your content for publication on Docker Hub by ensuring that your images conform to best practices. Download the tool [here](#syntax). The `inspectDockerimage` tool does the following: @@ -43,34 +45,13 @@ The `inspectDockerimage` tool does the following: Your Docker EE installation must be running on the server used to verify your submissions. If necessary, request entitlement to a specific [Docker Enterprise Edition](https://store.docker.com/editions/enterprise/docker-ee-trial). - Docker EE (on the server for verifying submissions) -- git client - inspectDockerimage tool ### Set up testing environment -There are three steps: (1) install git, (2) configure credentials, and (3) configure endpoints (or use default endpoints). +There are two steps: (1) configure credentials, and (2) configure endpoints (or use default endpoints). -1. Install git (required for `inspectDockerimage`): - - **Ubuntu** - - ```bash - sudo apt-get update -qq - sudo apt-get install git -y - ``` - - **RHEL/CentOS** - - ```bash - sudo yum makecache fast - sudo yum install git -y - ``` - - **Windows** - - To download and install git for Windows: . - -2. Configure your Docker Registry credentials by either _defining environment variables_ **or** _passing them as arguments_ to `inspectDockerimage`. +1. Configure your Docker Registry credentials by either _defining environment variables_ **or** _passing them as arguments_ to `inspectDockerimage`. a. 
Optional Product identifier from Docker Hub for this image. Please include it when you want the output to be sent to Docker Hub.
This is the default. @@ -244,7 +225,7 @@ docker container run -d \ $1 ``` -#### To inspect the Docker image, `gforghetti/tomcat-wildbook:latest`, with a custom startup script and upload the result to Docker Store (leave out the `-product-id` parameter if you are just testing): +#### To inspect the Docker image, `gforghetti/tomcat-wildbook:latest`, with a custom startup script and upload the result to Docker Hub (leave out the `-product-id` parameter if you are just testing): ``` root:[~/] # ./inspectDockerimage --start-script ./run_my_application.sh -product-id= gforghetti/tomcat-wildbook:latest @@ -427,9 +408,11 @@ Passed: This test was performed on Docker Enterprise Edition. * Summary of the inspection for Docker image: gforghetti/tomcat-wildbook:latest ******************************************************************************************************************************************************************************************************* -Date: Fri Oct 27 12:59:31 2017 -Operating System: Ubuntu 16.04.3 LTS -Docker version 17.06.2-ee-4, build dd2c358 +Date: Mon May 21 13:29:29 2018 +Operating System: Ubuntu 16.04.4 LTS +Architecture: amd64 +Docker Client Version: 17.06.2-ee-11 +Docker Server Version: 17.06.2-ee-11 There were 3 warnings detected! @@ -456,9 +439,9 @@ The inspection of the Docker image gforghetti/tomcat-wildbook:latest has complet If -product-id is specified on command line: ************************************************************************************************************************************************************************************************** -* Step #16 Upload the test result to Docker Store. +* Step #16 Upload the test result to Docker Hub. ************************************************************************************************************************************************************************************************** -Passed: The test results are uploaded to Docker Store. 
+Passed: The test results are uploaded to Docker Hub. root:[~/] # ``` @@ -479,16 +462,18 @@ Note: The output was piped to the **jq** command to display it "nicely". ``` { - "Date": "Fri Oct 27 13:01:49 2017", - "SystemOperatingSystem": "Operating System: Ubuntu 16.04.3 LTS", - "SystemDockerVersion": "Docker version 17.06.2-ee-4, build dd2c358", - "Dockerimage": { + "Date": "Mon May 21 13:23:37 2018", + "SystemOperatingSystem": "Operating System: Ubuntu 16.04.4 LTS", + "SystemArchitecture": "amd64", + "SystemDockerClientVersion": "17.06.2-ee-11", + "SystemDockerServerVersion": "17.06.2-ee-11", + "DockerImage": { "Name": "gforghetti/apache:latest", "Size": "178MB", "Layers": "23", "Digest": "sha256:65db5d0a8b88ee3d5e5a579a70943433d36d3e6d6a974598a5eebeef9e02a346", "BaseLayerDigest": "sha256:85b1f47fba49da65256f07c8790542a3880e9216f9c491965040f35ce2c6ca7a", - "OfficialBaseimage": "debian:8@sha256:3a5aa6bf675aa71e60df347b29f0a1b1634306cd8db47e1af0a16ad420d1b127", + "OfficialBaseImage": "debian:8@sha256:3a5aa6bf675aa71e60df347b29f0a1b1634306cd8db47e1af0a16ad420d1b127", "CreatedOn": "2017-10-19T17:51:53", "DockerVersion": "17.09.0-ce", "Author": "", @@ -541,11 +526,11 @@ Note: The output was piped to the **jq** command to display it "nicely". }, { "Status": "Passed", - "Message": "Docker container with the container id 725175cb80aa886bb84a892a1a44bf0bf87d6e1e4e16e423cf42d677fb333628 was started." + "Message": "Docker container 424de05adfa2c84890513a51d3d5bc210e4d4b41c746c9252648f38d95b8be49 was started." }, { "Status": "Passed", - "Message": "Docker container with the container id 725175cb80aa886bb84a892a1a44bf0bf87d6e1e4e16e423cf42d677fb333628 is running." + "Message": "Docker container 424de05adfa2c84890513a51d3d5bc210e4d4b41c746c9252648f38d95b8be49 is running." }, { "Status": "Passed", @@ -565,7 +550,7 @@ Note: The output was piped to the **jq** command to display it "nicely". 
}, { "Status": "Passed", - "Message": "Docker container 725175cb80aa886bb84a892a1a44bf0bf87d6e1e4e16e423cf42d677fb333628 was stopped successfully." + "Message": "Docker container 424de05adfa2c84890513a51d3d5bc210e4d4b41c746c9252648f38d95b8be49 was stopped successfully." }, { "Status": "Passed", @@ -641,13 +626,13 @@ PS D:\InspectDockerimage> .\inspectDockerimage microsoft/nanoserver:latest ******************************************************************************************************************************************************************************************************* * Step #1 Loading information on the Docker official base images ... ******************************************************************************************************************************************************************************************************* -The Docker official base images data has been loaded from the docker_official_base_images.json file. Last updated on Fri Oct 27 08:35:14 2017 +The Docker official base images data has been loaded from the docker_official_base_images.json file. Last updated on Sun May 20 16:36:20 2018. ******************************************************************************************************************************************************************************************************* * Step #2 Inspecting the Docker image "microsoft/nanoserver:latest" ... ******************************************************************************************************************************************************************************************************* -Pulling the Docker image microsoft/nanoserver:latest ... -Pulling the Docker image took 2m10.1332244s +Pulling the Docker Image microsoft/nanoserver:latest ... +Pulling the Docker Image took 13.2107625s Passed: Docker image "microsoft/nanoserver:latest" has been inspected. 
******************************************************************************************************************************************************************************************************* @@ -655,12 +640,12 @@ Passed: Docker image "microsoft/nanoserver:latest" has been inspected. ******************************************************************************************************************************************************************************************************* +---------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Docker image: | microsoft/nanoserver:latest | -| Size: | 1.07GB | +| Size: | 1.13GB | | Layers: | 2 | -| Digest: | sha256:bea766f955b4e7e0c5d41654454629b81ef20d57df51709d61531d51d1cadec0 | +| Digest: | sha256:d3cc51de184f3bdf9262c53077886f78e3fc13282bcfc6daf172df7f47f86806 | | Base layer digest: | sha256:bce2fbc256ea437a87dadac2f69aabd25bed4f56255549090056c1131fad0277 | | Official base image: | golang:1.6.4-nanoserver@sha256:38890e2983bd2700145f1b4377ad8d826531a0a15fc68152b2478406f5ead6e2 | -| Created on: | 2017-10-10T10:56:24 | +| Created on: | 2018-05-08T10:43:39 | | Docker version: | | | Author: | | | Maintainer: | | @@ -683,8 +668,8 @@ Passed: Docker image "microsoft/nanoserver:latest" has been inspected. 
+----------+-------+------------------------------------------------------------------------------------------------------+------------+----------+---------------------------------------------------+ | Manifest | Layer | Command | Size | Blob | Matches | +----------+-------+------------------------------------------------------------------------------------------------------+------------+----------+---------------------------------------------------+ -| bea766f9 | 1 | Apply image 10.0.14393.0 | 241 Mib | bce2fbc2 | golang:1.6.4-nanoserver@38890e29 | -| bea766f9 | 2 | Install update 10.0.14393.1770 | 135.2 Mib | b0b5e40c | | +| d3cc51de | 1 | Apply image 10.0.14393.0 | 241 Mib | bce2fbc2 | golang:1.6.4-nanoserver@38890e29 | +| d3cc51de | 2 | Install update 10.0.14393.2248 | 157.2 Mib | 58518d66 | | +----------+-------+------------------------------------------------------------------------------------------------------+------------+----------+---------------------------------------------------+ ******************************************************************************************************************************************************************************************************* @@ -698,7 +683,7 @@ Warning: Docker image does not contain a Healthcheck! Although a Healthcheck is ******************************************************************************************************************************************************************************************************* * Step #6 Attempting to start a container from the Docker image "microsoft/nanoserver:latest" ... ******************************************************************************************************************************************************************************************************* -Passed: Docker container with the container id 86ac470a17ca212f76d19a5242c0cd692e46c0305b3cba6d474d0e92a626e461 was started. 
+Passed: Docker container 1cfbc4be9f39944d4e294cf895210c276143768b951159305dbeb30cb2207a1c was started. ******************************************************************************************************************************************************************************************************* * Step #7 Waiting 30 seconds to give the container time to initialize... @@ -708,7 +693,7 @@ Wait time expired, continuing. ******************************************************************************************************************************************************************************************************* * Step #8 Checking to see if the container is still running. ******************************************************************************************************************************************************************************************************* -Passed: Docker container with the container id 86ac470a17ca212f76d19a5242c0cd692e46c0305b3cba6d474d0e92a626e461 is running. +Passed: Docker container 1cfbc4be9f39944d4e294cf895210c276143768b951159305dbeb30cb2207a1c is running. ******************************************************************************************************************************************************************************************************* * Step #9 Displaying the running processes in the Docker container @@ -716,22 +701,22 @@ Passed: Docker container with the container id 86ac470a17ca212f76d19a5242c0cd69 Passed: Docker container has 16 running processes. 
Name PID CPU Private Working Set -smss.exe 6056 00:00:00.015 221.2kB -csrss.exe 3172 00:00:00.015 356.4kB -wininit.exe 5872 00:00:00.015 659.5kB -services.exe 3352 00:00:00.109 1.442MB -lsass.exe 3632 00:00:00.156 2.859MB -svchost.exe 1484 00:00:00.031 1.335MB -svchost.exe 1560 00:00:00.015 1.356MB -svchost.exe 5220 00:00:00.031 2.109MB -svchost.exe 6096 00:00:00.015 1.425MB -svchost.exe 4704 00:00:00.062 3.76MB -svchost.exe 4936 00:00:00.046 2.044MB -svchost.exe 5740 00:00:00.046 1.716MB -svchost.exe 4504 00:00:00.468 4.506MB -CExecSvc.exe 4464 00:00:00.000 782.3kB -svchost.exe 4328 00:00:00.093 3.113MB -cmd.exe 5668 00:00:00.031 426kB +smss.exe 852 00:00:00.031 217.1kB +csrss.exe 3436 00:00:00.015 348.2kB +wininit.exe 4728 00:00:00.046 647.2kB +services.exe 4292 00:00:00.125 1.491MB +lsass.exe 3560 00:00:00.203 2.839MB +svchost.exe 4484 00:00:00.078 1.229MB +svchost.exe 3460 00:00:00.031 1.47MB +svchost.exe 5184 00:00:00.078 2.154MB +svchost.exe 5496 00:00:00.046 1.45MB +svchost.exe 4088 00:00:00.078 3.715MB +svchost.exe 6140 00:00:00.046 1.942MB +svchost.exe 5212 00:00:00.015 1.683MB +svchost.exe 5680 00:00:00.375 4.612MB +svchost.exe 3384 00:00:00.234 6.369MB +CExecSvc.exe 5636 00:00:00.015 766kB +cmd.exe 3888 00:00:00.000 401.4kB ******************************************************************************************************************************************************************************************************* * Step #10 Displaying Docker container resource usage statistics @@ -739,20 +724,20 @@ cmd.exe 5668 00:00:00.031 426kB Passed: Docker container resource usage statistics were retrieved. 
CPU % PRIV WORKING SET BLOCK I/O NET I/O -0.00% 26.81MiB 5.1MB / 14.2MB 79.4kB / 7.24kB +0.00% 29.88MiB 5.21MB / 14.7MB 1.04MB / 24.1kB ******************************************************************************************************************************************************************************************************* * Step #11 Displaying the logs from the Docker container (last 20 lines) ******************************************************************************************************************************************************************************************************* Passed: Docker container logs were retrieved. -2017-10-27T13:27:28.268812200Z (c) 2016 Microsoft Corporation. All rights reserved. -2017-10-27T13:27:28.269808900Z +2018-05-21T14:29:02.580933000Z (c) 2016 Microsoft Corporation. All rights reserved. +2018-05-21T14:29:02.584933600Z ******************************************************************************************************************************************************************************************************* * Step #12 Attempting to stop the Docker container normally with a timeout of 60 seconds before it is killed ... ******************************************************************************************************************************************************************************************************* -Passed: Docker container 86ac470a17ca212f76d19a5242c0cd692e46c0305b3cba6d474d0e92a626e461 was stopped successfully. +Passed: Docker container 1cfbc4be9f39944d4e294cf895210c276143768b951159305dbeb30cb2207a1c was stopped successfully. Passed: Docker container exited with an exit code of 0. ******************************************************************************************************************************************************************************************************* @@ -770,9 +755,11 @@ Passed: This test was performed on Docker Enterprise Edition. 
* Summary of the inspection for Docker image: microsoft/nanoserver:latest ******************************************************************************************************************************************************************************************************* -Date: Fri Oct 27 13:25:00 2017 +Date: Mon May 21 14:28:36 2018 Operating System: Microsoft Windows Server 2016 Datacenter -Docker version 17.06.1-ee-2, build 8e43158 +Architecture: amd64 +Docker Client Version: 17.06.1-ee-2 +Docker Server Version: 17.06.1-ee-2 There were 3 warnings detected! @@ -781,18 +768,17 @@ Passed: Docker image was built from the official Docker base image "golang:1.6. Warning: Docker image was not built using Docker Enterprise Edition! Warning: Docker image metadata does not contain an Author or Maintainer! Warning: Docker image does not contain a Healthcheck! Although a Healthcheck is not required, it is recommended. -Passed: Docker container with the container id 86ac470a17ca212f76d19a5242c0cd692e46c0305b3cba6d474d0e92a626e461 was started. -Passed: Docker container with the container id 86ac470a17ca212f76d19a5242c0cd692e46c0305b3cba6d474d0e92a626e461 is running. +Passed: Docker container 1cfbc4be9f39944d4e294cf895210c276143768b951159305dbeb30cb2207a1c was started. +Passed: Docker container 1cfbc4be9f39944d4e294cf895210c276143768b951159305dbeb30cb2207a1c is running. Passed: Docker container has 16 running processes. Passed: Docker container resource usage statistics were retrieved. Passed: Docker container logs were retrieved. -Passed: Docker container 86ac470a17ca212f76d19a5242c0cd692e46c0305b3cba6d474d0e92a626e461 was stopped successfully. +Passed: Docker container 1cfbc4be9f39944d4e294cf895210c276143768b951159305dbeb30cb2207a1c was stopped successfully. Passed: Docker container exited with an exit code of 0. Passed: Docker container and any associated volumes removed. Passed: Docker image "microsoft/nanoserver:latest" was removed. 
Passed: This test was performed on Docker Enterprise Edition. The inspection of the Docker image microsoft/nanoserver:latest has completed. - PS D:\InspectDockerimage> ``` diff --git a/docker-store/certify-plugins-logging.md b/docker-hub/publish/certify-plugins-logging.md similarity index 79% rename from docker-store/certify-plugins-logging.md rename to docker-hub/publish/certify-plugins-logging.md index 0a3c6ea998..f2768e7c4d 100644 --- a/docker-store/certify-plugins-logging.md +++ b/docker-hub/publish/certify-plugins-logging.md @@ -1,14 +1,16 @@ --- -description: Run certification tests against your images -keywords: Docker, docker, store, certified content, logging title: Certify Docker logging plugins +description: Run certification tests against your logging plugins +keywords: Docker Hub, certified content, logging +redirect_from: +- /docker-store/certify-plugins-logging/ --- ## Introduction Content that qualifies as **Docker Certified** must conform to best practices and pass certain baseline tests. -Docker Store lets you publish certified images as well as plugins for logging, volumes, and networks. You must certify your own _images and logging plugins_ with the `inspect` tools as explained in these docs. Currently, Docker Store certifies your volume and networking plugins for you upon submission. +Docker Hub lets you publish certified images as well as plugins for logging, volumes, and networks. You must certify your own _images and logging plugins_ with the `inspect` tools as explained in these docs. Currently, Docker Hub certifies your volume and networking plugins for you upon submission. This page explains how publishers can successfully test their **Docker logging plugins**. Also available: [Certify your Docker images](certify-images). 
@@ -16,7 +18,7 @@ This page explains how publishers can successfully test their **Docker logging p ## Certify your logging plugins -You must use the tool, `inspectDockerLoggingPlugin`, to certify your content for publication on Docker Store by ensuring that your Docker logging plugins conform to best practices. +You must use the tool, `inspectDockerLoggingPlugin`, to certify your content for publication on Docker Hub by ensuring that your Docker logging plugins conform to best practices. The `inspectDockerLoggingPlugin` command verifies that your Docker logging plugin can be installed and works on Docker Enterprise Edition. It also runs a container from an official Docker image of `alpine:latest` and outputs the contents of a file named `quotes.txt` (available for download). In sum, the `inspectDockerLoggingPlugin` command: @@ -54,29 +56,9 @@ Your Docker EE installation must be running on the server used to verify your su ### Set up testing environment -There are three steps: (1) install git, (2) configure credentials, and (3) configure endpoints. +There are two steps: (1) configure credentials, and (2) configure endpoints. -1. Install git (required for `inspectDockerLoggingPlugin`): - - **Ubuntu** - - ```bash - sudo apt-get update -qq - sudo apt-get install git -y - ``` - - **RHEL/CentOS** - - ```bash - sudo yum makecache fast - sudo yum install git -y - ``` - - **Windows** - - To download and install git for Windows: . - -2. Configure your Docker Registry credentials by either _defining environment variables_ **or** _passing them as arguments_ to `inspectDockerLoggingPlugin`. +1. Configure your Docker Registry credentials by either _defining environment variables_ **or** _passing them as arguments_ to `inspectDockerLoggingPlugin`. a. Define environment variables for registry credentials, `DOCKER_USER` and `DOCKER_PASSWORD`: @@ -108,7 +90,7 @@ There are three steps: (1) install git, (2) configure credentials, and (3) confi --docker-password ``` -3. 
Configure endpoints (and override default values) by either _defining environment variables_ **or** _passing them as arguments_ to `inspectDockerLoggingPlugin`. +2. Configure endpoints (and override default values) by either _defining environment variables_ **or** _passing them as arguments_ to `inspectDockerLoggingPlugin`. By default, `inspectDockerLoggingPlugin` uses these two endpoints to communicate with the Docker Hub Registry: @@ -147,6 +129,28 @@ There are three steps: (1) install git, (2) configure credentials, and (3) confi --docker-registry-api-endpoint ``` +* If more details are needed for debugging problems communicating to the Docker Registry, the following environment variable can be exported which will generate detailed debugging output to a file named `./dockerAPI.go.log` in the directory where the command is run from. + + * Linux or MacOS + + ```bash + export DOCKER_REGISTRY_API_DEBUG="true" + ``` + + * Windows + + * Windows command prompt + + ```bash + set DOCKER_REGISTRY_API_DEBUG="true" + ``` + + * Windows powershell + + ```bash + $env:DOCKER_REGISTRY_API_DEBUG="true" + ``` + ### Syntax 1. Download `inspectDockerLoggingPlugin` command. @@ -158,15 +162,30 @@ There are three steps: (1) install git, (2) configure credentials, and (3) confi | Linux/IBMZ | [https://s3.amazonaws.com/store-logos-us-east-1/certification/zlinux/inspectDockerLoggingPlugin](https://s3.amazonaws.com/store-logos-us-east-1/certification/zlinux/inspectDockerLoggingPlugin) | | Linux/IBMPOWER | [https://s3.amazonaws.com/store-logos-us-east-1/certification/power/inspectDockerLoggingPlugin](https://s3.amazonaws.com/store-logos-us-east-1/certification/power/inspectDockerLoggingPlugin) | -2. Set permissions on `inspectDockerLoggingPlugin` so that it is executable: +2. Set permissions on `inspectDockerLoggingPlugin` for linux, zlinux and power so that it is executable: ``` chmod u+x inspectDockerLoggingPlugin ``` -3. 
Download [`quotes.txt`](https://s3.amazonaws.com/store-logos-us-east-1/certification/quotes.txt) and put it in the same directory. +3. Download `http_api_endpoint` command -4. Get the product ID from the plan page you'd like to reference for the certification test. Make sure the checkbox is checked and the plan is saved first. + | OS/Architecture | Download Link | + |-----------------|------------------| + | Windows/X86 | [https://s3.amazonaws.com/store-logos-us-east-1/certification/windows/http_api_endpoint.exe](https://s3.amazonaws.com/store-logos-us-east-1/certification/windows/http_api_endpoint.exe) | + | Linux/X86 | [https://s3.amazonaws.com/store-logos-us-east-1/certification/linux/http_api_endpoint](https://s3.amazonaws.com/store-logos-us-east-1/certification/linux/http_api_endpoint) | + | Linux/IBMZ | [https://s3.amazonaws.com/store-logos-us-east-1/certification/zlinux/http_api_endpoint](https://s3.amazonaws.com/store-logos-us-east-1/certification/zlinux/http_api_endpoint) | + | Linux/IBMPOWER | [https://s3.amazonaws.com/store-logos-us-east-1/certification/power/http_api_endpoint](https://s3.amazonaws.com/store-logos-us-east-1/certification/power/http_api_endpoint) | + +4. Set permissions on `http_api_endpoint` for linux, zlinux and power so that it is executable: + + ``` + chmod u+x http_api_endpoint + ``` + +5. Download [`quotes.txt`](https://s3.amazonaws.com/store-logos-us-east-1/certification/quotes.txt) and put it in the same directory. + +6. Get the product ID from the plan page you'd like to reference for the certification test. Make sure the checkbox is checked and the plan is saved first. ![product ID](images/store-product-id.png) @@ -186,6 +205,8 @@ There are three steps: (1) install git, (2) configure credentials, and (3) confi Docker User ID. This overrides the DOCKER_USER environment variable. -get-logs-script string An optional custom script used to retrieve the logs. + -insecure-skip-verify + Optional. 
Specifies to disable SSL verification for an insecure private Docker Trusted Registry. -help Help on the command. -html @@ -193,7 +214,7 @@ There are three steps: (1) install git, (2) configure credentials, and (3) confi -json Generate JSON output. -product-id string - Optional Product identifier from Docker Store for this plugin. Please include it when you want the output sent to docker store for certification. + Optional Product identifier from Docker Hub for this plugin. Please include it when you want the output sent to docker store for certification. -test-script string An optional custom script used to test the Docker logging plugin. The script gets passed 1 parameter - the Docker logging plugin name. -verbose @@ -205,9 +226,9 @@ There are three steps: (1) install git, (2) configure credentials, and (3) confi ## Inspection Output -By default, `inspectDockerLoggingPlugin` displays output locally to `stdout` (the default), JSON, and HTML. You can also upload output to Docker Store, which is recommended for admnistrator verification. +By default, `inspectDockerLoggingPlugin` displays output locally to `stdout` (the default), JSON, and HTML. You can also upload output to Docker Hub, which is recommended for admnistrator verification. -- **Upload to Docker Store** (by entering `product-id` at the commandline). +- **Upload to Docker Hub** (by entering `product-id` at the commandline). - **Send message to `stdout`**. This is the default. 
@@ -226,7 +247,7 @@ By default, `inspectDockerLoggingPlugin` displays output locally to `stdout` (th ### Inspect a Docker logging plugin with messages sent to stdout -#### To inspect the Docker logging plugin "gforghetti/docker-log-driver-test:latest", and upload the result to Docker Store (leave out the `-product-id` parameter if you are just testing): +#### To inspect the Docker logging plugin "gforghetti/docker-log-driver-test:latest", and upload the result to Docker Hub (leave out the `-product-id` parameter if you are just testing): ``` gforghetti:~:$ ./inspectDockerLoggingPlugin -product-id= gforghetti/docker-log-driver-test:latest @@ -235,90 +256,93 @@ gforghetti:~:$ ./inspectDockerLoggingPlugin -product-id= gforg ``` ************************************************************************************************************************************************************************************************** -* Docker logging plugin: gforghetti/docker-log-driver-test:latest +* Docker Logging Plugin: gforghetti/docker-log-driver-test:latest ************************************************************************************************************************************************************************************************** ************************************************************************************************************************************************************************************************** -* Step #1 Inspecting the Docker logging plugin: gforghetti/docker-log-driver-test:latest ... +* Step #1 Inspecting the Docker Logging Plugin: gforghetti/docker-log-driver-test:latest ... ************************************************************************************************************************************************************************************************** -Passed: Docker logging plugin image gforghetti/docker-log-driver-test:latest has been inspected. 
+Passed: Docker Logging Plugin image gforghetti/docker-log-driver-test:latest has been inspected. ************************************************************************************************************************************************************************************************** -* Step #2 Docker logging plugin information +* Step #2 Docker Logging Plugin information ************************************************************************************************************************************************************************************************** +-------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| Docker logging plugin: | gforghetti/docker-log-driver-test:latest | +| Docker Logging Plugin: | gforghetti/docker-log-driver-test:latest | | Description: | jsonfilelog as plugin | | Documentation: | - | -| Digest: | sha256:870c81b9b8872956eec83311707e52ccd4af40683ba64aac3c1700d252a07624 | -| Base layer digest: | sha256:8b0c5cbf1339dacef5f56717567aeee37d9e4f196f0874457d46c01592a30d70 | -| Docker version: | 17.06.0-ce | +| Digest: | sha256:1cdd79202a7a9f9a53524e904d9f89ed0a6bf6673717bc955ef55744f0826d4c | +| Base layer digest: | sha256:fda008d4a2b0d2c0a9d2e5dc952aefb0188f7a9c96c04e159662fd56b507c174 | +| Docker version: | 17.12.0-ce | | Interface Socket: | jsonfile.sock | | Interface Socket Types: | docker.logdriver/1.0 | | IpcHost: | false | | PidHost: | false | | Entrypoint: | /usr/bin/docker-log-driver | -| WorkDir: | /tmp | +| WorkDir: | | | User: | | +-------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------+ 
-************************************************************************************************************************************************************************************************** -* Step #3 Checking to see if the Docker logging plugin is already installed. -************************************************************************************************************************************************************************************************** -The Docker logging plugin gforghetti/docker-log-driver-test:latest needs to be installed. +Warning: Docker logging plugin was not built using Docker Enterprise Edition! ************************************************************************************************************************************************************************************************** -* Step #4 Installing the Docker logging plugin gforghetti/docker-log-driver-test:latest ... +* Step #3 Installing the Docker logging plugin gforghetti/docker-log-driver-test:latest ... ************************************************************************************************************************************************************************************************** Passed: Docker logging plugin gforghetti/docker-log-driver-test:latest has been installed successfully. ************************************************************************************************************************************************************************************************** -* Step #5 Testing the Docker logging plugin: gforghetti/docker-log-driver-test:latest ... +* Step #4 Testing the Docker logging plugin: gforghetti/docker-log-driver-test:latest ... 
************************************************************************************************************************************************************************************************** Starting a Docker container to test the docker logging plugin gforghetti/docker-log-driver-test:latest ************************************************************************************************************************************************************************************************** -* Step #6 Retrieving the Docker Logs ... +* Step #5 Retrieving the Docker Logs ... ************************************************************************************************************************************************************************************************** -Retrieving the Docker logs using the "docker container logs c0bc7de01b155203d1ab72a1496979c4715c9b9f81c7f5a1a37ada3aa333e1d5" command +Retrieving the Docker logs using the "docker container logs 4dc3e699dbf3d050a5b582a245c9a4718bb0300d7d55380887c74741d09bd730" command ************************************************************************************************************************************************************************************************** -* Step #7 Verifying that the contents retrieved matches what was sent to the Docker Logging plugin. +* Step #6 Verifying that the contents retrieved matches what was sent to the Docker Logging plugin. ************************************************************************************************************************************************************************************************** -Passed: Docker logging plugin Test was successful. +Passed: Docker Logging Plugin Test was successful. 
************************************************************************************************************************************************************************************************** -* Step #8 Removing the Docker container and any associated volumes. +* Step #7 Removing the Docker container and any associated volumes. ************************************************************************************************************************************************************************************************** Passed: Docker container and any associated volumes removed. ************************************************************************************************************************************************************************************************** -* Step #9 Removing the Docker logging plugin +* Step #8 Removing the Docker logging plugin ************************************************************************************************************************************************************************************************** Passed: Docker logging plugin gforghetti/docker-log-driver-test:latest was removed. +Passed: This test was performed on Docker Enterprise Edition. 
************************************************************************************************************************************************************************************************** -* Summary of the inspection for the Docker logging plugin: gforghetti/docker-log-driver-test:latest +* Summary of the inspection for the Docker Logging Plugin: gforghetti/docker-log-driver-test:latest ************************************************************************************************************************************************************************************************** -Report Date: Tue Sep 19 12:40:11 2017 -Operating System: Operating System: MacOS darwin Version: 10.12.6 -Docker version 17.09.0-ce-rc2, build 363a3e7 +Report Date: Mon May 21 14:40:41 2018 +Operating System: Operating System: Ubuntu 16.04.4 LTS +Architecture: amd64 +Docker Client Version: 17.06.2-ee-11 +Docker Server Version: 17.06.2-ee-11 +There were 1 warnings detected! -Passed: Docker logging plugin image gforghetti/docker-log-driver-test:latest has been inspected. +Passed: Docker Logging Plugin image gforghetti/docker-log-driver-test:latest has been inspected. +Warning: Docker logging plugin was not built using Docker Enterprise Edition! Passed: Docker logging plugin gforghetti/docker-log-driver-test:latest has been installed successfully. -Passed: Docker logging plugin Test was successful. +Passed: Docker Logging Plugin Test was successful. Passed: Docker container and any associated volumes removed. Passed: Docker logging plugin gforghetti/docker-log-driver-test:latest was removed. +Passed: This test was performed on Docker Enterprise Edition. The inspection of the Docker logging plugin gforghetti/docker-log-driver-test:latest has completed. 
If -product-id is specified on command line: ************************************************************************************************************************************************************************************************** -* Step #10 Upload the test result to Docker Store. +* Step #9 Upload the test result to Docker Hub. ************************************************************************************************************************************************************************************************** -Passed: The test results are uploaded to Docker Store. +Passed: The test results are uploaded to Docker Hub. gforghetti:~/$ ``` @@ -337,32 +361,39 @@ Note: The output was piped to the **jq** command to display it "nicely". #### Output: +``` ``` { - "Date": "Tue Sep 19 12:41:49 2017", - "SystemOperatingSystem": "Operating System: MacOS darwin Version: 10.12.6", - "SystemDockerVersion": "Docker version 17.09.0-ce-rc2, build 363a3e7", + "Date": "Mon May 21 14:38:28 2018", + "SystemOperatingSystem": "Operating System: Ubuntu 16.04.4 LTS", + "SystemArchitecture": "amd64", + "SystemDockerClientVersion": "17.06.2-ee-11", + "SystemDockerServerVersion": "17.06.2-ee-11", "DockerLogginPlugin": "gforghetti/docker-log-driver-test:latest", "Description": "jsonfilelog as plugin", "Documentation": "-", - "DockerLoggingPluginDigest": "sha256:870c81b9b8872956eec83311707e52ccd4af40683ba64aac3c1700d252a07624", - "BaseLayerImageDigest": "sha256:8b0c5cbf1339dacef5f56717567aeee37d9e4f196f0874457d46c01592a30d70", - "DockerVersion": "17.06.0-ce", + "DockerLoggingPluginDigest": "sha256:1cdd79202a7a9f9a53524e904d9f89ed0a6bf6673717bc955ef55744f0826d4c", + "BaseLayerImageDigest": "sha256:fda008d4a2b0d2c0a9d2e5dc952aefb0188f7a9c96c04e159662fd56b507c174", + "DockerVersion": "17.12.0-ce", "Entrypoint": "/usr/bin/docker-log-driver", "InterfaceSocket": "jsonfile.sock", "InterfaceSocketTypes": "docker.logdriver/1.0", - "WorkDir": "/tmp", + "WorkDir": "", "User": "", 
"IpcHost": false, "PidHost": false, "Errors": 0, - "Warnings": 0, + "Warnings": 1, "HTMLReportFile": "", "VulnerabilitiesScanURL": "", "Results": [ { "Status": "Passed", - "Message": "Docker logging plugin image gforghetti/docker-log-driver-test:latest has been inspected." + "Message": "Docker Logging Plugin image gforghetti/docker-log-driver-test:latest has been inspected." + }, + { + "Status": "Warning", + "Message": "Docker logging plugin was not built using Docker Enterprise Edition!" }, { "Status": "Passed", @@ -370,7 +401,7 @@ Note: The output was piped to the **jq** command to display it "nicely". }, { "Status": "Passed", - "Message": "Docker logging plugin Test was successful." + "Message": "Docker Logging Plugin Test was successful." }, { "Status": "Passed", @@ -379,10 +410,14 @@ Note: The output was piped to the **jq** command to display it "nicely". { "Status": "Passed", "Message": "Docker logging plugin gforghetti/docker-log-driver-test:latest was removed." + }, + { + "Status": "Passed", + "Message": "This test was performed on Docker Enterprise Edition." } ] } -gforghetti:~/$ +🐳 gforghetti:~/$ ``` diff --git a/docker-store/customer_faq.md b/docker-hub/publish/faq-customer.md similarity index 81% rename from docker-store/customer_faq.md rename to docker-hub/publish/faq-customer.md index a3fb58ddeb..d2d5aaac94 100644 --- a/docker-store/customer_faq.md +++ b/docker-hub/publish/faq-customer.md @@ -1,19 +1,20 @@ --- -description: Docker Store frequently asked questions -keywords: Docker, docker, store, purchase images -title: Docker Store Customer FAQs +title: Docker Hub customer FAQs +description: Frequently asked questions by Docker Hub customers +keywords: Docker Hub, customers +redirect_from: +- /docker-store/customer_faq/ --- ## Customer FAQs -### How do I log in to the Docker Store? +### How do I log in to the Docker Hub? -Log in to the Docker Store using your free Docker ID. Don’t have a Docker ID -yet? 
Go to [Docker Cloud](https://cloud.docker.com) to sign up. +Log in to the Docker Hub using your free Docker ID. Don’t have a Docker ID +yet? See [Create a Docker Hub account](../accounts/). -### I tried `docker login store.docker.com` via the CLI and it does not seem to work. Am I missing something? - -You should do `docker login` ( *not `docker login store.docker.com`* ) to login successfully, and pull content you might be entitled to. +You should do `docker login` ( *not `docker login store.docker.com`* ) to login +successfully, and pull content you might be entitled to. ## Certification program @@ -28,7 +29,7 @@ technology in containers with support from both Docker and the publisher. ## End user experience -### Why do I see the pull command for a few products and no pull command in other products? +### Why do I see the pull command for some products but not others? If a publisher publishes multiple images as part of their plan - we do not display the pull command. The pull command visual is exclusively for submissions @@ -53,19 +54,19 @@ organization member to pull. Organization owners can view the organization's subscriptions by changing the selected account on the **My Content** page. -### How can I become a Docker Store publisher? +### How can I become a Docker Hub publisher? -You can apply to become a Docker Store publisher by filling out +You can apply to become a Docker Hub publisher by filling out the form [here](https://store.docker.com/publisher/signup). When you've been accepted to the program, you can set up a publisher profile and submit your images for review. Learn more about the publisher duties and requirements [here](https://success.docker.com/Store). -## Docker Store billing frequently asked questions +## Docker Hub billing frequently asked questions ### What forms of payment do you accept? 
-The Docker Store accepts Visa, MasterCard, American Express, and Discover credit +The Docker Hub accepts Visa, MasterCard, American Express, and Discover credit cards. We do not accept ACH, EFT, or PIN-based debit card transactions at this time. diff --git a/docker-hub/publish/faq-publisher.md b/docker-hub/publish/faq-publisher.md new file mode 100644 index 0000000000..b661bf567c --- /dev/null +++ b/docker-hub/publish/faq-publisher.md @@ -0,0 +1,294 @@ +--- +title: Docker Hub publisher FAQs +description: Frequently asked questions by Docker Hub by publishers +keywords: Docker Hub, publishers +redirect_from: +- /docker-store/publisher_faq/ +--- + +## Program basics + +### What is the Docker Certified program? + +The Docker Certification program for infrastructure, images, and plugins is for both technology partners and enterprise customers. It is designed to recognize +high-quality containers and plugins, provide collaborative support, and ensure +compatibility with Docker EE. + +Docker Certification is aligned to the available Docker EE infrastructure and +gives enterprises a trusted way to run more technology in containers with +support from both Docker and the publisher. + +The +[Docker Technology Partner guide](https://www.docker.com/partners/partner-program#/technology_partner) +explains the Technology Partner program and the Docker Certification program for +infrastructure, images, and plugins in more detail. + +### What are the benefits of joining? + +Docker Hub promotes Docker certified containers and plugins running on Docker +certified infrastructure trusted and high quality content. + +With over 8 billion image pulls and access to Docker’s large customer base, +certifying your images and plugins differentiates your content. + +With a revenue share agreement, Docker can be a channel for your content. The +Docker Certified badge can also be listed next to external references to your +product. + +### How do I apply as a publisher? 
+ +Start by applying to be a Docker Technology Partner at https://goto.docker.com/partner and click on "Publisher". + +* Requires acceptance of partnership agreement for completion. + +* Identify content that can be listed on Hub and includes a support offering. + +* Test your image against Docker certified infrastructure version 17.03 and + above (plugins must run on 17.03 and above). + +* Submit your image for certification through the publisher portal. Docker scans + the image and work with you to address vulnerabilities. Docker also conducts a + best practices review of the image. + +* Be a TSAnet member or join the Docker Limited Group. + +* Upon completion of certification criteria, and acceptance by Docker, a product + page is updated to reflect certified status. + +### What are official vs certified images? + +Many official images transition to the Docker Certified program and are +maintained and updated by the original owner of the software. Docker continues +to maintain some of the base OS images and language frameworks. + +### What is the application timeline? + +1-2 weeks. + +### Is certification optional or required? + +Certification is recommended for most commercial and supported container images. +Free, community, and other commercial (non-certified) content may also be listed +on Docker Hub. + +![certified content example](images/faq-certified-content-types.png) + +### How is certification of plugins handled? + +The Docker certification program recognizes the need to apply special scrutiny +and testing to containers that access system level interfaces such as storage +volumes and networking. + +Docker identifies these special containers as “plugins” which require additional +testing by the publisher or Docker. These plugins employ the V2 Plugin +Architecture that was first made available in 1.12 (experimental) and now +available in Docker Enterprise Edition 17.03. + +## Licensing, pricing, & plans + +### Is there a fee to join the program? 
+ +In the future, Docker may charge a small annual listing fee. This is waived for +the initial period. + +### What are the customer pricing plans? + +As a publisher you can charge a subscription fee every month in USD. The amount is determined by you. We are working on other pricing options. If you have feedback about pricing, send us an email at publisher-support@docker.com + +### How do customers switch from a Trial to a Paid subscription? + +Publishers can provide two different tokens or let customers use the same token +and internally map the customer to a paid plan vs a free trial. + +### How am I paid if my commercial content is purchased? + +Docker cuts you a check post a revenue share. Details are included in your vendor +agreement. + +### Must subscribed users explicitly accept license terms? + +Yes. Even if something is published as a free tier, subscribed users must +explicitly click **Accept** on the license terms before they can download the +image. + +### Can I use the same repository for two plans with unique tags? + +Publishers must use a different repository for each plan. + +If a user is entitled to a plan in your product, the user is entitled to all tags in the relevant. For example, if you have a `Developer` plan that is mapped to repositories store/`mynamespace`/`myrepo1`:`mytag1`, another plan (say `Production`) **should** map to a different repository. + +**_Any user who is entitled to the `Developer` plan will be able to pull all tags in store/`mynamespace`/`myrepo1`_**. + +### How is export control handled? Can individual countries be specified? + +Docker provides export control via blacklisting several countries, IPs and users +based on the national export compliance database. Any export control we do is +across all products, we do not selectively blacklist versions and products for +specific groups. If you have questions, contact us at +publisher-support@docker.com. 
+ + +### Do you have a license enforcement system for docker images sold? + +Currently, we have one licensing option -- Bring your own License or BYOL. + +With BYOL, publishers take care of license keys within the container. The +license key can be presented to the customer on Docker Hub. We expect the +publisher to build short circuits into the container, so the container stops +running once the license key expires. Once a customer cancels, or if the +customer subscription expires, the customer cannot download updates from the +Hub. + +### What happens if a customer stops paying for the image? + +Users who cancel their subscription cannot download updates from the Hub. The +container may continue running. If you have a licensing scheme built into the +container, the licensing scheme can be a forcing function and stop the +container. (_We do not build anything into the container, it is up to the publisher_). + +### What options are presented to users to pull an image? + +We provide users the following options to access your software: + +* Users who are logged-in. +* Users who have accepted ToS. +* All users (including users without Docker identity). + +Here is a [screenshot](https://user-images.githubusercontent.com/2453622/32067299-00cf1210-ba83-11e7-89f8-15deed6fef62.png) to describe how publishers can update the options provided to customers. + +### How do I distinguish free and paid subscribers? + +The analytics reports contain information about the subscriber and the relevant +product plan. You can identify subscribers for each plan for each product. + +### Where can I view customer insights? + +Analytics reports are only available to publishers with certified or commercial +Content. Go to https://store.docker.com/publisher/center and click on "Actions" +for the product you'd like to view analytics for. Here is a +[screenshot](https://user-images.githubusercontent.com/2453622/32352202-6e87ce6e-bfdd-11e7-8fb0-08fe5a3e8930.png). 
+ +### How is customer/publisher support handled? + +All Docker certified container images and plugins running on Docker certified +infrastructure come with SLA-based support provided by the publisher and Docker. + +Normally, a customer contacts the publisher for container and application level +issues. Likewise, a customer contacts Docker for Docker Edition support. + +In the case where a customer calls Docker (or vice versa) about an issue on the +application, Docker advises the customer about the publisher support process and +performs a handover directly to the publisher if required. TSAnet is required +for exchange of support tickets between the publisher and Docker. + + +## Product submission + +### How long for an image to be approved? + +2 Weeks. + +### Can I preview my submission before publishing? + +Yes. You can preview your submission including the image you've submitted, the look and feel of the detail page and any markdown descriptions you might have. + +These screenshots illustrate the preview experience for markdown content: + +Product Description preview [screenshot](https://user-images.githubusercontent.com/2453622/32344591-9cd6b456-bfc4-11e7-9505-1f7e8235f812.png). + +Install instructions description preview [screenshot](https://user-images.githubusercontent.com/2453622/32344592-9cf2e234-bfc4-11e7-9e60-d773b62eae07.png). + +### Can I have a "publish by" date for my content? + +Not yet. Potential ETA Q2 2018. + +### Can I publish multi-container apps? + +Yes. Publishers can provide multiple images and add a compose file in the +install instructions to describe how the multi-container app can be used. For +now, we recommend asking publishers to look at this example from Microsoft +https://store.docker.com/images/mssql-server-linux where they have Supported +Tags listed in the Install instructions (you don't necessarily need to list it +in the readme). + +### Can a team work on the same product and publish? + +Yes. 
You can submit your content as a team. + +### Are organization details propagated to new products? + +Organization details need to be filled in only once. Updating organization info +once updates this for all images published by your organization. + +### How do fields required when publishing map to data on the product page? + +| Publishing field | Product data | +|:--------------------------|:-----------------------------| +| Product description | [Description](https://store.docker.com/images/openmaptiles-openstreetmap-maps?tab=description) | +| Support link | Resources > License | +| Documentation link | Resources > Documentation | +| Tier Description | Entitlement text (top right) | +| Installation instructions | [Setup Instructions](https://store.docker.com/images/openmaptiles-openstreetmap-maps/plans/f1fc533a-76f0-493a-80a1-4e0a2b38a563?tab=instructions) (below tier description) | + +[![product page](images/faq-product-page.png)](https://store.docker.com/images/openmaptiles-openstreetmap-maps?tab=description) + +* [Tier Description]() is what you see once users get entitled to a plan. For instance, this publisher entered: `A detailed street map of any place on a planet. Evaluation and non-production use. Production use license available separately`. + +* [Installation instructions](https://store.docker.com/images/openmaptiles-openstreetmap-maps/plans/f1fc533a-76f0-493a-80a1-4e0a2b38a563?tab=instructions) is documentation on installing your software. In this case the documentation is `Just launch the container and the map is going to be available on port 80 - ready-to-use - with instructions and list of available styles.` We recommend more details for any content that's a certification candidate. + +### How is a certified image listed on Docker Hub? + +These images are differentiated from other images on Hub through a +certification badge. A user can search specifically for CI’s by limiting their +search parameters to show only certified content. 
+ +![certified content example](images/faq-certified-content.png) + +### What is the process for pushing new updated builds? + +Edit the same product and update with the newly tagged repos. + +### How do we ensure customers download the latest version? + +You cannot submit "latest" tags through the certification publish workflow. + +The reason we do this is so that users are aware of the exact version they +download. To make the user experience easy we have a copy widget that users can +use to copy the pull command and paste in their command line. + +Here is a +[screenshot](https://user-images.githubusercontent.com/2453622/32354702-1bec633a-bfe8-11e7-9f80-a02c26b1b10c.png) +to provide additional clarity. + +### How can I remove a submission? + +To remove your submission, contact us at publisher-support@docker.com. + +## Customer support + +### What kind of access do customers have? + +Customers can `docker pull` any tag associated with the source repo specified. + +We recommend that you create a distinct repo per plan and only use tags for +different versions of that specific plan. For example, if you have a community, +pro, and enterprise plan of a single product, you should create three separate +repos, `namespace/community, namespace/pro, and namespace/enterprise`. Customers +who are entitled to your enterprise plan, will be able to pull +`store/namespace/enterprise:anytag`. + +### Can customers view all tags for a specific repository? + +We don't support the ability to view available tags for published products +because published products usually require entitlement. + +Official images and community images have available tags visible because anyone +can access any tag at any time anonymously. + +We aim to have product listings published with the concept of versions, allowing +publishers to manage which versions of their products they expose to customers +for access. (Expected Q3 2018) + +### Can publishers respond to product reviews? + +Yes! 
diff --git a/docker-store/images/certified_container.png b/docker-hub/publish/images/certified_container.png similarity index 100% rename from docker-store/images/certified_container.png rename to docker-hub/publish/images/certified_container.png diff --git a/docker-store/images/certified_infrastructure.png b/docker-hub/publish/images/certified_infrastructure.png similarity index 100% rename from docker-store/images/certified_infrastructure.png rename to docker-hub/publish/images/certified_infrastructure.png diff --git a/docker-store/images/certified_plugins.png b/docker-hub/publish/images/certified_plugins.png similarity index 100% rename from docker-store/images/certified_plugins.png rename to docker-hub/publish/images/certified_plugins.png diff --git a/docker-store/images/FAQ-types-of-certified-content.png b/docker-hub/publish/images/faq-certified-content-types.png similarity index 100% rename from docker-store/images/FAQ-types-of-certified-content.png rename to docker-hub/publish/images/faq-certified-content-types.png diff --git a/docker-store/images/FAQ-certified-content.png b/docker-hub/publish/images/faq-certified-content.png similarity index 100% rename from docker-store/images/FAQ-certified-content.png rename to docker-hub/publish/images/faq-certified-content.png diff --git a/docker-hub/publish/images/faq-product-page.png b/docker-hub/publish/images/faq-product-page.png new file mode 100644 index 0000000000..3276ac22c0 Binary files /dev/null and b/docker-hub/publish/images/faq-product-page.png differ diff --git a/docker-store/images/gforghetti-apache-latest_inspection_report.html-1.png b/docker-hub/publish/images/gforghetti-apache-latest_inspection_report.html-1.png similarity index 100% rename from docker-store/images/gforghetti-apache-latest_inspection_report.html-1.png rename to docker-hub/publish/images/gforghetti-apache-latest_inspection_report.html-1.png diff --git a/docker-store/images/gforghetti-apache-latest_inspection_report.html-2.png 
b/docker-hub/publish/images/gforghetti-apache-latest_inspection_report.html-2.png similarity index 100% rename from docker-store/images/gforghetti-apache-latest_inspection_report.html-2.png rename to docker-hub/publish/images/gforghetti-apache-latest_inspection_report.html-2.png diff --git a/docker-store/images/gforghetti-apache-latest_inspection_report.html-3.png b/docker-hub/publish/images/gforghetti-apache-latest_inspection_report.html-3.png similarity index 100% rename from docker-store/images/gforghetti-apache-latest_inspection_report.html-3.png rename to docker-hub/publish/images/gforghetti-apache-latest_inspection_report.html-3.png diff --git a/docker-store/images/gforghetti-log-driver-latest_inspection_report.html.png b/docker-hub/publish/images/gforghetti-log-driver-latest_inspection_report.html.png similarity index 100% rename from docker-store/images/gforghetti-log-driver-latest_inspection_report.html.png rename to docker-hub/publish/images/gforghetti-log-driver-latest_inspection_report.html.png diff --git a/docker-hub/publish/images/icon-green-check.svg b/docker-hub/publish/images/icon-green-check.svg new file mode 100644 index 0000000000..b33d1fc529 --- /dev/null +++ b/docker-hub/publish/images/icon-green-check.svg @@ -0,0 +1,14 @@ + + + + Round Check + Created with Sketch. 
+ + + + + + + + + \ No newline at end of file diff --git a/docker-hub/publish/images/icon-red-x.jpg b/docker-hub/publish/images/icon-red-x.jpg new file mode 100644 index 0000000000..d9f0490ea6 Binary files /dev/null and b/docker-hub/publish/images/icon-red-x.jpg differ diff --git a/docker-hub/publish/images/publish-diagram.png b/docker-hub/publish/images/publish-diagram.png new file mode 100644 index 0000000000..ba9abd895d Binary files /dev/null and b/docker-hub/publish/images/publish-diagram.png differ diff --git a/docker-store/images/scan-full-details.png b/docker-hub/publish/images/scan-full-details.png similarity index 100% rename from docker-store/images/scan-full-details.png rename to docker-hub/publish/images/scan-full-details.png diff --git a/docker-store/images/scan-single.png b/docker-hub/publish/images/scan-single.png similarity index 100% rename from docker-store/images/scan-single.png rename to docker-hub/publish/images/scan-single.png diff --git a/docker-store/images/scan-tags.png b/docker-hub/publish/images/scan-tags.png similarity index 100% rename from docker-store/images/scan-tags.png rename to docker-hub/publish/images/scan-tags.png diff --git a/docker-store/images/scan-view.png b/docker-hub/publish/images/scan-view.png similarity index 100% rename from docker-store/images/scan-view.png rename to docker-hub/publish/images/scan-view.png diff --git a/docker-hub/publish/images/subscribed.png b/docker-hub/publish/images/subscribed.png new file mode 100644 index 0000000000..4cc2556de0 Binary files /dev/null and b/docker-hub/publish/images/subscribed.png differ diff --git a/docker-hub/publish/images/trustchain.png b/docker-hub/publish/images/trustchain.png new file mode 100644 index 0000000000..85c41666a1 Binary files /dev/null and b/docker-hub/publish/images/trustchain.png differ diff --git a/docker-hub/publish/index.md b/docker-hub/publish/index.md new file mode 100644 index 0000000000..0d902ea343 --- /dev/null +++ b/docker-hub/publish/index.md @@ 
-0,0 +1,348 @@ +--- +title: Publish content on Docker Hub +description: Submit a product for the Docker Hub +keywords: Docker Hub, publish, content, images +redirect_from: +- /docker-store/ +- /docker-store/publish/ +--- + +## Permitted content and support options + +{% assign green-check = '![yes](images/icon-green-check.svg){: style="height: 14px; margin: 0 auto"}' %} +{% assign red-x = '![no](images/icon-red-x.jpg){: style="height: 14px; margin: 0 auto"}' %} + +Docker Hub allows you to publish content that runs on: + +* **Docker Enterprise Edition (Docker certified infrastructure)**. This + content may also qualify to become a Docker Certified Container or Plugin + image and be backed by collaborative Docker/Publisher support + +* **Docker Community Edition**. Content on Docker CE is _not supported_ by + Docker nor is it eligible for certification. + +| If your content: | You can publish on Hub | Docker certifies & supports | Publisher supports | +|:---------------------------------------------------|:------------------------|:----------------------------|:--------------------------| +| Works on Docker Enterprise Edition | {{ green-check }} | {{ green-check }} | {{ green-check }} | +| Works on Docker Community Edition | {{ green-check }} | {{ red-x }} |
Optional
| +| Does _not_ work on Docker certified infrastructure | {{ red-x }} |
n/a
|
n/a
| + +## Onboarding + +The publishing process begins when you **sign into Docker Hub your Docker ID**. +Then **specify a product name (image source) from a private repository**. Your +product images must be stored in private repositories of Docker Hub as they +serve as an internal staging area from which you can revise and submit content +for review. + +After specifying a source, **provide the content-manifest items** to populate your +product details page. These items include logos, descriptions, and licensing and +support links so that customers can make informed decisions about your image. +These items are submitted alongside the image itself for moderation. + +**The Docker Hub team conducts a comprehensive review of your image and metadata.** +We evaluate the security of your product images with Docker Security Scanning, +and share results with you as the publisher. During the image-moderation phase, +we iterate back and forth with publishers to address outstanding vulnerabilities +and content-manifest issues until the image is ready for publication. + +Commercial content and other supported images may qualify for the **Docker Certified quality mark** +(for containers or plugins). The testing for this program goes beyond the +vulnerability scan and also evaluates container images for Docker best practices +developed over years of experience. Collaborative support capability between +Docker and the publisher is also established. + +Refer to the diagram below for a high-level summary: + +![publishing workflow](images/publish-diagram.png) + +## Create great content + +Create your content, and follow our [Dockerfile best practices](/develop/develop-images/dockerfile_best-practices/) +to Dockerize it. Keep your images small, your layers few, and your components +secure. 
Refer to the links and guidelines listed below to build and deliver +great content: + +* [Best practices for writing Dockerfiles](/develop/develop-images/dockerfile_best-practices/){: target="_blank" class="_"} + +* [Official repositories on Docker Hub](/docker-hub/official_repos/){: target="_blank" class="_"} + +* [Docker bench for security](https://github.com/docker/docker-bench-security){: target="_blank" class="_"} + +Here are some best practices when it comes to building vulnerability-free Docker images: + +### Choose a secure base image (See your Dockerfile `FROM:` directive) + +Many base images have a strong record of being secure, including: + +* [Alpine](https://hub.docker.com/_/alpine/){: target="_blank" class="_"} Linux: + Alpine is a minimal linux distribution with an excellent security record. + +* Alpine-based application images such as `python:alpine`, `ruby:alpine`, + and `golang:alpine`. They are secure and minimal, while providing the + convenience of their non-Alpine alternatives. + +* [Debian](https://hub.docker.com/r/library/debian/tags/jessie/){: target="_blank" class="_"} + Linux: both small and tightly-controlled, Debian-linux is a good alternative + if you're currently using Ubuntu. + +Docker strongly recommends **Alpine Linux**. The founder of this Linux +distribution is leading an initiative at Docker to provide safe, compact base +images for all container applications. + +### Remove unused components + +Often, vulnerabilities exist in components that aren't actually used in the +containerized application. To avoid this, you can: + +* Follow best practices when using the `apt-get` command. + +* Run `apt-get remove` to destroy any components required to build but not + actually run your application. Usually, this involves creating multi-line + Dockerfile directives, as seen below. 
The following example shows how to remove + `curl` and `python-pip` after they are used to install the Python `requests` + package, all in a single Dockerfile directive: + + ```shell + RUN apt-get update && \ + apt-get install -y --no-install-recommends curl python-pip && \ + pip install requests && \ + apt-get remove -y python-pip curl && \ + rm -rf /var/lib/apt/lists/ + ``` + +> Files introduced in one directive of your Dockerfile can only be removed in +> the same directive (and not in subsequent directives in your Dockerfile). + +### Keep required components up-to-date + +Your images are composed of open-source libraries and packages that amass +vulnerabilities over time and are consequently patched. To ensure the integrity +of your product, keep your images up-to-date: + +* Periodically update your base image's version, especially if you’re using a + version deemed to be vulnerable. + +* Re-build your image periodically. Directives including commands such as + `apt-get install ...` pull the latest versions of dependencies, which may + include security fixes. + +## Create and maintain your publisher profile in the Hub + +Let the Docker community know who you are. Add your details, your company +story, and what you do. At the very minimum, we require: + +* Legal entity name +* Company website +* Phone number +* Valid company email +* Company icon/logo (square; at least 512 x 512px) + +## Prepare your image-manifest materials + +You must provide the namespace (including repository and tags) of a private +repository on Docker Hub that contains the source for your product. This +repository path is not shown to users, but the repositories you choose determine +the product tiers available for customers to download. 
+ +The following content information helps us make your product look great and +discoverable: + +* Product Name +* Product icon/logo +* Short description: a one-to-two-sentence summary; up to 140 characters +* Category: Database, Networking, Business Software, etc. and any search tags +* Long description: includes product details/pitch +* Screenshot(s) +* Support link +* Product tier name +* Product tier description +* Product tier price +* Installation instructions +* Link to license agreements + +### How the manifest information is displayed in the UI + +This is an approximate representation. We frequently make enhancements to the +look and some elements might shift around. + +![Manifest information displayed on Hub UI](images/subscribed.png) + +## Support your users + +Users who download your content from Docker Hub might need your help, so be +prepared for questions! The information you provide with your submission saves +support time in the future. + +### Support information + +If you provide support with your content, include that information. Is there a +support website? What email address can users contact for help? Are there +self-help or troubleshooting resources available? + +### Support SLA + +Include a Service Level Agreement (SLA) for each image you offer on the Hub. An +SLA is your commitment to your users about the nature and level of support you +provide to them. Make sure your SLA includes support hours and response-time +expectations, where applicable. + +## Security and audit policies + +Docker Hub [scans](#docker-security-scanning) your official images for +vulnerabilities with the Docker Security Scanning tool, and +[audits](#usage-audit-and-reporting) consumer activity of your images to provide +you intelligence about the use of your product. + +### Docker Security Scanning + +Docker Security Scanning automatically and continuously assesses the integrity +of your products. 
The Docker Security Scanning tool deconstructs an image, +conducts a binary scan of the bits to identify the open-source components +present in each image layer, and associates those components with known +vulnerabilities and exposures. + +Docker then shares the scan results with you as the publisher, so that you can +modify the content of your images as necessary. Your scan results are private, +and are never shared with end customers or other publishers. + +#### Interpret results + +To interpret the results of a scanned image: + +1. Log on to [Docker Hub](https://hub.docker.com){: target="_blank" class="_"}. + +2. Navigate to the repository details page (for example, + [Nginx](https://hub.docker.com/_/nginx/){: target="_blank" class="_"}). + +3. Click **View Available Tags** under the pull command in the upper right of + the UI. + + Displayed is a list of each tag scan with its age. A solid green bar + indicates a clean scan without known vulnerabilities. Yellow, orange, and + red indicate minor, major, and critical vulnerabilities respectively. + + ![Scanned tags](images/scan-tags.png) + + > Vulnerability scores + > + > Vulnerability scores are defined by the entity that issues the + > vulnerability, such as [NVD](https://nvd.nist.gov/){: target="_blank" class="_"}, + > and are based on a + > [Qualitative Severity Rating Scale](https://www.first.org/cvss/specification-document#5-Qualitative-Severity-Rating-Scale){: target="_blank" class="_"} + > defined as part of the + > [Common Vulnerability Scoring System (CVSS) specification](https://www.first.org/cvss/specification-document){: target="_blank" class="_"}. + +4. Click a scan summary to see a list of results for each layer of the image. + + Each layer may have one or more scannable components represented by colored + squares in a grid. 
+ + ![Scanned results](images/scan-view.png) + + > Base layers + > + > Base layers contain components that are included in the parent image, + > but that you did not build and may not be able to edit. If a base layer + > has a vulnerability, switch to a version of the parent image that does not + > have any vulnerabilities, or to a similar but more secure image. + +5. Hover over a square in the grid, then click to see the vulnerability report + for that specific component. + + Only components that add software are scanned. If a layer has + no scannable components, it shows a `No components in this layer` message. + + ![Scanned component preview](images/scan-single.png) + +6. Click the arrow icon (twice) to expand the list and show all vulnerable + components and their CVE report codes. + + ![Scanned components](images/scan-full-details.png) + +7. Click one of the CVE codes to view the original vulnerability report. + +#### Classification of issues + +* All Scan results include the CVE numbers and a CVSS (Common Vulnerability + Scoring System) Score. + +* CVE Identifiers (also referred to by the community as "CVE names," "CVE + numbers," "CVE entries," "CVE-IDs," and "CVEs") are unique identifiers for + publicly-known, cyber-security vulnerabilities. + +* The Common Vulnerability Scoring System (CVSS) provides an open + framework for communicating the characteristics and impacts of + IT vulnerabilities. Its quantitative model ensures repeatable, + accurate measurement while enabling users to see the underlying + vulnerability characteristics that were used to generate the scores. + As a result, CVSS is well-suited as a standard measurement system + for industries, organizations, and governments that need accurate + and consistent vulnerability-impact scores. CVSS is commonly used + to prioritize vulnerability-remediation activities, and calculate + the severity of vulnerabilities discovered on systems. 
The + National Vulnerability Database (NVD) provides CVSS scores for + almost all known vulnerabilities. + +* Docker classifies the severity of issues per CVSS range, Docker classification, + and service level agreement (SLA) as follows. + +| CVSS range | Docker classification | SLA for fixing issues | +|:------------|:----------------------|:----------------------------------------------| +| 7.0 to 10.0 | Critical | Within 72 hours of notification | +| 4.0 to 6.9 | Major | Within 7 days of notification | +| 0.1 to 3.9 | Minor | No SLA. Best-effort to fix or address in docs | + +* In addition to CVSS, the Docker Security team can identify or classify + vulnerabilities that need to be fixed, and categorize them in the + minor-to-critical range. + +* The publisher is presented with initial scan results, including all components + with their CVEs and their CVSS scores. + +* If you use Docker’s Scanning Service, you can subscribe to a notification + service for new vulnerabilities. + +* Failure to meet above SLAs may cause the listing to be put on “hold”. + +* A warning label shows up on the marketplace listing. An email is sent to the + users who have downloaded and subscribed for notifications. + +* A Repo’s listing can stay in the "hold" state for a maximum of 1 month, after + which the listing is revoked. + +### Usage audit and reporting + +Unless otherwise negotiated, an audit of activity on publisher content is +retained for no less than 180 days. + +A monthly report of said activity is provided to the publisher with the +following data: (1) report of content download by free and paid customers by +date and time; (2) report of purchase, cancellations, refunds, tax payments, +where applicable, and subscription length for paid customers of the content; and +(3) the consolidated amount to be received by the publisher. 
+ +### Certification + +There are three types of certification that appear in Docker Hub -- Container, +Plugins, Infrastructure: + +![certified container badge](images/certified_container.png) + +**Docker container certification** ensures that a Docker container image has +been tested, complies with best practices guidelines, runs on a Docker certified +infrastructure, has proven provenance, has been scanned for vulnerabilities, and +is supported by Docker and the content publisher. + +![certified plugins badge](images/certified_plugins.png) + +**Docker plugin certification** is designed for volume, network, and other +plugins that access system level Docker APIs. Docker certified plugins provide +the same level of assurance as a Docker certified container, but go further by +having passed an additional suite of API compliance testing. + +![certified infrastructure badge](images/certified_infrastructure.png) + +**Docker infrastructure certification** indicates that the release of the Docker +Enterprise Edition and the underlying platform have been tested together and are +supported in combination by both Docker and the partner. diff --git a/docker-store/trustchain.md b/docker-hub/publish/trustchain.md similarity index 59% rename from docker-store/trustchain.md rename to docker-hub/publish/trustchain.md index d48cc7c7d4..1ac1545602 100644 --- a/docker-store/trustchain.md +++ b/docker-hub/publish/trustchain.md @@ -1,26 +1,28 @@ --- -title: Docker Store trust chain -keywords: trust, chain, store, security +title: Docker Hub trust chain +keywords: Docker Hub, trust, chain, security +redirect_from: +- /docker-store/trustchain/ --- ## For consumers Docker ensures that all content is securely received and verified from original producers, and additionally audits images before adding them to the Docker -Store. 
Docker cryptographically signs the images upon completion of a satisfactory image check, so that you can verify and trust certified content -from the Docker Store. +from the Docker Hub. Here’s the full trust chain in detail, with details on how to cryptographically -verify completion of the process when pulling an image from Docker Store: +verify completion of the process when pulling an image from Docker Hub: 1. Producers sign and push their images using Docker Content Trust to a private staging area. 2. Docker pulls the image, verifies the signatures to guarantee authenticity, integrity, and freshness of the image. -3. The Docker Store certification team performs a thorough review of the image, looking for vulnerabilities and verifying best practices for image hygiene, such as ensuring minimal image sizes and working health-checks. +3. The Docker Hub certification team performs a thorough review of the image, looking for vulnerabilities and verifying best practices for image hygiene, such as ensuring minimal image sizes and working health-checks. -4. Upon a successful review, Docker signs the image and makes it officially available on Docker Store. As a consumer, you can confirm that Docker signed the image by pulling and running with Docker Content Trust: +4. Upon a successful review, Docker signs the image and makes it officially available on Docker Hub. As a consumer, you can confirm that Docker signed the image by pulling and running with Docker Content Trust: ```shell DOCKER_CONTENT_TRUST=1 docker pull @@ -30,11 +32,11 @@ verify completion of the process when pulling an image from Docker Store: ## For publishers -The Docker Store has a thorough and well-defined certification process to ensure +Docker Hub has a thorough and well-defined certification process to ensure top-quality content from producers is delivered to consumers in a trusted -manner. 
As a producer of content, you are required to sign your images so -that Docker can verify that your content is not tampered with upon starting the -image certification and publishing process as outlined below: +manner. As a producer of content, you are required to sign your images so that +Docker can verify that your content is not tampered with upon starting the image +certification and publishing process as outlined below: 1. Producers sign and push their images using Docker Content Trust to a private staging area. To do this, run a `docker push` command with Content Trust enabled: @@ -44,11 +46,11 @@ image certification and publishing process as outlined below: 2. Docker verifies the signatures to guarantee authenticity, integrity, and freshness of the image. All of the individual layers of your image, and the combination thereof, are encompassed as part of this verification check. [Read more detail about Content Trust in Docker's documentation](/engine/security/trust/content_trust/#understand-trust-in-docker). -3. Upon a successful signature verification, Docker pulls the original image to a private, internal staging area only accessible to the Docker Store certification team. +3. Upon a successful signature verification, Docker pulls the original image to a private, internal staging area only accessible to the Docker Hub certification team. -4. The Docker Store certification team performs a thorough review of the image, looking for vulnerabilities and verifying best practices for image hygiene, such as ensuring minimal image sizes and working health-checks. +4. The Docker Hub certification team performs a thorough review of the image, looking for vulnerabilities and verifying best practices for image hygiene, such as ensuring minimal image sizes and working health-checks. -5. Upon a successful review, Docker signs the image and makes it officially available on Docker Store. 
Similar to artifacts on the Apple Store, this is the final and only signature on the image. Your consumers confirm that the full certification process was completed by checking Docker’s signature by pulling and running with Docker Content Trust: +5. Upon a successful review, Docker signs the image and makes it officially available on Docker Hub. Similar to artifacts on the Apple Store, this is the final and only signature on the image. Your consumers confirm that the full certification process was completed by checking Docker’s signature by pulling and running with Docker Content Trust: ```shell DOCKER_CONTENT_TRUST=1 docker pull @@ -56,7 +58,7 @@ image certification and publishing process as outlined below: DOCKER_CONTENT_TRUST=1 docker run ``` -![Store Trust Chain signing process](images/image_0.png) +![Trust Chain signing process](images/image_0.png) To learn more the trust chain and certification for publishing content, see [Security and Audit Policies](publish.md#security-and-audit-policies) in the diff --git a/docker-hub/repos.md b/docker-hub/repos.md deleted file mode 100644 index 9ac512246b..0000000000 --- a/docker-hub/repos.md +++ /dev/null @@ -1,258 +0,0 @@ ---- -description: Your Repositories on Docker Hub -keywords: Docker, docker, trusted, registry, accounts, plans, Dockerfile, Docker Hub, webhooks, docs, documentation -title: Repositories on Docker Hub ---- - -Docker Hub repositories let you share images with co-workers, customers, or the -Docker community at large. If you're building your images internally, either on -your own Docker daemon, or using your own Continuous integration services, you -can push them to a Docker Hub repository that you add to your Docker Hub user or -organization account. - -Alternatively, if the source code for your Docker image is on GitHub or -Bitbucket, you can use an "Automated build" repository, which is built by the -Docker Hub services. 
See the [automated builds documentation](/docker-hub/builds.md) to read -about the extra functionality provided by those services. - -![repositories](/docker-hub/images/repos.png) - -## Searching for images - -You can search the [Docker Hub](https://hub.docker.com) registry via its search -interface or by using the command line interface. Searching can find images by -image name, user name, or description: - - $ docker search centos - NAME DESCRIPTION STARS OFFICIAL AUTOMATED - centos The official build of CentOS. 1034 [OK] - ansible/centos7-ansible Ansible on Centos7 43 [OK] - tutum/centos Centos image with SSH access. For the root... 13 [OK] - ... - -There you can see two example results: `centos` and `ansible/centos7-ansible`. -The second result shows that it comes from the public repository of a user, -named `ansible/`, while the first result, `centos`, doesn't explicitly list a -repository which means that it comes from the top-level namespace for [Official -Repositories](official_repos.md). The `/` character separates a user's -repository from the image name. - -Once you've found the image you want, you can download it with `docker pull `: - - $ docker pull centos - latest: Pulling from centos - 6941bfcbbfca: Pull complete - 41459f052977: Pull complete - fd44297e2ddb: Already exists - centos:latest: The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security. - Digest: sha256:d601d3b928eb2954653c59e65862aabb31edefa868bd5148a41fa45004c12288 - Status: Downloaded newer image for centos:latest - -You now have an image from which you can run containers. - -## Viewing repository tags - -Docker Hub's repository "Tags" view shows you the available tags and the size -of the associated image. - -Image sizes are the cumulative space taken up by the image and all its parent -images. 
This is also the disk space used by the contents of the Tar file created -when you `docker save` an image. - -![images/busybox-image-tags.png](/docker-hub/images/busybox-image-tags.png) - -## Creating a new repository on Docker Hub - -When you first create a Docker Hub user, you see a "Get started with -Docker Hub." screen, from which you can click directly into "Create Repository". -You can also use the "Create ▼" menu to "Create Repository". - -When creating a new repository, you can choose to put it in your Docker ID -namespace, or that of any [organization](/docker-hub/orgs.md) that you are in the "Owners" -team. The Repository Name needs to be unique in that namespace, can be two -to 255 characters, and can only contain lowercase letters, numbers or `-` and -`_`. - -The "Short Description" of 100 characters is used in the search results, -while the "Full Description" can be used as the Readme for the repository, and -can use Markdown to add simple formatting. - -After you hit the "Create" button, you then need to `docker push` images to that -Hub based repository. - - - -## Pushing a repository image to Docker Hub - -To push a repository to the Docker Hub, you need to -name your local image using your Docker Hub username, and the -repository name that you created in the previous step. -You can add multiple images to a repository, by adding a specific `:` to -it (for example `docs/base:testing`). If it's not specified, the tag defaults to -`latest`. -You can name your local images either when you build it, using -`docker build -t /[:]`, -by re-tagging an existing local image `docker tag /[:]`, -or by using `docker commit /[:]` to commit -changes. - -Now you can push this repository to the registry designated by its name or tag. - - $ docker push /: - -The image is then uploaded and available for use by your team-mates and/or -the community. - - -## Stars - -Your repositories can be starred and you can star repositories in return. 
Stars -are a way to show that you like a repository. They are also an easy way of -bookmarking your favorites. - -## Comments - -You can interact with other members of the Docker community and maintainers by -leaving comments on repositories. If you find any comments that are not -appropriate, you can flag them for review. - -## Collaborators and their role - -A collaborator is someone you want to give access to a private repository. Once -designated, they can `push` and `pull` to your repositories. They are not -allowed to perform any administrative tasks such as deleting the repository or -changing its status from private to public. - -> **Note**: -> A collaborator cannot add other collaborators. Only the owner of -> the repository has administrative access. - -You can also assign more granular collaborator rights ("Read", "Write", or -"Admin") on Docker Hub by using organizations and teams. For more information -see the [organizations documentation](/docker-hub/orgs.md). - -## Private repositories - -Private repositories allow you to have repositories that contain images that you -want to keep private, either to your own account or within an organization or -team. - -To work with a private repository on [Docker Hub](https://hub.docker.com), you -need to add one using the [Add Repository](https://hub.docker.com/add/repository/) button. You get one private -repository for free with your Docker Hub user account (not usable for -organizations you're a member of). If you need more accounts you can upgrade -your [Docker Hub](https://hub.docker.com/account/billing-plans/) plan. - -Once the private repository is created, you can `push` and `pull` images to and -from it using Docker. - -> **Note**: You need to be signed in and have access to work with a -> private repository. - -Private repositories are just like public ones. However, it isn't possible to -browse them or search their content on the public registry. 
They do not get -cached the same way as a public repository either. - -You can designate collaborators and manage their access to a private -repository from that repository's *Settings* page. You can also toggle the -repository's status between public and private, if you have an available -repository slot open. Otherwise, you can upgrade your -[Docker Hub](https://hub.docker.com/account/billing-plans/) plan. - -## Webhooks - -A webhook is an HTTP call-back triggered by a specific event. You can use a Hub -repository webhook to notify people, services, and other applications after a -new image is pushed to your repository (this also happens for Automated builds). -For example, you can trigger an automated test or deployment to happen as soon -as the image is available. - -To get started adding webhooks, go to the desired repository in the Hub, and -click "Webhooks" under the "Settings" box. A webhook is called only after a -successful `push` is made. The webhook calls are HTTP POST requests with a JSON -payload similar to the example shown below. - -*Example webhook JSON payload:* - -```json -{ - "callback_url": "https://registry.hub.docker.com/u/svendowideit/busybox/hook/2141bc0cdec4hebec411i4c1g40242eg110020/", - "push_data": { - "images": [ - "27d47432a69bca5f2700e4dff7de0388ed65f9d3fb1ec645e2bc24c223dc1cc3", - "51a9c7c1f8bb2fa19bcd09789a34e63f35abb80044bc10196e304f6634cc582c", - "..." 
- ], - "pushed_at": 1.417566822e+09, - "pusher": "svendowideit" - }, - "repository": { - "comment_count": 0, - "date_created": 1.417566665e+09, - "description": "", - "full_description": "webhook triggered from a 'docker push'", - "is_official": false, - "is_private": false, - "is_trusted": false, - "name": "busybox", - "namespace": "svendowideit", - "owner": "svendowideit", - "repo_name": "svendowideit/busybox", - "repo_url": "https://registry.hub.docker.com/u/svendowideit/busybox/", - "star_count": 0, - "status": "Active" - } -} -``` - - - ->**Note**: If you want to test your webhook, we recommend using a tool like ->[requestb.in](http://requestb.in/). Also note, the Docker Hub server can't be ->filtered by IP address. - -### Webhook chains - -Webhook chains allow you to chain calls to multiple services. For example, you -can use this to trigger a deployment of your container only after it has been -successfully tested, then update a separate Changelog once the deployment is -complete. After clicking the "Add webhook" button, simply add as many URLs as -necessary in your chain. - -The first webhook in a chain is called after a successful push. Subsequent -URLs are contacted after the callback has been validated. - -### Validating a callback - -To validate a callback in a webhook chain, you need to - -1. Retrieve the `callback_url` value in the request's JSON payload. -1. Send a POST request to this URL containing a valid JSON body. - -> **Note**: A chain request is only considered complete once the last -> callback has been validated. - -To help you debug or simply view the results of your webhook(s), view the -"History" of the webhook available on its settings page. - -#### Callback JSON data - -The following parameters are recognized in callback data: - -* `state` (required): Accepted values are `success`, `failure`, and `error`. - If the state isn't `success`, the webhook chain is interrupted. 
-* `description`: A string containing miscellaneous information that is - available on Docker Hub. Maximum 255 characters. -* `context`: A string containing the context of the operation. Can be retrieved - from the Docker Hub. Maximum 100 characters. -* `target_url`: The URL where the results of the operation can be found. Can be - retrieved on the Docker Hub. - -*Example callback payload:* - - { - "state": "success", - "description": "387 tests PASSED", - "context": "Continuous integration by Acme CI", - "target_url": "http://ci.acme.com/results/afd339c1c3d27" - } diff --git a/docker-id/images/login-cloud.png b/docker-id/images/login-cloud.png deleted file mode 100644 index 591609e882..0000000000 Binary files a/docker-id/images/login-cloud.png and /dev/null differ diff --git a/docker-id/index.md b/docker-id/index.md index a6ac858675..6358281a07 100644 --- a/docker-id/index.md +++ b/docker-id/index.md @@ -1,53 +1,41 @@ --- -description: Sign up for a Docker ID and log in -keywords: accounts, docker ID, billing, paid plans, support, Cloud, Hub, Store, Forums, knowledge base, beta access -title: Docker ID accounts +title: Sign up for a Docker ID +description: Register for a Docker ID for a Docker account +keywords: docker-id, account, hub, forums, success-center, support-center --- -Your free Docker ID grants you access to Docker services such as the Docker -Store, Docker Cloud, Docker Hub repositories, and some beta programs. Your -Docker ID becomes repository namespace used by hosted services such as Docker -Hub and Docker Cloud. All you need is an email address. - -This account also allows you to log in to services such as the Docker Support -Center, the Docker Forums, and the Docker Success portal. +Your free Docker ID grants you access to Docker Hub repositories and other +Docker services. Your Docker ID becomes repository namespace. All you need is +an email address. 
+Your Docker account also allows you to log in to services such as the Docker +Support Center, the Docker Forums, and the Docker Success portal. ## Register for a Docker ID -Your Docker ID becomes your user namespace for hosted Docker services, and becomes your username on the Docker Forums. +{% include register-for-docker-id.md %} -1. Go to the [Docker Cloud sign up page](https://cloud.docker.com). - -2. Enter a username that is also your Docker ID. - - Your Docker ID must be between 4 and 30 characters long, and can only contain numbers and lowercase letters. - -3. Enter a unique, valid email address. - -4. Enter a password between 6 and 128 characters long. - -3. Click **Sign up**. - - Docker sends a verification email to the address you provided. - -4. Click the link in the email to verify your address. - -> **Note**: You cannot log in with your Docker ID until you verify your email address. - - -## Log in +## Log in to Docker Once you register and verify your Docker ID email address, you can log in -to Docker services. +to Docker services through the web interface or the commandline. -For Docker Cloud, Hub, and Store, log in using the web interface. +At the commandline, use `docker login`. See the [CLI reference](/engine/reference/commandline/login.md). -![Login using the web interface](/docker-id/images/login-cloud.png) +``` +$ docker login +Login with your Docker ID to push and pull images from Docker Hub. If you don't have a Docker ID, head over to https://hub.docker.com to create one. +Username: +Password: +WARNING! Your password will be stored unencrypted in /home//.docker/config.json. +Configure a credential helper to remove this warning. See +https://docs.docker.com/engine/reference/commandline/login/#credentials-store -You can also log in using the `docker login` command. (You can read more about `docker login` [here](/engine/reference/commandline/login.md).) 
+Login Succeeded +``` -> **Warning**: +> Docker login creds not secure +> > When you use the `docker login` command, your credentials are stored in your home directory in `.docker/config.json`. The password is base64 encoded in this file. If you require secure storage for this password, use the diff --git a/docker-store/byol.md b/docker-store/byol.md deleted file mode 100644 index 0644b44177..0000000000 --- a/docker-store/byol.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -description: Submit a product to be listed on Docker Store -keywords: Docker, docker, store, purchase images -title: Bring Your Own License (BYOL) products on Store ---- - -## What is Bring Your Own License (BYOL)? - -Bring Your Own License (BYOL) allows customers with existing software licenses -to easily migrate to the containerized version of the software that you make -available on Docker Store. - -To see and access the BYOL product listing of an Independent Softare Vendor -(ISV), customers simply subscribe to the product with their Docker ID. We call -this **Ungated BYOL**. - -ISVs can use the Docker Store/Hub as an entitlement and distribution platform -for their various audiences, such as: - -- Existing customers that want their licensed software made available as Docker containers. -- New customers that are only interested in consuming their software as Docker containers. - -## Ungated BYOL - -### Prerequisites and setup - -To use Docker as your fulfillment service, an ISV must: -- [Apply and be approved as a Docker Store Vendor Partner](https://goto.docker.com/partners) -- Apply and be approved to list an Ungated BYOL product -- Create one or more Ungated BYOL product plans, in the Docker Store Publisher center. 
- -## Creating an ungated BYOL plan - -In Plans & Pricing section of the Publisher Center, ensure the following: -- Price/Month should be set to $0 -- There should be no free trial associated with the plan -- Under the Pull Requirements dropdown, "Subscribed users only" should be selected. - -## What's next? - -More information about the publishing flow can be found [here](publish.md). diff --git a/docker-store/images/image_0.png b/docker-store/images/image_0.png deleted file mode 100644 index e0f56ae8fa..0000000000 Binary files a/docker-store/images/image_0.png and /dev/null differ diff --git a/docker-store/images/publish-diagram.png b/docker-store/images/publish-diagram.png deleted file mode 100644 index c199fa86c8..0000000000 Binary files a/docker-store/images/publish-diagram.png and /dev/null differ diff --git a/docker-store/images/publish_byol.png b/docker-store/images/publish_byol.png deleted file mode 100644 index 89df7fade8..0000000000 Binary files a/docker-store/images/publish_byol.png and /dev/null differ diff --git a/docker-store/images/store-browse.png b/docker-store/images/store-browse.png deleted file mode 100644 index ed0d71bb56..0000000000 Binary files a/docker-store/images/store-browse.png and /dev/null differ diff --git a/docker-store/images/store-get.png b/docker-store/images/store-get.png deleted file mode 100644 index f70a921d67..0000000000 Binary files a/docker-store/images/store-get.png and /dev/null differ diff --git a/docker-store/images/store-product-id.png b/docker-store/images/store-product-id.png deleted file mode 100644 index 78228608c5..0000000000 Binary files a/docker-store/images/store-product-id.png and /dev/null differ diff --git a/docker-store/images/store-pullcmd.png b/docker-store/images/store-pullcmd.png deleted file mode 100644 index 0f65ce1d7f..0000000000 Binary files a/docker-store/images/store-pullcmd.png and /dev/null differ diff --git a/docker-store/images/store-search.png b/docker-store/images/store-search.png deleted 
file mode 100644 index fc17ffd4b2..0000000000 Binary files a/docker-store/images/store-search.png and /dev/null differ diff --git a/docker-store/images/subscribed.png b/docker-store/images/subscribed.png deleted file mode 100644 index 75345092c6..0000000000 Binary files a/docker-store/images/subscribed.png and /dev/null differ diff --git a/docker-store/index.md b/docker-store/index.md deleted file mode 100644 index c5ac99ee1c..0000000000 --- a/docker-store/index.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -description: Docker Store programs and policies for publishers -keywords: Docker, docker, store, publishers, publish images -title: Docker Store overview ---- - -For developers and operators, Docker Store is the best way to discover -high-quality Docker content. - -Independent Software Vendors (ISVs) can utilize Docker Store to distribute and -sell their Dockerized content. Publish your software through Docker Store to -experience the following benefits: - -* **Access to Docker’s large and growing customer-base.** Docker has experienced - rapid adoption, and is popular in dev-ops environments. Docker users have - pulled images over twelve billion times and they are increasingly turning to - Docker Store as the canonical source for high-quality, curated content. - -* **Customers can try or buy your software**, right from your product listing. - Your content is accessible for installation, trial, and purchase from the - Docker Store and the Docker CLI. - -* **Use of Docker licensing support.** Control who has access to your software: - a) logged-in users, b) users who have purchased a license, or c) all Docker - users. We’ll help you manage and control your distribution. - -* **We handle checkout.** You don’t need to set up your own digital - e-commerce site when you sell your content through the Docker Store. Choose - how much you want to charge for your products and we handle the billing. 
- -* **Seamless updates and upgrades for your customers.** We tell your customers - when your content has upgrades or updates available. - -* **Become Docker Certified.** Publisher container images and plugins that meet - the quality, security, and support criteria of the program displays a - “Docker Certified” badge within the Docker Store (which can be used in - external marketing). - -## How is Docker Store different from Docker Hub? What about Official Images? - -Docker Hub contains community content - these are images that have been created -by entities that are not vetted or curated. You can think of the community -content you consume as the "Wild West"--anyone can push new images to the -community and there are no guarantees around the quality or compatibility of -this content. - -Docker Store contains content that has been submitted for approval by qualified -Store Vendor Partners. These products are published and maintained directly by a -commercial entity. In addition, the Docker Certified logo distinguishes content -by providing quality, provenance, and support assurances. - -Official Images are an exception when it comes to community content; these -images are maintained as open source community projects. All official images -adhere to strict guidelines but are not necessarily backed by a commercial -entity. - -> **Note**: Docker Hub and Docker Store leverage the same backend, the Docker -> Public Registry. In general, all publicly available community content should -> be visible via both Docker Hub and Docker Store, and this includes official -> images. Any content that has explicitly been added by a commercial entity and -> has gone through a vetting process with respect to content submission and best -> practices, is only available on Docker Store. - -## Distribution models - -The Docker Store welcomes free and open-source content, as well as software sold -directly by publishers. 
We support the following commercial models: - -### Paid via Docker - -This commercial model allows customers to pay for ISV content via Docker, as -described in the Store Vendor Partner agreement. Paid-via-Docker content -includes both software that can be deployed on a host, as well as software that -runs in the cloud and can be accessed by the customer via an agent -(containerized cloud services, for example). - -### Licensed content via Docker Store BYOL program - -ISVs can use Docker Store as an entitlement and distribution platform. Using -APIs provided by Docker, ISVs can entitle users and distribute their Dockerized -content to many different audiences: -* Existing customers that want their licensed software made available as Docker containers. -* New customers that are only interested in consuming their software as Docker containers. -* Trial or beta customers, where the ISV can distribute feature or time limited software. - -Docker provides a fulfillment service so that ISVs can programmatically entitle -users, by creating subscriptions to their content in Docker Store. For more -information, see [Bring Your Own License (BYOL) products on Store](byol.md). - -### Plugins and agents - -ISVs have the ability to create and distribute [plugin images](https://store.docker.com/search?certification_status=certified&q=&type=plugin) for their customers to integrate with the ISV's proprietary hardware or cloud infrastructure and Docker Enterprise Edition deployments. - -You can [apply to be a publisher](https://goto.docker.com/partners) and learn more about our [Technology Partner Program](https://www.docker.com/partners/partner-program#/technology_partner). - -## What's next? - -* Learn about submitting products and content to Docker Store, see the [publishers guide](publish.md) and the [publisher FAQs](publisher_faq.md). -* Learn about using Docker Store to download content, see the [customer FAQs](customer_faq.md). 
diff --git a/docker-store/publish.md b/docker-store/publish.md deleted file mode 100644 index 6a33a3f1a5..0000000000 --- a/docker-store/publish.md +++ /dev/null @@ -1,435 +0,0 @@ ---- -description: Submit a product for the Docker Store -keywords: Docker, docker, store, purchase images -title: Publish content on Docker Store ---- - -## Permitted content and support options - -* Content that runs on a Docker Enterprise Edition (Docker Certified - Infrastructure) may be published in the Store. This content may also qualify - to become a Docker Certified Container or Plugin image and be backed by - collaborative Docker/Publisher support - -* Content that runs on the Docker Community Edition may be published in the - Store, but is not supported by Docker nor is it eligible for certification. - -* Content that requires a non Certified Infrastructure environment may not be - published in the Store. - - -| If your content: | Can publish on Store | Can be certified and supported by Docker | Supported by publisher | -|:-----|:--------|:------|:-----| -| Works on Docker Enterprise Edition | YES | YES | Required | -| Works on Docker Community Edition | YES | NO | Optional | -| Does not work on Docker Certified Infrastructure | NO | N/A | N/A | - - -## Onboarding - -The Docker Store publishing process begins from the landing page: sign in with -your Docker ID and specify a product name and image source from a private -repository. Your product images must be stored in private repositories of Docker -Cloud and/or Hub as they serve as an internal staging area from which you can -revise and submit content for review. - -After specifying a source, provide the content-manifest items to populate your -product details page. These items include logos, descriptions, and licensing and -support links so that customers can make informed decisions about your image. -These items are submitted alongside the image itself for moderation. 
- -The Docker Store team then conducts a comprehensive review of your image and -metadata. We use Docker Security Scanning to evaluate the security of your -product images, and share results with you as the publisher. During the -image-moderation phase, we iterate back and forth with publishers to address -outstanding vulnerabilities and content-manifest issues until the image is ready -for publication. - -Commercial content and other supported images may qualify for the Docker -Certified Container or Plugins quality mark. The testing for this program goes -beyond the vulnerability scan and also evaluates container images for Docker -best practices developed over years of experience. Collaborative support -capability between Docker and the publisher is also established. Refer to the -diagram below for a high-level summary: - -![publishing workflow](images/publish-diagram.png) - -## Create great content - -Create your content, and follow our best practices to Dockerize it. Keep your -images small, your layers few, and your components secure. Refer to the links -and guidelines listed below to build and deliver great content: - -* [Best practices for writing Dockerfiles](/engine/userguide/eng-image/dockerfile_best-practices/) - -* [Official repositories on Docker Hub](/docker-hub/official_repos/) - -* [Docker Bench for Security](https://github.com/docker/docker-bench-security){: target="_blank" -class="_"} - -Here are some best practices when it comes to building vulnerability-free Docker images: - -### Choose a secure base image (See your Dockerfile's `FROM:` directive) - -Many base images have a strong record of being secure, including: - -* [Debian](https://hub.docker.com/r/library/debian/tags/jessie/){: target="_blank" class="_"} - Linux: both small and tightly-controlled, Debian-linux is a good alternative - if you're currently using Ubuntu. 
- -* [Alpine](https://hub.docker.com/_/alpine/){: target="_blank" class="_"} Linux: - Alpine is a minimal linux distribution with an excellent security record. - -* Alpine-based application images: these include `python:alpine`, `ruby:alpine`, - and `golang:alpine`. They are secure and minimal, while providing the - convenience of their non-Alpine alternatives. - -Docker strongly recommends Alpine Linux. The founder of this Linux distribution -is leading an initiative at Docker to provide safe, compact base images for all -container applications. - -### Remove unused components - -Often, vulnerabilities exist in components that aren't actually used in the -containerized application. To avoid this, you can: - -* Follow best practices when using the `apt-get` command. - -* Run `apt-get-remove` to destroy any components required to build but not - actually run your application. Usually, this involves creating multi-line - Dockerfile directives, as seen below. The following example shows how to remove - `curl` and `python-pip` after they are used to install the Python `requests` - package, all in a single Dockerfile directive: - - ```shell - RUN apt-get update && \ - apt-get install -y --no-install-recommends curl python-pip && \ - pip install requests && \ - apt-get remove -y python-pip curl && \ - rm -rf /var/lib/apt/lists/ - ``` - -> Files introduced in one directive of your Dockerfile can only be removed in -> the same directive (and not in subsequent directives in your Dockerfile). - -### Keep required components up-to-date - -Your images are composed of open-source libraries and packages that amass -vulnerabilities over time and are consequently patched. To ensure the integrity -of your product, keep your images up-to-date: - -* Periodically update your base image's version, especially if you’re using a - version deemed to be vulnerable. - -* Re-build your image periodically. 
Directives including commands such as - `apt-get install ...` pull the latest versions of dependencies, which may - include security fixes. - -## Create and maintain your publisher profile in the Store - -Let the Docker community know who you are. Add your details, your company -story, and what you do. At the very minimum, we require: - -* Legal entity name -* Company website -* Phone number -* Valid company email -* Company icon/logo (square; at least 512x512px - - -## Prepare your image-manifest materials - -You must provide the namespace (including repository and tags) of a private -repository on Docker Cloud or Hub that contains the source for your product. -This repository path is not shown to users, but the repositories you choose -determine the Product Tiers available for customers to download. - -The following content information helps us make your product look great and -discoverable: - -1. Product Name -2. Product icon/logo -3. Short description: a one-to-two-sentence summary; up to 140 characters -4. Category: Database, Networking, Business Software, etc. and any search tags -5. Long description: includes product details/pitch -6. Screenshot(s) -7. Support link -8. Product tier name -9. Product tier description -10. Product tier price -11. Installation instructions -12. Link to license agreements - -### How the manifest information is displayed in the UI - -This is an approximate representation. We frequently make enhancements to the -look and some elements might shift around. - -![manifest information displayed on store UI](images/subscribed.png) - -## Support your users - -Docker users who download your content from the Store might need your help -later, so be prepared for questions! The information you provide with your -submission saves support time in the future. - -### Support information - -If you provide support along with your content, include that information. Is -there a support website? What email address can users contact for help? 
Are -there self-help or troubleshooting resources available? - -### Support SLA - -Include a Service Level Agreement (SLA) for each image you're offering for the -Store. An SLA is your commitment to your users about the nature and level of -support you provide to them. Make sure your SLA includes support hours and -response-time expectations, where applicable. - -## Security and audit policies - -Docker Store [scans](#docker-security-scanning) your official images for -vulnerabilities with the Docker Security Scanning tool, and -[audits](#usage-audit-and-reporting) consumer activity of your images to provide -you intelligence about the use of your product. - -### Docker Security Scanning - -Docker Security Scanning automatically and continuously assesses the intergity -of your products. The Docker Security Scanning tool deconstructs an image, -conducts a binary scan of the bits to identify the open-source components -present in each image layer, and associates those components with known -vulnerabilities and exposures. - -Docker then shares the scan results with you as the publisher, so that you can -modify the content of your images as necessary. Your scan results are private, -and are never shared with end customers or other publishers. - -#### Interpret results - -To interpret the results of a scanned image: - -1. Log on to [Docker Store](https://store.docker.com){: target="_blank" class="_"}. - -2. Navigate to the repository details page (for example, - [Nginx](https://store.docker.com/images/nginx){: target="_blank" class="_"}). - -3. Click **View Available Tags** under the pull command in the upper right of - the UI. - - Displalyed is a list of each tag scan with its age. A solid green bar - indicates a clean scan without known vulnerabilities. Yellow, orange, and - red indicate minor, major, and critical vulnerabilities respectively. 
- - ![Scanned tags](images/scan-tags.png) - - > Vulnerability scores - > - > Vulnerability scores are defined by the entity that issues the - > vulnerability, such as [NVD](https://nvd.nist.gov/){: target="_blank" class="_"}, - > and are based on a - > [Qualitative Severity Rating Scale](https://www.first.org/cvss/specification-document#5-Qualitative-Severity-Rating-Scale){: target="_blank" class="_"} - > defined as part of the - > [Common Vulnerability Scoring System (CVSS) specification](https://www.first.org/cvss/specification-document){: target="_blank" class="_"}. - -4. Click a scan summary to see a list of results for each layer of the image. - - Each layer may have one or more scannable components represented by colored - squares in a grid. - - ![Scanned results](images/scan-view.png) - - > Base layers - > - > Base layers contain components that are included in the parent image, - > but that you did not build and may not be able to edit. If a base layer - > has a vulnerability, switch to a version of the parent image that does not - > have any vulnerabilities, or to a similar but more secure image. - -5. Hover over a square in the grid, then click to see the vulnerability report - for that specific component. - - Only components that add software are scanned. If a layer has - no scannable components, it shows a `No components in this layer` message. - - ![Scanned component preview](images/scan-single.png) - -6. Click the arrow icon (twice) to expand the list and show all vulnerable - components and their CVE report codes. - - ![Scanned components](images/scan-full-details.png) - -7. Click one of the CVE codes to view the original vulnerability report. - -#### Classification of issues - -* All Scan results include the CVE numbers and a CVSS (Common Vulnerability - Scoring System) Score. 
- -* CVE Identifiers (also referred to by the community as "CVE names," "CVE - numbers," "CVE entries," "CVE-IDs," and "CVEs") are unique identifiers for - publicly-known, cyber-security vulnerabilities. - -* The Common Vulnerability Scoring System (CVSS) provides an open - framework for communicating the characteristics and impacts of - IT vulnerabilities. Its quantitative model ensures repeatable, - accurate measurement while enabling users to see the underlying - vulnerability characteristics that were used to generate the scores. - As a result, CVSS is well-suited as a standard measurement system - for industries, organizations, and governments that need accurate - and consistent vulnerability-impact scores. CVSS is commonly used - to prioritize vulnerability-remediation activities, and calculate - the severity of vulnerabilities discovered on systems. The - National Vulnerability Database (NVD) provides CVSS scores for - almost all known vulnerabilities. - -* Docker classifies the severity of issues per CVSS range, Docker classification, - and service level agreement (SLA) as follows. - -| CVSS range | Docker classification | SLA for fixing issues | -|:-----|:--------|:------| -| 7.0 to 10.0 | Critical | Within 72 hours of notification | -| 4.0 to 6.9 | Major | Within 7 days of notification | -| 0.1 to 3.9 | Minor | No SLA. Best-effort to fix or address in documentation. | - -* In addition to CVSS, the Docker Security team can identify or classify - vulnerabilities that need to be fixed, and categorize them in the - minor-to-critical range. - -* The publisher is presented with initial scan results, including all components - with their CVEs and their CVSS scores. - -* If you use Docker’s Scanning Service, you can subscribe to a notification - service for new vulnerabilities. - -* Failure to meet above SLAs may cause the listing to be put on “hold”. - -* A warning label shows up on the marketplace listing. 
An email is sent to the -users who have downloaded and subscribed for notifications. - -* A Repo’s listing can stay in the "hold" state for a maximum of 1 month, after - which the listing is revoked. - -### Usage audit and reporting - -Unless otherwise negotiated, an audit of activity on publisher content is -retained for no less than 180 days. - -A monthly report of said activity is provided to the publisher with the -following data: (1) report of content download by free and paid customers by -date and time; (2) report of purchase, cancellations, refunds, tax payments, -where applicable, and subscription length for paid customers of the content; and -(3) the consolidated amount to be received by the publisher. - -### Certification - -There are three types of certification that appear in Docker Store. - -![certified container badge](images/certified_container.png) - -Certifies that a container image on Docker Store has been tested; complies with best -practices guidelines; runs on a Docker Certified Infrastructure; has proven -provenance; been scanned for vulnerabilities; and is supported by Docker and the -content publisher. - -![certified plugins badge](images/certified_plugins.png) - -This certification is designed for volume, network, and other plugins that -access system level Docker APIs. Docker Certified Plugins provide the same level -of assurance as a Docker Certified Container, but go further by having passed an -additional suite of API compliance testing. - -![certified plugins badge](images/certified_infrastructure.png) - -Indicates that the release of the Docker Edition and the underlying platform -have been tested together and are supported in combination by both Docker and -the partner. - -### Docker Certified Publisher FAQ - -#### What is the Docker Certified program? - -Docker Certified Container images and plugins are meant to differentiate high -quality content on Docker Store. 
Customers can consume Certified Containers with -confidence knowing that both Docker and the publisher stand behind the -solution. Further details can be found in the -[Docker Partner Program Guide](https://www.docker.com/partnerprogramguide){: target="_blank" class="_"}. - -#### What are the benefits of Docker Certified? - -Docker Store promotes Docker Certified Containers and Plugins running on Docker -Certified Infrastructure as trusted and high quality content. With over 8B image -pulls and access to Docker’s large customer base, a publisher can differentiate -their content by certifying their images and plugins. With a revenue share -agreement, Docker can be a channel for your content. The Docker Certified badge -can also be listed alongside external references to your product. - -#### How is the Docker Certified Container image listed on Docker Store? - -These images are differentiated from other images on store through a -certification badge. A user can search specifically for CI’s by limiting their -search parameters to show only certified content. - -![certified content example](images/FAQ-certified-content.png) - -#### Is certification optional or required to be listed on Store? - -Certification is recommended for most commercial and supported container images. -Free, community, and other commercial (non-certified) content may also be listed -on Docker Store. - -![certified content example](images/FAQ-types-of-certified-content.png) - -#### How is support handled? - -All Docker Certified Container images and plugins running on Docker Certified -Infrastructure come with SLA based support provided by the publisher and Docker. -Normally, a customer contacts the publisher for container and application level -issues. Likewise, a customer contacts Docker for Docker Edition support. 
In the -case where a customer calls Docker (or vice versa) about an issue on the -application, Docker advises the customer about the publisher support process and -performs a handover directly to the publisher if required. TSAnet is required -for exchange of support tickets between the publisher and Docker. - -#### How does a publisher apply to the Docker Certified program? - -Start by applying to be a [Docker Technology -Partner](https://goto.docker.com/partners){: target="_blank" class="_"} - -* Requires acceptance of partnership agreement for completion - -* Identify commercial content that can be listed on Store and includes a support - offering - -* Test your image against the Docker CS Engine 1.12+ or on a Docker Certified - Infrastructure version 17.03 and above (Plugins must run on 17.03 and above) - -* Submit your image for Certification through the publisher portal. Docker - scans the image and works with you to address vulnerabilities. Docker also - conducts a best practices review of the image. - -* Be a [TSAnet](https://www.tsanet.org/){: target="_blank" class="_"} member or - join the Docker Limited Group. - -* Upon completion of Certification criteria, and acceptance by - Docker, the Publisher’s product page is updated to reflect Certified status. - -#### Is there a fee to join the program? - -In the future, Docker may charge a small annual listing fee. This is waived for -the initial period. - -#### What is the difference between Official Images and Docker Certified? - -Many Official images transition to the Docker Certified program and are -maintained and updated by the original owner of the software. Docker -continues to maintain some of the base OS images and language frameworks. - -#### How is certification of plugins handled? - -Docker Certification program recognizes the need to apply special scrutiny and -testing to containers that access system level interfaces like storage volumes -and networking. 
Docker identifies these special containers as “Plugins” which -require additional testing by the publisher or Docker. These plugins employ the -V2 Plugin Architecture that was first made available in 1.12 (experimental) and -now available in Docker Enterprise Edition 17.03 diff --git a/docker-store/publisher_faq.md b/docker-store/publisher_faq.md deleted file mode 100644 index f302da0f62..0000000000 --- a/docker-store/publisher_faq.md +++ /dev/null @@ -1,204 +0,0 @@ ---- -description: Docker Store frequently asked questions -keywords: Docker, docker, store, purchase images -title: Docker Store Publisher FAQs ---- - -## Certification program - -### What is the certification program for images and plugins, and what are some benefits? - -The Docker Certification program for Infrastructure, Images, and Plugins is -designed for both technology partners and enterprise customers to recognize -high-quality Containers and Plugins, provide collaborative support, and ensure -compatibility with Docker EE. Docker Certification is aligned to the available -Docker EE infrastructure and gives enterprises a trusted way to run more -technology in containers with support from both Docker and the publisher. The -[Docker Technology Partner guide](https://www.docker.com/partners/partner-program#/technology_partner) -explains the Technology Partner program and the Docker Certification Program for -Infrastructure, Images, and Plugins in more detail. - -## Publisher signup and approval - -### How do I get started with the publisher signup and approval process? - -Start by applying to be a Docker Technology Partner at https://goto.docker.com/partner and click on "Publisher". - -* Requires acceptance of partnership agreement for completion -* Identify content that can be listed on Store and includes a support offering -* Test your image against Docker Certified Infrastructure version 17.03 and -above (Plugins must run on 17.03 and above). 
-* Submit your image for Certification through the publisher portal. Docker -scans the image and work with you to address vulnerabilities. Docker also -conducts a best practices review of the image. -* Be a TSAnet member or join the Docker Limited Group. -* Upon completion of Certification criteria, and acceptance by Docker, -Publisher’s product page is updated to reflect Certified status. - -### What is the Docker Store Publisher Program application timeline? - -1-2 weeks. - -### Can we have a group of people work on the same product and publish to Store? (This replicates our internal workflow where more than one person is working on Dockerizing our product.) - -Yes. You can submit your content as a team. - -## Product submission - -### What exactly is a customer given access to once they're entitled to a given product plan? - -The customer will be given the permissions to docker pull any tag associated with the source repo specified. We recommend that you create a distinct repo per plan and only use tags for different versions of that specific plan. For example, if you have a community, pro, and enterprise plan of a single product, you should create three separate repos, `namespace/community, namespace/pro, and namespace/enterprise`. Once a customer is entitled to your enterprise plan, they will be able to pull `store/namespace/enterprise:anytag`. - -### How long does it typically take to have an image approved? - -2 Weeks. - -### Once a product is published, what is the process for pushing a new build (1.2, 1.3)? Will we simply edit the same product, adding the newly tagged repos? - -Edit the same product and update with the newly tagged repos. - -### On the Information page, organization details are required. Do we need to fill those in again for every product we publish, or are they carried over? And if we change them for a later image publish, are they updated for all images published by our organization? 
- -Organization details need to be filled in only once. Updating organization info -once updates this for all images published by your organization. - -### On the page for another vendor’s product on Docker store, I see the following chunks of data: How do these fields map to the following that are required in the publish process? - -#### Fields I see - -* Description -* License -* Feedback -* Contributing Guidelines -* Documentation - -#### Fields in the publish process - -* Product description -* Support link -* Documentation link -* Screenshots -* Tier description -* Installation instructions - -*Description* maps to *Product description* in the publish process. -*License* maps to *Support Link* in the publish process. -*Documentation* maps to *Documentation Link* in the publish process. -*Feedback* is provided via customer reviews. https://store.docker.com/images/node?tab=reviews is an example. -*Tier Description* is what you see once users get entitled to a plan. For instance, in https://store.docker.com/images/openmaptiles-openstreetmap-maps/plans/f1fc533a-76f0-493a-80a1-4e0a2b38a563?tab=instructions `A detailed street map of any place on a planet. Evaluation and non-production use. Production use license available separately` is what this publisher entered in the Tier description -*Installation instructions* is documentation on installing your software. In this case the documentation is just `Just launch the container and the map is going to be available on port 80 - ready-to-use - with instructions and list of available styles.` (We recommend more details for any content thats a certification candidate). - -### How can I remove a submission? I don’t want to currently have this image published as it is missing several information. - -If you would like your submission removed, let us know by contacting us at -publisher-support@docker.com. - -### Can publishers publish multi-container apps? - -Yes. 
Publishers can provide multiple images and add a compose file in the -install instructions to describe how the multi-container app can be used. For -now, we recommend asking publishers to look at this example from Microsoft -https://store.docker.com/images/mssql-server-linux where they have Supported -Tags listed in the Install instructions (you don't necessarily need to list it -in the readme). - -### Regarding source repo tags: it says not to use “latest”. However, if we want users to be able to download the images without specifying a tag, then presumably an image tagged “latest” is required. So how do we go about that? - -You cannot submit "latest" tags via the certification/store publish workflow. -The reason we do this is so that users are aware of the exact version they -download. To make the user experience easy we have a copy widget that users can -use to copy the pull command and paste in their command line. Here is a -[screenshot](https://user-images.githubusercontent.com/2453622/32354702-1bec633a-bfe8-11e7-9f80-a02c26b1b10c.png) -to provide additional clarity. - -### I have two plans, can I use the same repository but different tags for the two plans? - -We expect publishers to use a different repository for each plan. If a user is entitled to a plan in your product, the user is entitled to all tags in the relevant repository. -For instance, if you have a `Developer` Plan, that is mapped to repositories store/`mynamespace`/`myrepo1`:`mytag1`, another plan (say `Production`) **should** map to a different repository. -**_Any user that is entitled to the `Developer` plan will be able to pull all tags in store/`mynamespace`/`myrepo1`_**. - -## Licensing, terms and conditions, and pricing - -### What options are presented to users to pull an image? - -We provide users the following options to access your software -* logged-in users. 
-* users who have accepted ToS -* all users (including users without Docker Identity) -Here is a [screenshot](https://user-images.githubusercontent.com/2453622/32067299-00cf1210-ba83-11e7-89f8-15deed6fef62.png) to describe how publishers can update the options provided to customers. - -### If something is published as a free tier, for subscribed users only, does a user need to explicitly click Accept on the license terms for which we provide the link before they can download the image? -Yes - -### Do you have a license enforcement system for docker images sold on store? How are they protected, once they have been downloaded? What happens if a customer stop paying for the image I am selling after, let's say, 2 months? - -We provide the following licensing option to customers: -* Bring your own License or BYOL. - -The expectation is that the publisher would take care of License Keys within the -container. The License Key itself can be presented to the customer via Docker -Store. We expect the Publisher to build short circuits into the container, so -the container stops running once the License Key expires. Once a customer -cancels, or if the customer subscription expires - the customer cannot -download updates from the Store. - -If a user cancels their subscription, they cannot download updates -from the Store. The container may continue running. If you have a licensing -scheme built into the container, the licensing scheme can be a forcing function -and stop the container. (_We do not build anything into the container, it is up to the publisher_). - -### How does a customer transition from a Trial to a Paid subscription? Question assumes these are two separate pulls from Store, or can they just drop in a license via Store? - -Publisher can provide two different tokens or let customers use the same token -and internally map the customer to a paid plan vs a free trial. - -### What are Docker Store pricing plans like? Can I have metered pricing? 
As a publisher you can charge a subscription fee every month in USD. The amount -is determined by you. We are working on other pricing options. If you have -feedback about pricing, send us an email at publisher-support@docker.com - -### As a publisher, I have not set up any payment account. How does money get to me if my commercial content gets purchased by customers? - -We (Docker) cut you a check post a revenue share. Your Docker Store Vendor -Agreement should cover specifics. - -### How does Docker handle Export control? Can individual countries be specified if differing from Docker's list of embargoed countries? - -We provide export control via blacklisting several countries, IPs and users -based on the national export compliance database. Any export control we do is -across all products, we do not selectively blacklist versions and products for -specific groups. Send us an email at publisher-support@docker.com if you have questions. - -## Analytics - -### Where can I view customer insights? - -Analytics reports are only available to Publishers with Certified or Commercial -Content. Go to https://store.docker.com/publisher/center and click on "Actions" -for the product you'd like to view analytics for. Here is a -[screenshot](https://user-images.githubusercontent.com/2453622/32352202-6e87ce6e-bfdd-11e7-8fb0-08fe5a3e8930.png). - -### How do metrics differentiate between Free and Paid subscribers? - -The Analytics reports contain information about the Subscriber and the -relevant product plan. You can identify subscribers for each plan -for each product. - -### Can I preview my submission before publishing? - -Yes. You can preview your submission including the image you've submitted, the look and feel of the detail page and any markdown descriptions you might have. - -Here are a few screenshots that illustrate the preview experience for markdown content. 
-Product Description preview [screenshot](https://user-images.githubusercontent.com/2453622/32344591-9cd6b456-bfc4-11e7-9505-1f7e8235f812.png). -Install instructions description preview [screenshot](https://user-images.githubusercontent.com/2453622/32344592-9cf2e234-bfc4-11e7-9e60-d773b62eae07.png). - -## Other FAQs - -### Can a publisher respond to a review of their product? - -Yes - -### Can I have a publish by date for my content? - -Not yet. Potential ETA Q2 2018.