Merge branch 'master' into patch-1
commit c2d3b1b9c4

@@ -14,3 +14,4 @@ install:
script:
- go test -v k8s.io/kubernetes.github.io/test
- $GOPATH/bin/md-check --root-dir=$HOME/gopath/src/k8s.io/kubernetes.github.io
- ./verify-entry-toc.sh
@@ -45,6 +45,7 @@ toc:
- docs/getting-started-guides/network-policy/walkthrough.md
- docs/getting-started-guides/network-policy/calico.md
- docs/getting-started-guides/network-policy/romana.md
- docs/getting-started-guides/network-policy/weave.md

- title: Batch Jobs
section:

@@ -67,7 +68,9 @@ toc:
- docs/user-guide/simple-nginx.md
- docs/user-guide/pods/single-container.md
- docs/user-guide/pods/multi-container.md
- docs/user-guide/pods/init-container.md
- docs/user-guide/configuring-containers.md
- docs/user-guide/pod-templates.md
- docs/user-guide/production-pods.md
- docs/user-guide/containers.md
- docs/user-guide/environment-guide/index.md

@@ -149,7 +152,11 @@ toc:
- docs/getting-started-guides/fedora/flannel_multi_node_cluster.md
- docs/getting-started-guides/centos/centos_manual_config.md
- docs/getting-started-guides/coreos/index.md
- /docs/getting-started-guides/ubuntu/
- title: Ubuntu
section:
- docs/getting-started-guides/ubuntu/automated.md
- docs/getting-started-guides/ubuntu/calico.md
- docs/getting-started-guides/ubuntu/manual.md
- docs/getting-started-guides/windows/index.md
- docs/admin/node-conformance.md
- docs/getting-started-guides/docker-multinode.md

@@ -163,11 +170,16 @@ toc:
- docs/admin/cluster-management.md
- docs/admin/kubeadm.md
- docs/admin/addons.md
- docs/admin/audit.md
- docs/admin/ha-master-gce.md
- docs/admin/namespaces/index.md
- docs/admin/namespaces/walkthrough.md
- docs/admin/limitrange/index.md
- docs/admin/disruptions.md
- docs/admin/resourcequota/index.md
- docs/admin/resourcequota/walkthrough.md
- docs/admin/rescheduler.md
- docs/admin/sysctls.md
- docs/admin/cluster-components.md
- docs/admin/etcd.md
- docs/admin/multi-cluster.md

@@ -189,5 +201,21 @@ toc:

- title: Administering Federation
section:
- /docs/admin/federation/kubfed/
- docs/admin/federation/index.md
- docs/admin/federation/kubefed.md
- title: Federated Kubernetes Objects
section:
- docs/user-guide/federation/index.md
- docs/user-guide/federation/configmap.md
- docs/user-guide/federation/daemonsets.md
- docs/user-guide/federation/deployment.md
- docs/user-guide/federation/events.md
- docs/user-guide/federation/federated-ingress.md
- docs/user-guide/federation/namespaces.md
- docs/user-guide/federation/replicasets.md
- docs/user-guide/federation/secrets.md
- docs/federation/api-reference/index.md
- title: Federation Components
section:
- docs/admin/federation-apiserver.md
- title : federation-controller-mananger
path: /docs/admin/federation-controller-manager

@@ -15,6 +15,7 @@ toc:
- docs/admin/service-accounts-admin.md
- docs/api-reference/v1/operations.html
- docs/api-reference/v1/definitions.html
- docs/api-reference/labels-annotations-taints.md
- kubernetes/third_party/swagger-ui/index.md

- title: Autoscaling API

@@ -183,20 +184,3 @@ toc:
- title: Security in Kubernetes
path: https://github.com/kubernetes/kubernetes/blob/release-1.3/docs/design/security.md

- title: Federation
section:
- docs/user-guide/federation/index.md
- docs/user-guide/federation/configmap.md
- docs/user-guide/federation/daemonsets.md
- docs/user-guide/federation/deployment.md
- docs/user-guide/federation/events.md
- docs/user-guide/federation/federated-ingress.md
- docs/user-guide/federation/namespaces.md
- docs/user-guide/federation/replicasets.md
- docs/user-guide/federation/secrets.md
- docs/federation/api-reference/README.md
- title: Federation Components
section:
- docs/admin/federation-apiserver.md
- title : federation-controller-mananger
path: /docs/admin/federation-controller-manager

@@ -10,8 +10,10 @@ toc:
- docs/contribute/write-new-topic.md
- docs/contribute/stage-documentation-changes.md
- docs/contribute/page-templates.md
- docs/contribute/review-issues.md
- docs/contribute/style-guide.md

- title: Troubleshooting
section:
- docs/user-guide/debugging-pods-and-replication-controllers.md

@@ -12,10 +12,12 @@ toc:
- docs/tasks/configure-pod-container/distribute-credentials-secure.md
- docs/tasks/configure-pod-container/pull-image-private-registry.md
- docs/tasks/configure-pod-container/configure-liveness-readiness-probes.md
- docs/tasks/configure-pod-container/communicate-containers-same-pod.md

- title: Accessing Applications in a Cluster
section:
- docs/tasks/access-application-cluster/port-forward-access-application-cluster.md
- docs/tasks/access-application-cluster/load-balance-access-application-cluster.md

- title: Debugging Applications in a Cluster
section:

@@ -28,7 +30,6 @@ toc:
- title: Administering a Cluster
section:
- docs/tasks/administer-cluster/assign-pods-nodes.md

- docs/tasks/administer-cluster/dns-horizontal-autoscaling.md
- docs/tasks/administer-cluster/safely-drain-node.md

@@ -43,4 +44,4 @@ toc:
- title: Troubleshooting
section:
- docs/tasks/troubleshoot/debug-init-containers.md
- /docs/tasks/administer-cluster/access-control-identity-management/
- docs/tasks/administer-cluster/access-control-identity-management/

@@ -14,8 +14,7 @@ toc:

- title: Third-Party Tools
section:
- docs/tools/kompose/index.md
- docs/tools/kompose/user-guide.md
- title: Helm
path: https://github.com/kubernetes/helm
- title: Kompose
path: https://github.com/kubernetes-incubator/kompose

@@ -41,3 +41,6 @@ toc:
- docs/tutorials/stateful-application/run-stateful-application.md
- docs/tutorials/stateful-application/run-replicated-stateful-application.md
- docs/tutorials/stateful-application/zookeeper.md
- title: Services
section:
- docs/tutorials/services/source-ip.md
@@ -2,14 +2,12 @@
assignees:
- soltysh
- sttts

title: Audit in Kubernetes
---

* TOC
{:toc}

## Audit in Kubernetes

Kubernetes Audit provides a security-relevant chronological set of records documenting
the sequence of activities that have affected the system by individual users, administrators
or other components of the system. It allows the cluster administrator to
@@ -1,7 +1,7 @@
---
assignees:
- madhusudancs

title: Setting up Cluster Federation with Kubefed
---

* TOC
@@ -1,14 +1,12 @@
---
assignees:
- jszczepkowski

title: Setting up High-Availability Kubernetes Masters
---

* TOC
{:toc}

## Introduction

Kubernetes version 1.5 adds alpha support for replicating Kubernetes masters in `kube-up` or `kube-down` scripts for Google Compute Engine.
This document describes how to use kube-up/down scripts to manage highly available (HA) masters and how HA masters are implemented for use with GCE.
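As a minimal sketch of the kind of invocation this flow relies on, assuming the environment-variable names used by the GCE kube-up scripts of this era (`MULTIZONE`, `KUBE_GCE_ZONE`, and `KUBE_REPLICATE_EXISTING_MASTER` are assumptions here, not guarantees):

```shell
# Start an HA-compatible cluster in one zone (variable names assumed).
MULTIZONE=true KUBE_GCE_ZONE=europe-west1-b ./cluster/kube-up.sh

# Add a master replica in another zone of the same region.
KUBE_GCE_ZONE=europe-west1-c KUBE_REPLICATE_EXISTING_MASTER=true ./cluster/kube-up.sh
```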
@@ -3,15 +3,13 @@ assignees:
- davidopp
- filipg
- piosz

title: Guaranteed Scheduling For Critical Add-On Pods
---

* TOC
{:toc}

# "Guaranteed" scheduling of critical add-on pods

## Critical add-ons
## Overview

In addition to Kubernetes core components like the api-server, scheduler, and controller-manager running on a master machine,
there are a number of add-ons which, for various reasons, must run on a regular cluster node (rather than the Kubernetes master).
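As a hedged sketch of the mechanism this page goes on to describe, such an add-on is marked as critical with an annotation on its pod; the pod name and image below are placeholders, and the annotation key is my assumption of the alpha-era marker rather than something confirmed by this diff:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: example-addon
  namespace: kube-system
  annotations:
    # Assumed alpha annotation marking the pod as a critical add-on.
    scheduler.alpha.kubernetes.io/critical-pod: ""
spec:
  containers:
  - name: example-addon
    # Placeholder image for illustration only.
    image: nginx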
@@ -1,7 +1,7 @@
---
assignees:
- sttts

title: Using Sysctls in a Kubernetes Cluster
---

* TOC

@@ -1,16 +0,0 @@
---
---
# API Reference

Use the following reference docs to understand the Kubernetes REST API for various API group versions:

* v1: [operations](/docs/api-reference/v1/operations.html), [model definitions](/docs/api-reference/v1/definitions.html)
* extensions/v1beta1: [operations](/docs/api-reference/extensions/v1beta1/operations.html), [model definitions](/docs/api-reference/extensions/v1beta1/definitions.html)
* batch/v1: [operations](/docs/api-reference/batch/v1/operations.html), [model definitions](/docs/api-reference/batch/v1/definitions.html)
* autoscaling/v1: [operations](/docs/api-reference/autoscaling/v1/operations.html), [model definitions](/docs/api-reference/autoscaling/v1/definitions.html)
* apps/v1beta1: [operations](/docs/api-reference/apps/v1beta1/operations.html), [model definitions](/docs/api-reference/apps/v1beta1/definitions.html)

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
@@ -1,6 +1,6 @@
---
title: Well-Known Labels, Annotations and Taints
---
# Well-Known Labels, Annotations and Taints

Kubernetes reserves all labels and annotations in the kubernetes.io namespace. This document describes
the well-known kubernetes.io labels and annotations.
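For illustration, one such well-known label is `kubernetes.io/hostname`, which the kubelet sets on every node; a minimal sketch of selecting a node by it (the node name is a placeholder):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: hostname-demo
spec:
  # kubernetes.io/hostname is a well-known node label set by the kubelet.
  nodeSelector:
    kubernetes.io/hostname: node-1
  containers:
  - name: nginx
    image: nginx
```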
@@ -0,0 +1,67 @@
---
title: Reviewing Documentation Issues
---

{% capture overview %}

This page explains how you should review and prioritize documentation issues filed against the [kubernetes/kubernetes.github.io](https://github.com/kubernetes/kubernetes.github.io){: target="_blank"} repository. The purpose is to provide a way to organize issues and make it easier to contribute to Kubernetes documentation. The following should be used as the standard way of prioritizing, labeling, and interacting with issues.
{% endcapture %}

{% capture body %}

### Categorizing issues
Issues should be sorted into different buckets of work using the following labels and definitions. If an issue doesn't have enough information to identify a problem that can be researched, reviewed, or worked on (i.e. the issue doesn't fit into any of the categories below), you should close the issue with a comment explaining why it is being closed.

#### Actionable
* Issues that can be worked on with current information (or may need a comment to explain what needs to be done to make them clearer)
* Gives contributors easy-to-find issues to work on

#### Tech Review Needed
* Issues that need more information in order to be worked on (the proposed solution needs to be proven, a subject matter expert needs to be involved, work needs to be done to understand the problem/resolution and whether the issue is still relevant)
* Promotes transparency about the level of work needed and indicates that the issue is in progress

#### Docs Review Needed
* Issues that are suggestions for better processes or site improvements that require community agreement to be implemented
* Topics can be brought to SIG meetings as agenda items

### Prioritizing Issues
The following labels and definitions should be used to prioritize issues. If you change the priority of an issue, please comment on the issue with your reasoning for the change.

#### P1
* Major content errors affecting more than 1 page
* Broken code sample on a heavily trafficked page
* Errors on a “getting started” page
* Well known or highly publicized customer pain points
* Automation issues

#### P2
* Default for all new issues
* Broken code for sample that is not heavily used
* Minor content issues in a heavily trafficked page
* Major content issues on a lower-trafficked page

#### P3
* Typos and broken anchor links

### Handling special issue types

#### Duplicate issues
If a single problem has one or more issues open for it, the problem should be consolidated into a single issue. You should decide which issue to keep open (or open a new issue), port over all relevant information, link related issues, and close all the other issues that describe the same problem. Only having a single issue to work on will help reduce confusion and avoid duplicating work on the same problem.

#### Dead link issues
Depending on where the dead link is reported, different actions are required to resolve the issue. Dead links in the API and Kubectl docs are automation issues and should be assigned a P1 until the problem can be fully understood. All other dead links are issues that need to be manually fixed and can be assigned a P3.

{% endcapture %}


{% capture whatsnext %}
* Learn about [writing a new topic](/docs/contribute/write-new-topic).
* Learn about [using page templates](/docs/contribute/page-templates/).
* Learn about [staging your changes](/docs/contribute/stage-documentation-changes).
{% endcapture %}

{% include templates/concept.md %}
@@ -1,9 +1,7 @@
---
title: Federation API
title: Federation API Reference
---

# API Reference

Federation API server supports the following group versions:

* federation/v1beta1: [operations](/docs/federation/api-reference/federation/v1beta1/operations.html), [model definitions](/docs/federation/api-reference/federation/v1beta1/definitions.html)
@@ -1,7 +1,7 @@
---
assignees:
- bboreham

title: Weave Net Addon
---

The [Weave Net Addon](https://www.weave.works/docs/net/latest/kube-addon/) for Kubernetes comes with a Network Policy Controller.
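The policies it enforces are ordinary Kubernetes NetworkPolicy resources; as a hedged illustration (names and labels are placeholders, and the API group reflects the extensions/v1beta1 schema of this era):

```yaml
apiVersion: extensions/v1beta1
kind: NetworkPolicy
metadata:
  name: allow-frontend-to-backend
  namespace: default
spec:
  # Select the pods this policy applies to.
  podSelector:
    matchLabels:
      app: backend
  ingress:
  # Allow traffic only from pods labeled app=frontend, on TCP 6379.
  - from:
    - podSelector:
        matchLabels:
          app: frontend
    ports:
    - protocol: TCP
      port: 6379
```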
@@ -2,7 +2,7 @@
assignees:
- caesarxuchao
- erictune

title: Setting up Kubernetes with Juju
---

Ubuntu 16.04 introduced the [Canonical Distribution of Kubernetes](https://jujucharms.com/canonical-kubernetes/), a pure upstream distribution of Kubernetes designed for production usage. Out of the box it comes with the following components on 12 machines:

@@ -1,5 +1,5 @@
---

title: Deploying Kubernetes with Calico Networking on Ubuntu
---

This document describes how to deploy Kubernetes with Calico networking from scratch on _bare metal_ Ubuntu. For more information on Project Calico, visit [projectcalico.org](http://projectcalico.org) and the [calico-containers repository](https://github.com/projectcalico/calico-containers).

@@ -1,7 +1,7 @@
---
assignees:
- thockin

title: Manually Deploying Kubernetes on Ubuntu Nodes
---

This document describes how to deploy Kubernetes on ubuntu nodes, 1 master and 3 nodes involved

@@ -1,4 +1,5 @@
---
title: Providing Load-Balanced Access to an Application in a Cluster
---

{% capture overview %}
@@ -0,0 +1,149 @@
---
title: Communicating Between Containers Running in the Same Pod
---

{% capture overview %}

This page shows how to use a Volume to communicate between two Containers running
in the same Pod.

{% endcapture %}


{% capture prerequisites %}

{% include task-tutorial-prereqs.md %}

{% endcapture %}


{% capture steps %}

### Creating a Pod that runs two Containers

In this exercise, you create a Pod that runs two Containers. The two containers
share a Volume that they can use to communicate. Here is the configuration file
for the Pod:

{% include code.html language="yaml" file="two-container-pod.yaml" ghlink="/docs/tasks/configure-pod-container/two-container-pod.yaml" %}

In the configuration file, you can see that the Pod has a Volume named
`shared-data`.

The first container listed in the configuration file runs an nginx server. The
mount path for the shared Volume is `/usr/share/nginx/html`.
The second container is based on the debian image, and has a mount path of
`/pod-data`. The second container runs the following command and then terminates.

    echo Hello from the debian container > /pod-data/index.html

Notice that the second container writes the `index.html` file in the root
directory of the nginx server.

Create the Pod and the two Containers:

    kubectl create -f http://k8s.io/docs/tasks/configure-pod-container/two-container-pod.yaml

View information about the Pod and the Containers:

    kubectl get pod two-containers --output=yaml

Here is a portion of the output:

    apiVersion: v1
    kind: Pod
    metadata:
      ...
      name: two-containers
      namespace: default
      ...
    spec:
      ...
      containerStatuses:

      - containerID: docker://c1d8abd1 ...
        image: debian
        ...
        lastState:
          terminated:
            ...
        name: debian-container
        ...

      - containerID: docker://96c1ff2c5bb ...
        image: nginx
        ...
        name: nginx-container
        ...
        state:
          running:
            ...

You can see that the debian Container has terminated, and the nginx Container
is still running.

Get a shell to the nginx Container:

    kubectl exec -it two-containers -c nginx-container -- /bin/bash

In your shell, verify that nginx is running:

    root@two-containers:/# ps aux

The output is similar to this:

    USER       PID  ...  STAT START   TIME COMMAND
    root         1  ...  Ss   21:12   0:00 nginx: master process nginx -g daemon off;

Recall that the debian Container created the `index.html` file in the nginx root
directory. Use `curl` to send a GET request to the nginx server:

    root@two-containers:/# apt-get update
    root@two-containers:/# apt-get install curl
    root@two-containers:/# curl localhost

The output shows that nginx serves a web page written by the debian container:

    Hello from the debian container

{% endcapture %}


{% capture discussion %}

### Discussion

The primary reason that Pods can have multiple containers is to support
helper applications that assist a primary application. Typical examples of
helper applications are data pullers, data pushers, and proxies.
Helper and primary applications often need to communicate with each other.
Typically this is done through a shared filesystem, as shown in this exercise,
or through the loopback network interface, localhost. An example of this pattern is a
web server along with a helper program that polls a Git repository for new updates.

The Volume in this exercise provides a way for Containers to communicate during
the life of the Pod. If the Pod is deleted and recreated, any data stored in
the shared Volume is lost.

{% endcapture %}


{% capture whatsnext %}

* Learn more about
[patterns for composite containers](http://blog.kubernetes.io/2015/06/the-distributed-system-toolkit-patterns.html).

* Learn about
[composite containers for modular architecture](http://www.slideshare.net/Docker/slideshare-burns).

* See
[Configuring a Pod to Use a Volume for Storage](/docs/tasks/configure-pod-container/configure-volume-storage/).

* See [Volume](/docs/api-reference/v1/definitions/#_v1_volume).

* See [Pod](/docs/api-reference/v1/definitions/#_v1_pod).

{% endcapture %}


{% include templates/task.md %}
@@ -0,0 +1,27 @@
apiVersion: v1
kind: Pod
metadata:
  name: two-containers
spec:

  restartPolicy: Never

  volumes:
  - name: shared-data
    emptyDir: {}

  containers:

  - name: nginx-container
    image: nginx
    volumeMounts:
    - name: shared-data
      mountPath: /usr/share/nginx/html

  - name: debian-container
    image: debian
    volumeMounts:
    - name: shared-data
      mountPath: /pod-data
    command: ["/bin/sh"]
    args: ["-c", "echo Hello from the debian container > /pod-data/index.html"]
@@ -0,0 +1,43 @@
---

assignees:
- cdrage

title: Kompose Overview
---

`kompose` is a tool to help users who are familiar with `docker-compose` move to **Kubernetes**. `kompose` takes a Docker Compose file and translates it into Kubernetes resources.

`kompose` is a convenience tool to go from local Docker development to managing your application with Kubernetes. The transformation of the Docker Compose format to Kubernetes resource manifests may not be exact, but it helps tremendously when first deploying an application on Kubernetes.

## Use Case

If you have a Docker Compose `docker-compose.yml` or a Docker Distributed Application Bundle `docker-compose-bundle.dab` file, you can convert it into Kubernetes deployments and services like this:

```console
$ kompose --bundle docker-compose-bundle.dab convert
WARN[0000]: Unsupported key networks - ignoring
file "redis-svc.json" created
file "web-svc.json" created
file "web-deployment.json" created
file "redis-deployment.json" created

$ kompose -f docker-compose.yml convert
WARN[0000]: Unsupported key networks - ignoring
file "redis-svc.json" created
file "web-svc.json" created
file "web-deployment.json" created
file "redis-deployment.json" created
```

## Installation

Grab the latest [release](https://github.com/kubernetes-incubator/kompose/releases) for your OS, untar it, and extract the binary.

### Linux

```sh
wget https://github.com/kubernetes-incubator/kompose/releases/download/v0.1.2/kompose_linux-amd64.tar.gz
tar -xvf kompose_linux-amd64.tar.gz --strip 1
sudo mv kompose /usr/local/bin
```
@@ -0,0 +1,310 @@
---

assignees:
- cdrage

title: Kompose User Guide
---

* TOC
{:toc}

Kompose supports two providers: OpenShift and Kubernetes.
You can choose the targeted provider either with the global option `--provider` or by setting the environment variable `PROVIDER`.
Setting the environment variable `PROVIDER` switches to the OpenShift provider permanently, so you do not need to specify the `--provider openshift` option every time.
If no provider is specified, Kubernetes is the default provider.
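For example, a minimal sketch (the compose file name is a placeholder):

```console
$ export PROVIDER=openshift
$ kompose -f docker-compose.yml convert
```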

## Kompose convert

Currently Kompose can transform either a Docker Compose file (both v1 and v2) or an [experimental Distributed Application Bundle](https://blog.docker.com/2016/06/docker-app-bundle/) into Kubernetes and OpenShift objects.
There are a couple of sample files in the `examples/` directory for testing.
You convert the compose or dab file to Kubernetes or OpenShift objects with `kompose convert`.

### Kubernetes
```console
$ cd examples/

$ ls
docker-compose.yml docker-compose-bundle.dab docker-gitlab.yml docker-voting.yml

$ kompose -f docker-gitlab.yml convert -y
file "redisio-svc.yaml" created
file "gitlab-svc.yaml" created
file "postgresql-svc.yaml" created
file "gitlab-deployment.yaml" created
file "postgresql-deployment.yaml" created
file "redisio-deployment.yaml" created

$ ls *.yaml
gitlab-deployment.yaml postgresql-deployment.yaml redis-deployment.yaml redisio-svc.yaml web-deployment.yaml
gitlab-svc.yaml postgresql-svc.yaml redisio-deployment.yaml redis-svc.yaml web-svc.yaml
```

You can try it with a Docker Compose version 2 file like this:

```console
$ kompose --file docker-voting.yml convert
WARN[0000]: Unsupported key networks - ignoring
WARN[0000]: Unsupported key build - ignoring
file "worker-svc.json" created
file "db-svc.json" created
file "redis-svc.json" created
file "result-svc.json" created
file "vote-svc.json" created
file "redis-deployment.json" created
file "result-deployment.json" created
file "vote-deployment.json" created
file "worker-deployment.json" created
file "db-deployment.json" created

$ ls
db-deployment.json docker-compose.yml docker-gitlab.yml redis-deployment.json result-deployment.json vote-deployment.json worker-deployment.json
db-svc.json docker-compose-bundle.dab docker-voting.yml redis-svc.json result-svc.json vote-svc.json worker-svc.json
```

Use `--bundle` or `--dab` to specify a DAB file, as shown below:

```console
$ kompose --bundle docker-compose-bundle.dab convert
WARN[0000]: Unsupported key networks - ignoring
file "redis-svc.json" created
file "web-svc.json" created
file "web-deployment.json" created
file "redis-deployment.json" created
```

### OpenShift

```console
$ kompose --provider openshift --file docker-voting.yml convert
WARN[0000] [worker] Service cannot be created because of missing port.
INFO[0000] file "vote-service.json" created
INFO[0000] file "db-service.json" created
INFO[0000] file "redis-service.json" created
INFO[0000] file "result-service.json" created
INFO[0000] file "vote-deploymentconfig.json" created
INFO[0000] file "vote-imagestream.json" created
INFO[0000] file "worker-deploymentconfig.json" created
INFO[0000] file "worker-imagestream.json" created
INFO[0000] file "db-deploymentconfig.json" created
INFO[0000] file "db-imagestream.json" created
INFO[0000] file "redis-deploymentconfig.json" created
INFO[0000] file "redis-imagestream.json" created
INFO[0000] file "result-deploymentconfig.json" created
INFO[0000] file "result-imagestream.json" created
```

In a similar way, you can convert DAB files to OpenShift objects.

```console
$ kompose --bundle docker-compose-bundle.dab --provider openshift convert
WARN[0000]: Unsupported key networks - ignoring
INFO[0000] file "redis-svc.json" created
INFO[0000] file "web-svc.json" created
INFO[0000] file "web-deploymentconfig.json" created
INFO[0000] file "web-imagestream.json" created
INFO[0000] file "redis-deploymentconfig.json" created
INFO[0000] file "redis-imagestream.json" created
```

## Kompose up

Kompose supports a straightforward way to deploy your "composed" application to Kubernetes or OpenShift via `kompose up`.


### Kubernetes
```console
$ kompose --file ./examples/docker-guestbook.yml up
We are going to create Kubernetes deployments and services for your Dockerized application.
If you need different kind of resources, use the 'kompose convert' and 'kubectl create -f' commands instead.

INFO[0000] Successfully created service: redis-master
INFO[0000] Successfully created service: redis-slave
INFO[0000] Successfully created service: frontend
INFO[0001] Successfully created deployment: redis-master
INFO[0001] Successfully created deployment: redis-slave
INFO[0001] Successfully created deployment: frontend

Your application has been deployed to Kubernetes. You can run 'kubectl get deployment,svc,pods' for details.

$ kubectl get deployment,svc,pods
NAME                            DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
frontend                        1         1         1            1           4m
redis-master                    1         1         1            1           4m
redis-slave                     1         1         1            1           4m
NAME                            CLUSTER-IP    EXTERNAL-IP   PORT(S)    AGE
frontend                        10.0.174.12   <none>        80/TCP     4m
kubernetes                      10.0.0.1      <none>        443/TCP    13d
redis-master                    10.0.202.43   <none>        6379/TCP   4m
redis-slave                     10.0.1.85     <none>        6379/TCP   4m
NAME                            READY     STATUS    RESTARTS   AGE
frontend-2768218532-cs5t5       1/1       Running   0          4m
redis-master-1432129712-63jn8   1/1       Running   0          4m
redis-slave-2504961300-nve7b    1/1       Running   0          4m
```
Note:

- You must have a running Kubernetes cluster with a pre-configured kubectl context.
- Only deployments and services are generated and deployed to Kubernetes. If you need different kinds of resources, use the 'kompose convert' and 'kubectl create -f' commands instead.

### OpenShift
```console
$ kompose --file ./examples/docker-guestbook.yml --provider openshift up
We are going to create OpenShift DeploymentConfigs and Services for your Dockerized application.
If you need different kind of resources, use the 'kompose convert' and 'oc create -f' commands instead.

INFO[0000] Successfully created service: redis-slave
INFO[0000] Successfully created service: frontend
INFO[0000] Successfully created service: redis-master
INFO[0000] Successfully created deployment: redis-slave
INFO[0000] Successfully created ImageStream: redis-slave
INFO[0000] Successfully created deployment: frontend
INFO[0000] Successfully created ImageStream: frontend
INFO[0000] Successfully created deployment: redis-master
INFO[0000] Successfully created ImageStream: redis-master

Your application has been deployed to OpenShift. You can run 'oc get dc,svc,is' for details.

$ oc get dc,svc,is
NAME              REVISION   DESIRED   CURRENT   TRIGGERED BY
dc/frontend       0          1         0         config,image(frontend:v4)
dc/redis-master   0          1         0         config,image(redis-master:e2e)
dc/redis-slave    0          1         0         config,image(redis-slave:v1)
NAME               CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
svc/frontend       172.30.46.64    <none>        80/TCP     8s
svc/redis-master   172.30.144.56   <none>        6379/TCP   8s
svc/redis-slave    172.30.75.245   <none>        6379/TCP   8s
NAME               DOCKER REPO                           TAGS      UPDATED
is/frontend        172.30.12.200:5000/fff/frontend
is/redis-master    172.30.12.200:5000/fff/redis-master
is/redis-slave     172.30.12.200:5000/fff/redis-slave    v1
```

Note:

- You must have a running OpenShift cluster with a pre-configured `oc` context (`oc login`).

## Kompose down

Once you have deployed your "composed" application to Kubernetes, `kompose down` helps you remove the application by deleting its deployments and services. If you need to remove other resources, use the 'kubectl' command.

```console
$ kompose --file docker-guestbook.yml down
INFO[0000] Successfully deleted service: redis-master
INFO[0004] Successfully deleted deployment: redis-master
INFO[0004] Successfully deleted service: redis-slave
INFO[0008] Successfully deleted deployment: redis-slave
INFO[0009] Successfully deleted service: frontend
INFO[0013] Successfully deleted deployment: frontend
```
Note:
- You must have a running Kubernetes cluster with a pre-configured kubectl context.

## Alternate formats

The default `kompose` transformation generates Kubernetes [Deployments](http://kubernetes.io/docs/user-guide/deployments/) and [Services](http://kubernetes.io/docs/user-guide/services/) in JSON format. You can generate YAML instead with `-y`. Alternatively, you can generate [Replication Controller](http://kubernetes.io/docs/user-guide/replication-controller/) objects, [Daemon Sets](http://kubernetes.io/docs/admin/daemons/), or [Helm](https://github.com/helm/helm) charts.

```console
$ kompose convert
file "redis-svc.json" created
file "web-svc.json" created
file "redis-deployment.json" created
file "web-deployment.json" created
```
The `*-deployment.json` files contain the Deployment objects.

```console
$ kompose convert --rc -y
file "redis-svc.yaml" created
file "web-svc.yaml" created
file "redis-rc.yaml" created
file "web-rc.yaml" created
```

The `*-rc.yaml` files contain the Replication Controller objects. If you want to specify replicas (default is 1), use the `--replicas` flag: `$ kompose convert --rc --replicas 3 -y`

```console
$ kompose convert --ds -y
file "redis-svc.yaml" created
file "web-svc.yaml" created
file "redis-daemonset.yaml" created
file "web-daemonset.yaml" created
```

The `*-daemonset.yaml` files contain the Daemon Set objects.

If you want to generate a Chart to be used with [Helm](https://github.com/kubernetes/helm), simply run:

```console
$ kompose convert -c -y
file "web-svc.yaml" created
file "redis-svc.yaml" created
file "web-deployment.yaml" created
file "redis-deployment.yaml" created
chart created in "./docker-compose/"

$ tree docker-compose/
docker-compose
├── Chart.yaml
├── README.md
└── templates
    ├── redis-deployment.yaml
    ├── redis-svc.yaml
    ├── web-deployment.yaml
    └── web-svc.yaml
```

The chart structure is aimed at providing a skeleton for building your Helm charts.

## Unsupported docker-compose configuration options

Currently `kompose` does not support the following Docker Compose options:

```
"build", "cgroup_parent", "devices", "depends_on", "dns", "dns_search", "domainname", "env_file", "extends", "external_links", "extra_hosts", "hostname", "ipc", "logging", "mac_address", "mem_limit", "memswap_limit", "network_mode", "networks", "pid", "security_opt", "shm_size", "stop_signal", "volume_driver", "uts", "read_only", "stdin_open", "tty", "user", "ulimits", "dockerfile", "net"
```

For example:

```console
$ cat nginx.yml
nginx:
  image: nginx
  dockerfile: foobar
  build: ./foobar
  cap_add:
    - ALL
  container_name: foobar

$ kompose -f nginx.yml convert
WARN[0000] Unsupported key build - ignoring
WARN[0000] Unsupported key cap_add - ignoring
WARN[0000] Unsupported key dockerfile - ignoring
```

## Labels

`kompose` supports Kompose-specific labels within the `docker-compose.yml` file in order to explicitly set a service's type upon conversion.

The currently supported options are:

| Key                  | Value                               |
|----------------------|-------------------------------------|
| kompose.service.type | nodeport / clusterip / loadbalancer |

Here is a brief example that uses the labels feature to specify a service type:

```yaml
version: "2"
services:
  nginx:
    image: nginx
    dockerfile: foobar
    build: ./foobar
    cap_add:
      - ALL
    container_name: foobar
    labels:
      kompose.service.type: nodeport
```
@@ -29,6 +29,10 @@ each of which has a sequence of steps.

* [Running ZooKeeper, A CP Distributed System](/docs/tutorials/stateful-application/zookeeper/)

#### Services

* [Using SourceIP](/docs/tutorials/services/source-ip/)

### What's next

If you would like to write a tutorial, see

@@ -1,4 +1,5 @@
---
title: Using Source IP
---

{% capture overview %}
@@ -1,23 +0,0 @@
## Building

For each container, the build steps are the same. The examples below
are for the `show` container. Replace `show` with `backend` for the
backend container.

## Google Container Registry ([GCR](https://cloud.google.com/tools/container-registry/))

    docker build -t gcr.io/<project-name>/show .
    gcloud docker push gcr.io/<project-name>/show

## Docker Hub

    docker build -t <username>/show .
    docker push <username>/show

## Change Pod Definitions

Edit both `show-rc.yaml` and `backend-rc.yaml` and replace the
specified `image:` with the one that you built.
@@ -1,8 +1,9 @@
---
assignees:
- erictune

title: Pod Templates
---

Pod templates are [pod](/docs/user-guide/pods/) specifications which are included in other objects, such as
[Replication Controllers](/docs/user-guide/replication-controller/), [Jobs](/docs/user-guide/jobs/), and
[DaemonSets](/docs/admin/daemons/). Controllers use Pod Templates to make actual pods.
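As a minimal sketch, the `template` field of this Job is a pod template: a complete pod spec embedded in the controller's own spec:

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: pi
spec:
  # Everything under "template" is the pod template the Job controller
  # uses to create actual pods.
  template:
    metadata:
      name: pi
    spec:
      containers:
      - name: pi
        image: perl
        command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      restartPolicy: Never
```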
@@ -1,7 +1,7 @@
---
assignees:
- erictune

title: Init Containers
---

* TOC
@@ -113,8 +113,8 @@ metadata:
  name: mysecret
type: Opaque
data:
  password: MWYyZDFlMmU2N2Rm
  username: YWRtaW4=
  password: MWYyZDFlMmU2N2Rm
```
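The values under `data` are plain strings encoded with base64; for example, the two values shown above can be reproduced like this:

```shell
$ echo -n "admin" | base64
YWRtaW4=
$ echo -n "1f2d1e2e67df" | base64
MWYyZDFlMmU2N2Rm
```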

The data field is a map. Its keys must match

@@ -142,8 +142,8 @@ Get back the secret created in the previous section:
$ kubectl get secret mysecret -o yaml
apiVersion: v1
data:
  password: MWYyZDFlMmU2N2Rm
  username: YWRtaW4=
  password: MWYyZDFlMmU2N2Rm
kind: Secret
metadata:
  creationTimestamp: 2016-01-22T18:41:56Z
@@ -0,0 +1,10 @@
# Put files you want to skip table of contents entry check here:
docs/search.md
docs/sitemap.md
docs/user-guide/pods/_viewing-a-pod.md
docs/user-guide/simple-yaml.md
docs/user-guide/walkthrough/index.md
docs/user-guide/walkthrough/k8s201.md
docs/user-guide/logging-demo/README.md
docs/user-guide/downward-api/README.md
docs/user-guide/configmap/README.md
@@ -0,0 +1,22 @@
#!/bin/bash

no_entry=false

# Verify all docs/.../*.md files are referenced in at least one of _data/*.yml
# files. Skip checking files in skip_toc_check.txt
for file in `find docs -name "*.md" -type f`; do
  if ! grep -q "${file}" skip_toc_check.txt; then
    path=${file%.*}
    # abc/index.md should point to abc, not abc/index
    path=${path%%index}
    if ! grep -q "${path}" _data/*.yml; then
      echo "Error: ${file} doesn't have an entry in the table of contents under _data/*.yml"
      no_entry=true
    fi
  fi
done

if ${no_entry}; then
  echo "Found files without entries. For how to fix it, see http://kubernetes.io/docs/contribute/write-new-topic/#creating-an-entry-in-the-table-of-contents"
  exit 1
fi