diff --git a/_config.yml b/_config.yml index 59869f26c0..84a5524023 100644 --- a/_config.yml +++ b/_config.yml @@ -20,4 +20,5 @@ defaults: layout: docwithnav showedit: true githubbranch: "release-1.2" + docsbranch: "master" permalink: pretty diff --git a/_data/guides.yml b/_data/guides.yml index 453e84de7b..553b37cd59 100644 --- a/_data/guides.yml +++ b/_data/guides.yml @@ -147,6 +147,8 @@ toc: path: /docs/user-guide/getting-into-containers/ - title: The Lifecycle of a Pod path: /docs/user-guide/pod-states/ + - title: Pod Templates + path: /docs/user-guide/pod-templates/ - title: Assigning Pods to Nodes path: /docs/user-guide/node-selection/ - title: Creating Pods with the Downward API @@ -167,13 +169,13 @@ toc: - title: Using DNS Pods and Services path: /docs/admin/dns/ - title: Setting Up and Configuring DNS - path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/cluster-dns + path: https://github.com/kubernetes/kubernetes/tree/release-1.2/examples/cluster-dns - title: Deploying DNS path: /docs/getting-started-guides/docker-multinode/deployDNS/ - title: Connecting Applications path: /docs/user-guide/connecting-applications/ - title: Creating Servers with External IPs - path: https://github.com/kubernetes/kubernetes/blob/release-1.1/examples/simple-nginx.md + path: https://github.com/kubernetes/kubernetes/blob/release-1.2/examples/simple-nginx.md - title: Connect with Proxies path: /docs/user-guide/connecting-to-applications-proxy/ - title: Connect with Port Forwarding @@ -193,6 +195,8 @@ toc: path: /docs/user-guide/config-best-practices/ - title: Configuring Containers path: /docs/user-guide/configuring-containers/ + - title: Using ConfigMap + path: /docs/user-guide/configmap/ - title: Sharing Cluster Access with kubeconfig path: /docs/user-guide/sharing-clusters/ - title: Using Environment Variables @@ -228,11 +232,11 @@ toc: - title: Testing a Kubernetes Cluster path: /docs/getting-started-guides/docker-multinode/testing/ - title: Simulating Large Test Loads - path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/k8petstore + path: https://github.com/kubernetes/kubernetes/tree/release-1.2/examples/k8petstore - title: Checking Pod Health path: /docs/user-guide/liveness/ - title: Using Explorer to Examine the Runtime Environment - path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/explorer + path: https://github.com/kubernetes/kubernetes/tree/release-1.2/examples/explorer - title: Resource Usage Monitoring path: /docs/user-guide/monitoring/ - title: Logging diff --git a/_data/reference.yml b/_data/reference.yml index a7be4c5b70..ecfb5b21d0 100644 --- a/_data/reference.yml +++ b/_data/reference.yml @@ -40,6 +40,8 @@ toc: path: /docs/user-guide/docker-cli-to-kubectl/ - title: JSONpath Support path: /docs/user-guide/jsonpath/ + - title: kubectl Cheat Sheet + path: /docs/user-guide/kubectl-cheatsheet/ - title: kubectl Commands section: - title: kubectl @@ -174,14 +176,14 @@ toc: - title: Kubernetes Design Docs section: - title: Kubernetes Architecture - path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/design/architecture.md + path: https://github.com/kubernetes/kubernetes/blob/release-1.2/docs/design/architecture.md - title: Kubernetes Design Overview - path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/design/ + path: https://github.com/kubernetes/kubernetes/blob/release-1.2/docs/design/ - title: Security in Kubernetes - path: 
https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/design/security.md + path: https://github.com/kubernetes/kubernetes/blob/release-1.2/docs/design/security.md - title: Kubernetes Identity and Access Management - path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/design/access.md + path: https://github.com/kubernetes/kubernetes/blob/release-1.2/docs/design/access.md - title: Security Contexts - path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/design/security_context.md + path: https://github.com/kubernetes/kubernetes/blob/release-1.2/docs/design/security_context.md - title: Kubernetes OpenVSwitch GRE/VxLAN networking path: /docs/admin/ovs-networking/ \ No newline at end of file diff --git a/_data/samples.yml b/_data/samples.yml index ea8ddf3217..52aaf32455 100644 --- a/_data/samples.yml +++ b/_data/samples.yml @@ -7,52 +7,56 @@ toc: - title: Clustered Application Samples section: - title: Apache Cassandra Database - path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/cassandra + path: https://github.com/kubernetes/kubernetes/tree/release-1.2/examples/cassandra - title: Apache Spark - path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/spark + path: https://github.com/kubernetes/kubernetes/tree/release-1.2/examples/spark - title: Apache Storm - path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/storm + path: https://github.com/kubernetes/kubernetes/tree/release-1.2/examples/storm - title: Distributed Task Queue - path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/celery-rabbitmq + path: https://github.com/kubernetes/kubernetes/tree/release-1.2/examples/celery-rabbitmq - title: Hazelcast - path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/hazelcast + path: https://github.com/kubernetes/kubernetes/tree/release-1.2/examples/hazelcast - title: Meteor Applications - path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/meteor/ + path: https://github.com/kubernetes/kubernetes/tree/release-1.2/examples/meteor/ - title: Redis - path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/redis/ + path: https://github.com/kubernetes/kubernetes/tree/release-1.2/examples/redis/ - title: RethinkDB - path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/rethinkdb/ + path: https://github.com/kubernetes/kubernetes/tree/release-1.2/examples/rethinkdb/ - title: Elasticsearch/Kibana Logging Demonstration - path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/logging-demo/ + path: https://github.com/kubernetes/kubernetes/tree/release-1.2/examples/logging-demo/ - title: Elasticsearch - path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/elasticsearch/ + path: https://github.com/kubernetes/kubernetes/tree/release-1.2/examples/elasticsearch/ - title: OpenShift Origin - path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/openshift-origin/ + path: https://github.com/kubernetes/kubernetes/tree/release-1.2/examples/openshift-origin/ - title: Ceph - path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/rbd/ + path: https://github.com/kubernetes/kubernetes/tree/release-1.2/examples/rbd/ - title: MEAN stack on Google Cloud Platform path: /docs/getting-started-guides/meanstack/ - title: Persistent Volume Samples section: - title: WordPress on a Kubernetes Persistent Volume - path: 
https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/mysql-wordpress-pd/ + path: https://github.com/kubernetes/kubernetes/tree/release-1.2/examples/mysql-wordpress-pd/ - title: GlusterFS - path: /https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/glusterfs/ + path: /https://github.com/kubernetes/kubernetes/tree/release-1.2/examples/glusterfs/ - title: iSCSI - path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/iscsi/ + path: https://github.com/kubernetes/kubernetes/tree/release-1.2/examples/iscsi/ - title: NFS - path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/nfs/ + path: https://github.com/kubernetes/kubernetes/tree/release-1.2/examples/nfs/ - title: Downward API Volumes - path: /docs/user-guide/downward-api/volume/ + path: /docs/user-guide/downward-api/volume - title: Multi-tier Application Samples section: - title: Guestbook - Go Server - path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/guestbook-go/ + path: https://github.com/kubernetes/kubernetes/tree/release-1.2/examples/guestbook-go/ - title: GuestBook - PHP Server - path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/guestbook/ + path: https://github.com/kubernetes/kubernetes/tree/release-1.2/examples/guestbook/ - title: MySQL - Phabricator Server - path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/phabricator/ - - title: Elasticsearch/Kibana Logging Demo - path: https://github.com/kubernetes/kubernetes.github.io/tree/master/docs/user-guide/logging-demo + path: https://github.com/kubernetes/kubernetes/tree/release-1.2/examples/phabricator/ + +- title: Elasticsearch/Kibana Logging Demo + path: https://github.com/kubernetes/kubernetes.github.io/tree/master/docs/user-guide/logging-demo + +- title: ConfigMap Example + path: https://github.com/kubernetes/kubernetes.github.io/tree/master/docs/user-guide/configmap diff --git a/docs/admin/daemons.md b/docs/admin/daemons.md index bf35595122..ce3b4aaba5 100644 --- a/docs/admin/daemons.md +++ b/docs/admin/daemons.md @@ -71,7 +71,7 @@ a node for testing. If you specify a `.spec.template.spec.nodeSelector`, then the DaemonSet controller will create pods on nodes which match that [node -selector](/docs/user-guide/node-selection/). +selector](https://github.com/kubernetes/kubernetes.github.io/tree/{{page.docsbranch}}/docs/user-guide/node-selection). If you do not specify a `.spec.template.spec.nodeSelector`, then the DaemonSet controller will create pods on all nodes. diff --git a/docs/user-guide/accessing-the-cluster.md b/docs/user-guide/accessing-the-cluster.md index 2d1479b803..eae63ed84e 100644 --- a/docs/user-guide/accessing-the-cluster.md +++ b/docs/user-guide/accessing-the-cluster.md @@ -85,7 +85,7 @@ The above example uses the `--insecure` flag. This leaves it subject to MITM attacks. When kubectl accesses the cluster it uses a stored root certificate and client certificates to access the server. (These are installed in the `~/.kube` directory). Since cluster certificates are typically self-signed, it -make take special configuration to get your http client to use root +may take special configuration to get your http client to use root certificate. 
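+For example, rather than passing `--insecure`, you can give `curl` the cluster's root certificate
+directly. This is only a sketch: the certificate file name under `~/.kube/` varies by provider, and
+`$TOKEN` stands for a bearer token you have already extracted from your kubeconfig:
+
+```shell
+$ curl --cacert ~/.kube/ca.crt --header "Authorization: Bearer $TOKEN" https://104.197.5.247/api
+```
+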
On some clusters, the apiserver does not require authentication; it may serve @@ -119,6 +119,13 @@ is associated with a service account, and a credential (token) for that service account is placed into the filesystem tree of each container in that pod, at `/var/run/secrets/kubernetes.io/serviceaccount/token`. +If available, a certificate bundle is placed into the filesystem tree of each +container at `/var/run/secrets/kubernetes.io/serviceaccount/ca.crt`, and should be +used to verify the serving certificate of the apiserver. + +Finally, the default namespace to be used for namespaced API operations is placed in a file +at `/var/run/secrets/kubernetes.io/serviceaccount/namespace` in each container. + From within a pod the recommended ways to connect to API are: - run a kubectl proxy as one of the containers in the pod, or as a background @@ -195,9 +202,9 @@ at `https://104.197.5.247/api/v1/proxy/namespaces/kube-system/services/elasticse #### Manually constructing apiserver proxy URLs As mentioned above, you use the `kubectl cluster-info` command to retrieve the service's proxy URL. To create proxy URLs that include service endpoints, suffixes, and parameters, you simply append to the service's proxy URL: -`http://`*`kubernetes_master_address`*`/`*`service_path`*`/`*`service_name`*`/`*`service_endpoint-suffix-parameter`* - +`http://`*`kubernetes_master_address`*`/api/v1/proxy/namespaces/`*`namespace_name`*`/services/`*`service_name[:port_name]`* + +If you haven't specified a name for your port, you don't have to specify *port_name* in the URL ##### Examples @@ -205,7 +212,7 @@ about namespaces? 'proxy' verb? --> * To access the Elasticsearch cluster health information `_cluster/health?pretty=true`, you would use: `https://104.197.5.247/api/v1/proxy/namespaces/kube-system/services/elasticsearch-logging/_cluster/health?pretty=true` ```json -{ + { "cluster_name" : "kubernetes_logging", "status" : "yellow", "timed_out" : false, diff --git a/docs/user-guide/compute-resources.md b/docs/user-guide/compute-resources.md index f962a35ac8..06f6ed47c0 100644 --- a/docs/user-guide/compute-resources.md +++ b/docs/user-guide/compute-resources.md @@ -185,7 +185,7 @@ on the pod you are interested in: Name: simmemleak-hra99 Namespace: default Image(s): saadali/simmemleak -Node: kubernetes-minion-tf0f/10.240.216.66 +Node: kubernetes-node-tf0f/10.240.216.66 Labels: name=simmemleak Status: Running Reason: @@ -208,14 +208,14 @@ Containers: Restart Count: 5 Conditions: Type Status - Ready False + Ready False Events: FirstSeen LastSeen Count From SubobjectPath Reason Message - Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {scheduler } scheduled Successfully assigned simmemleak-hra99 to kubernetes-minion-tf0f - Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-minion-tf0f} implicitly required container POD pulled Pod container image "gcr.io/google_containers/pause:0.8.0" already present on machine - Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-minion-tf0f} implicitly required container POD created Created with docker id 6a41280f516d - Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-minion-tf0f} implicitly required container POD started Started with docker id 6a41280f516d - Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-minion-tf0f} spec.containers{simmemleak} created Created with docker id 87348f12526a + Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 
Jul 2015 12:53:51 -0700 1 {scheduler } scheduled Successfully assigned simmemleak-hra99 to kubernetes-node-tf0f + Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} implicitly required container POD pulled Pod container image "gcr.io/google_containers/pause:0.8.0" already present on machine + Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} implicitly required container POD created Created with docker id 6a41280f516d + Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} implicitly required container POD started Started with docker id 6a41280f516d + Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} spec.containers{simmemleak} created Created with docker id 87348f12526a ``` The `Restart Count: 5` indicates that the `simmemleak` container in this pod was terminated and restarted 5 times. @@ -225,7 +225,7 @@ You can call `get pod` with the `-o go-template=...` option to fetch the status ```shell [13:59:01] $ ./cluster/kubectl.sh get pod -o go-template='{{range.status.containerStatuses}}{{"Container Name: "}}{{.name}}{{"\r\nLastState: "}}{{.lastState}}{{end}}' simmemleak-60xbc Container Name: simmemleak -LastState: map[terminated:map[exitCode:137 reason:OOM Killed startedAt:2015-07-07T20:58:43Z finishedAt:2015-07-07T20:58:43Z containerID:docker://0e4095bba1feccdfe7ef9fb6ebffe972b4b14285d5acdec6f0d3ae8a22fad8b2]][13:59:03] clusterScaleDoc ~/go/src/github.com/kubernetes/kubernetes $ +LastState: map[terminated:map[exitCode:137 reason:OOM Killed startedAt:2015-07-07T20:58:43Z finishedAt:2015-07-07T20:58:43Z containerID:docker://0e4095bba1feccdfe7ef9fb6ebffe972b4b14285d5acdec6f0d3ae8a22fad8b2]] ``` We can see that this container was terminated because `reason:OOM Killed`, where *OOM* stands for Out Of Memory. diff --git a/docs/user-guide/config-best-practices.md b/docs/user-guide/config-best-practices.md index b3f36056a0..80c204d552 100644 --- a/docs/user-guide/config-best-practices.md +++ b/docs/user-guide/config-best-practices.md @@ -3,23 +3,111 @@ This document is meant to highlight and consolidate in one place configuration best practices that are introduced throughout the user-guide and getting-started documentation and examples. This is a living document so if you think of something that is not on this list but might be useful to others, please don't hesitate to file an issue or submit a PR. -1. When writing configuration, use the latest stable API version (currently v1). -1. Configuration should be stored in version control before being pushed to the cluster. This allows configuration to be quickly rolled back if needed and will aid with cluster re-creation and restoration if the worst were to happen. -1. Use YAML rather than JSON. They can be used interchangeably in almost all scenarios but YAML tends to be more user-friendly for config. -1. Group related objects together in a single file. This is often better than separate files. -1. Use `kubectl create -f ` where possible. This looks for config objects in all `.yaml`, `.yml`, and `.json` files in `` and passes them to create. -1. Create a service before corresponding replication controllers so that the scheduler can spread the pods comprising the service. 
You can also create the replication controller without specifying replicas, create the service, then scale up the replication controller, which may work better in an example using progressive disclosure and may have benefits in real scenarios also, such as ensuring one replica works before creating lots of them) -1. Don't use `hostPort` unless absolutely necessary (e.g., for a node daemon) as it will prevent certain scheduling configurations due to port conflicts. Use the apiserver proxying or port forwarding for debug/admin access, or a service for external service access. If you need to expose a pod's port on the host machine, consider using a [NodePort](/docs/user-guide/services/#type--loadbalancer) service before resorting to `hostPort`. If you only need access to the port for debugging purposes, you can also use the [kubectl proxy and apiserver proxy](/docs/user-guide/connecting-to-applications-proxy) or [kubectl port-forward](/docs/user-guide/connecting-to-applications-port-forward). -1. Don't use `hostNetwork` for the same reasons as `hostPort`. -1. Don't specify default values unnecessarily, to simplify and minimize configs. For example, omit the selector and labels in ReplicationController if you want them to be the same as the labels in its podTemplate, since those fields are populated from the podTemplate labels by default. -1. Instead of attaching one label to a set of pods to represent a service (e.g., `service: myservice`) and another to represent the replication controller managing the pods (e.g., `controller: mycontroller`), attach labels that identify semantic attributes of your application or deployment and select the appropriate subsets in your service and replication controller, such as `{ app: myapp, tier: frontend, deployment: v3 }`. A service can be made to span multiple deployments, such as across rolling updates, by simply omitting release-specific labels from its selector, rather than updating a service's selector to match the replication controller's selector fully. -1. Use kubectl bulk operations (via files and/or labels) for get and delete. See [label selectors](/docs/user-guide/labels/#label-selectors) and [using labels effectively](/docs/user-guide/managing-deployments/#using-labels-effectively). -1. Use kubectl run and expose to quickly create and expose single container replication controllers. See the [quick start guide](/docs/user-guide/quick-start) for an example. -1. Use headless services for easy service discovery when you don't need kube-proxy load balancing. See [headless services](/docs/user-guide/services/#headless-services). -1. Use kubectl delete rather than stop. Delete has a superset of the functionality of stop and stop is deprecated. -1. If there is a viable alternative to naked pods (i.e. pods not bound to a controller), go with the alternative. Controllers are almost always preferable to creating pods (except for some `restartPolicy: Never` scenarios). A minimal Job is coming. See [#1624](http://issue.k8s.io/1624). Naked pods will not be rescheduled in the event of node failure. -1. Put a version number or hash as a suffix to the name and in a label on a replication controller to facilitate rolling update, as we do for [--image](/docs/user-guide/kubectl/kubectl_rolling-update). This is necessary because rolling-update actually creates a new controller as opposed to modifying the existing controller. This does not play well with version agnostic controller names. -1. Put an object description in an annotation to allow better introspection. 
+## General Config Tips + +- When defining configurations, specify the latest stable API version (currently v1). + +- Configuration files should be stored in version control before being pushed to the cluster. This allows a configuration to be quickly rolled back if needed, and will aid with cluster re-creation and restoration if necessary. + +- Write your configuration files using YAML rather than JSON. They can be used interchangeably in almost all scenarios, but YAML tends to be more user-friendly for config. + +- Group related objects together in a single file where this makes sense. This format is often easier to manage than separate files. See the [guestbook-all-in-one.yaml](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/guestbook/all-in-one/guestbook-all-in-one.yaml) file as an example of this syntax. +(Note also that many `kubectl` commands can be called on a directory, and so you can also call +`kubectl create` on a directory of config files— see below for more detail). + +- Don't specify default values unnecessarily, in order to simplify and minimize configs, and to + reduce error. For example, omit the selector and labels in a `ReplicationController` if you want + them to be the same as the labels in its `podTemplate`, since those fields are populated from the + `podTemplate` labels by default. See the [guestbook app's](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/guestbook/) .yaml files for some [examples](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/guestbook/frontend-controller.yaml) of this. + +- Put an object description in an annotation to allow better introspection. +## "Naked" Pods vs Replication Controllers and Jobs + +- If there is a viable alternative to naked pods (i.e., pods not bound to a [replication controller + ](/docs/user-guide/replication-controller)), go with the alternative. Naked pods will not be rescheduled in the + event of node failure. + + Replication controllers are almost always preferable to creating pods, except for some explicit + [`restartPolicy: Never`](/docs/user-guide/pod-states/#restartpolicy) scenarios. A + [Job](/docs/user-guide/jobs/) object (currently in Beta), may also be appropriate. + + +## Services + +- It's typically best to create a [service](/docs/user-guide/services/) before corresponding [replication + controllers](/docs/user-guide/replication-controller/), so that the scheduler can spread the pods comprising the + service. You can also create a replication controller without specifying replicas (this will set + replicas=1), create a service, then scale up the replication controller. This can be useful in + ensuring that one replica works before creating lots of them. + +- Don't use `hostPort` (which specifies the port number to expose on the host) unless absolutely + necessary, e.g., for a node daemon. When you bind a Pod to a `hostPort`, there are a limited + number of places that pod can be scheduled, due to port conflicts— you can only schedule as many + such Pods as there are nodes in your Kubernetes cluster. + + If you only need access to the port for debugging purposes, you can use the [kubectl proxy and apiserver proxy](/docs/user-guide/connecting-to-applications-proxy/) or [kubectl port-forward](/docs/user-guide/connecting-to-applications-port-forward/). + You can use a [Service](/docs/user-guide/services/) object for external service access. 
+ If you do need to expose a pod's port on the host machine, consider using a [NodePort](/docs/user-guide/services/#type-nodeport) service before resorting to `hostPort`. + +- Avoid using `hostNetwork`, for the same reasons as `hostPort`. + +- Use _headless services_ for easy service discovery when you don't need kube-proxy load balancing. + See [headless services](/docs/user-guide/services/#headless-services). + +## Using Labels + +- Define and use [labels](/docs/user-guide/labels/) that identify __semantic attributes__ of your application or + deployment. For example, instead of attaching a label to a set of pods to explicitly represent + some service (e.g., `service: myservice`), or explicitly representing the replication + controller managing the pods (e.g., `controller: mycontroller`), attach labels that identify + semantic attributes, such as `{ app: myapp, tier: frontend, phase: test, deployment: v3 }`. This + will let you select the object groups appropriate to the context— e.g., a service for all "tier: + frontend" pods, or all "test" phase components of app "myapp". See the + [guestbook](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/guestbook/) app for an example of this approach. + + A service can be made to span multiple deployments, such as is done across [rolling updates](/docs/user-guide/kubectl/kubectl_rolling-update/), by simply omitting release-specific labels from its selector, rather than updating a service's selector to match the replication controller's selector fully. + +- To facilitate rolling updates, include version info in replication controller names, e.g. as a + suffix to the name. It is useful to set a 'version' label as well. The rolling update creates a + new controller as opposed to modifying the existing controller. So, there will be issues with + version-agnostic controller names. See the [documentation](/docs/user-guide/kubectl/kubectl_rolling-update/) on + the rolling-update command for more detail. + + Note that the [Deployment](/docs/user-guide/deployments/) object obviates the need to manage replication + controller 'version names'. A desired state of an object is described by a Deployment, and if + changes to that spec are _applied_, the deployment controller changes the actual state to the + desired state at a controlled rate. (Deployment objects are currently part of the [`extensions` + API Group](/docs/api/#api-groups), and are not enabled by default.) + +- You can manipulate labels for debugging. Because Kubernetes replication controllers and services + match to pods using labels, this allows you to remove a pod from being considered by a + controller, or served traffic by a service, by removing the relevant selector labels. If you + remove the labels of an existing pod, its controller will create a new pod to take its place. + This is a useful way to debug a previously "live" pod in a quarantine environment. See the + [`kubectl label`](/docs/user-guide/kubectl/kubectl_label/) command. + +## Container Images + +- The [default container image pull policy](images.md) is `IfNotPresent`, which causes the + [Kubelet](/docs/admin/kubelet.md) to not pull an image if it already exists. If you would like to + always force a pull, you must specify a pull image policy of `Always` in your .yaml file + (`imagePullPolicy: Always`) or specify a `:latest` tag on your image. + + That is, if you're specifying an image with other than the `:latest` tag, e.g. 
`myimage:v1`, and + there is an image update to that same tag, the Kubelet won't pull the updated image. You can + address this by ensuring that any updates to an image bump the image tag as well (e.g. + `myimage:v2`), and ensuring that your configs point to the correct version. + +## Using kubectl + +- Use `kubectl create -f ` where possible. This looks for config objects in all `.yaml`, `.yml`, and `.json` files in `` and passes them to `create`. + +- Use `kubectl delete` rather than `stop`. `Delete` has a superset of the functionality of `stop`, and `stop` is deprecated. + +- Use kubectl bulk operations (via files and/or labels) for get and delete. See [label selectors](/docs/user-guide/labels/#label-selectors) and [using labels effectively](/docs/user-guide/managing-deployments/#using-labels-effectively). + +- Use `kubectl run` and `expose` to quickly create and expose single container replication controllers. See the [quick start guide](/docs/user-guide/quick-start/) for an example. + diff --git a/docs/user-guide/configmap.md b/docs/user-guide/configmap.md new file mode 100644 index 0000000000..f1ed5edde2 --- /dev/null +++ b/docs/user-guide/configmap.md @@ -0,0 +1,511 @@ +--- +--- +Many applications require configuration via some combination of config files, command line +arguments, and environment variables. These configuration artifacts should be decoupled from image +content in order to keep containerized applications portable. The ConfigMap API resource provides +mechanisms to inject containers with configuration data while keeping containers agnostic of +Kubernetes. ConfigMap can be used to store fine-grained information like individual properties or +coarse-grained information like entire config files or JSON blobs. + + +## Overview of ConfigMap + +The ConfigMap API resource holds key-value pairs of configuration data that can be consumed in pods +or used to store configuration data for system components such as controllers. ConfigMap is similar +to [Secrets](/docs/user-guide/secrets/), but designed to more conveniently support working with strings that do not +contain sensitive information. + +Let's look at a made-up example: + +```yaml +kind: ConfigMap +apiVersion: v1 +metadata: + creationTimestamp: 2016-02-18T19:14:38Z + name: example-config + namespace: default +data: + example.property.1: hello + example.property.2: world + example.property.file: |- + property.1=value-1 + property.2=value-2 + property.3=value-3 +``` + +The `data` field contains the configuration data. As you can see, ConfigMaps can be used to hold +fine-grained information like individual properties or coarse-grained information like the contents +of configuration files. + +Configuration data can be consumed in pods in a variety of ways. ConfigMaps can be used to: + +1. Populate the value of environment variables +2. Set command-line arguments in a container +3. Populate config files in a volume + +Both users and system components may store configuration data in ConfigMap. + +## Creating ConfigMaps + +You can use the `kubectl create configmap` command to create configmaps easily from literal values, +files, or directories. 
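+In rough terms, the three forms look like this (the ConfigMap name, keys, and paths here are
+placeholders, not files shipped with this guide):
+
+```shell
+$ kubectl create configmap my-config --from-literal=key1=value1 --from-literal=key2=value2
+$ kubectl create configmap my-config --from-file=path/to/app.properties
+$ kubectl create configmap my-config --from-file=path/to/config-dir/
+```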
+ +Let's take a look at some different ways to create a ConfigMap: + +### Creating from directories + +Say that we have a directory with some files that already contain the data we want to populate a ConfigMap with: + +```shell +$ ls docs/user-guide/configmap/kubectl/ +game.properties +ui.properties + +$ cat docs/user-guide/configmap/kubectl/game.properties +enemies=aliens +lives=3 +enemies.cheat=true +enemies.cheat.level=noGoodRotten +secret.code.passphrase=UUDDLRLRBABAS +secret.code.allowed=true +secret.code.lives=30 + +$ cat docs/user-guide/configmap/kubectl/ui.properties +color.good=purple +color.bad=yellow +allow.textmode=true +how.nice.to.look=fairlyNice +``` + +The `kubectl create configmap` command can be used to create a ConfigMap holding the content of each +file in this directory: + +```console + +$ kubectl create configmap game-config --from-file=docs/user-guide/configmap/kubectl + +``` + +When `--from-file` points to a directory, each file directly in that directory is used to populate a +key in the ConfigMap, where the name of the key is the filename, and the value of the key is the +content of the file. + +Let's take a look at the ConfigMap that this command created: + +```shell +$ cluster/kubectl.sh describe configmaps game-config +Name: game-config +Namespace: default +Labels: +Annotations: + +Data +==== +game.properties: 121 bytes +ui.properties: 83 bytes +``` + +You can see the two keys in the map are created from the filenames in the directory we pointed +kubectl to. Since the content of those keys may be large, in the output of `kubectl describe`, +you'll see only the names of the keys and their sizes. + +If we want to see the values of the keys, we can simply `kubectl get` the resource: + +```shell +$ kubectl get configmaps game-config -o yaml +apiVersion: v1 +data: + game.properties: |- + enemies=aliens + lives=3 + enemies.cheat=true + enemies.cheat.level=noGoodRotten + secret.code.passphrase=UUDDLRLRBABAS + secret.code.allowed=true + secret.code.lives=30 + ui.properties: | + color.good=purple + color.bad=yellow + allow.textmode=true + how.nice.to.look=fairlyNice +kind: ConfigMap +metadata: + creationTimestamp: 2016-02-18T18:34:05Z + name: game-config + namespace: default + resourceVersion: "407"- + selfLink: /api/v1/namespaces/default/configmaps/game-config + uid: 30944725-d66e-11e5-8cd0-68f728db1985 +``` + +### Creating from files + +We can also pass `--from-file` a specific file, and pass it multiple times to kubectl. 
The
+following command yields equivalent results to the above example:
+
+```shell
+$ kubectl create configmap game-config-2 --from-file=docs/user-guide/configmap/kubectl/game.properties --from-file=docs/user-guide/configmap/kubectl/ui.properties
+
+$ cluster/kubectl.sh get configmaps game-config-2 -o yaml
+apiVersion: v1
+data:
+  game.properties: |-
+    enemies=aliens
+    lives=3
+    enemies.cheat=true
+    enemies.cheat.level=noGoodRotten
+    secret.code.passphrase=UUDDLRLRBABAS
+    secret.code.allowed=true
+    secret.code.lives=30
+  ui.properties: |
+    color.good=purple
+    color.bad=yellow
+    allow.textmode=true
+    how.nice.to.look=fairlyNice
+kind: ConfigMap
+metadata:
+  creationTimestamp: 2016-02-18T18:52:05Z
+  name: game-config-2
+  namespace: default
+  resourceVersion: "516"
+  selfLink: /api/v1/namespaces/default/configmaps/game-config-2
+  uid: b4952dc3-d670-11e5-8cd0-68f728db1985
+```
+
+We can also set the key to use for an individual file with `--from-file` by passing an expression
+of `key=value`: `--from-file=game-special-key=docs/user-guide/configmap/kubectl/game.properties`:
+
+```shell
+$ kubectl create configmap game-config-3 --from-file=game-special-key=docs/user-guide/configmap/kubectl/game.properties
+
+$ kubectl get configmaps game-config-3 -o yaml
+apiVersion: v1
+data:
+  game-special-key: |-
+    enemies=aliens
+    lives=3
+    enemies.cheat=true
+    enemies.cheat.level=noGoodRotten
+    secret.code.passphrase=UUDDLRLRBABAS
+    secret.code.allowed=true
+    secret.code.lives=30
+kind: ConfigMap
+metadata:
+  creationTimestamp: 2016-02-18T18:54:22Z
+  name: game-config-3
+  namespace: default
+  resourceVersion: "530"
+  selfLink: /api/v1/namespaces/default/configmaps/game-config-3
+  uid: 05f8da22-d671-11e5-8cd0-68f728db1985
+```
+
+### Creating from literal values
+
+It is also possible to supply literal values for ConfigMaps using `kubectl create configmap`. The
+`--from-literal` option takes a `key=value` syntax that allows literal values to be supplied
+directly on the command line:
+
+```shell
+$ kubectl create configmap special-config --from-literal=special.how=very --from-literal=special.type=charm
+
+$ kubectl get configmaps special-config -o yaml
+apiVersion: v1
+data:
+  special.how: very
+  special.type: charm
+kind: ConfigMap
+metadata:
+  creationTimestamp: 2016-02-18T19:14:38Z
+  name: special-config
+  namespace: default
+  resourceVersion: "651"
+  selfLink: /api/v1/namespaces/default/configmaps/special-config
+  uid: dadce046-d673-11e5-8cd0-68f728db1985
+```
+
+## Consuming ConfigMap in pods
+
+### Use-Case: Consume ConfigMap in environment variables
+
+ConfigMaps can be used to populate the values of environment variables. As an example, consider
+the following ConfigMap:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: special-config
+  namespace: default
+data:
+  special.how: very
+  special.type: charm
+```
+
+We can consume the keys of this ConfigMap in a pod like so:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: dapi-test-pod
+spec:
+  containers:
+    - name: test-container
+      image: gcr.io/google_containers/busybox
+      command: [ "/bin/sh", "-c", "env" ]
+      env:
+        - name: SPECIAL_LEVEL_KEY
+          valueFrom:
+            configMapKeyRef:
+              name: special-config
+              key: special.how
+        - name: SPECIAL_TYPE_KEY
+          valueFrom:
+            configMapKeyRef:
+              name: special-config
+              key: special.type
+  restartPolicy: Never
+```
+
+When this pod is run, its output will include the lines:
+
+```shell
+SPECIAL_LEVEL_KEY=very
+SPECIAL_TYPE_KEY=charm
+```
+
+### Use-Case: Set command-line arguments with ConfigMap
+
+ConfigMaps can also be used to set the value of the command or arguments in a container. This is
+accomplished using the Kubernetes substitution syntax `$(VAR_NAME)`. Consider the ConfigMap:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: special-config
+  namespace: default
+data:
+  special.how: very
+  special.type: charm
+```
+
+In order to inject values into the command line, we must consume the keys we want to use as
+environment variables, as in the last example. Then we can refer to them in a container's command
+using the `$(VAR_NAME)` syntax.
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: dapi-test-pod
+spec:
+  containers:
+    - name: test-container
+      image: gcr.io/google_containers/busybox
+      command: [ "/bin/sh", "-c", "echo $(SPECIAL_LEVEL_KEY) $(SPECIAL_TYPE_KEY)" ]
+      env:
+        - name: SPECIAL_LEVEL_KEY
+          valueFrom:
+            configMapKeyRef:
+              name: special-config
+              key: special.how
+        - name: SPECIAL_TYPE_KEY
+          valueFrom:
+            configMapKeyRef:
+              name: special-config
+              key: special.type
+  restartPolicy: Never
+```
+
+When this pod is run, the output from the `test-container` container will be:
+
+```shell
+very charm
+```
+
+### Use-Case: Consume ConfigMap via volume plugin
+
+ConfigMaps can also be consumed in volumes. Returning again to our example ConfigMap:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: special-config
+  namespace: default
+data:
+  special.how: very
+  special.type: charm
+```
+
+We have a couple different options for consuming this ConfigMap in a volume. The most basic
+way is to populate the volume with files where the key is the filename and the content of the file
+is the value of the key:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: dapi-test-pod
+spec:
+  containers:
+    - name: test-container
+      image: gcr.io/google_containers/busybox
+      command: [ "/bin/sh", "-c", "cat /etc/config/special.how" ]
+      volumeMounts:
+        - name: config-volume
+          mountPath: /etc/config
+  volumes:
+    - name: config-volume
+      configMap:
+        name: special-config
+  restartPolicy: Never
+```
+
+When this pod is run, the output will be:
+
+```shell
+very
+```
+
+We can also control the paths within the volume where ConfigMap keys are projected:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: dapi-test-pod
+spec:
+  containers:
+    - name: test-container
+      image: gcr.io/google_containers/busybox
+      command: [ "/bin/sh", "-c", "cat /etc/config/path/to/special-key" ]
+      volumeMounts:
+        - name: config-volume
+          mountPath: /etc/config
+  volumes:
+    - name: config-volume
+      configMap:
+        name: special-config
+        items:
+          - key: special.how
+            path: path/to/special-key
+  restartPolicy: Never
+```
+
+When this pod is run, the output will be:
+
+```shell
+very
+```
+
+## Real World Example: Configuring Redis
+
+Let's take a look at a real-world example: configuring redis using ConfigMap. Say we want to inject
+redis with the recommended configuration for using redis as a cache. The redis config file
+should contain:
+
+```conf
+maxmemory 2mb
+maxmemory-policy allkeys-lru
+```
+
+Such a file is in `docs/user-guide/configmap/redis`; we can use the following command to create a
+ConfigMap instance with it:
+
+```shell
+$ kubectl create configmap example-redis-config --from-file=docs/user-guide/configmap/redis/redis-config
+
+$ kubectl get configmap example-redis-config -o yaml
+```
+
+```yaml
+{
+  "kind": "ConfigMap",
+  "apiVersion": "v1",
+  "metadata": {
+    "name": "example-redis-config",
+    "namespace": "default",
+    "selfLink": "/api/v1/namespaces/default/configmaps/example-redis-config",
+    "uid": "07fd0419-d97b-11e5-b443-68f728db1985",
+    "resourceVersion": "15",
+    "creationTimestamp": "2016-02-22T15:43:34Z"
+  },
+  "data": {
+    "redis-config": "maxmemory 2mb\nmaxmemory-policy allkeys-lru\n"
+  }
+}
+```
+
+Now, let's create a pod that uses this config:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: redis
+spec:
+  containers:
+    - name: redis
+      image: kubernetes/redis:v1
+      env:
+        - name: MASTER
+          value: "true"
+      ports:
+        - containerPort: 6379
+      resources:
+        limits:
+          cpu: "0.1"
+      volumeMounts:
+        - mountPath: /redis-master-data
+          name: data
+        - mountPath: /redis-master
+          name: config
+  volumes:
+    - name: data
+      emptyDir: {}
+    - name: config
+      configMap:
+        name: example-redis-config
+        items:
+          - key: redis-config
+            path: redis.conf
+```
+
+Notice that this pod has a ConfigMap volume that places the `redis-config` key of the
+`example-redis-config` ConfigMap into a file called `redis.conf`. This volume is mounted into the
+`/redis-master` directory in the redis container, placing our config file at
+`/redis-master/redis.conf`, which is where the image looks for the redis config file for the master.
+ +```shell +$ kubectl create -f docs/user-guide/configmap/redis/redis-pod.yaml +``` + +If we `kubectl exec` into this pod and run the `redis-cli` tool, we can check that our config was +applied correctly: + +```shell +$ kubectl exec -it redis redis-cli +127.0.0.1:6379> CONFIG GET maxmemory +1) "maxmemory" +2) "2097152" +127.0.0.1:6379> CONFIG GET maxmemory-policy +1) "maxmemory-policy" +2) "allkeys-lru" +``` + +## Restrictions + +ConfigMaps must be created before they are consumed in pods. Controllers may be written to tolerate +missing configuration data; consult individual components configured via ConfigMap on a case-by-case +basis. + +ConfigMaps reside in a namespace. They can only be referenced by pods in the same namespace. + +Quota for ConfigMap size is a planned feature. + +Kubelet only supports use of ConfigMap for pods it gets from the API server. This includes any pods +created using kubectl, or indirectly via a replication controller. It does not include pods created +via the Kubelet's `--manifest-url` flag, its `--config` flag, or its REST API (these are not common +ways to create pods.) diff --git a/docs/user-guide/configmap/README.md b/docs/user-guide/configmap/README.md index e2f98e70a7..980acb8090 100644 --- a/docs/user-guide/configmap/README.md +++ b/docs/user-guide/configmap/README.md @@ -1,8 +1,3 @@ - - - - - # ConfigMap example @@ -11,21 +6,21 @@ This example assumes you have a Kubernetes cluster installed and running, and that you have installed the `kubectl` command line tool somewhere in your path. Please see the [getting -started](../../../docs/getting-started-guides/) for installation instructions for your platform. +started](http://kubernetes.io/docs/getting-started-guides/) for installation instructions for your platform. ## Step One: Create the ConfigMap A ConfigMap contains a set of named strings. -Use the [`examples/configmap/configmap.yaml`](configmap.yaml) file to create a ConfigMap: +Use the [`configmap.yaml`](configmap.yaml) file to create a ConfigMap: -```console +```shell $ kubectl create -f docs/user-guide/configmap/configmap.yaml ``` You can use `kubectl` to see information about the ConfigMap: -```console +```shell $ kubectl get configmap NAME DATA test-secret 2 @@ -43,7 +38,7 @@ data-2: 7 bytes View the values of the keys with `kubectl get`: -```console +```shell $ cluster/kubectl.sh get configmaps test-configmap -o yaml apiVersion: v1 data: @@ -61,16 +56,16 @@ metadata: ## Step Two: Create a pod that consumes a configMap in environment variables -Use the [`examples/configmap/env-pod.yaml`](env-pod.yaml) file to create a Pod that consumes the +Use the [`env-pod.yaml`](env-pod.yaml) file to create a Pod that consumes the ConfigMap in environment variables. 
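+The pod spec in that file looks roughly like the sketch below. (The checked-in `env-pod.yaml` is
+authoritative; the ConfigMap name `test-configmap` and keys `data-1` and `data-2` come from Step
+One, while the pod and container names here are illustrative.)
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: secret-test-pod
+spec:
+  restartPolicy: Never
+  containers:
+    - name: test-container
+      image: gcr.io/google_containers/busybox
+      command: [ "/bin/sh", "-c", "env" ]
+      env:
+        - name: KUBE_CONFIG_1
+          valueFrom:
+            configMapKeyRef:
+              name: test-configmap
+              key: data-1
+        - name: KUBE_CONFIG_2
+          valueFrom:
+            configMapKeyRef:
+              name: test-configmap
+              key: data-2
+```
+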
-```console +```shell $ kubectl create -f docs/user-guide/configmap/env-pod.yaml ``` This pod runs the `env` command to display the environment of the container: -```console +```shell $ kubectl logs secret-test-pod KUBE_CONFIG_1=value-1 KUBE_CONFIG_2=value-2 @@ -78,40 +73,29 @@ KUBE_CONFIG_2=value-2 ## Step Three: Create a pod that sets the command line using ConfigMap -Use the [`examples/configmap/command-pod.yaml`](env-pod.yaml) file to create a Pod with a container +Use the [`command-pod.yaml`](env-pod.yaml) file to create a Pod with a container whose command is injected with the keys of a ConfigMap -```console +```shell $ kubectl create -f docs/user-guide/configmap/env-pod.yaml ``` This pod runs an `echo` command to display the keys: -```console +```shell value-1 value-2 ``` ## Step Four: Create a pod that consumes a configMap in a volume -Pods can also consume ConfigMaps in volumes. Use the [`examples/configmap/volume-pod.yaml`](volume-pod.yaml) file to create a Pod that consume the ConfigMap in a volume. +Pods can also consume ConfigMaps in volumes. Use the [`volume-pod.yaml`](volume-pod.yaml) file to create a Pod that consume the ConfigMap in a volume. -```console +```shell $ kubectl create -f docs/user-guide/configmap/volume-pod.yaml ``` This pod runs a `cat` command to print the value of one of the keys in the volume: -```console +```shell value-1 -``` - - - - - - - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/configmap/README.md?pixel)]() - +``` \ No newline at end of file diff --git a/docs/user-guide/configuring-containers.md b/docs/user-guide/configuring-containers.md index 14314ec0ec..a48ab34970 100644 --- a/docs/user-guide/configuring-containers.md +++ b/docs/user-guide/configuring-containers.md @@ -34,7 +34,7 @@ The value of `metadata.name`, `hello-world`, will be the name of the pod resourc The [`command`](/docs/user-guide/containers/#containers-and-commands) overrides the Docker container's `Entrypoint`. Command arguments (corresponding to Docker's `Cmd`) may be specified using `args`, as follows: ```yaml -command: ["/bin/echo"] + command: ["/bin/echo"] args: ["hello","world"] ``` @@ -49,22 +49,17 @@ pods/hello-world ## Validating configuration -If you're not sure you specified the resource correctly, you can ask `kubectl` to validate it for you: - -```shell -$ kubectl create -f ./hello-world.yaml --validate -``` +We enable validation by default in `kubectl` since v1.1. Let's say you specified `entrypoint` instead of `command`. You'd see output as follows: ```shell -I0709 06:33:05.600829 14160 schema.go:126] unknown field: entrypoint -I0709 06:33:05.600988 14160 schema.go:129] this may be a false alarm, see http://issue.k8s.io/6842 pods/hello-world +error validating "./hello-world.yaml": error validating data: found invalid field Entrypoint for v1.Container; if you choose to ignore these errors, turn validation off with --validate=false ``` -`kubectl create --validate` currently warns about problems it detects, but creates the resource anyway, unless a required field is absent or a field value is invalid. Unknown API fields are ignored, so be careful. This pod was created, but with no `command`, which is an optional field, since the image may specify an `Entrypoint`. +Using `kubectl create --validate=false` to turn validation off, it creates the resource anyway, unless a required field is absent or a field value is invalid. Unknown API fields are ignored, so be careful. 
This pod was created, but with no `command`, which is an optional field, since the image may specify an `Entrypoint`. View the [Pod API -object](http://kubernetes.io/v1.1/docs/api-reference/v1/definitions/#_v1_pod) +object](/docs/api-reference/v1/definitions/#_v1_pod) to see the list of valid fields. ## Environment variables and variable expansion @@ -76,7 +71,7 @@ apiVersion: v1 kind: Pod metadata: name: hello-world -spec: # specification of the pod's contents +spec: # specification of the pod’s contents restartPolicy: Never containers: - name: hello @@ -91,7 +86,7 @@ spec: # specification of the pod's contents However, a shell isn't necessary just to expand environment variables. Kubernetes will do it for you if you use [`$(ENVVAR)` syntax](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/expansion): ```yaml -command: ["/bin/echo"] + command: ["/bin/echo"] args: ["$(MESSAGE)"] ``` @@ -129,7 +124,7 @@ hello-world 0/1 ExitCode:0 0 15s ## Viewing pod output -You probably want to see the output of the command you ran. As with [`docker logs`](https://docs.docker.com/userguide/usingdocker/), `kubectl logs` will show you the output: +You probably want to see the output of the command you ran. As with [`docker logs`](https://docs.docker.com/engine/reference/commandline/logs/), `kubectl logs` will show you the output: ```shell $ kubectl logs hello-world diff --git a/docs/user-guide/connecting-applications.md b/docs/user-guide/connecting-applications.md index 9d00a4b32d..327630c1cb 100644 --- a/docs/user-guide/connecting-applications.md +++ b/docs/user-guide/connecting-applications.md @@ -12,7 +12,7 @@ By default, Docker uses host-private networking, so containers can talk to other Coordinating ports across multiple developers is very difficult to do at scale and exposes users to cluster-level issues outside of their control. Kubernetes assumes that pods can communicate with other pods, regardless of which host they land on. We give every pod its own cluster-private-IP address so you do not need to explicitly create links between pods or mapping container ports to host ports. This means that containers within a Pod can all reach each other's ports on localhost, and all pods in a cluster can see each other without NAT. The rest of this document will elaborate on how you can run reliable services on such a networking model. -This guide uses a simple nginx server to demonstrate proof of concept. The same principles are embodied in a more complete [Jenkins CI application](http://blog.kubernetes.io/2015/07/strong-simple-ssl-for-kubernetes). +This guide uses a simple nginx server to demonstrate proof of concept. The same principles are embodied in a more complete [Jenkins CI application](http://blog.kubernetes.io/2015/07/strong-simple-ssl-for-kubernetes.html). ## Exposing pods to the cluster @@ -43,8 +43,8 @@ This makes it accessible from any node in your cluster. 
Check the nodes the pod ```shell $ kubectl create -f ./nginxrc.yaml $ kubectl get pods -l app=nginx -o wide -my-nginx-6isf4 1/1 Running 0 2h e2e-test-beeps-minion-93ly -my-nginx-t26zt 1/1 Running 0 2h e2e-test-beeps-minion-93ly +my-nginx-6isf4 1/1 Running 0 2h e2e-test-beeps-node-93ly +my-nginx-t26zt 1/1 Running 0 2h e2e-test-beeps-node-93ly ``` Check your pods' IPs: @@ -83,7 +83,7 @@ spec: app: nginx ``` -This specification will create a Service which targets TCP port 80 on any Pod with the `app=nginx` label, and expose it on an abstracted Service port (`targetPort`: is the port the container accepts traffic on, `port`: is the abstracted Service port, which can be any port other pods use to access the Service). View [service API object](http://kubernetes.io/v1.1/docs/api-reference/v1/definitions/#_v1_service) to see the list of supported fields in service definition. +This specification will create a Service which targets TCP port 80 on any Pod with the `app=nginx` label, and expose it on an abstracted Service port (`targetPort`: is the port the container accepts traffic on, `port`: is the abstracted Service port, which can be any port other pods use to access the Service). View [service API object](/docs/api-reference/v1/definitions/#_v1_service) to see the list of supported fields in service definition. Check your Service: ```shell @@ -289,7 +289,7 @@ Lets test this from a pod (the same secret is being reused for simplicity, the p ```shell $ cat curlpod.yaml -vapiVersion: v1 +apiVersion: v1 kind: ReplicationController metadata: name: curlrc @@ -367,7 +367,7 @@ $ curl https://104.197.63.17:30645 -k Lets now recreate the Service to use a cloud load balancer, just change the `Type` of Service in the nginx-app.yaml from `NodePort` to `LoadBalancer`: ```shell -$ kubectl delete rc, svc -l app=nginx +$ kubectl delete rc,svc -l app=nginx $ kubectl create -f ./nginx-app.yaml $ kubectl get svc nginxsvc NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE @@ -381,6 +381,18 @@ $ curl https://162.22.184.144 -k The IP address in the `EXTERNAL_IP` column is the one that is available on the public internet. The `CLUSTER_IP` is only available inside your cluster/private cloud network. +Note that on AWS, type `LoadBalancer` creates an ELB, which uses a (long) +hostname, not an IP. It's too long to fit in the standard `kubectl get svc` +output, in fact, so you'll need to do `kubectl describe service nginxsvc` to +see it. You'll see something like this: + +```shell +> kubectl describe service nginxsvc +... +LoadBalancer Ingress: a320587ffd19711e5a37606cf4a74574-1142138393.us-east-1.elb.amazonaws.com +... +``` + ## What's next? [Learn about more Kubernetes features that will help you run containers reliably in production.](/docs/user-guide/production-pods) diff --git a/docs/user-guide/containers.md b/docs/user-guide/containers.md index 215a87430c..c38453af42 100644 --- a/docs/user-guide/containers.md +++ b/docs/user-guide/containers.md @@ -18,9 +18,9 @@ we can use: Docker images have metadata associated with them that is used to store information about the image. The image author may use this to define defaults for the command and arguments to run a container -when the user does not supply values. Docker calls the fields for commands and arguments -`Entrypoint` and `Cmd` respectively. The full details for this feature are too complicated to -describe here, mostly due to the fact that the Docker API allows users to specify both of these +when the user does not supply values. 
Docker calls the fields for commands and arguments +`Entrypoint` and `Cmd` respectively. The full details for this feature are too complicated to +describe here, mostly due to the fact that the docker API allows users to specify both of these fields as either a string array or a string and there are subtle differences in how those cases are handled. We encourage the curious to check out Docker's documentation for this feature. @@ -50,7 +50,7 @@ Here are examples for these rules in table format By default, Docker containers are "unprivileged" and cannot, for example, run a Docker daemon inside a Docker container. We can have fine grain control over the capabilities using cap-add and cap-drop.More details [here](https://docs.docker.com/reference/run/#runtime-privilege-linux-capabilities-and-lxc-configuration). -The relationship between Docker's capabilities and [Linux capabilities](http://man7.org/linux/man-pages/man7/capabilities.7) +The relationship between Docker's capabilities and [Linux capabilities](http://man7.org/linux/man-pages/man7/capabilities.7.html) | Docker's capabilities | Linux capabilities | | ---- | ---- | diff --git a/docs/user-guide/debugging-services.md b/docs/user-guide/debugging-services.md index d9cea7e5b9..790f15f71b 100644 --- a/docs/user-guide/debugging-services.md +++ b/docs/user-guide/debugging-services.md @@ -417,24 +417,16 @@ depends on your `Node` OS. On some OSes it is a file, such as should see something like: ```shell -I0707 17:34:53.945651 30031 server.go:88] Running in resource-only container "/kube-proxy" -I0707 17:34:53.945921 30031 proxier.go:121] Setting proxy IP to 10.240.115.247 and initializing iptables -I0707 17:34:54.053023 30031 roundrobin.go:262] LoadBalancerRR: Setting endpoints for default/kubernetes: to [10.240.169.188:443] -I0707 17:34:54.053175 30031 roundrobin.go:262] LoadBalancerRR: Setting endpoints for default/hostnames:default to [10.244.0.5:9376 10.244.0.6:9376 10.244.0.7:9376] -I0707 17:34:54.053284 30031 roundrobin.go:262] LoadBalancerRR: Setting endpoints for default/kube-dns:dns to [10.244.3.3:53] -I0707 17:34:54.053310 30031 roundrobin.go:262] LoadBalancerRR: Setting endpoints for default/kube-dns:dns-tcp to [10.244.3.3:53] -I0707 17:34:54.054780 30031 proxier.go:306] Adding new service "default/kubernetes:" at 10.0.0.1:443/TCP -I0707 17:34:54.054903 30031 proxier.go:247] Proxying for service "default/kubernetes:" on TCP port 40074 -I0707 17:34:54.079181 30031 proxier.go:306] Adding new service "default/hostnames:default" at 10.0.1.175:80/TCP -I0707 17:34:54.079273 30031 proxier.go:247] Proxying for service "default/hostnames:default" on TCP port 48577 -I0707 17:34:54.113665 30031 proxier.go:306] Adding new service "default/kube-dns:dns" at 10.0.0.10:53/UDP -I0707 17:34:54.113776 30031 proxier.go:247] Proxying for service "default/kube-dns:dns" on UDP port 34149 -I0707 17:34:54.120224 30031 proxier.go:306] Adding new service "default/kube-dns:dns-tcp" at 10.0.0.10:53/TCP -I0707 17:34:54.120297 30031 proxier.go:247] Proxying for service "default/kube-dns:dns-tcp" on TCP port 53476 -I0707 17:34:54.902313 30031 proxysocket.go:130] Accepted TCP connection from 10.244.3.3:42670 to 10.244.3.1:40074 -I0707 17:34:54.903107 30031 proxysocket.go:130] Accepted TCP connection from 10.244.3.3:42671 to 10.244.3.1:40074 -I0707 17:35:46.015868 30031 proxysocket.go:246] New UDP connection from 10.244.3.2:57493 -I0707 17:35:46.017061 30031 proxysocket.go:246] New UDP connection from 10.244.3.2:55471 +I1027 22:14:53.995134 5063 server.go:200] 
Running in resource-only container "/kube-proxy" +I1027 22:14:53.998163 5063 server.go:247] Using iptables Proxier. +I1027 22:14:53.999055 5063 server.go:255] Tearing down userspace rules. Errors here are acceptable. +I1027 22:14:54.038140 5063 proxier.go:352] Setting endpoints for "kube-system/kube-dns:dns-tcp" to [10.244.1.3:53] +I1027 22:14:54.038164 5063 proxier.go:352] Setting endpoints for "kube-system/kube-dns:dns" to [10.244.1.3:53] +I1027 22:14:54.038209 5063 proxier.go:352] Setting endpoints for "default/kubernetes:https" to [10.240.0.2:443] +I1027 22:14:54.038238 5063 proxier.go:429] Not syncing iptables until Services and Endpoints have been received from master +I1027 22:14:54.040048 5063 proxier.go:294] Adding new service "default/kubernetes:https" at 10.0.0.1:443/TCP +I1027 22:14:54.040154 5063 proxier.go:294] Adding new service "kube-system/kube-dns:dns" at 10.0.0.10:53/UDP +I1027 22:14:54.040223 5063 proxier.go:294] Adding new service "kube-system/kube-dns:dns-tcp" at 10.0.0.10:53/TCP ``` If you see error messages about not being able to contact the master, you @@ -446,6 +438,12 @@ One of the main responsibilities of `kube-proxy` is to write the `iptables` rules which implement `Service`s. Let's check that those rules are getting written. +The kube-proxy can run in either "userspace" mode or "iptables" mode. +Hopefully you are using the newer, faster, more stable "iptables" mode. You +should see one of the following cases. + +#### Userspace + ```shell u@node$ iptables-save | grep hostnames -A KUBE-PORTALS-CONTAINER -d 10.0.1.175/32 -p tcp -m comment --comment "default/hostnames:default" -m tcp --dport 80 -j REDIRECT --to-ports 48577 @@ -457,6 +455,27 @@ example) - a "KUBE-PORTALS-CONTAINER" and a "KUBE-PORTALS-HOST". If you do not see these, try restarting `kube-proxy` with the `-V` flag set to 4, and then look at the logs again. +#### Iptables + +```shell +u@node$ iptables-save | grep hostnames +-A KUBE-SEP-57KPRZ3JQVENLNBR -s 10.244.3.6/32 -m comment --comment "default/hostnames:" -j MARK --set-xmark 0x00004000/0x00004000 +-A KUBE-SEP-57KPRZ3JQVENLNBR -p tcp -m comment --comment "default/hostnames:" -m tcp -j DNAT --to-destination 10.244.3.6:9376 +-A KUBE-SEP-WNBA2IHDGP2BOBGZ -s 10.244.1.7/32 -m comment --comment "default/hostnames:" -j MARK --set-xmark 0x00004000/0x00004000 +-A KUBE-SEP-WNBA2IHDGP2BOBGZ -p tcp -m comment --comment "default/hostnames:" -m tcp -j DNAT --to-destination 10.244.1.7:9376 +-A KUBE-SEP-X3P2623AGDH6CDF3 -s 10.244.2.3/32 -m comment --comment "default/hostnames:" -j MARK --set-xmark 0x00004000/0x00004000 +-A KUBE-SEP-X3P2623AGDH6CDF3 -p tcp -m comment --comment "default/hostnames:" -m tcp -j DNAT --to-destination 10.244.2.3:9376 +-A KUBE-SERVICES -d 10.0.1.175/32 -p tcp -m comment --comment "default/hostnames: cluster IP" -m tcp --dport 80 -j KUBE-SVC-NWV5X2332I4OT4T3 +-A KUBE-SVC-NWV5X2332I4OT4T3 -m comment --comment "default/hostnames:" -m statistic --mode random --probability 0.33332999982 -j KUBE-SEP-WNBA2IHDGP2BOBGZ +-A KUBE-SVC-NWV5X2332I4OT4T3 -m comment --comment "default/hostnames:" -m statistic --mode random --probability 0.50000000000 -j KUBE-SEP-X3P2623AGDH6CDF3 +-A KUBE-SVC-NWV5X2332I4OT4T3 -m comment --comment "default/hostnames:" -j KUBE-SEP-57KPRZ3JQVENLNBR +``` + +There should be 1 rule in `KUBE-SERVICES`, 1 or 2 rules per endpoint in +`KUBE-SVC-(hash)` (depending on `SessionAffinity`), one `KUBE-SEP-(hash)` chain +per endpoint, and a few rules in each `KUBE-SEP-(hash)` chain. 
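As a rough sanity check against the output above, you can count the DNAT rules for the Service; with the three `hostnames` endpoints in this walkthrough you would expect one DNAT rule per endpoint (this assumes the iptables proxier and the same Service name used throughout this document):

```shell
u@node$ iptables-save | grep "default/hostnames" | grep -c DNAT
3
```
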
The exact rules +will vary based on your exact config (including node-ports and load-balancers). + ### Is kube-proxy proxying? Assuming you do see the above rules, try again to access your `Service` by IP: @@ -466,10 +485,12 @@ u@node$ curl 10.0.1.175:80 hostnames-0uton ``` -If this fails, we can try accessing the proxy directly. Look back at the -`iptables-save` output above, and extract the port number that `kube-proxy` is -using for your `Service`. In the above examples it is "48577". Now connect to -that: +If this fails and you are using the userspace proxy, you can try accessing the +proxy directly. If you are using the iptables proxy, skip this section. + +Look back at the `iptables-save` output above, and extract the +port number that `kube-proxy` is using for your `Service`. In the above +examples it is "48577". Now connect to that: ```shell u@node$ curl localhost:48577 diff --git a/docs/user-guide/deploying-applications.md b/docs/user-guide/deploying-applications.md index d573c7d3a5..0a0e49b40f 100644 --- a/docs/user-guide/deploying-applications.md +++ b/docs/user-guide/deploying-applications.md @@ -1,7 +1,6 @@ --- --- -You previously read about how to quickly deploy a simple replicated application using [`kubectl run`](/docs/user-guide/quick-start) and how to configure and launch single-run containers using pods ([Configuring containers](/docs/user-guide/configuring-containers)). Here you'll use the configuration-based approach to deploy a continuously running, replicated application. * TOC {:toc} @@ -35,7 +34,7 @@ spec: Some differences compared to specifying just a pod are that the `kind` is `ReplicationController`, the number of `replicas` desired is specified, and the pod specification is under the `template` field. The names of the pods don't need to be specified explicitly because they are generated from the name of the replication controller. View the [replication controller API -object](http://kubernetes.io/v1.1/docs/api-reference/v1/definitions/#_v1_replicationcontroller) +object](/docs/api-reference/v1/definitions/#_v1_replicationcontroller) to view the list of supported fields. This replication controller can be created using `create`, just as with pods: diff --git a/docs/user-guide/deployments.md b/docs/user-guide/deployments.md index 662108c1b3..a35260a079 100644 --- a/docs/user-guide/deployments.md +++ b/docs/user-guide/deployments.md @@ -6,30 +6,16 @@ ## What is a _Deployment_? -A _Deployment_ provides declarative update for Pods and ReplicationControllers. -Users describe the desired state in deployment object and deployment -controller changes the actual state to that at a controlled rate. -Users can define deployments to create new resources, or replace existing ones +A _Deployment_ provides declarative updates for Pods and ReplicationControllers. +Users describe the desired state in a Deployment object, and the deployment +controller changes the actual state to the desired state at a controlled rate. +Users can define Deployments to create new resources, or replace existing ones by new ones. A typical use case is: -* Create a deployment to bring up a replication controller and pods. -* Later, update that deployment to recreate the pods (for ex: to use a new image). - -## Enabling Deployments on kubernetes cluster - -Deployments is part of the [`extensions` API Group](/docs/api/#api-groups) and is not enabled by default. -Set `--runtime-config=extensions/v1beta1/deployments=true` on API server to -enable it. 
-This can be achieved by exporting `ENABLE_DEPLOYMENTS=true` before running -`kube-up.sh` script on GCE. - -Note that Deployment objects effectively have [API version -`v1alpha1`](/docs/api/)#api-versioning). -Alpha objects may change or even be discontinued in future software releases. -However, due to to a known issue, they will appear as API version `v1beta1` if -enabled. +* Create a Deployment to bring up a replication controller and pods. +* Later, update that Deployment to recreate the pods (for example, to use a new image). ## Creating a Deployment @@ -45,7 +31,13 @@ $ kubectl create -f docs/user-guide/nginx-deployment.yaml deployment "nginx-deployment" created ``` -Running a get immediately will give: +Running + +```console +$ kubectl get deployments +``` + +immediately will give: ```shell $ kubectl get deployments @@ -53,10 +45,9 @@ NAME UPDATEDREPLICAS AGE nginx-deployment 0/3 8s ``` -This indicates that deployment is trying to update 3 replicas. It has not -updated any one of those yet. +This indicates that the Deployment is trying to update 3 replicas, and has not updated any of them yet. -Running a get again after a minute, will give: +Running the `get` again after a minute, should give: ```shell $ kubectl get deployments @@ -64,16 +55,14 @@ NAME UPDATEDREPLICAS AGE nginx-deployment 3/3 1m ``` -This indicates that deployent has created all the 3 replicas. -Running ```kubectl get rc``` -and ```kubectl get pods``` -will show the replication controller (RC) and pods created. +This indicates that the Deployment has created all three replicas. +Running `kubectl get rc` and `kubectl get pods` will show the replication controller (RC) and pods created. ```shell $ kubectl get rc CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS AGE REPLICAS AGE -deploymentrc-1975012602 nginx nginx:1.7.9 deployment.kubernetes.io/podTemplateHash=1975012602,app=nginx 3 2m +deploymentrc-1975012602 nginx nginx:1.7.9 pod-template-hash=1975012602,app=nginx 3 2m ``` ```shell @@ -84,22 +73,24 @@ deploymentrc-1975012602-j975u 1/1 Running 0 1m deploymentrc-1975012602-uashb 1/1 Running 0 1m ``` -The created RC will ensure that there are 3 nginx pods at all time. +The created RC will ensure that there are three nginx pods at all times. ## Updating a Deployment -Lets say, now we want to update the nginx pods to start using nginx:1.9.1 image -instead of nginx:1.7.9. -For this, we update our deployment to be as follows: +Suppose that we now want to update the nginx pods to start using the `nginx:1.9.1` image +instead of the `nginx:1.7.9` image. +For this, we update our deployment file as follows: {% include code.html language="yaml" file="new-nginx-deployment.yaml" ghlink="/docs/user-guide/new-nginx-deployment.yaml" %} +We can then `apply` the Deployment: + ```shell $ kubectl apply -f docs/user-guide/new-nginx-deployment.yaml deployment "nginx-deployment" configured ``` -Running a get immediately will still give: +Running a `get` immediately will still give: ```shell $ kubectl get deployments @@ -109,7 +100,7 @@ nginx-deployment 3/3 8s This indicates that deployment status has not been updated yet (it is still showing old status). -Running a get again after a minute, will give: +Running a `get` again after a minute, should show: ```shell $ kubectl get deployments @@ -117,9 +108,9 @@ NAME UPDATEDREPLICAS AGE nginx-deployment 1/3 1m ``` -This indicates that deployment has updated one of the three pods that it needs +This indicates that the Deployment has updated one of the three pods that it needs to update. 
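If you would rather not keep re-running the command by hand, a simple polling loop lets you watch the `UPDATEDREPLICAS` column move (shown purely as a convenience; stop it with Ctrl-C once the count reaches 3/3):

```shell
$ while true; do kubectl get deployments nginx-deployment; sleep 10; done
```
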
-Eventually, it will get around to updating all the pods. +Eventually, it will update all the pods. ```shell $ kubectl get deployments @@ -127,18 +118,17 @@ NAME UPDATEDREPLICAS AGE nginx-deployment 3/3 3m ``` -We can run `kubectl get rc` -to see that deployment updated the pods by creating a new RC -which it scaled up to 3 and scaled down the old RC to 0. +We can run `kubectl get rc` to see that the Deployment updated the pods by creating a new RC, +which it scaled up to 3 replicas, and has scaled down the old RC to 0 replicas. ```shell kubectl get rc CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS AGE -deploymentrc-1562004724 nginx nginx:1.9.1 deployment.kubernetes.io/podTemplateHash=1562004724,app=nginx 3 5m -deploymentrc-1975012602 nginx nginx:1.7.9 deployment.kubernetes.io/podTemplateHash=1975012602,app=nginx 0 7m +deploymentrc-1562004724 nginx nginx:1.9.1 pod-template-hash=1562004724,app=nginx 3 5m +deploymentrc-1975012602 nginx nginx:1.7.9 pod-template-hash=1975012602,app=nginx 0 7m ``` -Running get pods, will only show the new pods. +Running `get pods` should now show only the new pods: ```shell kubectl get pods @@ -148,7 +138,7 @@ deploymentrc-1562004724-1rkfl 1/1 Running 0 8m deploymentrc-1562004724-6v702 1/1 Running 0 8m ``` -Next time we want to update pods, we can just update the deployment again. +Next time we want to update these pods, we can just update and re-apply the Deployment again. Deployment ensures that not all pods are down while they are being updated. By default, it ensures that minimum of 1 less than the desired number of pods are @@ -177,27 +167,29 @@ Events: 1m 1m 1 {deployment-controller } ScalingRC Scaled down rc deploymentrc-1975012602 to 0 ``` -Here we see that when we first created the deployment, it created an RC and scaled it up to 3 replicas directly. -When we updated the deployment, it created a new RC and scaled it up to 1 and then scaled down the old RC by 1, so that at least 2 pods were available at all times. +Here we see that when we first created the Deployment, it created an RC and scaled it up to 3 replicas directly. +When we updated the Deployment, it created a new RC and scaled it up to 1 and then scaled down the old RC by 1, so that at least 2 pods were available at all times. It then scaled up the new RC to 3 and when those pods were ready, it scaled down the old RC to 0. ### Multiple Updates Each time a new deployment object is observed, a replication controller is created to bring up the desired pods if there is no existing RC doing so. -Existing RCs controlling pods whose labels match `.spec.selector` but the +Existing RCs controlling pods whose labels match `.spec.selector` but whose template does not match `.spec.template` are scaled down. Eventually, the new RC will be scaled to `.spec.replicas` and all old RCs will be scaled to 0. -If the user updates the deployment while an existing deployment was in progress, -deployment will create a new RC as per the update and start scaling that up and -will roll the RC that it was scaling up before in its list of old RCs and will + +If the user updates a Deployment while an existing deployment is in progress, +the Deployment will create a new RC as per the update and start scaling that up, and +will roll the RC that it was scaling up previously-- it will add it to its list of old RCs and will start scaling it down. -For example: If user creates a deployment to create 5 replicas of nginx:1.7.9. 
-But then updates the deployment to create 5 replicas of nging:1.9.1, when only 3 -replicas of nginx:1.7.9 had been created, then deployment will immediately start -killing the 3 nginx:1.7.9 pods that it had created and will start creating -nginx:1.9.1 pods. It will not wait for 5 replicas of nginx:1.7.9 to be created + +For example, suppose the user creates a deployment to create 5 replicas of `nginx:1.7.9`, +but then updates the deployment to create 5 replicas of `nginx:1.9.1`, when only 3 +replicas of `nginx:1.7.9` had been created. In that case, deployment will immediately start +killing the 3 `nginx:1.7.9` pods that it had created, and will start creating +`nginx:1.9.1` pods. It will not wait for 5 replicas of `nginx:1.7.9` to be created before changing course. ## Writing a Deployment Spec @@ -218,7 +210,7 @@ the same schema as a [pod](/docs/user-guide/pods), except it is nested and does ### Replicas -`.spec.replicas` is an optional field that specifies the number of desired pods. Defaults +`.spec.replicas` is an optional field that specifies the number of desired pods. It defaults to 1. ### Selector @@ -229,20 +221,9 @@ template is different than `.spec.template` or if the total number of such pods exceeds `.spec.replicas`. It will bring up new pods with `.spec.template` if number of pods are less than the desired number. -### Unique Label Key - -`.spec.uniqueLabelKey` is an optional field specifying key of the selector that -is added to existing RCs (and label key that is added to its pods) to prevent -the existing RCs to select new pods (and old pods being selected by new RC). -Users can set this to an empty string to indicate that the system should -not add any selector and label. If unspecified, system uses -"deployment.kubernetes.io/podTemplateHash". -Value of this key is hash of `.spec.template`. -No label is added if this is set to empty string. - ### Strategy -`.spec.strategy` specifies the strategy to replace old pods by new ones. +`.spec.strategy` specifies the strategy used to replace old pods by new ones. `.spec.strategy.type` can be "Recreate" or "RollingUpdate". "RollingUpdate" is the default value. @@ -250,11 +231,11 @@ the default value. All existing pods are killed before new ones are created when `.spec.strategy.type==Recreate`. -Note: This is not implemented yet. +__Note: This is not implemented yet__. #### Rolling Update Deployment -Deployment updates pods in a [rolling update][update-demo/] fashion +The Deployment updates pods in a [rolling update](/docs/user-guide/update-demo/) fashion when `.spec.strategy.type==RollingUpdate`. Users can specify `maxUnavailable`, `maxSurge` and `minReadySeconds` to control the rolling update process. @@ -263,39 +244,41 @@ the rolling update process. `.spec.strategy.rollingUpdate.maxUnavailable` is an optional field that specifies the maximum number of pods that can be unavailable during the update process. -Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: -10%). -Absolute number is calculated from percentage by rounding up. +The value can be an absolute number (e.g. 5) or a percentage of desired pods +(e.g. 10%). +The absolute number is calculated from percentage by rounding up. This can not be 0 if `.spec.strategy.rollingUpdate.maxSurge` is 0. By default, a fixed value of 1 is used. -Example: when this is set to 30%, the old RC can be scaled down to + +For example, when this value is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. 
Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the -update is at least 70% of desired pods. +update is at least 70% of the desired pods. ##### Max Surge `.spec.strategy.rollingUpdate.maxSurge` is an optional field that specifies the maximum number of pods that can be created above the desired number of pods. -Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: -10%). -This can not be 0 if MaxUnavailable is 0. -Absolute number is calculated from percentage by rounding up. +Value can be an absolute number (e.g. 5) or a percentage of desired pods +(e.g. 10%). +This can not be 0 if `MaxUnavailable` is 0. +The absolute number is calculated from percentage by rounding up. By default, a value of 1 is used. -Example: when this is set to 30%, the new RC can be scaled up immediately when + +For example, when this value is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, -new RC can be scaled up further, ensuring that total number of pods running -at any time during the update is atmost 130% of desired pods. +the new RC can be scaled up further, ensuring that the total number of pods running +at any time during the update is at most 130% of desired pods. ##### Min Ready Seconds -`.spec.strategy.rollingUpdate.minReadySeconds` is an optional field that specifies the +`.spec.minReadySeconds` is an optional field that specifies the minimum number of seconds for which a newly created pod should be ready -without any of its container crashing, for it to be considered available. -Defaults to 0 (pod will be considered available as soon as it is ready). -Note: This is not implemented yet. +without any of its containers crashing, for it to be considered available. +This defaults to 0 (the pod will be considered available as soon as it is ready). +To learn more about when a pod is considered ready, see [Container Probes](/docs/user-guide/pod-states/#container-probes). ## Alternative to Deployments diff --git a/docs/user-guide/downward-api.md b/docs/user-guide/downward-api.md index bac3455433..2932423ecd 100644 --- a/docs/user-guide/downward-api.md +++ b/docs/user-guide/downward-api.md @@ -84,12 +84,11 @@ In future, it will be possible to specify a specific annotation or label. 
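Before reading the full example below, it may help to see what the projected files typically contain once such a pod is running: each label or annotation becomes one line in `key="value"` form. The snippet below is only an illustrative sketch; the placeholder pod name and the label values are hypothetical, and the `/etc/labels` path matches the example that follows:

```shell
$ kubectl exec <pod-name> -- cat /etc/labels
cluster="test-cluster1"
rack="rack-22"
```
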
## Example -This is an example of a pod that consumes its labels and annotations via the downward API volume, labels and annotations are dumped in `/etc/podlabels` and in `/etc/annotations`, respectively: +This is an example of a pod that consumes its labels and annotations via the downward API volume, labels and annotations are dumped in `/etc/labels` and in `/etc/annotations`, respectively: {% include code.html language="yaml" file="downward-api/volume/dapi-volume.yaml" ghlink="/docs/user-guide/downward-api/volume/dapi-volume.yaml" %} Some more thorough examples: - * [environment variables](/docs/user-guide/environment-guide/) - * [downward API](/docs/user-guide/downward-api/) \ No newline at end of file + * [downward API](/docs/user-guide/downward-api/) diff --git a/docs/user-guide/downward-api/index.md b/docs/user-guide/downward-api/README.md similarity index 77% rename from docs/user-guide/downward-api/index.md rename to docs/user-guide/downward-api/README.md index 2bb36e7743..257058cfd8 100644 --- a/docs/user-guide/downward-api/index.md +++ b/docs/user-guide/downward-api/README.md @@ -1,21 +1,18 @@ ---- ---- - Following this example, you will create a pod with a container that consumes the pod's name and -namespace using the [downward API](/docs/user-guide/downward-api/). +namespace using the [downward API](http://kubernetesio/docs/user-guide/downward-api/). ## Step Zero: Prerequisites This example assumes you have a Kubernetes cluster installed and running, and that you have installed the `kubectl` command line tool somewhere in your path. Please see the [getting -started](/docs/getting-started-guides/) for installation instructions for your platform. +started](http://kubernetes.io/docs/getting-started-guides/) for installation instructions for your platform. ## Step One: Create the pod Containers consume the downward API using environment variables. The downward API allows containers to be injected with the name and namespace of the pod the container is in. -Use the [`dapi-pod.yaml`](/docs/user-guide/downward-api/dapi-pod.yaml) file to create a Pod with a container that consumes the +Use the [`dapi-pod.yaml`](dapi-pod.yaml) file to create a Pod with a container that consumes the downward API. ```shell diff --git a/docs/user-guide/downward-api/volume/index.md b/docs/user-guide/downward-api/volume/index.md index 835f5381b9..446d8eb563 100644 --- a/docs/user-guide/downward-api/volume/index.md +++ b/docs/user-guide/downward-api/volume/index.md @@ -13,8 +13,7 @@ Supported metadata fields: ### Step Zero: Prerequisites -This example assumes you have a Kubernetes cluster installed and running, and the `kubectl` -command line tool somewhere in your path. Please see the [gettingstarted](/docs/getting-started-guides/) for installation instructions for your platform. +This example assumes you have a Kubernetes cluster installed and running, and the `kubectl` command line tool somewhere in your path. Please see the [gettingstarted](/docs/getting-started-guides/) for installation instructions for your platform. ### Step One: Create the pod diff --git a/docs/user-guide/environment-guide/index.md b/docs/user-guide/environment-guide/index.md index 135297f315..938f0f0b10 100644 --- a/docs/user-guide/environment-guide/index.md +++ b/docs/user-guide/environment-guide/index.md @@ -23,7 +23,7 @@ for your platform. 
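If you want to double-check those prerequisites before continuing, a quick look at the client version and the nodes is usually enough (exact output will differ between providers and releases):

```shell
$ kubectl version
$ kubectl get nodes
```
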
## Optional: Build your own containers The code for the containers is under -[containers/](https://github.com/kubernetes/kubernetes.github.io/tree/master/docs/user-guide/environment-guide/containers/) +[containers/](/docs/user-guide/environment-guide/containers/) ## Get everything running @@ -40,8 +40,8 @@ Use `kubectl describe service show-srv` to determine the public IP of your service. > Note: If your platform does not support external load balancers, -> you'll need to open the proper port and direct traffic to the -> internal IP shown for the frontend service with the above command + you'll need to open the proper port and direct traffic to the + internal IP shown for the frontend service with the above command Run `curl :80` to query the service. You should get something like this back: diff --git a/docs/user-guide/images.md b/docs/user-guide/images.md index 48832d4f84..b5d324d6d1 100644 --- a/docs/user-guide/images.md +++ b/docs/user-guide/images.md @@ -18,6 +18,9 @@ pull an image if it already exists. If you would like to always force a pull you must set a pull image policy of `Always` or specify a `:latest` tag on your image. +If you did not specify tag of your image, it will be assumed as `:latest`, with +pull image policy of `Always` correspondingly. + ## Using a Private Registry Private registries may require keys to read images from them. @@ -52,6 +55,21 @@ Google service account. The service account on the instance will have a `https://www.googleapis.com/auth/devstorage.read_only`, so it can pull from the project's GCR, but not push. +### Using AWS EC2 Container Registry + +Kubernetes has native support for the [AWS EC2 Container +Registry](https://aws.amazon.com/ecr/), when nodes are AWS instances. + +Simply use the full image name (e.g. `ACCOUNT.dkr.ecr.REGION.amazonaws.com/imagename:tag`) +in the Pod definition. + +All users of the cluster who can create pods will be able to run pods that use any of the +images in the ECR registry. + +The kubelet will fetch and periodically refresh ECR credentials. It needs the +`ecr:GetAuthorizationToken` permission to do this. + + ### Configuring Nodes to Authenticate to a Private Repository **Note:** if you are running on Google Container Engine (GKE), there will already be a `.dockercfg` on each node @@ -61,18 +79,19 @@ with credentials for Google Container Registry. You cannot use this approach. will not work reliably on GCE, and any other cloud provider that does automatic node replacement. -Docker stores keys for private registries in the `$HOME/.dockercfg` file. If you put this -in the `$HOME` of `root` on a kubelet, then docker will use it. +Docker stores keys for private registries in the `$HOME/.dockercfg` or `$HOME/.docker/config.json` file. If you put this +in the `$HOME` of user `root` on a kubelet, then docker will use it. Here are the recommended steps to configuring your nodes to use a private registry. In this example, run these on your desktop/laptop: - 1. run `docker login [server]` for each set of credentials you want to use. - 1. view `$HOME/.dockercfg` in an editor to ensure it contains just the credentials you want to use. - 1. get a list of your nodes - - for example: `nodes=$(kubectl get nodes -o template --template='{{range.items}}{{.metadata.name}} {{end}}')` - 1. copy your local `.dockercfg` to the home directory of root on each node. - - for example: `for n in $nodes; do scp ~/.dockercfg root@$n:/root/.dockercfg; done` + 1. run `docker login [server]` for each set of credentials you want to use. 
This updates `$HOME/.docker/config.json`. + 1. view `$HOME/.docker/config.json` in an editor to ensure it contains just the credentials you want to use. + 1. get a list of your nodes, for example: + - if you want the names: `nodes=$(kubectl get nodes -o jsonpath='{range.items[*].metadata}{.name} {end}')` + - if you want to get the IPs: `nodes=$(kubectl get nodes -o jsonpath='{range .items[*].status.addresses[?(@.type=="ExternalIP")]}{.address} {end}')` + 1. copy your local `.docker/config.json` to the home directory of root on each node. + - for example: `for n in $nodes; do scp ~/.docker/config.json root@$n:/root/.docker/config.json; done` Verify by creating a pod that uses a private image, e.g.: @@ -108,12 +127,13 @@ $ kubectl describe pods/private-image-test-1 | grep "Failed" Fri, 26 Jun 2015 15:36:13 -0700 Fri, 26 Jun 2015 15:39:13 -0700 19 {kubelet node-i2hq} spec.containers{uses-private-image} failed Failed to pull image "user/privaterepo:v1": Error: image user/privaterepo:v1 not found ``` -You must ensure all nodes in the cluster have the same `.dockercfg`. Otherwise, pods will run on + +You must ensure all nodes in the cluster have the same `.docker/config.json`. Otherwise, pods will run on some nodes and fail to run on others. For example, if you use node autoscaling, then each instance -template needs to include the `.dockercfg` or mount a drive that contains it. +template needs to include the `.docker/config.json` or mount a drive that contains it. All pods will have read access to images in any private registry once private -registry keys are added to the `.dockercfg`. +registry keys are added to the `.docker/config.json`. **This was tested with a private docker repository as of 26 June with Kubernetes version v0.19.3. It should also work for a private registry such as quay.io, but that has not been tested.** @@ -145,43 +165,53 @@ where node creation is automated. Kubernetes supports specifying registry keys on a pod. -First, create a `.dockercfg`, such as running `docker login `. -Then put the resulting `.dockercfg` file into a [secret resource](/docs/user-guide/secrets). For example: +#### Creating a Secret with a Docker Config + +Run the following command, substituting the appropriate uppercase values: ```shell -$ docker login -Username: janedoe -Password: '�?'�?'�?'�?'�?'�?'�?'�?'�?'�?'�? -Email: jdoe@example.com -WARNING: login credentials saved in /Users/jdoe/.dockercfg. -Login Succeeded +$ kubectl create secret docker-registry myregistrykey --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL +secret "myregistrykey" created. +``` -$ echo $(cat ~/.dockercfg) -{ "https://index.docker.io/v1/": { "auth": "ZmFrZXBhc3N3b3JkMTIK", "email": "jdoe@example.com" } } +If you need access to multiple registries, you can create one secret for each registry. +Kubelet will merge any `imagePullSecrets` into a single virtual `.docker/config.json` +when pulling images for your Pods. -$ cat ~/.dockercfg | base64 -eyAiaHR0cHM6Ly9pbmRleC5kb2NrZXIuaW8vdjEvIjogeyAiYXV0aCI6ICJabUZyWlhCaGMzTjNiM0prTVRJSyIsICJlbWFpbCI6ICJqZG9lQGV4YW1wbGUuY29tIiB9IH0K +Pods can only reference image pull secrets in their own namespace, +so this process needs to be done one time per namespace. -$ cat > /tmp/image-pull-secret.yaml <