Merge branch 'master' into patch-1

commit e825b4081c

@@ -14,3 +14,4 @@ install:
script:
- go test -v k8s.io/kubernetes.github.io/test
- $GOPATH/bin/md-check --root-dir=$HOME/gopath/src/k8s.io/kubernetes.github.io
- ./verify-entry-toc.sh
|
|
|
|||
|
|
@@ -45,6 +45,7 @@ toc:
- docs/getting-started-guides/network-policy/walkthrough.md
|
||||
- docs/getting-started-guides/network-policy/calico.md
|
||||
- docs/getting-started-guides/network-policy/romana.md
|
||||
- docs/getting-started-guides/network-policy/weave.md
|
||||
|
||||
- title: Batch Jobs
|
||||
section:
|
||||
|
|
@@ -67,7 +68,9 @@ toc:
- docs/user-guide/simple-nginx.md
|
||||
- docs/user-guide/pods/single-container.md
|
||||
- docs/user-guide/pods/multi-container.md
|
||||
- docs/user-guide/pods/init-container.md
|
||||
- docs/user-guide/configuring-containers.md
|
||||
- docs/user-guide/pod-templates.md
|
||||
- docs/user-guide/production-pods.md
|
||||
- docs/user-guide/containers.md
|
||||
- docs/user-guide/environment-guide/index.md
|
||||
|
|
@@ -84,8 +87,11 @@ toc:
- title: Monitoring, Logging, and Debugging Containers
|
||||
section:
|
||||
- docs/user-guide/monitoring.md
|
||||
- docs/getting-started-guides/logging.md
|
||||
- docs/getting-started-guides/logging-elasticsearch.md
|
||||
- title: Logging
|
||||
section:
|
||||
- docs/user-guide/logging/overview.md
|
||||
- docs/user-guide/logging/stackdriver.md
|
||||
- docs/user-guide/logging/elasticsearch.md
|
||||
- docs/user-guide/getting-into-containers.md
|
||||
- docs/user-guide/connecting-to-applications-proxy.md
|
||||
- docs/user-guide/connecting-to-applications-port-forward.md
|
||||
|
|
@@ -146,7 +152,11 @@ toc:
- docs/getting-started-guides/fedora/flannel_multi_node_cluster.md
|
||||
- docs/getting-started-guides/centos/centos_manual_config.md
|
||||
- docs/getting-started-guides/coreos/index.md
|
||||
- /docs/getting-started-guides/ubuntu/
|
||||
- title: Ubuntu
|
||||
section:
|
||||
- docs/getting-started-guides/ubuntu/automated.md
|
||||
- docs/getting-started-guides/ubuntu/calico.md
|
||||
- docs/getting-started-guides/ubuntu/manual.md
|
||||
- docs/getting-started-guides/windows/index.md
|
||||
- docs/admin/node-conformance.md
|
||||
- docs/getting-started-guides/docker-multinode.md
|
||||
|
|
@@ -160,11 +170,16 @@ toc:
- docs/admin/cluster-management.md
|
||||
- docs/admin/kubeadm.md
|
||||
- docs/admin/addons.md
|
||||
- docs/admin/audit.md
|
||||
- docs/admin/ha-master-gce.md
|
||||
- docs/admin/namespaces/index.md
|
||||
- docs/admin/namespaces/walkthrough.md
|
||||
- docs/admin/limitrange/index.md
|
||||
- docs/admin/disruptions.md
|
||||
- docs/admin/resourcequota/index.md
|
||||
- docs/admin/resourcequota/walkthrough.md
|
||||
- docs/admin/rescheduler.md
|
||||
- docs/admin/sysctls.md
|
||||
- docs/admin/cluster-components.md
|
||||
- docs/admin/etcd.md
|
||||
- docs/admin/multi-cluster.md
|
||||
|
|
@@ -186,5 +201,21 @@ toc:
|
||||
- title: Administering Federation
|
||||
section:
|
||||
- /docs/admin/federation/kubfed/
|
||||
- docs/admin/federation/index.md
|
||||
- docs/admin/federation/kubefed.md
|
||||
- title: Federated Kubernetes Objects
|
||||
section:
|
||||
- docs/user-guide/federation/index.md
|
||||
- docs/user-guide/federation/configmap.md
|
||||
- docs/user-guide/federation/daemonsets.md
|
||||
- docs/user-guide/federation/deployment.md
|
||||
- docs/user-guide/federation/events.md
|
||||
- docs/user-guide/federation/federated-ingress.md
|
||||
- docs/user-guide/federation/namespaces.md
|
||||
- docs/user-guide/federation/replicasets.md
|
||||
- docs/user-guide/federation/secrets.md
|
||||
- docs/federation/api-reference/index.md
|
||||
- title: Federation Components
|
||||
section:
|
||||
- docs/admin/federation-apiserver.md
|
||||
- title: federation-controller-manager
|
||||
path: /docs/admin/federation-controller-manager
|
||||
|
|
|
|||
|
|
@@ -15,6 +15,7 @@ toc:
- docs/admin/service-accounts-admin.md
|
||||
- docs/api-reference/v1/operations.html
|
||||
- docs/api-reference/v1/definitions.html
|
||||
- docs/api-reference/labels-annotations-taints.md
|
||||
- kubernetes/third_party/swagger-ui/index.md
|
||||
|
||||
- title: Autoscaling API
|
||||
|
|
@@ -183,20 +184,3 @@ toc:
- title: Security in Kubernetes
|
||||
path: https://github.com/kubernetes/kubernetes/blob/release-1.3/docs/design/security.md
|
||||
|
||||
- title: Federation
|
||||
section:
|
||||
- docs/user-guide/federation/index.md
|
||||
- docs/user-guide/federation/configmap.md
|
||||
- docs/user-guide/federation/daemonsets.md
|
||||
- docs/user-guide/federation/deployment.md
|
||||
- docs/user-guide/federation/events.md
|
||||
- docs/user-guide/federation/federated-ingress.md
|
||||
- docs/user-guide/federation/namespaces.md
|
||||
- docs/user-guide/federation/replicasets.md
|
||||
- docs/user-guide/federation/secrets.md
|
||||
- docs/federation/api-reference/README.md
|
||||
- title: Federation Components
|
||||
section:
|
||||
- docs/admin/federation-apiserver.md
|
||||
- title: federation-controller-manager
|
||||
path: /docs/admin/federation-controller-manager
|
||||
|
|
|
|||
|
|
@@ -10,13 +10,14 @@ toc:
- docs/contribute/write-new-topic.md
|
||||
- docs/contribute/stage-documentation-changes.md
|
||||
- docs/contribute/page-templates.md
|
||||
- docs/contribute/review-issues.md
|
||||
- docs/contribute/style-guide.md
|
||||
|
||||
|
||||
- title: Troubleshooting
|
||||
section:
|
||||
- docs/user-guide/debugging-pods-and-replication-controllers.md
|
||||
- docs/user-guide/introspection-and-debugging.md
|
||||
- docs/user-guide/logging.md
|
||||
- docs/user-guide/application-troubleshooting.md
|
||||
- docs/admin/cluster-troubleshooting.md
|
||||
- docs/user-guide/debugging-services.md
|
||||
|
|
@@ -39,3 +40,6 @@ toc:
path: https://github.com/kubernetes/kubernetes/releases/
|
||||
- title: Release Roadmap
|
||||
path: https://github.com/kubernetes/kubernetes/milestones/
|
||||
|
||||
- title: Deprecation Policy
|
||||
path: /docs/deprecation-policy.md
|
||||
|
|
|
|||
|
|
@@ -10,10 +10,14 @@ toc:
- docs/tasks/configure-pod-container/assign-cpu-ram-container.md
|
||||
- docs/tasks/configure-pod-container/configure-volume-storage.md
|
||||
- docs/tasks/configure-pod-container/distribute-credentials-secure.md
|
||||
- docs/tasks/configure-pod-container/pull-image-private-registry.md
|
||||
- docs/tasks/configure-pod-container/configure-liveness-readiness-probes.md
|
||||
- docs/tasks/configure-pod-container/communicate-containers-same-pod.md
|
||||
|
||||
- title: Accessing Applications in a Cluster
|
||||
section:
|
||||
- docs/tasks/access-application-cluster/port-forward-access-application-cluster.md
|
||||
- docs/tasks/access-application-cluster/load-balance-access-application-cluster.md
|
||||
|
||||
- title: Debugging Applications in a Cluster
|
||||
section:
|
||||
|
|
@@ -26,7 +30,6 @@ toc:
- title: Administering a Cluster
|
||||
section:
|
||||
- docs/tasks/administer-cluster/assign-pods-nodes.md
|
||||
|
||||
- docs/tasks/administer-cluster/dns-horizontal-autoscaling.md
|
||||
- docs/tasks/administer-cluster/safely-drain-node.md
|
||||
|
||||
|
|
@@ -41,4 +44,4 @@ toc:
- title: Troubleshooting
|
||||
section:
|
||||
- docs/tasks/troubleshoot/debug-init-containers.md
|
||||
- /docs/tasks/administer-cluster/access-control-identity-management/
|
||||
- docs/tasks/administer-cluster/access-control-identity-management/
|
||||
|
|
|
|||
|
|
@@ -14,8 +14,7 @@ toc:
|
||||
- title: Third-Party Tools
|
||||
section:
|
||||
- docs/tools/kompose/index.md
|
||||
- docs/tools/kompose/user-guide.md
|
||||
- title: Helm
|
||||
path: https://github.com/kubernetes/helm
|
||||
- title: Kompose
|
||||
path: https://github.com/kubernetes-incubator/kompose
|
||||
|
||||
|
|
|
|||
|
|
@@ -41,3 +41,6 @@ toc:
- docs/tutorials/stateful-application/run-stateful-application.md
|
||||
- docs/tutorials/stateful-application/run-replicated-stateful-application.md
|
||||
- docs/tutorials/stateful-application/zookeeper.md
|
||||
- title: Services
|
||||
section:
|
||||
- docs/tutorials/services/source-ip.md
|
||||
|
|
|
|||
|
|
@@ -12,9 +12,9 @@
{% assign path = item.path %}
|
||||
{% assign title = item.title %}
|
||||
{% else %}
|
||||
{% assign page = site.pages | where: "path", item | first %}
|
||||
{% assign title = page.title %}
|
||||
{% assign path = page.url %}
|
||||
{% assign found_page = site.pages | where: "path", item | first %}
|
||||
{% assign title = found_page.title %}
|
||||
{% assign path = found_page.url %}
|
||||
{% endif %}
|
||||
{% endcapture %}
|
||||
|
||||
|
|
|
|||
|
|
@@ -2,14 +2,12 @@
assignees:
|
||||
- soltysh
|
||||
- sttts
|
||||
|
||||
title: Audit in Kubernetes
|
||||
---
|
||||
|
||||
* TOC
|
||||
{:toc}
|
||||
|
||||
## Audit in Kubernetes
|
||||
|
||||
Kubernetes Audit provides a security-relevant chronological set of records documenting
the sequence of activities that have affected the system, whether initiated by individual users, by administrators,
or by other components of the system. It allows the cluster administrator to
|
||||
|
|
|
|||
|
|
@@ -91,15 +91,8 @@ about containers in a central database, and provides a UI for browsing that data
|
||||
#### Cluster-level Logging
|
||||
|
||||
[Container Logging](/docs/user-guide/monitoring) saves container logs
|
||||
to a central log store with search/browsing interface. There are two
|
||||
implementations:
|
||||
|
||||
* [Cluster-level logging to Google Cloud Logging](
|
||||
/docs/user-guide/logging/#cluster-level-logging-to-google-cloud-logging)
|
||||
|
||||
* [Cluster-level Logging with Elasticsearch and Kibana](
|
||||
/docs/getting-started-guides/logging-elasticsearch/)
|
||||
A [Cluster-level logging](/docs/user-guide/logging/overview) mechanism is responsible for
|
||||
saving container logs to a central log store with search/browsing interface.
|
||||
|
||||
## Node components
|
||||
|
||||
|
|
|
|||
|
|
@@ -384,7 +384,7 @@ for more information.
|
||||
## References
|
||||
|
||||
- [Docs for the DNS cluster addon](http://releases.k8s.io/{{page.githubbranch}}/build-tools/kube-dns/README.md)
|
||||
- [Docs for the DNS cluster addon](http://releases.k8s.io/{{page.githubbranch}}/build/kube-dns/README.md)
|
||||
|
||||
## What's next
|
||||
- [Autoscaling the DNS Service in a Cluster](/docs/tasks/administer-cluster/dns-horizontal-autoscaling/).
|
||||
|
|
|
|||
|
|
@@ -1,7 +1,7 @@
---
|
||||
assignees:
|
||||
- madhusudancs
|
||||
|
||||
title: Setting up Cluster Federation with Kubefed
|
||||
---
|
||||
|
||||
* TOC
|
||||
|
|
|
|||
|
|
@@ -1,14 +1,12 @@
---
|
||||
assignees:
|
||||
- jszczepkowski
|
||||
|
||||
title: Setting up High-Availability Kubernetes Masters
|
||||
---
|
||||
|
||||
* TOC
|
||||
{:toc}
|
||||
|
||||
## Introduction
|
||||
|
||||
Kubernetes version 1.5 adds alpha support for replicating Kubernetes masters in `kube-up` or `kube-down` scripts for Google Compute Engine.
|
||||
This document describes how to use kube-up/down scripts to manage highly available (HA) masters and how HA masters are implemented for use with GCE.
|
||||
|
||||
|
|
|
|||
|
|
@@ -61,7 +61,8 @@ project](/docs/admin/salt).
* **DNS Integration with SkyDNS** ([dns.md](/docs/admin/dns)):
|
||||
Resolving a DNS name directly to a Kubernetes service.
|
||||
|
||||
* **Logging** with [Kibana](/docs/user-guide/logging)
|
||||
* [**Cluster-level logging**](/docs/user-guide/logging/overview)
|
||||
Saving container logs to a central log store with search/browsing interface.
|
||||
|
||||
## Multi-tenant support
|
||||
|
||||
|
|
|
|||
|
|
@@ -84,6 +84,9 @@ Valid values are the ones supported by `controller-manager`, namely `"aws"`,
the cloud provider, you should create a `/etc/kubernetes/cloud-config`
|
||||
file manually, before running `kubeadm init`. `kubeadm` automatically
|
||||
picks those settings up and ensures other nodes are configured correctly.
|
||||
The exact format and content of the file `/etc/kubernetes/cloud-config` depends
|
||||
on the type you specified for `--cloud-provider`; see the appropriate documentation
|
||||
for your cloud provider for details.
|
||||
You must also set the `--cloud-provider` and `--cloud-config` parameters
|
||||
yourself by editing the `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf`
|
||||
file appropriately.
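
For illustration only, a minimal sketch of that edit (the `aws` value is just an example from the list above, and the exact layout of the drop-in varies between versions):

```shell
# Sketch: add the cloud flags to the kubelet arguments in the kubeadm drop-in,
# then reload systemd and restart the kubelet so the flags take effect.
sudo vi /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
#   append, for example: --cloud-provider=aws --cloud-config=/etc/kubernetes/cloud-config
sudo systemctl daemon-reload
sudo systemctl restart kubelet
```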
|
||||
|
|
|
|||
|
|
@@ -3,15 +3,13 @@ assignees:
- davidopp
|
||||
- filipg
|
||||
- piosz
|
||||
|
||||
title: Guaranteed Scheduling For Critical Add-On Pods
|
||||
---
|
||||
|
||||
* TOC
|
||||
{:toc}
|
||||
|
||||
# "Guaranteed" scheduling of critical add-on pods
|
||||
|
||||
## Critical add-ons
|
||||
## Overview
|
||||
|
||||
In addition to Kubernetes core components such as the api-server, scheduler, and controller-manager running on a master machine,
there are a number of add-ons which, for various reasons, must run on a regular cluster node (rather than the Kubernetes master).
|
||||
|
|
|
|||
|
|
@@ -1,7 +1,7 @@
---
|
||||
assignees:
|
||||
- sttts
|
||||
|
||||
title: Using Sysctls in a Kubernetes Cluster
|
||||
---
|
||||
|
||||
* TOC
|
||||
|
|
|
|||
|
|
@@ -1,16 +0,0 @@
---
|
||||
---
|
||||
# API Reference
|
||||
|
||||
Use the following reference docs to understand the Kubernetes REST API for various API group versions:
|
||||
|
||||
* v1: [operations](/docs/api-reference/v1/operations.html), [model definitions](/docs/api-reference/v1/definitions.html)
|
||||
* extensions/v1beta1: [operations](/docs/api-reference/extensions/v1beta1/operations.html), [model definitions](/docs/api-reference/extensions/v1beta1/definitions.html)
|
||||
* batch/v1: [operations](/docs/api-reference/batch/v1/operations.html), [model definitions](/docs/api-reference/batch/v1/definitions.html)
|
||||
* autoscaling/v1: [operations](/docs/api-reference/autoscaling/v1/operations.html), [model definitions](/docs/api-reference/autoscaling/v1/definitions.html)
|
||||
* apps/v1beta1: [operations](/docs/api-reference/apps/v1beta1/operations.html), [model definitions](/docs/api-reference/apps/v1beta1/definitions.html)
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
|
@@ -1,6 +1,6 @@
---
|
||||
title: Well-Known Labels, Annotations and Taints
|
||||
---
|
||||
# Well-Known Labels, Annotations and Taints
|
||||
|
||||
Kubernetes reserves all labels and annotations in the kubernetes.io namespace. This document describes
|
||||
the well-known kubernetes.io labels and annotations.
|
||||
|
|
|
|||
|
|
@@ -0,0 +1,67 @@
---
|
||||
title: Reviewing Documentation Issues
|
||||
---
|
||||
|
||||
{% capture overview %}
|
||||
|
||||
This page explains how you should review and prioritize documentation issues made for the [kubernetes/kubernetes.github.io](https://github.com/kubernetes/kubernetes.github.io){: target="_blank"} repository. The purpose is to provide a way to organize issues and make it easier to contribute to Kubernetes documentation. The following should be used as the standard way of prioritizing, labeling, and interacting with issues.
|
||||
{% endcapture %}
|
||||
|
||||
{% capture body %}
|
||||
|
||||
### Categorizing issues
|
||||
Issues should be sorted into different buckets of work using the following labels and definitions. If an issue doesn't have enough information to identify a problem that can be researched, reviewed, or worked on (i.e. the issue doesn't fit into any of the categories below) you should close the issue with a comment explaining why it is being closed.
|
||||
|
||||
|
||||
#### Actionable
|
||||
* Issues that can be worked on with current information (or may need a comment to explain what needs to be done to make it more clear)
|
||||
* Gives contributors easy-to-find issues to work on
|
||||
|
||||
|
||||
#### Tech Review Needed
|
||||
* Issues that need more information in order to be worked on (the proposed solution needs to be proven, a subject matter expert needs to be involved, work needs to be done to understand the problem/resolution and if the issue is still relevant)
|
||||
* Promotes transparency about the level of work needed for the issue and signals that the issue is in progress
|
||||
|
||||
#### Docs Review Needed
|
||||
* Issues that are suggestions for better processes or site improvements that require community agreement to be implemented
|
||||
* Topics can be brought to SIG meetings as agenda items
|
||||
|
||||
|
||||
### Prioritizing Issues
|
||||
The following labels and definitions should be used to prioritize issues. If you change the priority of an issue, please comment on the issue with your reasoning for the change.
|
||||
|
||||
#### P1
|
||||
* Major content errors affecting more than 1 page
|
||||
* Broken code sample on a heavily trafficked page
|
||||
* Errors on a “getting started” page
|
||||
* Well known or highly publicized customer pain points
|
||||
* Automation issues
|
||||
|
||||
#### P2
|
||||
* Default for all new issues
|
||||
* Broken code for sample that is not heavily used
|
||||
* Minor content issues in a heavily trafficked page
|
||||
* Major content issues on a lower-trafficked page
|
||||
|
||||
#### P3
|
||||
* Typos and broken anchor links
|
||||
|
||||
### Handling special issue types
|
||||
|
||||
#### Duplicate issues
|
||||
If a single problem has one or more issues open for it, the problem should be consolidated into a single issue. You should decide which issue to keep open (or open a new issue), port over all relevant information, link related issues, and close all the other issues that describe the same problem. Having only a single issue to work on helps reduce confusion and avoids duplicating work on the same problem.
|
||||
|
||||
#### Dead link issues
|
||||
Depending on where the dead link is reported, different actions are required to resolve the issue. Dead links in the API and Kubectl docs are automation issues and should be assigned a P1 until the problem can be fully understood. All other dead links are issues that need to be manually fixed and can be assigned a P3.
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
|
||||
|
||||
{% capture whatsnext %}
|
||||
* Learn about [writing a new topic](/docs/contribute/write-new-topic).
|
||||
* Learn about [using page templates](/docs/contribute/page-templates/).
|
||||
* Learn about [staging your changes](/docs/contribute/stage-documentation-changes).
|
||||
{% endcapture %}
|
||||
|
||||
{% include templates/concept.md %}
|
||||
|
|
@@ -3,11 +3,9 @@ assignees:
- bgrant0607
|
||||
- lavalamp
|
||||
- thockin
|
||||
|
||||
title: Kubernetes Deprecation Policy
|
||||
---
|
||||
|
||||
# Kubernetes Deprecation Policy
|
||||
|
||||
Kubernetes is a large system with many components and many contributors. As
|
||||
with any such software, the feature set naturally evolves over time, and
|
||||
sometimes a feature may need to be removed. This could include an API, a flag,
|
||||
|
|
@@ -75,6 +73,7 @@ version. Beta API versions *may not* replace GA API versions.
**Rule #4: Other than the most recent API version in each track, older API
|
||||
versions must be supported after their announced deprecation for a duration of
|
||||
no less than:**
|
||||
|
||||
* **GA: 1 year or 2 releases (whichever is longer)**
|
||||
* **Beta: 3 months or 1 release (whichever is longer)**
|
||||
* **Alpha: 0 releases**
|
||||
|
|
@@ -84,18 +83,93 @@ which supports a particular API group. A new Kubernetes release is made every
approximately 3 months (4 per year). The following table describes which API
|
||||
versions are supported in a series of subsequent releases.
|
||||
|
||||
| Release | API versions | Notes |
|
||||
|---------|--------------|-------|
|
||||
| X | v1 | |
|
||||
| X+1 | v1, v2alpha1 | |
|
||||
| X+2 | v1, v2alpha2 | * v2alpha1 is removed, "action required" relnote |
|
||||
| X+3 | v1, v2beta1 | * v2alpha2 is removed, "action required" relnote |
|
||||
| X+4 | v1, v2beta1, v2beta2 | * v2beta1 is deprecated, "action required" relnote |
|
||||
| X+5 | v1, v2, v2beta2 | * v2beta1 is removed, "action required" relnote<br> * v2beta2 is deprecated, "action required" relnote<br> * v1 is deprecated, "action required" relnote |
|
||||
| X+6 | v1, v2 | * v2beta2 is removed, "action required" relnote |
|
||||
| X+7 | v1, v2 | |
|
||||
| X+8 | v1, v2 | |
|
||||
| X+9 | v2 | * v1 is removed, "action required" relnote |
|
||||
<table>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Release</th>
|
||||
<th>API Versions</th>
|
||||
<th>Notes</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td>X</td>
|
||||
<td>v1</td>
|
||||
<td></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>X+1</td>
|
||||
<td>v1, v2alpha1</td>
|
||||
<td></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>X+2</td>
|
||||
<td>v1, v2alpha2</td>
|
||||
<td>
|
||||
<ul>
|
||||
<li>v2alpha1 is removed, "action required" relnote</li>
|
||||
</ul>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>X+3</td>
|
||||
<td>v1, v2beta1</td>
|
||||
<td>
|
||||
<ul>
|
||||
<li>v2alpha2 is removed, "action required" relnote</li>
|
||||
</ul>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>X+4</td>
|
||||
<td>v1, v2beta1, v2beta2</td>
|
||||
<td>
|
||||
<ul>
|
||||
<li>v2beta1 is deprecated, "action required" relnote</li>
|
||||
</ul>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>X+5</td>
|
||||
<td>v1, v2, v2beta2</td>
|
||||
<td>
|
||||
<ul>
|
||||
<li>v2beta1 is removed, "action required" relnote</li>
|
||||
<li>v2beta2 is deprecated, "action required" relnote</li>
|
||||
<li>v1 is deprecated, "action required" relnote</li>
|
||||
</ul>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>X+6</td>
|
||||
<td>v1, v2</td>
|
||||
<td>
|
||||
<ul>
|
||||
<li>v2beta2 is removed, "action required" relnote</li>
|
||||
</ul>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>X+7</td>
|
||||
<td>v1, v2</td>
|
||||
<td></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>X+8</td>
|
||||
<td>v1, v2</td>
|
||||
<td></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>X+9</td>
|
||||
<td>v2</td>
|
||||
<td>
|
||||
<ul>
|
||||
<li>v1 is removed, "action required" relnote</li>
|
||||
</ul>
|
||||
</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
### REST resources (aka API objects)
|
||||
|
||||
|
|
@@ -147,12 +221,14 @@ follows:
|
||||
**Rule #5a: CLI elements of user-facing components (e.g. kubectl) must function
|
||||
after their announced deprecation for no less than:**
|
||||
|
||||
* **GA: 1 year or 2 releases (whichever is longer)**
|
||||
* **Beta: 3 months or 1 release (whichever is longer)**
|
||||
* **Alpha: 0 releases**
|
||||
|
||||
**Rule #5b: CLI elements of admin-facing components (e.g. kubelet) must function
|
||||
after their announced deprecation for no less than:**
|
||||
|
||||
* **GA: 6 months or 1 release (whichever is longer)**
|
||||
* **Beta: 3 months or 1 release (whichever is longer)**
|
||||
* **Alpha: 0 releases**
|
||||
|
|
|
|||
|
|
@@ -1,9 +1,7 @@
---
|
||||
title: Federation API
|
||||
title: Federation API Reference
|
||||
---
|
||||
|
||||
# API Reference
|
||||
|
||||
Federation API server supports the following group versions:
|
||||
|
||||
* federation/v1beta1: [operations](/docs/federation/api-reference/federation/v1beta1/operations.html), [model definitions](/docs/federation/api-reference/federation/v1beta1/definitions.html)
|
||||
|
|
@@ -46,7 +46,7 @@ wget -q -O - https://get.k8s.io | bash
|
||||
Once this command completes, you will have a master VM and four worker VMs, running as a Kubernetes cluster.
|
||||
|
||||
By default, some containers will already be running on your cluster. Containers like `fluentd` provide [logging](/docs/getting-started-guides/logging), while `heapster` provides [monitoring](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/cluster-monitoring/README.md) services.
|
||||
By default, some containers will already be running on your cluster. Containers like `fluentd` provide [logging](/docs/user-guide/logging/overview), while `heapster` provides [monitoring](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/cluster-monitoring/README.md) services.
|
||||
|
||||
The script run by the commands above creates a cluster with the name/prefix "kubernetes". It defines one specific cluster config, so you can't run it more than once.
|
||||
|
||||
|
|
|
|||
|
|
@@ -319,8 +319,9 @@ edit the `kubeadm` dropin for the `kubelet` service (`/etc/systemd/system/kubele
If your cloud provider requires any extra packages installed on host, for example for volume mounting/unmounting, install those packages.
|
||||
|
||||
Specify the `--cloud-provider` flag to kubelet and set it to the cloud of your choice. If your cloudprovider requires a configuration
|
||||
file, create the file `/etc/kubernetes/cloud-config` on every node and set the values your cloud requires. Also append
|
||||
`--cloud-config=/etc/kubernetes/cloud-config` to the kubelet arguments.
|
||||
file, create the file `/etc/kubernetes/cloud-config` on every node. The exact format and content of that file depends on the requirements imposed by your cloud provider.
|
||||
If you use the `/etc/kubernetes/cloud-config` file, you must append it to the `kubelet` arguments as follows:
|
||||
`--cloud-config=/etc/kubernetes/cloud-config`
|
||||
|
||||
Lastly, run `kubeadm init --cloud-provider=xxx` to bootstrap your cluster with cloud provider features.
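
As a rough illustration of that sequence on a node (the `aws` provider and the cloud-config contents below are placeholders, not a recommendation):

```shell
# Sketch: create the provider-specific config file, wire it into the kubelet arguments,
# then bootstrap the cluster with the matching provider.
sudo tee /etc/kubernetes/cloud-config <<'EOF'
# provider-specific settings go here
EOF
# append --cloud-config=/etc/kubernetes/cloud-config to the kubelet arguments, then:
sudo kubeadm init --cloud-provider=aws
```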
|
||||
|
||||
|
|
|
|||
|
|
@@ -1,7 +1,7 @@
---
|
||||
assignees:
|
||||
- bboreham
|
||||
|
||||
title: Weave Net Addon
|
||||
---
|
||||
|
||||
The [Weave Net Addon](https://www.weave.works/docs/net/latest/kube-addon/) for Kubernetes comes with a Network Policy Controller.
|
||||
|
|
|
|||
|
|
@@ -826,11 +826,9 @@ Notes for setting up each cluster service are given below:
* [Setup instructions](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/dns/)
|
||||
* [Admin Guide](/docs/admin/dns/)
|
||||
* Cluster-level Logging
|
||||
* Multiple implementations with different storage backends and UIs.
|
||||
* [Elasticsearch Backend Setup Instructions](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/fluentd-elasticsearch/)
|
||||
* [Google Cloud Logging Backend Setup Instructions](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/fluentd-gcp/).
|
||||
* Both require running fluentd on each node.
|
||||
* [User Guide](/docs/user-guide/logging/)
|
||||
* [Cluster-level Logging Overview](/docs/user-guide/logging/overview)
|
||||
* [Cluster-level Logging with Elasticsearch](/docs/user-guide/logging/elasticsearch)
|
||||
* [Cluster-level Logging with Stackdriver Logging](/docs/user-guide/logging/stackdriver)
|
||||
* Container Resource Monitoring
|
||||
* [Setup instructions](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/cluster-monitoring/)
|
||||
* GUI
|
||||
|
|
|
|||
|
|
@@ -2,7 +2,7 @@
assignees:
|
||||
- caesarxuchao
|
||||
- erictune
|
||||
|
||||
title: Setting up Kubernetes with Juju
|
||||
---
|
||||
|
||||
Ubuntu 16.04 introduced the [Canonical Distribution of Kubernetes](https://jujucharms.com/canonical-kubernetes/), a pure upstream distribution of Kubernetes designed for production usage. Out of the box it comes with the following components on 12 machines:
|
||||
|
|
|
|||
|
|
@@ -1,5 +1,5 @@
---
|
||||
|
||||
title: Deploying Kubernetes with Calico Networking on Ubuntu
|
||||
---
|
||||
|
||||
This document describes how to deploy Kubernetes with Calico networking from scratch on _bare metal_ Ubuntu. For more information on Project Calico, visit [projectcalico.org](http://projectcalico.org) and the [calico-containers repository](https://github.com/projectcalico/calico-containers).
|
||||
|
|
|
|||
|
|
@@ -1,7 +1,7 @@
---
|
||||
assignees:
|
||||
- thockin
|
||||
|
||||
title: Manually Deploying Kubernetes on Ubuntu Nodes
|
||||
---
|
||||
|
||||
This document describes how to deploy Kubernetes on Ubuntu nodes, with 1 master and 3 nodes involved
|
||||
|
|
|
|||
|
|
@@ -1,4 +1,5 @@
---
|
||||
title: Providing Load-Balanced Access to an Application in a Cluster
|
||||
---
|
||||
|
||||
{% capture overview %}
|
||||
|
|
|
|||
|
|
@@ -0,0 +1,149 @@
---
|
||||
title: Communicating Between Containers Running in the Same Pod
|
||||
---
|
||||
|
||||
{% capture overview %}
|
||||
|
||||
This page shows how to use a Volume to communicate between two Containers running
|
||||
in the same Pod.
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
|
||||
{% capture prerequisites %}
|
||||
|
||||
{% include task-tutorial-prereqs.md %}
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
|
||||
{% capture steps %}
|
||||
|
||||
### Creating a Pod that runs two Containers
|
||||
|
||||
In this exercise, you create a Pod that runs two Containers. The two containers
|
||||
share a Volume that they can use to communicate. Here is the configuration file
|
||||
for the Pod:
|
||||
|
||||
{% include code.html language="yaml" file="two-container-pod.yaml" ghlink="/docs/tasks/configure-pod-container/two-container-pod.yaml" %}
|
||||
|
||||
In the configuration file, you can see that the Pod has a Volume named
|
||||
`shared-data`.
|
||||
|
||||
The first container listed in the configuration file runs an nginx server. The
|
||||
mount path for the shared Volume is `/usr/share/nginx/html`.
|
||||
The second container is based on the debian image, and has a mount path of
|
||||
`/pod-data`. The second container runs the following command and then terminates.
|
||||
|
||||
echo Hello from the debian container > /pod-data/index.html
|
||||
|
||||
Notice that the second container writes the `index.html` file in the root
|
||||
directory of the nginx server.
|
||||
|
||||
Create the Pod and the two Containers:
|
||||
|
||||
kubectl create -f http://k8s.io/docs/tasks/configure-pod-container/two-container-pod.yaml
|
||||
|
||||
View information about the Pod and the Containers:
|
||||
|
||||
kubectl get pod two-containers --output=yaml
|
||||
|
||||
Here is a portion of the output:
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
...
|
||||
name: two-containers
|
||||
namespace: default
|
||||
...
|
||||
spec:
|
||||
...
|
||||
containerStatuses:
|
||||
|
||||
- containerID: docker://c1d8abd1 ...
|
||||
image: debian
|
||||
...
|
||||
lastState:
|
||||
terminated:
|
||||
...
|
||||
name: debian-container
|
||||
...
|
||||
|
||||
- containerID: docker://96c1ff2c5bb ...
|
||||
image: nginx
|
||||
...
|
||||
name: nginx-container
|
||||
...
|
||||
state:
|
||||
running:
|
||||
...
|
||||
|
||||
You can see that the debian Container has terminated, and the nginx Container
|
||||
is still running.
|
||||
|
||||
Get a shell to nginx Container:
|
||||
|
||||
kubectl exec -it two-containers -c nginx-container -- /bin/bash
|
||||
|
||||
In your shell, verify that nginx is running:
|
||||
|
||||
root@two-containers:/# ps aux
|
||||
|
||||
The output is similar to this:
|
||||
|
||||
USER PID ... STAT START TIME COMMAND
|
||||
root 1 ... Ss 21:12 0:00 nginx: master process nginx -g daemon off;
|
||||
|
||||
Recall that the debian Container created the `index.html` file in the nginx root
|
||||
directory. Use `curl` to send a GET request to the nginx server:
|
||||
|
||||
root@two-containers:/# apt-get update
|
||||
root@two-containers:/# apt-get install curl
|
||||
root@two-containers:/# curl localhost
|
||||
|
||||
The output shows that nginx serves a web page written by the debian container:
|
||||
|
||||
Hello from the debian container
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
|
||||
{% capture discussion %}
|
||||
|
||||
### Discussion
|
||||
|
||||
The primary reason that Pods can have multiple containers is to support
|
||||
helper applications that assist a primary application. Typical examples of
|
||||
helper applications are data pullers, data pushers, and proxies.
|
||||
Helper and primary applications often need to communicate with each other.
|
||||
Typically this is done through a shared filesystem, as shown in this exercise,
|
||||
or through the loopback network interface, localhost. An example of this pattern is a
|
||||
web server along with a helper program that polls a Git repository for new updates.
|
||||
|
||||
The Volume in this exercise provides a way for Containers to communicate during
|
||||
the life of the Pod. If the Pod is deleted and recreated, any data stored in
|
||||
the shared Volume is lost.
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
|
||||
{% capture whatsnext %}
|
||||
|
||||
* Learn more about
|
||||
[patterns for composite containers](http://blog.kubernetes.io/2015/06/the-distributed-system-toolkit-patterns.html).
|
||||
|
||||
* Learn about
|
||||
[composite containers for modular architecture](http://www.slideshare.net/Docker/slideshare-burns).
|
||||
|
||||
* See
|
||||
[Configuring a Pod to Use a Volume for Storage](/docs/tasks/configure-pod-container/configure-volume-storage/).
|
||||
|
||||
* See [Volume](/docs/api-reference/v1/definitions/#_v1_volume).
|
||||
|
||||
* See [Pod](/docs/api-reference/v1/definitions/#_v1_pod).
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
|
||||
{% include templates/task.md %}
|
||||
|
|
@@ -0,0 +1,271 @@
---
|
||||
redirect_from:
|
||||
- "/docs/user-guide/liveness/"
|
||||
- "/docs/user-guide.liveness.html"
|
||||
title: Configuring Liveness and Readiness Probes
|
||||
---
|
||||
|
||||
{% capture overview %}
|
||||
|
||||
This page shows how to configure liveness and readiness probes for Containers.
|
||||
|
||||
The [kubelet](/docs/admin/kubelet/) uses liveness probes to know when to
|
||||
restart a Container. For example, liveness probes could catch a deadlock,
|
||||
where an application is running, but unable to make progress. Restarting a
|
||||
Container in such a state can help to make the application more available
|
||||
despite bugs.
|
||||
|
||||
The kubelet uses readiness probes to know when a Container is ready to start
|
||||
accepting traffic. A Pod is considered ready when all of its Containers are ready.
|
||||
One use of this signal is to control which Pods are used as backends for Services.
|
||||
When a Pod is not ready, it is removed from Service load balancers.
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
{% capture prerequisites %}
|
||||
|
||||
{% include task-tutorial-prereqs.md %}
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
{% capture steps %}
|
||||
|
||||
### Defining a liveness command
|
||||
|
||||
Many applications running for long periods of time eventually transition to
|
||||
broken states, and cannot recover except by being restarted. Kubernetes provides
|
||||
liveness probes to detect and remedy such situations.
|
||||
|
||||
In this exercise, you create a Pod that runs a Container based on the
|
||||
`gcr.io/google_containers/busybox` image. Here is the configuration file for the Pod:
|
||||
|
||||
{% include code.html language="yaml" file="exec-liveness.yaml" ghlink="/docs/tasks/configure-pod-container/exec-liveness.yaml" %}
|
||||
|
||||
In the configuration file, you can see that the Pod has a single Container.
|
||||
The `livenessProbe` field specifies that the kubelet should perform a liveness
|
||||
probe every 5 seconds. The `initialDelaySeconds` field tells the kubelet that it
|
||||
should wait 5 seconds before performing the first probe. To perform a probe, the
|
||||
kubelet executes the command `cat /tmp/healthy` in the Container. If the
|
||||
command succeeds, it returns 0, and the kubelet considers the Container to be alive and
|
||||
healthy. If the command returns a non-zero value, the kubelet kills the Container
|
||||
and restarts it.
|
||||
|
||||
When the Container starts, it executes this command:
|
||||
|
||||
```shell
|
||||
/bin/sh -c "touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600"
|
||||
```
|
||||
|
||||
For the first 30 seconds of the Container's life, there is a `/tmp/healthy` file.
|
||||
So during the first 30 seconds, the command `cat /tmp/healthy` returns a success
|
||||
code. After 30 seconds, `cat /tmp/healthy` returns a failure code.
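
If it helps to see the exit codes the probe keys off, you can reproduce them in any shell (this is just an illustration, not part of the task):

```shell
touch /tmp/healthy
cat /tmp/healthy; echo $?   # prints 0 while the file exists, so the probe succeeds
rm -rf /tmp/healthy
cat /tmp/healthy; echo $?   # non-zero once the file is gone, so the probe fails
```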
|
||||
|
||||
Create the Pod:
|
||||
|
||||
```shell
|
||||
kubectl create -f http://k8s.io/docs/tasks/configure-pod-container/exec-liveness.yaml
|
||||
```
|
||||
|
||||
Within 30 seconds, view the Pod events:
|
||||
|
||||
```
|
||||
kubectl describe pod liveness-exec
|
||||
```
|
||||
|
||||
The output indicates that no liveness probes have failed yet:
|
||||
|
||||
```shell
|
||||
FirstSeen LastSeen Count From SubobjectPath Type Reason Message
|
||||
--------- -------- ----- ---- ------------- -------- ------ -------
|
||||
24s 24s 1 {default-scheduler } Normal Scheduled Successfully assigned liveness-exec to worker0
|
||||
23s 23s 1 {kubelet worker0} spec.containers{liveness} Normal Pulling pulling image "gcr.io/google_containers/busybox"
|
||||
23s 23s 1 {kubelet worker0} spec.containers{liveness} Normal Pulled Successfully pulled image "gcr.io/google_containers/busybox"
|
||||
23s 23s 1 {kubelet worker0} spec.containers{liveness} Normal Created Created container with docker id 86849c15382e; Security:[seccomp=unconfined]
|
||||
23s 23s 1 {kubelet worker0} spec.containers{liveness} Normal Started Started container with docker id 86849c15382e
|
||||
```
|
||||
|
||||
After 30 seconds, view the Pod events again:
|
||||
|
||||
```shell
|
||||
kubectl describe pod liveness-exec
|
||||
```
|
||||
|
||||
At the bottom of the output, there are messages indicating that the liveness
|
||||
probes have failed, and the containers have been killed and recreated.
|
||||
|
||||
```shell
|
||||
FirstSeen LastSeen Count From SubobjectPath Type Reason Message
|
||||
--------- -------- ----- ---- ------------- -------- ------ -------
|
||||
37s 37s 1 {default-scheduler } Normal Scheduled Successfully assigned liveness-exec to worker0
|
||||
36s 36s 1 {kubelet worker0} spec.containers{liveness} Normal Pulling pulling image "gcr.io/google_containers/busybox"
|
||||
36s 36s 1 {kubelet worker0} spec.containers{liveness} Normal Pulled Successfully pulled image "gcr.io/google_containers/busybox"
|
||||
36s 36s 1 {kubelet worker0} spec.containers{liveness} Normal Created Created container with docker id 86849c15382e; Security:[seccomp=unconfined]
|
||||
36s 36s 1 {kubelet worker0} spec.containers{liveness} Normal Started Started container with docker id 86849c15382e
|
||||
2s 2s 1 {kubelet worker0} spec.containers{liveness} Warning Unhealthy Liveness probe failed: cat: can't open '/tmp/healthy': No such file or directory
|
||||
```
|
||||
|
||||
Wait another 30 seconds, and verify that the Container has been restarted:
|
||||
|
||||
```shell
|
||||
kubectl get pod liveness-exec
|
||||
```
|
||||
|
||||
The output shows that `RESTARTS` has been incremented:
|
||||
|
||||
```shell
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
liveness-exec 1/1 Running 1 1m
|
||||
```
|
||||
|
||||
### Defining a liveness HTTP request
|
||||
|
||||
Another kind of liveness probe uses an HTTP GET request. Here is the configuration
|
||||
file for a Pod that runs a container based on the `gcr.io/google_containers/liveness`
|
||||
image.
|
||||
|
||||
{% include code.html language="yaml" file="http-liveness.yaml" ghlink="/docs/tasks/configure-pod-container/http-liveness.yaml" %}
|
||||
|
||||
In the configuration file, you can see that the Pod has a single Container.
|
||||
The `livenessProbe` field specifies that the kubelet should perform a liveness
|
||||
probe every 3 seconds. The `initialDelaySeconds` field tells the kubelet that it
|
||||
should wait 3 seconds before performing the first probe. To perform a probe, the
|
||||
kubelet sends an HTTP GET request to the server that is running in the Container
|
||||
and listening on port 8080. If the handler for the server's `/healthz` path
|
||||
returns a success code, the kubelet considers the Container to be alive and
|
||||
healthy. If the handler returns a failure code, the kubelet kills the Container
|
||||
and restarts it.
|
||||
|
||||
Any code greater than or equal to 200 and less than 400 indicates success. Any
|
||||
other code indicates failure.
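
One way to spot-check the handler yourself is to ask curl for just the status code (the pod IP below is a placeholder, and this assumes the Pod is reachable from your shell):

```shell
curl -s -o /dev/null -w "%{http_code}\n" http://<pod-ip>:8080/healthz
# 200-399 counts as success; the 500 returned after ten seconds counts as failure
```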
|
||||
|
||||
You can see the source code for the server in
|
||||
[server.go](http://k8s.io/docs/user-guide/liveness/image/server.go).
|
||||
|
||||
For the first 10 seconds that the Container is alive, the `/healthz` handler
|
||||
returns a status of 200. After that, the handler returns a status of 500.
|
||||
|
||||
```go
|
||||
http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
|
||||
duration := time.Now().Sub(started)
|
||||
if duration.Seconds() > 10 {
|
||||
w.WriteHeader(500)
|
||||
w.Write([]byte(fmt.Sprintf("error: %v", duration.Seconds())))
|
||||
} else {
|
||||
w.WriteHeader(200)
|
||||
w.Write([]byte("ok"))
|
||||
}
|
||||
```
|
||||
|
||||
The kubelet starts performing health checks 3 seconds after the Container starts.
|
||||
So the first couple of health checks will succeed. But after 10 seconds, the health
|
||||
checks will fail, and the kubelet will kill and restart the Container.
|
||||
|
||||
To try the HTTP liveness check, create a Pod:
|
||||
|
||||
```shell
|
||||
kubectl create -f http://k8s.io/docs/tasks/configure-pod-container/http-liveness.yaml
|
||||
```
|
||||
|
||||
After 10 seconds, view Pod events to verify that liveness probes have failed and
|
||||
the Container has been restarted:
|
||||
|
||||
```shell
|
||||
kubectl describe pod liveness-http
|
||||
```
|
||||
|
||||
### Using a named port
|
||||
|
||||
You can use a named
|
||||
[ContainerPort](/docs/api-reference/v1/definitions/#_v1_containerport)
|
||||
for HTTP liveness checks:
|
||||
|
||||
```yaml
|
||||
ports:
|
||||
- name: liveness-port
|
||||
containerPort: 8080
|
||||
hostPort: 8080
|
||||
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: liveness-port
|
||||
```
|
||||
|
||||
### Defining readiness probes
|
||||
|
||||
Sometimes, applications are temporarily unable to serve traffic.
|
||||
For example, an application might need to load large data or configuration
|
||||
files during startup. In such cases, you don't want to kill the application,
|
||||
but you don’t want to send it requests either. Kubernetes provides
|
||||
readiness probes to detect and mitigate these situations. A pod with containers
|
||||
reporting that they are not ready does not receive traffic through Kubernetes
|
||||
Services.
|
||||
|
||||
Readiness probes are configured similarly to liveness probes. The only difference
|
||||
is that you use the `readinessProbe` field instead of the `livenessProbe` field.
|
||||
|
||||
```yaml
|
||||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- cat
|
||||
- /tmp/healthy
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 5
|
||||
```
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
|
||||
{% capture discussion %}
|
||||
|
||||
### Discussion
|
||||
|
||||
{% comment %}
|
||||
Eventually, some of this Discussion section could be moved to a concept topic.
|
||||
{% endcomment %}
|
||||
|
||||
[Probes](/docs/api-reference/v1/definitions/#_v1_probe) have these additional fields that you can use to more precisely control the behavior of liveness and readiness checks:
|
||||
|
||||
* timeoutSeconds
|
||||
* successThreshold
|
||||
* failureThreshold
|
||||
|
||||
[HTTP probes](/docs/api-reference/v1/definitions/#_v1_httpgetaction)
|
||||
have these additional fields:
|
||||
|
||||
* host
|
||||
* scheme
|
||||
* httpHeaders
|
||||
|
||||
For an HTTP probe, the kubelet sends an HTTP request to the specified path and
|
||||
port to perform the check. The kubelet sends the probe to the container’s IP address,
|
||||
unless the address is overridden by the optional `host` field in `httpGet`.
|
||||
In most scenarios, you do not want to set the `host` field. Here's one scenario
|
||||
where you would set it. Suppose the Container listens on 127.0.0.1 and the Pod's
|
||||
`hostNetwork` field is true. Then `host`, under `httpGet`, should be set to 127.0.0.1.
|
||||
If your pod relies on virtual hosts, which is probably the more common case,
|
||||
you should not use `host`, but rather set the `Host` header in `httpHeaders`.
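
Roughly speaking, a probe configured that way behaves like the following request (the hostname is made up for illustration, and the pod IP is a placeholder):

```shell
# The request goes to the pod's IP, but carries the virtual-host name in the Host header:
curl -H "Host: myapp.example.com" http://<pod-ip>:8080/healthz
```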
|
||||
|
||||
In addition to command probes and HTTP probes, Kubernetes supports
|
||||
[TCP probes](/docs/api-reference/v1/definitions/#_v1_tcpsocketaction).
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
{% capture whatsnext %}
|
||||
|
||||
* Learn more about
|
||||
[Container Probes](/docs/user-guide/pod-states/#container-probes).
|
||||
|
||||
* Learn more about
|
||||
[Health Checking section](/docs/user-guide/walkthrough/k8s201/#health-checking).
|
||||
|
||||
#### Reference
|
||||
|
||||
* [Pod](http://kubernetes.io/docs/api-reference/v1/definitions#_v1_pod)
|
||||
* [Container](/docs/api-reference/v1/definitions/#_v1_container)
|
||||
* [Probe](/docs/api-reference/v1/definitions/#_v1_probe)
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
{% include templates/task.md %}
|
||||
|
|
@@ -0,0 +1,26 @@
apiVersion: v1
|
||||
kind: Pod
|
||||
|
||||
metadata:
|
||||
labels:
|
||||
test: liveness
|
||||
name: liveness-exec
|
||||
spec:
|
||||
containers:
|
||||
|
||||
- name: liveness
|
||||
|
||||
args:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600
|
||||
|
||||
image: gcr.io/google_containers/busybox
|
||||
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- cat
|
||||
- /tmp/healthy
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 5
|
||||
|
|
@@ -0,0 +1,25 @@
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
labels:
|
||||
test: liveness
|
||||
name: liveness-http
|
||||
spec:
|
||||
containers:
|
||||
|
||||
- name: liveness
|
||||
|
||||
args:
|
||||
- /server
|
||||
|
||||
image: gcr.io/google_containers/liveness
|
||||
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 8080
|
||||
httpHeaders:
|
||||
- name: X-Custom-Header
|
||||
value: Awesome
|
||||
initialDelaySeconds: 3
|
||||
periodSeconds: 3
|
||||
|
|
@@ -0,0 +1,11 @@
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: private-reg
|
||||
spec:
|
||||
containers:
|
||||
- name: private-reg-container
|
||||
image: <your-private-image>
|
||||
imagePullSecrets:
|
||||
- name: regsecret
|
||||
|
||||
|
|
@@ -0,0 +1,133 @@
---
|
||||
title: Pulling an Image from a Private Registry
|
||||
---
|
||||
|
||||
{% capture overview %}
|
||||
|
||||
This page shows how to create a Pod that uses a Secret to pull an image from a
|
||||
private Docker registry or repository.
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
{% capture prerequisites %}
|
||||
|
||||
* {% include task-tutorial-prereqs.md %}
|
||||
|
||||
* To do this exercise, you need a
|
||||
[Docker ID](https://docs.docker.com/docker-id/) and password.
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
{% capture steps %}
|
||||
|
||||
### Logging in to Docker
|
||||
|
||||
docker login
|
||||
|
||||
When prompted, enter your Docker username and password.
|
||||
|
||||
The login process creates or updates a `config.json` file that holds an
|
||||
authorization token.
|
||||
|
||||
View the `config.json` file:
|
||||
|
||||
cat ~/.docker/config.json
|
||||
|
||||
The output contains a section similar to this:
|
||||
|
||||
{
|
||||
"auths": {
|
||||
"https://index.docker.io/v1/": {
|
||||
"auth": "c3RldmU1MzpTdGV2ZURvY2tAIzE2"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
### Creating a Secret that holds your authorization token
|
||||
|
||||
Create a Secret named `regsecret`:
|
||||
|
||||
kubectl create secret docker-registry regsecret --docker-username=<your-name> --docker-password=<your-pword> --docker-email=<your-email>
|
||||
|
||||
where:
|
||||
|
||||
* `<your-name>` is your Docker username.
|
||||
* `<your-pword>` is your Docker password.
|
||||
* `<your-email>` is your Docker email.
|
||||
|
||||
### Understanding your Secret
|
||||
|
||||
To understand what's in the Secret you just created, start by viewing the
|
||||
Secret in YAML format:
|
||||
|
||||
kubectl get secret regsecret --output=yaml
|
||||
|
||||
The output is similar to this:
|
||||
|
||||
apiVersion: v1
|
||||
data:
|
||||
.dockercfg: eyJodHRwczovL2luZGV4L ... J0QUl6RTIifX0=
|
||||
kind: Secret
|
||||
metadata:
|
||||
...
|
||||
name: regsecret
|
||||
...
|
||||
type: kubernetes.io/dockercfg
|
||||
|
||||
The value of the `.dockercfg` field is a base64 representation of your secret data.
|
||||
|
||||
Copy the base64 representation of the secret data into a file named `secret64`.
|
||||
|
||||
**Important**: Make sure there are no line breaks in your `secret64` file.
|
||||
|
||||
To understand what is in the `dockercfg` field, convert the secret data to a
|
||||
readable format:
|
||||
|
||||
base64 -d secret64
|
||||
|
||||
The output is similar to this:
|
||||
|
||||
{"https://index.docker.io/v1/":{"username":"janedoe","password":"xxxxxxxxxxx","email":"jdoe@example.com","auth":"c3RldmU1MzpTdGV2ZURvY2tAIzE2"}}
|
||||
|
||||
Notice that the secret data contains the authorization token from your
|
||||
`config.json` file.
|
||||
|
||||
### Creating a Pod that uses your Secret
|
||||
|
||||
Here is a configuration file for a Pod that needs access to your secret data:
|
||||
|
||||
{% include code.html language="yaml" file="private-reg-pod.yaml" ghlink="/docs/tasks/configure-pod-container/private-reg-pod.yaml" %}
|
||||
|
||||
Copy the contents of `private-reg-pod.yaml` to your own file named
|
||||
`my-private-reg-pod.yaml`. In your file, replace `<your-private-image>` with
|
||||
the path to an image in a private repository.
|
||||
|
||||
Example Docker Hub private image:
|
||||
|
||||
janedoe/jdoe-private:v1
|
||||
|
||||
To pull the image from the private repository, Kubernetes needs credentials. The
|
||||
`imagePullSecrets` field in the configuration file specifies that Kubernetes
|
||||
should get the credentials from a Secret named
|
||||
`regsecret`.
|
||||
|
||||
Create a Pod that uses your Secret, and verify that the Pod is running:
|
||||
|
||||
kubectl create -f my-private-reg-pod.yaml
|
||||
kubectl get pod private-reg
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
{% capture whatsnext %}
|
||||
|
||||
* Learn more about [Secrets](/docs/user-guide/secrets/).
|
||||
* Learn more about
|
||||
[using a private registry](/docs/user-guide/images/#using-a-private-registry).
|
||||
* See [kubectl create secret docker-registry](/docs/user-guide/kubectl/kubectl_create_secret_docker-registry/).
|
||||
* See [Secret](/docs/api-reference/v1/definitions/#_v1_secret)
|
||||
* See the `imagePullSecrets` field of
|
||||
[PodSpec](/docs/api-reference/v1/definitions/#_v1_podspec).
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
{% include templates/task.md %}
|
||||
|
|
@@ -0,0 +1,27 @@
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: two-containers
|
||||
spec:
|
||||
|
||||
restartPolicy: Never
|
||||
|
||||
volumes:
|
||||
- name: shared-data
|
||||
emptyDir: {}
|
||||
|
||||
containers:
|
||||
|
||||
- name: nginx-container
|
||||
image: nginx
|
||||
volumeMounts:
|
||||
- name: shared-data
|
||||
mountPath: /usr/share/nginx/html
|
||||
|
||||
- name: debian-container
|
||||
image: debian
|
||||
volumeMounts:
|
||||
- name: shared-data
|
||||
mountPath: /pod-data
|
||||
command: ["/bin/sh"]
|
||||
args: ["-c", "echo Hello from the debian container > /pod-data/index.html"]
|
||||
|
|
@@ -0,0 +1,43 @@
---
|
||||
|
||||
assignees:
|
||||
- cdrage
|
||||
|
||||
title: Kompose Overview
|
||||
---
|
||||
|
||||
`kompose` is a tool to help users who are familiar with `docker-compose` move to **Kubernetes**. `kompose` takes a Docker Compose file and translates it into Kubernetes resources.
|
||||
|
||||
`kompose` is a convenience tool to go from local Docker development to managing your application with Kubernetes. Transformation of the Docker Compose format to Kubernetes resource manifests may not be exact, but it helps tremendously when first deploying an application on Kubernetes.
|
||||
|
||||
## Use Case
|
||||
|
||||
If you have a Docker Compose `docker-compose.yml` or a Docker Distributed Application Bundle `docker-compose-bundle.dab` file, you can convert it into Kubernetes deployments and services like this:
|
||||
|
||||
```console
|
||||
$ kompose --bundle docker-compose-bundle.dab convert
|
||||
WARN[0000]: Unsupported key networks - ignoring
|
||||
file "redis-svc.json" created
|
||||
file "web-svc.json" created
|
||||
file "web-deployment.json" created
|
||||
file "redis-deployment.json" created
|
||||
|
||||
$ kompose -f docker-compose.yml convert
|
||||
WARN[0000]: Unsupported key networks - ignoring
|
||||
file "redis-svc.json" created
|
||||
file "web-svc.json" created
|
||||
file "web-deployment.json" created
|
||||
file "redis-deployment.json" created
|
||||
```
|
||||
|
||||
## Installation
|
||||
|
||||
Grab the latest [release](https://github.com/kubernetes-incubator/kompose/releases) for your OS, untar and extract the binary.
|
||||
|
||||
### Linux
|
||||
|
||||
```sh
|
||||
wget https://github.com/kubernetes-incubator/kompose/releases/download/v0.1.2/kompose_linux-amd64.tar.gz
|
||||
tar -xvf kompose_linux-amd64.tar.gz --strip 1
|
||||
sudo mv kompose /usr/local/bin
|
||||
```
|
||||
|
|
@@ -0,0 +1,310 @@
---
|
||||
|
||||
assignees:
|
||||
- cdrage
|
||||
|
||||
title: Kompose User Guide
|
||||
---
|
||||
|
||||
* TOC
|
||||
{:toc}
|
||||
|
||||
Kompose supports two providers: OpenShift and Kubernetes.
You can choose the targeted provider with the global `--provider` option, or by setting the environment variable `PROVIDER`.
Setting the environment variable `PROVIDER` lets you switch permanently to the OpenShift provider without having to pass the `--provider openshift` option every time.
If no provider is specified, Kubernetes is the default.
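
For example, the following two invocations should be equivalent (output omitted; sketch only):

```console
$ kompose --provider openshift -f docker-compose.yml convert

$ export PROVIDER=openshift
$ kompose -f docker-compose.yml convert
```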
|
||||
|
||||
|
||||
## Kompose convert
|
||||
|
||||
Currently, Kompose can transform Docker Compose files (both v1 and v2) and [experimental Distributed Application Bundles](https://blog.docker.com/2016/06/docker-app-bundle/) into Kubernetes and OpenShift objects.
There are a couple of sample files in the `examples/` directory for testing.
You convert a Compose or DAB file to Kubernetes or OpenShift objects with `kompose convert`.
|
||||
|
||||
### Kubernetes
|
||||
```console
|
||||
$ cd examples/
|
||||
|
||||
$ ls
|
||||
docker-compose.yml docker-compose-bundle.dab docker-gitlab.yml docker-voting.yml
|
||||
|
||||
$ kompose -f docker-gitlab.yml convert -y
|
||||
file "redisio-svc.yaml" created
|
||||
file "gitlab-svc.yaml" created
|
||||
file "postgresql-svc.yaml" created
|
||||
file "gitlab-deployment.yaml" created
|
||||
file "postgresql-deployment.yaml" created
|
||||
file "redisio-deployment.yaml" created
|
||||
|
||||
$ ls *.yaml
|
||||
gitlab-deployment.yaml postgresql-deployment.yaml redis-deployment.yaml redisio-svc.yaml web-deployment.yaml
|
||||
gitlab-svc.yaml postgresql-svc.yaml redisio-deployment.yaml redis-svc.yaml web-svc.yaml
|
||||
```
|
||||
|
||||
You can try with a Docker Compose version 2 like this:
|
||||
|
||||
```console
|
||||
$ kompose --file docker-voting.yml convert
|
||||
WARN[0000]: Unsupported key networks - ignoring
|
||||
WARN[0000]: Unsupported key build - ignoring
|
||||
file "worker-svc.json" created
|
||||
file "db-svc.json" created
|
||||
file "redis-svc.json" created
|
||||
file "result-svc.json" created
|
||||
file "vote-svc.json" created
|
||||
file "redis-deployment.json" created
|
||||
file "result-deployment.json" created
|
||||
file "vote-deployment.json" created
|
||||
file "worker-deployment.json" created
|
||||
file "db-deployment.json" created
|
||||
|
||||
$ ls
|
||||
db-deployment.json docker-compose.yml docker-gitlab.yml redis-deployment.json result-deployment.json vote-deployment.json worker-deployment.json
|
||||
db-svc.json docker-compose-bundle.dab docker-voting.yml redis-svc.json result-svc.json vote-svc.json worker-svc.json
|
||||
```
|
||||
|
||||
Using `--bundle, --dab` to specify a DAB file as below:
|
||||
|
||||
```console
|
||||
$ kompose --bundle docker-compose-bundle.dab convert
|
||||
WARN[0000]: Unsupported key networks - ignoring
|
||||
file "redis-svc.json" created
|
||||
file "web-svc.json" created
|
||||
file "web-deployment.json" created
|
||||
file "redis-deployment.json" created
|
||||
```
|
||||
|
||||
### OpenShift
|
||||
|
||||
```console
|
||||
$ kompose --provider openshift --file docker-voting.yml convert
|
||||
WARN[0000] [worker] Service cannot be created because of missing port.
|
||||
INFO[0000] file "vote-service.json" created
|
||||
INFO[0000] file "db-service.json" created
|
||||
INFO[0000] file "redis-service.json" created
|
||||
INFO[0000] file "result-service.json" created
|
||||
INFO[0000] file "vote-deploymentconfig.json" created
|
||||
INFO[0000] file "vote-imagestream.json" created
|
||||
INFO[0000] file "worker-deploymentconfig.json" created
|
||||
INFO[0000] file "worker-imagestream.json" created
|
||||
INFO[0000] file "db-deploymentconfig.json" created
|
||||
INFO[0000] file "db-imagestream.json" created
|
||||
INFO[0000] file "redis-deploymentconfig.json" created
|
||||
INFO[0000] file "redis-imagestream.json" created
|
||||
INFO[0000] file "result-deploymentconfig.json" created
|
||||
INFO[0000] file "result-imagestream.json" created
|
||||
```
|
||||
|
||||
In a similar way, you can convert DAB files to OpenShift objects.
|
||||
```console
|
||||
$ kompose --bundle docker-compose-bundle.dab --provider openshift convert
|
||||
WARN[0000]: Unsupported key networks - ignoring
|
||||
INFO[0000] file "redis-svc.json" created
|
||||
INFO[0000] file "web-svc.json" created
|
||||
INFO[0000] file "web-deploymentconfig.json" created
|
||||
INFO[0000] file "web-imagestream.json" created
|
||||
INFO[0000] file "redis-deploymentconfig.json" created
|
||||
INFO[0000] file "redis-imagestream.json" created
|
||||
```
|
||||
|
||||
## Kompose up
|
||||
|
||||
Kompose supports a straightforward way to deploy your "composed" application to Kubernetes or OpenShift via `kompose up`.
|
||||
|
||||
|
||||
### Kubernetes
|
||||
```console
|
||||
$ kompose --file ./examples/docker-guestbook.yml up
|
||||
We are going to create Kubernetes deployments and services for your Dockerized application.
|
||||
If you need different kind of resources, use the 'kompose convert' and 'kubectl create -f' commands instead.
|
||||
|
||||
INFO[0000] Successfully created service: redis-master
|
||||
INFO[0000] Successfully created service: redis-slave
|
||||
INFO[0000] Successfully created service: frontend
|
||||
INFO[0001] Successfully created deployment: redis-master
|
||||
INFO[0001] Successfully created deployment: redis-slave
|
||||
INFO[0001] Successfully created deployment: frontend
|
||||
|
||||
Your application has been deployed to Kubernetes. You can run 'kubectl get deployment,svc,pods' for details.
|
||||
|
||||
$ kubectl get deployment,svc,pods
|
||||
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
|
||||
frontend 1 1 1 1 4m
|
||||
redis-master 1 1 1 1 4m
|
||||
redis-slave 1 1 1 1 4m
|
||||
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
frontend 10.0.174.12 <none> 80/TCP 4m
|
||||
kubernetes 10.0.0.1 <none> 443/TCP 13d
|
||||
redis-master 10.0.202.43 <none> 6379/TCP 4m
|
||||
redis-slave 10.0.1.85 <none> 6379/TCP 4m
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
frontend-2768218532-cs5t5 1/1 Running 0 4m
|
||||
redis-master-1432129712-63jn8 1/1 Running 0 4m
|
||||
redis-slave-2504961300-nve7b 1/1 Running 0 4m
|
||||
```
|
||||
Note:
|
||||
|
||||
- You must have a running Kubernetes cluster with a pre-configured kubectl context.
|
||||
- Only deployments and services are generated and deployed to Kubernetes. If you need different kinds of resources, use the 'kompose convert' and 'kubectl create -f' commands instead.
|
||||
|
||||
### OpenShift
|
||||
```console
|
||||
$ kompose --file ./examples/docker-guestbook.yml --provider openshift up
|
||||
We are going to create OpenShift DeploymentConfigs and Services for your Dockerized application.
|
||||
If you need different kind of resources, use the 'kompose convert' and 'oc create -f' commands instead.
|
||||
|
||||
INFO[0000] Successfully created service: redis-slave
|
||||
INFO[0000] Successfully created service: frontend
|
||||
INFO[0000] Successfully created service: redis-master
|
||||
INFO[0000] Successfully created deployment: redis-slave
|
||||
INFO[0000] Successfully created ImageStream: redis-slave
|
||||
INFO[0000] Successfully created deployment: frontend
|
||||
INFO[0000] Successfully created ImageStream: frontend
|
||||
INFO[0000] Successfully created deployment: redis-master
|
||||
INFO[0000] Successfully created ImageStream: redis-master
|
||||
|
||||
Your application has been deployed to OpenShift. You can run 'oc get dc,svc,is' for details.
|
||||
|
||||
$ oc get dc,svc,is
|
||||
NAME REVISION DESIRED CURRENT TRIGGERED BY
|
||||
dc/frontend 0 1 0 config,image(frontend:v4)
|
||||
dc/redis-master 0 1 0 config,image(redis-master:e2e)
|
||||
dc/redis-slave 0 1 0 config,image(redis-slave:v1)
|
||||
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
svc/frontend 172.30.46.64 <none> 80/TCP 8s
|
||||
svc/redis-master 172.30.144.56 <none> 6379/TCP 8s
|
||||
svc/redis-slave 172.30.75.245 <none> 6379/TCP 8s
|
||||
NAME DOCKER REPO TAGS UPDATED
|
||||
is/frontend 172.30.12.200:5000/fff/frontend
|
||||
is/redis-master 172.30.12.200:5000/fff/redis-master
|
||||
is/redis-slave 172.30.12.200:5000/fff/redis-slave v1
|
||||
```
|
||||
|
||||
Note:
|
||||
|
||||
- You must have a running OpenShift cluster with a pre-configured `oc` context (`oc login`).
|
||||
|
||||
## Kompose down
|
||||
|
||||
Once you have deployed your "composed" application to Kubernetes, `kompose down` will help you take the application down by deleting its deployments and services. If you need to remove other resources, use the 'kubectl' command.
|
||||
|
||||
```console
|
||||
$ kompose --file docker-guestbook.yml down
|
||||
INFO[0000] Successfully deleted service: redis-master
|
||||
INFO[0004] Successfully deleted deployment: redis-master
|
||||
INFO[0004] Successfully deleted service: redis-slave
|
||||
INFO[0008] Successfully deleted deployment: redis-slave
|
||||
INFO[0009] Successfully deleted service: frontend
|
||||
INFO[0013] Successfully deleted deployment: frontend
|
||||
```
|
||||
Note:
|
||||
- You must have a running Kubernetes cluster with a pre-configured kubectl context.
|
||||
|
||||
## Alternate formats
|
||||
|
||||
The default `kompose` transformation generates Kubernetes [Deployments](http://kubernetes.io/docs/user-guide/deployments/) and [Services](http://kubernetes.io/docs/user-guide/services/) in JSON format. You can generate YAML instead with `-y`, and you can alternatively generate [Replication Controller](http://kubernetes.io/docs/user-guide/replication-controller/) objects, [Daemon Sets](http://kubernetes.io/docs/admin/daemons/), or [Helm](https://github.com/helm/helm) charts.
|
||||
|
||||
```console
|
||||
$ kompose convert
|
||||
file "redis-svc.json" created
|
||||
file "web-svc.json" created
|
||||
file "redis-deployment.json" created
|
||||
file "web-deployment.json" created
|
||||
```
|
||||
The `*-deployment.json` files contain the Deployment objects.
|
||||
|
||||
```console
|
||||
$ kompose convert --rc -y
|
||||
file "redis-svc.yaml" created
|
||||
file "web-svc.yaml" created
|
||||
file "redis-rc.yaml" created
|
||||
file "web-rc.yaml" created
|
||||
```
|
||||
|
||||
The `*-rc.yaml` files contain the Replication Controller objects. If you want to specify the number of replicas (the default is 1), use the `--replicas` flag: `$ kompose convert --rc --replicas 3 -y`
|
||||
|
||||
```console
|
||||
$ kompose convert --ds -y
|
||||
file "redis-svc.yaml" created
|
||||
file "web-svc.yaml" created
|
||||
file "redis-daemonset.yaml" created
|
||||
file "web-daemonset.yaml" created
|
||||
```
|
||||
|
||||
The `*-daemonset.yaml` files contain the Daemon Set objects.
|
||||
|
||||
If you want to generate a Chart to be used with [Helm](https://github.com/kubernetes/helm), simply run:
|
||||
|
||||
```console
|
||||
$ kompose convert -c -y
|
||||
file "web-svc.yaml" created
|
||||
file "redis-svc.yaml" created
|
||||
file "web-deployment.yaml" created
|
||||
file "redis-deployment.yaml" created
|
||||
chart created in "./docker-compose/"
|
||||
|
||||
$ tree docker-compose/
|
||||
docker-compose
|
||||
├── Chart.yaml
|
||||
├── README.md
|
||||
└── templates
|
||||
├── redis-deployment.yaml
|
||||
├── redis-svc.yaml
|
||||
├── web-deployment.yaml
|
||||
└── web-svc.yaml
|
||||
```
|
||||
|
||||
The chart structure is aimed at providing a skeleton for building your Helm charts.
|
||||
|
||||
## Unsupported docker-compose configuration options
|
||||
|
||||
Currently, `kompose` does not support the following Docker Compose options:
|
||||
|
||||
```
|
||||
"build", "cgroup_parent", "devices", "depends_on", "dns", "dns_search", "domainname", "env_file", "extends", "external_links", "extra_hosts", "hostname", "ipc", "logging", "mac_address", "mem_limit", "memswap_limit", "network_mode", "networks", "pid", "security_opt", "shm_size", "stop_signal", "volume_driver", "uts", "read_only", "stdin_open", "tty", "user", "ulimits", "dockerfile", "net"
|
||||
```
|
||||
|
||||
For example:
|
||||
|
||||
```console
|
||||
$ cat nginx.yml
|
||||
nginx:
|
||||
image: nginx
|
||||
dockerfile: foobar
|
||||
build: ./foobar
|
||||
cap_add:
|
||||
- ALL
|
||||
container_name: foobar
|
||||
|
||||
$ kompose -f nginx.yml convert
|
||||
WARN[0000] Unsupported key build - ignoring
|
||||
WARN[0000] Unsupported key cap_add - ignoring
|
||||
WARN[0000] Unsupported key dockerfile - ignoring
|
||||
```
|
||||
|
||||
## Labels
|
||||
|
||||
`kompose` supports Kompose-specific labels within the `docker-compose.yml` file in order to explicitly define a service type upon conversion.
|
||||
|
||||
The currently supported options are:
|
||||
|
||||
| Key | Value |
|
||||
|----------------------|-------------------------------------|
|
||||
| kompose.service.type | nodeport / clusterip / loadbalancer |
|
||||
|
||||
|
||||
Here is a brief example that uses the annotations / labels feature to specify a service type:
|
||||
|
||||
```yaml
|
||||
version: "2"
|
||||
services:
|
||||
nginx:
|
||||
image: nginx
|
||||
dockerfile: foobar
|
||||
build: ./foobar
|
||||
cap_add:
|
||||
- ALL
|
||||
container_name: foobar
|
||||
labels:
|
||||
kompose.service.type: nodeport
|
||||
```
|
||||
|
|
@ -29,6 +29,10 @@ each of which has a sequence of steps.
|
|||
|
||||
* [Running ZooKeeper, A CP Distributed System](/docs/tutorials/stateful-application/zookeeper/)
|
||||
|
||||
#### Services
|
||||
|
||||
* [Using SourceIP](/docs/tutorials/services/source-ip/)
|
||||
|
||||
### What's next
|
||||
|
||||
If you would like to write a tutorial, see
|
||||
|
|
|
|||
|
|
@ -34,7 +34,7 @@ title: Overview
|
|||
<div class="row">
|
||||
<div class="col-md-9">
|
||||
<h2>What can Kubernetes do for you?</h2>
|
||||
<p>With modern web services, users expect applications to be available 24/7, and developers expect to deploy new versions of those applications several times a day. Containerization helps package software to serve these goals, enabling applications to be released and updated in an easy and fast way without downtime. Kubernetes helps you make sure those containerized applications run where and when you want, and helps them find the resources and tools they need to work. <a href="http://kubernetes.io/docs/whatisk8s/">Kubernetes</a> is a production-ready, open source platform designed with the Google's accumulated experience in container orchestration, combined with best-of-breed ideas from the community.</p>
|
||||
<p>With modern web services, users expect applications to be available 24/7, and developers expect to deploy new versions of those applications several times a day. Containerization helps package software to serve these goals, enabling applications to be released and updated in an easy and fast way without downtime. Kubernetes helps you make sure those containerized applications run where and when you want, and helps them find the resources and tools they need to work. <a href="http://kubernetes.io/docs/whatisk8s/">Kubernetes</a> is a production-ready, open source platform designed with Google's accumulated experience in container orchestration, combined with best-of-breed ideas from the community.</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
|
|
|||
|
|
@ -1,4 +1,5 @@
|
|||
---
|
||||
title: Using Source IP
|
||||
---
|
||||
|
||||
{% capture overview %}
|
||||
|
|
|
|||
|
|
@ -174,7 +174,7 @@ You can acquire all these from the [nginx https example](https://github.com/kube
|
|||
```shell
|
||||
$ make keys secret KEY=/tmp/nginx.key CERT=/tmp/nginx.crt SECRET=/tmp/secret.json
|
||||
$ kubectl create -f /tmp/secret.json
|
||||
secrets/nginxsecret
|
||||
secret "nginxsecret" created
|
||||
$ kubectl get secrets
|
||||
NAME TYPE DATA
|
||||
default-token-il9rc kubernetes.io/service-account-token 1
|
||||
|
|
@ -183,19 +183,16 @@ nginxsecret Opaque 2
|
|||
|
||||
Now modify your nginx replicas to start an https server using the certificate in the secret, and the Service to expose both ports (80 and 443):
|
||||
|
||||
{% include code.html language="yaml" file="nginx-secure-app.yaml" ghlink="/docs/user-guide/nginx-secure-app" %}
|
||||
{% include code.html language="yaml" file="nginx-secure-app.yaml" ghlink="/docs/user-guide/nginx-secure-app.yaml" %}
|
||||
|
||||
Noteworthy points about the nginx-secure-app manifest:
|
||||
|
||||
- It contains both rc and service specification in the same file
|
||||
- It contains both Deployment and Service specification in the same file
|
||||
- The [nginx server](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/https-nginx/default.conf) serves http traffic on port 80 and https traffic on 443, and the nginx Service exposes both ports.
|
||||
- Each container has access to the keys through a volume mounted at /etc/nginx/ssl. This is set up *before* the nginx server is started.
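For reference, a combined manifest of that shape might look roughly like the following sketch. The names, image, and secret name are assumptions for illustration; the `nginx-secure-app.yaml` file included above is the authoritative version.

```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-nginx
  labels:
    run: my-nginx
spec:
  ports:
  - port: 80
    name: http
  - port: 443
    name: https
  selector:
    run: my-nginx
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: my-nginx
spec:
  replicas: 1
  template:
    metadata:
      labels:
        run: my-nginx
    spec:
      volumes:
      - name: secret-volume
        secret:
          secretName: nginxsecret
      containers:
      - name: nginxhttps
        image: nginx            # placeholder; the real example uses an https-enabled nginx image
        ports:
        - containerPort: 443
        - containerPort: 80
        volumeMounts:
        - mountPath: /etc/nginx/ssl
          name: secret-volume
```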
|
||||
|
||||
```shell
|
||||
$ kubectl apply -f ./nginx-secure-app.yaml
|
||||
$ kubectl delete rc,svc -l app=nginx; kubectl create -f ./nginx-app.yaml
|
||||
service "my-nginx" configured
|
||||
deployment "my-nginx" configured
|
||||
$ kubectl delete deployments,svc my-nginx; kubectl create -f ./nginx-secure-app.yaml
|
||||
```
|
||||
|
||||
At this point you can reach the nginx server from any node.
|
||||
|
|
@ -216,11 +213,10 @@ Lets test this from a pod (the same secret is being reused for simplicity, the p
|
|||
|
||||
```shell
|
||||
$ kubectl create -f ./curlpod.yaml
|
||||
$ kubectl get pods
|
||||
$ kubectl get pods -l app=curlpod
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
curlpod 1/1 Running 0 2m
|
||||
|
||||
$ kubectl exec curlpod -- curl https://my-nginx --cacert /etc/nginx/ssl/nginx.crt
|
||||
curl-deployment-1515033274-1410r 1/1 Running 0 1m
|
||||
$ kubectl exec curl-deployment-1515033274-1410r -- curl https://my-nginx --cacert /etc/nginx/ssl/nginx.crt
|
||||
...
|
||||
<title>Welcome to nginx!</title>
|
||||
...
|
||||
|
|
@ -291,6 +287,7 @@ $ kubectl describe service my-nginx
|
|||
LoadBalancer Ingress: a320587ffd19711e5a37606cf4a74574-1142138393.us-east-1.elb.amazonaws.com
|
||||
...
|
||||
```
|
||||
|
||||
## Further reading
|
||||
|
||||
Kubernetes also supports Federated Services, which can span multiple
|
||||
|
|
|
|||
|
|
@ -172,7 +172,7 @@ $ kubectl logs --previous nginx-app-zibvs
|
|||
10.240.63.110 - - [14/Jul/2015:01:09:02 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.26.0" "-"
|
||||
```
|
||||
|
||||
See [Logging](/docs/user-guide/logging) for more information.
|
||||
See [Logging Overview](/docs/user-guide/logging/overview) for more information.
|
||||
|
||||
#### docker stop and docker rm
|
||||
|
||||
|
|
|
|||
|
|
@ -1,23 +0,0 @@
|
|||
## Building
|
||||
|
||||
For each container, the build steps are the same. The examples below
|
||||
are for the `show` container. Replace `show` with `backend` for the
|
||||
backend container.
|
||||
|
||||
## Google Container Registry ([GCR](https://cloud.google.com/tools/container-registry/))
|
||||
|
||||
docker build -t gcr.io/<project-name>/show .
|
||||
gcloud docker push gcr.io/<project-name>/show
|
||||
|
||||
## Docker Hub
|
||||
|
||||
docker build -t <username>/show .
|
||||
docker push <username>/show
|
||||
|
||||
## Change Pod Definitions
|
||||
|
||||
Edit both `show-rc.yaml` and `backend-rc.yaml` and replace the
|
||||
specified `image:` with the one that you built.
|
||||
|
||||
|
||||
|
||||
|
|
@ -20,7 +20,7 @@ might also help you create a Federated Kubernetes cluster.
|
|||
|
||||
You should also have a basic
|
||||
[working knowledge of Kubernetes](/docs/getting-started-guides/) in
|
||||
general and [Deployment](/docs/user-guide/deployment.md) in particular.
|
||||
general and [Deployment](/docs/user-guide/deployments) in particular.
|
||||
|
||||
## Overview
|
||||
|
||||
|
|
|
|||
|
|
@ -22,7 +22,7 @@ The following topics in the Kubernetes User Guide can help you run applications
|
|||
1. [Managing deployments](/docs/user-guide/managing-deployments/)
|
||||
1. [Application introspection and debugging](/docs/user-guide/introspection-and-debugging/)
|
||||
1. [Using the Kubernetes web user interface](/docs/user-guide/ui/)
|
||||
1. [Logging](/docs/user-guide/logging/)
|
||||
1. [Logging](/docs/user-guide/logging/overview/)
|
||||
1. [Monitoring](/docs/user-guide/monitoring/)
|
||||
1. [Getting into containers via `exec`](/docs/user-guide/getting-into-containers/)
|
||||
1. [Connecting to containers via proxies](/docs/user-guide/connecting-to-applications-proxy/)
|
||||
|
|
|
|||
|
|
@ -107,7 +107,7 @@ Where `107.178.254.228` is the IP allocated by the Ingress controller to satisfy
|
|||
|
||||
### Simple fanout
|
||||
|
||||
As described previously, pods within kubernetes have ips only visible on the cluster network, so we need something at the edge accepting ingress traffic and proxying it to the right endpoints. This component is usually a highly available loadbalancer/s. An Ingress allows you to keep the number of loadbalancers down to a minimum, for example, a setup like:
|
||||
As described previously, pods within Kubernetes have IPs only visible on the cluster network, so we need something at the edge accepting ingress traffic and proxying it to the right endpoints. This component is usually one or more highly available load balancers. An Ingress allows you to keep the number of load balancers down to a minimum. For example, a setup like:
|
||||
|
||||
```shell
|
||||
foo.bar.com -> 178.91.123.132 -> / foo s1:80
|
||||
|
|
|
|||
|
|
@ -347,7 +347,7 @@ status:
|
|||
|
||||
Learn about additional debugging tools, including:
|
||||
|
||||
* [Logging](/docs/user-guide/logging)
|
||||
* [Logging](/docs/user-guide/logging/overview)
|
||||
* [Monitoring](/docs/user-guide/monitoring)
|
||||
* [Getting into containers via `exec`](/docs/user-guide/getting-into-containers)
|
||||
* [Connecting to containers via proxies](/docs/user-guide/connecting-to-applications-proxy)
|
||||
|
|
|
|||
|
|
@ -84,6 +84,8 @@ The following table includes a list of all the supported resource types and thei
|
|||
Resource type | Abbreviated alias
|
||||
-------------------- | --------------------
|
||||
`clusters` |
|
||||
`clusterrolebindings` |
|
||||
`clusterroles` |
|
||||
`componentstatuses` |`cs`
|
||||
`configmaps` |`cm`
|
||||
`daemonsets` |`ds`
|
||||
|
|
@ -106,6 +108,8 @@ Resource type | Abbreviated alias
|
|||
`replicasets` |`rs`
|
||||
`replicationcontrollers` |`rc`
|
||||
`resourcequotas` |`quota`
|
||||
`rolebindings` |
|
||||
`roles` |
|
||||
`secrets` |
|
||||
`serviceaccounts` |`sa`
|
||||
`services` |`svc`
|
||||
|
|
|
|||
|
|
@ -14,7 +14,7 @@ Modify kubeconfig files using subcommands like "kubectl config set current-conte
|
|||
The loading order follows these rules:
|
||||
|
||||
1. If the --kubeconfig flag is set, then only that file is loaded. The flag may only be set once and no merging takes place.
|
||||
2. If $KUBECONFIG environment variable is set, then it is used a list of paths (normal path delimitting rules for your system). These paths are merged. When a value is modified, it is modified in the file that defines the stanza. When a value is created, it is created in the first file that exists. If no files in the chain exist, then it creates the last file in the list.
|
||||
2. If $KUBECONFIG environment variable is set, then it is used as a list of paths (normal path delimiting rules for your system). These paths are merged. When a value is modified, it is modified in the file that defines the stanza. When a value is created, it is created in the first file that exists. If no files in the chain exist, then it creates the last file in the list.
|
||||
3. Otherwise, ${HOME}/.kube/config is used and no merging takes place.
|
||||
|
||||
```
|
||||
|
|
|
|||
|
|
@ -1,78 +0,0 @@
|
|||
---
|
||||
assignees:
|
||||
- mikedanese
|
||||
- thockin
|
||||
title: Checking Pod Health
|
||||
---
|
||||
|
||||
This example shows two types of pod [health checks](/docs/user-guide/production-pods/#liveness-and-readiness-probes-aka-health-checks): HTTP checks and container execution checks.
|
||||
|
||||
The [exec-liveness.yaml](/docs/user-guide/liveness/exec-liveness.yaml) demonstrates the container execution check.
|
||||
|
||||
{% include code.html language="yaml" file="exec-liveness.yaml" ghlink="/docs/user-guide/liveness/exec-liveness.yaml" %}
|
||||
|
||||
Kubelet executes the command `cat /tmp/health` in the container and reports failure if the command returns a non-zero exit code.
|
||||
|
||||
Note that the container removes the `/tmp/health` file after 10 seconds,
|
||||
|
||||
```shell
|
||||
echo ok > /tmp/health; sleep 10; rm -rf /tmp/health; sleep 600
|
||||
```
|
||||
|
||||
so when Kubelet executes the health check 15 seconds (defined by initialDelaySeconds) after the container started, the check would fail.
|
||||
|
||||
|
||||
The [http-liveness.yaml](/docs/user-guide/liveness/http-liveness.yaml) demonstrates the HTTP check.
|
||||
{% include code.html language="yaml" file="http-liveness.yaml" ghlink="/docs/user-guide/liveness/http-liveness.yaml" %}
|
||||
|
||||
|
||||
The Kubelet sends an HTTP request to the specified path and port to perform the health check. If you take a look at image/server.go, you will see the server starts to respond with an error code 500 after 10 seconds, so the check fails. The Kubelet sends probes to the container's IP address, unless overridden by the optional `host` field in httpGet. If the container listens on `127.0.0.1` and `hostNetwork` is `true` (i.e., it does not use the pod-specific network), then `host` should be specified as `127.0.0.1`. Be warned that, outside of less common cases like that, `host` does probably not result in what you would expect. If you set it to a non-existing hostname (or your competitor's!), probes will never reach the pod, defeating the whole point of health checks. If your pod relies on e.g. virtual hosts, which is probably the more common case, you should not use `host`, but rather set the `Host` header in `httpHeaders`.
|
||||
|
||||
### Using a named port for liveness probes
|
||||
|
||||
You can also use a named `ContainerPort` for HTTP liveness checks.
|
||||
|
||||
The [http-liveness-named-port.yaml](/docs/user-guide/liveness/http-liveness-named-port.yaml) demonstrates the named-port HTTP check.
|
||||
{% include code.html language="yaml" file="http-liveness-named-port.yaml" ghlink="/docs/user-guide/liveness/http-liveness-named-port.yaml" %}
|
||||
|
||||
This [guide](/docs/user-guide/walkthrough/k8s201/#health-checking) has more information on health checks.
|
||||
|
||||
## Get your hands dirty
|
||||
|
||||
To show the health check is actually working, first create the pods:
|
||||
|
||||
```shell
|
||||
$ kubectl create -f docs/user-guide/liveness/exec-liveness.yaml
|
||||
$ kubectl create -f docs/user-guide/liveness/http-liveness.yaml
|
||||
```
|
||||
|
||||
Check the status of the pods once they are created:
|
||||
|
||||
```shell
|
||||
$ kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
[...]
|
||||
liveness-exec 1/1 Running 0 13s
|
||||
liveness-http 1/1 Running 0 13s
|
||||
```
|
||||
|
||||
Check the status half a minute later, you will see the container restart count being incremented:
|
||||
|
||||
```shell
|
||||
$ kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
[...]
|
||||
liveness-exec 1/1 Running 1 36s
|
||||
liveness-http 1/1 Running 1 36s
|
||||
```
|
||||
|
||||
At the bottom of the *kubectl describe* output there are messages indicating that the liveness probes have failed, and the containers have been killed and recreated.
|
||||
|
||||
```shell
|
||||
$ kubectl describe pods liveness-exec
|
||||
[...]
|
||||
Sat, 27 Jun 2015 13:43:03 +0200 Sat, 27 Jun 2015 13:44:34 +0200 4 {kubelet kubernetes-node-6fbi} spec.containers{liveness} unhealthy Liveness probe failed: cat: can't open '/tmp/health': No such file or directory
|
||||
Sat, 27 Jun 2015 13:44:44 +0200 Sat, 27 Jun 2015 13:44:44 +0200 1 {kubelet kubernetes-node-6fbi} spec.containers{liveness} killing Killing with docker id 65b52d62c635
|
||||
Sat, 27 Jun 2015 13:44:44 +0200 Sat, 27 Jun 2015 13:44:44 +0200 1 {kubelet kubernetes-node-6fbi} spec.containers{liveness} created Created with docker id ed6bb004ee10
|
||||
Sat, 27 Jun 2015 13:44:44 +0200 Sat, 27 Jun 2015 13:44:44 +0200 1 {kubelet kubernetes-node-6fbi} spec.containers{liveness} started Started with docker id ed6bb004ee10
|
||||
```
|
||||
|
|
@ -1,40 +0,0 @@
|
|||
# Copyright 2016 The Kubernetes Authors All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Makefile for launching synthetic logging sources (any platform)
|
||||
# and for reporting the forwarding rules for the
|
||||
# Elasticsearch and Kibana pods for the GCE platform.
|
||||
# For examples of how to observe the ingested logs please
|
||||
# see the appropriate getting started guide e.g.
|
||||
# Google Cloud Logging: http://kubernetes.io/docs/getting-started-guides/logging/
|
||||
# With Elasticsearch and Kibana logging: http://kubernetes.io/docs/getting-started-guides/logging-elasticsearch/
|
||||
|
||||
.PHONY: up down logger-up logger-down logger10-up logger10-down
|
||||
|
||||
up: logger-up logger10-up
|
||||
|
||||
down: logger-down logger10-down
|
||||
|
||||
logger-up:
|
||||
kubectl create -f synthetic_0_25lps.yaml
|
||||
|
||||
logger-down:
|
||||
kubectl delete pod synthetic-logger-0.25lps-pod
|
||||
|
||||
logger10-up:
|
||||
kubectl create -f synthetic_10lps.yaml
|
||||
|
||||
logger10-down:
|
||||
kubectl delete pod synthetic-logger-10lps-pod
|
||||
|
||||
|
|
@ -1,3 +0,0 @@
|
|||
assignees:
|
||||
- mikedanese
|
||||
|
||||
|
|
@ -1,12 +0,0 @@
|
|||
This directory contains two [pod](https://kubernetes.io/docs/user-guide/pods) specifications which can be used as synthetic
|
||||
logging sources. The pod specification in [synthetic_0_25lps.yaml](synthetic_0_25lps.yaml)
|
||||
describes a pod that just emits a log message once every 4 seconds. The pod specification in
|
||||
[synthetic_10lps.yaml](synthetic_10lps.yaml)
|
||||
describes a pod that just emits 10 log lines per second.
|
||||
|
||||
See [logging document](https://kubernetes.io/docs/user-guide/logging/) for more details about logging. To observe the ingested log lines when using Google Cloud Logging please see the getting
|
||||
started instructions
|
||||
at [Cluster Level Logging to Google Cloud Logging](https://kubernetes.io/docs/getting-started-guides/logging).
|
||||
To observe the ingested log lines when using Elasticsearch and Kibana please see the getting
|
||||
started instructions
|
||||
at [Cluster Level Logging with Elasticsearch and Kibana](https://kubernetes.io/docs/getting-started-guides/logging-elasticsearch).
|
||||
|
|
@ -1,30 +0,0 @@
|
|||
# This pod specification creates an instance of a synthetic logger. The logger
|
||||
# is simply a program that writes out the hostname of the pod, a count which increments
|
||||
# by one on each iteration (to help notice missing log enteries) and the date using
|
||||
# a long format (RFC-3339) to nano-second precision. This program logs at a frequency
|
||||
# of 0.25 lines per second. The shellscript program is given directly to bash as -c argument
|
||||
# and could have been written out as:
|
||||
# i="0"
|
||||
# while true
|
||||
# do
|
||||
# echo -n "`hostname`: $i: "
|
||||
# date --rfc-3339 ns
|
||||
# sleep 4
|
||||
# i=$[$i+1]
|
||||
# done
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
labels:
|
||||
name: synth-logging-source
|
||||
name: synthetic-logger-0.25lps-pod
|
||||
spec:
|
||||
containers:
|
||||
- name: synth-lgr
|
||||
image: ubuntu:14.04
|
||||
args:
|
||||
- bash
|
||||
- -c
|
||||
- 'i="0"; while true; do echo -n "`hostname`: $i: "; date --rfc-3339 ns; sleep
|
||||
4; i=$[$i+1]; done'
|
||||
|
||||
|
|
@ -1,30 +0,0 @@
|
|||
# This pod specification creates an instance of a synthetic logger. The logger
|
||||
# is simply a program that writes out the hostname of the pod, a count which increments
|
||||
# by one on each iteration (to help notice missing log enteries) and the date using
|
||||
# a long format (RFC-3339) to nano-second precision. This program logs at a frequency
|
||||
# of 0.25 lines per second. The shellscript program is given directly to bash as -c argument
|
||||
# and could have been written out as:
|
||||
# i="0"
|
||||
# while true
|
||||
# do
|
||||
# echo -n "`hostname`: $i: "
|
||||
# date --rfc-3339 ns
|
||||
# sleep 4
|
||||
# i=$[$i+1]
|
||||
# done
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
labels:
|
||||
name: synth-logging-source
|
||||
name: synthetic-logger-10lps-pod
|
||||
spec:
|
||||
containers:
|
||||
- name: synth-lgr
|
||||
image: ubuntu:14.04
|
||||
args:
|
||||
- bash
|
||||
- -c
|
||||
- 'i="0"; while true; do echo -n "`hostname`: $i: "; date --rfc-3339 ns; sleep
|
||||
0.1; i=$[$i+1]; done'
|
||||
|
||||
|
|
@ -1,80 +0,0 @@
|
|||
---
|
||||
assignees:
|
||||
- mikedanese
|
||||
title: Retrieving Logs
|
||||
---
|
||||
|
||||
This page is designed to help you use logs to troubleshoot issues with your Kubernetes solution.
|
||||
|
||||
## Logging by Kubernetes Components
|
||||
|
||||
Kubernetes components, such as kubelet and apiserver, use the [glog](https://godoc.org/github.com/golang/glog) logging library. Developer conventions for logging severity are described in [docs/devel/logging.md](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/docs/devel/logging.md).
|
||||
|
||||
## Examining the logs of running containers
|
||||
|
||||
The logs of a running container may be fetched using the command `kubectl logs`. For example, given
|
||||
this pod specification [counter-pod.yaml](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/blog-logging/counter-pod.yaml), which has a container which writes out some text to standard
|
||||
output every second. (You can find different pod specifications [here](https://github.com/kubernetes/kubernetes.github.io/tree/{{page.docsbranch}}/docs/user-guide/logging-demo/).)
|
||||
|
||||
{% include code.html language="yaml" file="counter-pod.yaml" k8slink="/examples/blog-logging/counter-pod.yaml" %}
|
||||
|
||||
we can run the pod:
|
||||
|
||||
```shell
|
||||
$ kubectl create -f ./counter-pod.yaml
|
||||
pods/counter
|
||||
```
|
||||
|
||||
and then fetch the logs:
|
||||
|
||||
```shell
|
||||
$ kubectl logs counter
|
||||
0: Tue Jun 2 21:37:31 UTC 2015
|
||||
1: Tue Jun 2 21:37:32 UTC 2015
|
||||
2: Tue Jun 2 21:37:33 UTC 2015
|
||||
3: Tue Jun 2 21:37:34 UTC 2015
|
||||
4: Tue Jun 2 21:37:35 UTC 2015
|
||||
5: Tue Jun 2 21:37:36 UTC 2015
|
||||
...
|
||||
```
|
||||
|
||||
If a pod has more than one container then you need to specify which container's log files should
|
||||
be fetched e.g.
|
||||
|
||||
```shell
|
||||
$ kubectl logs kube-dns-v3-7r1l9 etcd
|
||||
2015/06/23 00:43:10 etcdserver: start to snapshot (applied: 30003, lastsnap: 20002)
|
||||
2015/06/23 00:43:10 etcdserver: compacted log at index 30003
|
||||
2015/06/23 00:43:10 etcdserver: saved snapshot at index 30003
|
||||
2015/06/23 02:05:42 etcdserver: start to snapshot (applied: 40004, lastsnap: 30003)
|
||||
2015/06/23 02:05:42 etcdserver: compacted log at index 40004
|
||||
2015/06/23 02:05:42 etcdserver: saved snapshot at index 40004
|
||||
2015/06/23 03:28:31 etcdserver: start to snapshot (applied: 50005, lastsnap: 40004)
|
||||
2015/06/23 03:28:31 etcdserver: compacted log at index 50005
|
||||
2015/06/23 03:28:31 etcdserver: saved snapshot at index 50005
|
||||
2015/06/23 03:28:56 filePurge: successfully removed file default.etcd/member/wal/0000000000000000-0000000000000000.wal
|
||||
2015/06/23 04:51:03 etcdserver: start to snapshot (applied: 60006, lastsnap: 50005)
|
||||
2015/06/23 04:51:03 etcdserver: compacted log at index 60006
|
||||
2015/06/23 04:51:03 etcdserver: saved snapshot at index 60006
|
||||
...
|
||||
```
|
||||
|
||||
## Cluster level logging to Google Cloud Logging
|
||||
|
||||
The getting started guide [Cluster Level Logging to Google Cloud Logging](/docs/getting-started-guides/logging)
|
||||
explains how container logs are ingested into [Google Cloud Logging](https://cloud.google.com/logging/docs/)
|
||||
and shows how to query the ingested logs.
|
||||
|
||||
## Cluster level logging with Elasticsearch and Kibana
|
||||
|
||||
The getting started guide [Cluster Level Logging with Elasticsearch and Kibana](/docs/getting-started-guides/logging-elasticsearch)
|
||||
describes how to ingest cluster level logs into Elasticsearch and view them using Kibana.
|
||||
|
||||
## Ingesting Application Log Files
|
||||
|
||||
Cluster level logging only collects the standard output and standard error output of the applications
|
||||
running in containers. The guide [Collecting log files from within containers with Fluentd and sending them to the Google Cloud Logging service](https://github.com/kubernetes/contrib/blob/master/logging/fluentd-sidecar-gcp/README.md) explains how the log files of applications can also be ingested into Google Cloud logging.
|
||||
|
||||
## Known issues
|
||||
|
||||
Kubernetes does log rotation for Kubernetes components and docker containers. The command `kubectl logs` currently only read the latest logs, not all historical ones.
|
||||
|
|
@ -1,18 +1,18 @@
|
|||
---
|
||||
assignees:
|
||||
- lavalamp
|
||||
- satnam6502
|
||||
- crassirostris
|
||||
- piosz
|
||||
title: Logging with Elasticsearch and Kibana
|
||||
---
|
||||
|
||||
On the Google Compute Engine (GCE) platform, the default logging support targets
|
||||
[Google Cloud Logging](https://cloud.google.com/logging/) as described in the
|
||||
[Logging](/docs/getting-started-guides/logging) getting-started guide. Here we
|
||||
describe how to set up a cluster to ingest logs into
|
||||
[Elasticsearch](https://github.com/elastic/elasticsearch) and view
|
||||
them using [Kibana](https://github.com/elastic/kibana) as an alternative to
|
||||
Google Cloud Logging when running on GCE (note that this will not work as
|
||||
written for Google Container Engine).
|
||||
[Stackdriver Logging](https://cloud.google.com/logging/), which is described in detail
|
||||
in the [Logging With Stackdriver Logging](/docs/user-guide/logging/stackdriver) guide.
|
||||
|
||||
This article describes how to set up a cluster to ingest logs into
|
||||
[Elasticsearch](https://www.elastic.co/products/elasticsearch), and view
|
||||
them using [Kibana](https://www.elastic.co/products/kibana), as an alternative to
|
||||
Stackdriver Logging when running on GCE. Note that Elasticsearch and Kibana do not work with Kubernetes clusters hosted on Google Container Engine.
|
||||
|
||||
To use Elasticsearch and Kibana for cluster logging, you should set the
|
||||
following environment variable as shown below when creating your cluster with
|
||||
|
|
@ -0,0 +1,107 @@
|
|||
---
|
||||
assignees:
|
||||
- crassirostris
|
||||
- piosz
|
||||
title: Logging Overview
|
||||
---
|
||||
|
||||
Application and system logs can help you understand what is happening inside your cluster. The logs are particularly useful for debugging problems and monitoring cluster activity. Most modern applications have some kind of logging mechanism; as such, most container engines are likewise designed to support some kind of logging. The easiest and most widely adopted logging method for containerized applications is to write to the standard output and standard error streams.
|
||||
|
||||
However, the native functionality provided by a container engine or runtime is usually not enough for a complete logging solution. For example, if a container crashes, a pod is evicted, or a node dies, you'll usually still want to access your application's logs. As such, logs should have a separate storage and lifecycle independent of nodes, pods, or containers; this concept is called __cluster-level logging__. Cluster-level logging requires a separate back-end to store, analyze, and query logs. Kubernetes provides no native storage solution for log data, but you can integrate many existing logging solutions into your Kubernetes cluster.
|
||||
|
||||
In this document, you can find:
|
||||
|
||||
* A basic demonstration of logging in Kubernetes using the standard output stream
|
||||
* A detailed description of the node logging architecture in Kubernetes
|
||||
* Guidance for implementing cluster-level logging in Kubernetes
|
||||
|
||||
The guidance for cluster-level logging assumes that a logging back-end is present inside or outside of your cluster. If you're not interested in having cluster-level logging, you might still find the description of how logs are stored and handled on the node to be useful.
|
||||
|
||||
## Basic logging in Kubernetes
|
||||
|
||||
In this section, you can see an example of basic logging in Kubernetes that outputs data to the standard output stream. This demonstration uses a [pod specification](/docs/user-guide/logging/counter-pod.yaml) with a container that writes some text to standard output once per second.
|
||||
|
||||
{% include code.html language="yaml" file="counter-pod.yaml" %}
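If you are reading this outside the rendered site, the referenced `counter-pod.yaml` is roughly the following (a sketch; the image and the exact shell loop are assumptions, the point is simply a container that echoes a counter and the date once per second):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: counter
spec:
  containers:
  - name: count
    image: busybox
    args:
    - /bin/sh
    - -c
    - 'i=0; while true; do echo "$i: $(date)"; i=$((i+1)); sleep 1; done'
```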
|
||||
|
||||
To run this pod, use the following command:
|
||||
|
||||
```shell
|
||||
$ kubectl create -f counter-pod.yaml
|
||||
pod "counter" created
|
||||
```
|
||||
|
||||
To fetch the logs, use the `kubectl logs` command, as follows:
|
||||
|
||||
```shell
|
||||
$ kubectl logs counter
|
||||
0: Tue Jun 2 21:37:31 UTC 2015
|
||||
1: Tue Jun 2 21:37:32 UTC 2015
|
||||
2: Tue Jun 2 21:37:33 UTC 2015
|
||||
3: Tue Jun 2 21:37:34 UTC 2015
|
||||
4: Tue Jun 2 21:37:35 UTC 2015
|
||||
5: Tue Jun 2 21:37:36 UTC 2015
|
||||
...
|
||||
```
|
||||
|
||||
You can use `kubectl logs` to retrieve logs from a previous instantiation of a container with the `--previous` flag, in case the container has crashed. If your pod has multiple containers, you should specify which container's logs you want to access by appending a container name to the command. See the [`kubectl logs` documentation](/docs/user-guide/kubectl/kubectl_logs) for more details.
|
||||
|
||||
## Logging at the node level
|
||||
|
||||

|
||||
|
||||
Everything a containerized application writes to `stdout` and `stderr` is handled and redirected somewhere by a container engine. For example, the Docker container engine redirects those two streams to [a logging driver](https://docs.docker.com/engine/admin/logging/overview), which in Kubernetes is configured to write to a file in JSON format.
|
||||
|
||||
**Note:** The Docker JSON logging driver treats each line as a separate message. When using the Docker logging driver, there is no direct support for multi-line messages; you need to handle them at the logging agent level or higher.
|
||||
|
||||
By default, if a container restarts, kubelet keeps one terminated container with its logs. If a pod is evicted from the node, all corresponding containers are also evicted, along with their logs.
|
||||
|
||||
An important consideration in node-level logging is implementing log rotation, so that logs don't consume all available storage on the node. Kubernetes uses the [`logrotate`](http://www.linuxcommand.org/man_pages/logrotate8.html) tool to implement log rotation.
|
||||
|
||||
Kubernetes performs log rotation daily, or if the log file grows beyond 10MB in size. Each rotation belongs to a single container; if the container repeatedly fails or the pod is evicted, all previous rotations for the container are lost. By default, Kubernetes keeps up to five logging rotations per container.
|
||||
|
||||
The Kubernetes logging configuration differs depending on the node type. For example, you can find detailed information for GCI in the corresponding [configure helper](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/cluster/gce/gci/configure-helper.sh#L96).
|
||||
|
||||
When you run [`kubectl logs`](/docs/user-guide/kubectl/kubectl_logs), as in the basic logging example, the kubelet on the node handles the request and reads directly from the log file, returning the contents in the response. Note that `kubectl logs` **only returns the last rotation**; you must manually extract prior rotations, if desired.
|
||||
|
||||
### System components logs
|
||||
|
||||
Kubernetes system components use a different logging mechanism than the application containers in pods. Components such as `kube-proxy` (among others) use the [glog](https://godoc.org/github.com/golang/glog) logging library. You can find the conventions for logging severity for those components in the [development docs on logging](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/docs/devel/logging.md).
|
||||
|
||||
System components write directly to log files in the `/var/log` directory in the node's host filesystem. Like container logs, system component logs are rotated daily and based on size. However, system component logs have a higher size retention: by default, they store 100MB.
|
||||
|
||||
## Cluster-level logging architectures
|
||||
|
||||
While Kubernetes does not provide a native solution for cluster-level logging, there are several common approaches you can consider:
|
||||
|
||||
* You can use a node-level logging agent that runs on every node.
|
||||
* You can include a dedicated sidecar container for logging in an application pod.
|
||||
* You can push logs directly to a back-end from within an application.
|
||||
|
||||
### Using a node logging agent
|
||||
|
||||

|
||||
|
||||
You can implement cluster-level logging by including a _node-level logging agent_ on each node. The logging agent is a dedicated tool that exposes logs or pushes logs to a back-end. Commonly, the logging agent is a container that has access to a directory with log files from all of the application containers on that node.
|
||||
|
||||
Because the logging agent must run on every node, it's common to implement it as either a DaemonSet replica, a manifest pod, or a dedicated native process on the node. However, the latter two approaches are deprecated and highly discouraged.
|
||||
|
||||
Using a node-level logging agent is the most common and encouraged approach for a Kubernetes cluster, since it creates only one agent per node and it doesn't require any changes to the applications running on the node. However, node-level logging _only works for applications' standard output and standard error_.
|
||||
|
||||
Kubernetes doesn't specify a logging agent, but two optional logging agents are packaged with the Kubernetes release: [Stackdriver Logging](/docs/user-guide/logging/stackdriver) for use with Google Cloud Platform, and [Elasticsearch](/docs/user-guide/logging/elasticsearch). You can find more information and instructions in the dedicated documents. Both use [fluentd](http://www.fluentd.org/) with custom configuration as an agent on the node.
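For illustration, a node-level agent deployed as a DaemonSet might be structured roughly as follows. This is a sketch only: the agent image is a placeholder and the fluentd configuration it would need is omitted; the actual manifests shipped with the Kubernetes release differ.

```yaml
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: logging-agent
  namespace: kube-system
spec:
  template:
    metadata:
      labels:
        app: logging-agent
    spec:
      containers:
      - name: fluentd
        image: fluent/fluentd:latest          # placeholder agent image
        volumeMounts:
        - name: varlog
          mountPath: /var/log
        - name: dockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
      - name: dockercontainers
        hostPath:
          path: /var/lib/docker/containers
```

Because the agent mounts the node's log directories directly, one replica per node is enough to pick up the output of every container on that node.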
|
||||
|
||||
### Using a sidecar container with the logging agent
|
||||
|
||||

|
||||
|
||||
You can implement cluster-level logging by including a dedicated logging agent _for each application_ on your cluster. You can include this logging agent as a "sidecar" container in the pod spec for each application; the sidecar container should contain only the logging agent.
|
||||
|
||||
The concrete implementation of the logging agent, the interface between the agent and the application, and the interface between the logging agent and the logs back-end are completely up to you. For an example implementation, see the [fluentd sidecar container](https://github.com/kubernetes/contrib/tree/b70447aa59ea14468f4cd349760e45b6a0a9b15d/logging/fluentd-sidecar-gcp) for the Stackdriver logging backend.
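A pod using this pattern might be structured roughly as follows (a sketch; the application image, the agent image, and the shared log path are assumptions):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: app-with-logging-sidecar
spec:
  volumes:
  - name: app-logs
    emptyDir: {}                       # shared between the app and the agent
  containers:
  - name: app
    image: example/my-app:1.0          # placeholder application image
    volumeMounts:
    - name: app-logs
      mountPath: /var/log/app          # the app writes its log files here
  - name: logging-agent
    image: fluent/fluentd:latest       # placeholder agent image
    volumeMounts:
    - name: app-logs
      mountPath: /var/log/app
      readOnly: true                   # the agent only reads and ships the files
```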
|
||||
|
||||
**Note:** Using a sidecar container for logging may lead to significant resource consumption.
|
||||
|
||||
### Exposing logs directly from the application
|
||||
|
||||

|
||||
|
||||
You can implement cluster-level logging by exposing or pushing logs directly from every application itself; however, the implementation for such a logging mechanism is outside the scope of Kubernetes.
|
||||
|
||||
|
|
@ -1,14 +1,17 @@
|
|||
---
|
||||
assignees:
|
||||
- lavalamp
|
||||
- satnam6502
|
||||
title: Logging
|
||||
- crassirostris
|
||||
- piosz
|
||||
title: Logging with Stackdriver Logging
|
||||
---
|
||||
|
||||
A Kubernetes cluster will typically be humming along running many system and application pods. How does the system administrator collect, manage and query the logs of the system pods? How does a user query the logs of their application which is composed of many pods which may be restarted or automatically generated by the Kubernetes system? These questions are addressed by the Kubernetes **cluster level logging** services.
|
||||
Before reading this page, it's recommended to familiarize yourself with the [overview of logging in Kubernetes](/docs/user-guide/logging/overview).
|
||||
|
||||
Cluster level logging for Kubernetes allows us to collect logs which persist beyond the lifetime of the pod's container images or the lifetime of the pod or even cluster. In this article we assume that a Kubernetes cluster has been created with cluster level logging support for sending logs to Google Cloud Logging. After a cluster has been created you will have a collection of system pods running in the `kube-system` namespace that support monitoring,
|
||||
logging and DNS resolution for names of Kubernetes services:
|
||||
This article assumes that you have created a Kubernetes cluster with cluster-level logging support for sending logs to Stackdriver Logging. You can do this either by selecting the "Enable Stackdriver Logging" checkbox in the create cluster dialog in [GKE](https://cloud.google.com/container-engine/), or by setting the `KUBE_LOGGING_DESTINATION` flag to `gcp` when manually starting a cluster using `kube-up.sh`.
|
||||
|
||||
## Overview
|
||||
|
||||
After creation, your cluster has a collection of system pods running in the `kube-system` namespace that support monitoring, logging, and DNS resolution for Kubernetes service names. You can see these system pods by running the following command:
|
||||
|
||||
```shell
|
||||
$ kubectl get pods --namespace=kube-system
|
||||
|
|
@ -25,15 +28,14 @@ Here is the same information in a picture which shows how the pods might be plac
|
|||
|
||||

|
||||
|
||||
This diagram shows four nodes created on a Google Compute Engine cluster with the name of each VM node on a purple background. The internal and public IPs of each node are shown on gray boxes and the pods running in each node are shown in green boxes. Each pod box shows the name of the pod and the namespace it runs in, the IP address of the pod and the images which are run as part of the pod's execution. Here we see that every node is running a fluentd-cloud-logging pod which is collecting the log output of the containers running on the same node and sending them to Google Cloud Logging. A pod which provides the
|
||||
This diagram shows four nodes created on a Google Compute Engine cluster with the name of each VM node on a purple background. The internal and public IPs of each node are shown on gray boxes and the pods running in each node are shown in green boxes. Each pod box shows the name of the pod and the namespace it runs in, the IP address of the pod and the images which are run as part of the pod's execution. Here we see that every node is running a fluentd-cloud-logging pod which is collecting the log output of the containers running on the same node and sending them to Stackdriver Logging. A pod which provides the
|
||||
[cluster DNS service](/docs/admin/dns) runs on one of the nodes and a pod which provides monitoring support runs on another node.
|
||||
|
||||
To help explain how cluster level logging works let's start off with a synthetic log generator pod specification [counter-pod.yaml](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/blog-logging/counter-pod.yaml):
|
||||
To help explain how cluster-level logging works, consider the following synthetic log generator pod specification [counter-pod.yaml](/docs/user-guide/logging/counter-pod.yaml):
|
||||
|
||||
{% include code.html language="yaml" file="counter-pod.yaml" k8slink="/examples/blog-logging/counter-pod.yaml" %}
|
||||
{% include code.html language="yaml" file="counter-pod.yaml" %}
|
||||
|
||||
This pod specification has one container which runs a bash script when the container is born. This script simply writes out the value of a counter and the date once per second and runs indefinitely. Let's create the pod in the default
|
||||
namespace.
|
||||
This pod specification has one container which runs a bash script when the container is born. This script simply writes out the value of a counter and the date once per second and runs indefinitely. Let's create the pod in the default namespace.
|
||||
|
||||
```shell
|
||||
$ kubectl create -f examples/blog-logging/counter-pod.yaml
|
||||
|
|
@ -54,7 +56,7 @@ One of the nodes is now running the counter pod:
|
|||
|
||||

|
||||
|
||||
When the pod status changes to `Running` we can use the kubectl logs command to view the output of this counter pod.
|
||||
When the pod status changes to `Running` we can use the `kubectl logs` command to view the output of this counter pod.
|
||||
|
||||
```shell
|
||||
$ kubectl logs counter
|
||||
|
|
@ -79,7 +81,13 @@ root 479 0.0 0.0 4348 812 ? S 00:05 0:00 sleep 1
|
|||
root 480 0.0 0.0 15572 2212 ? R 00:05 0:00 ps aux
|
||||
```
|
||||
|
||||
If, for any reason, the image in this pod is killed off and then restarted by Kubernetes, or the pod was evicted from the node, logs for the container are lost.

Try deleting the currently running counter container:
|
||||
|
||||
```shell
|
||||
$ kubectl delete pod counter
|
||||
|
|
@ -108,15 +116,9 @@ $ kubectl logs counter
|
|||
8: Tue Jun 2 21:51:48 UTC 2015
|
||||
```
|
||||
|
||||
We've lost the log lines from the first invocation of the container in this pod! Ideally, we want to preserve all the log lines from each invocation of each container in the pod. Furthermore, even if the pod is restarted we would still like to preserve all the log lines that were ever emitted by the containers in the pod. But don't fear, this is the functionality provided by cluster level logging in Kubernetes. When a cluster is created, the standard output and standard error output of each container can be ingested using a [Fluentd](http://www.fluentd.org/) agent running on each node into either [Google Cloud Logging](https://cloud.google.com/logging/docs/) or into Elasticsearch and viewed with Kibana.
|
||||
As expected, the log lines from the first invocation of the container in this pod have been lost. However, you'll likely want to preserve all the log lines from each invocation of each container in the pod. Furthermore, even if the pod is restarted, you might still want to preserve all the log lines that were ever emitted by the containers in the pod. This is exactly the functionality provided by cluster-level logging in Kubernetes.
|
||||
|
||||
When a Kubernetes cluster is created with logging to Google Cloud Logging enabled, the system creates a pod called `fluentd-cloud-logging` on each node of the cluster to collect Docker container logs. These pods were shown at the start of this blog article in the response to the first get pods command.
|
||||
|
||||
This log collection pod has a specification which looks something like this:
|
||||
|
||||
{% include code.html language="yaml" file="fluentd-gcp.yaml" k8slink="/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml" %}
|
||||
|
||||
This pod specification maps the directory on the host containing the Docker log files, `/var/lib/docker/containers`, to a directory inside the container which has the same path. The pod runs one image, `gcr.io/google_containers/fluentd-gcp:1.6`, which is configured to collect the Docker log files from the logs directory and ingest them into Google Cloud Logging. One instance of this pod runs on each node of the cluster. Kubernetes will notice if this pod fails and automatically restart it.
|
||||
## Viewing logs
|
||||
|
||||
We can click on the Logs item under the Monitoring section of the Google Developer Console and select the logs for the counter container, which will be called kubernetes.counter_default_count. This identifies the name of the pod (counter), the namespace (default) and the name of the container (count) for which the log collection occurred. Using this name we can select just the logs for our counter container from the drop down menu:
|
||||
|
||||
|
|
@ -128,7 +130,7 @@ When we view the logs in the Developer Console we observe the logs for both invo
|
|||
|
||||
Note the first container counted to 108 and then it was terminated. When the next container image restarted the counting process resumed from 0. Similarly if we deleted the pod and restarted it we would capture the logs for all instances of the containers in the pod whenever the pod was running.
|
||||
|
||||
Logs ingested into Google Cloud Logging may be exported to various other destinations including [Google Cloud Storage](https://cloud.google.com/storage/) buckets and [BigQuery](https://cloud.google.com/bigquery/). Use the Exports tab in the Cloud Logging console to specify where logs should be streamed to. You can also follow this link to the
|
||||
Logs ingested into Stackdriver Logging may be exported to various other destinations including [Google Cloud Storage](https://cloud.google.com/storage/) buckets and [BigQuery](https://cloud.google.com/bigquery/). Use the Exports tab in the Cloud Logging console to specify where logs should be streamed to. You can also follow this link to the
|
||||
[settings tab](https://console.cloud.google.com/project/_/logs/settings).
|
||||
|
||||
We could query the ingested logs from BigQuery using the SQL query which reports the counter log lines showing the newest lines first:
|
||||
|
|
@ -165,6 +167,6 @@ $ cat 21\:00\:00_21\:59\:59_S0.json | jq '.structPayload.log'
|
|||
...
|
||||
```
|
||||
|
||||
This page has touched briefly on the underlying mechanisms that support gathering cluster level logs on a Kubernetes deployment. The approach here only works for gathering the standard output and standard error output of the processes running in the pod's containers. To gather other logs that are stored in files one can use a sidecar container to gather the required files as described at the page [Collecting log files within containers with Fluentd](https://github.com/kubernetes/contrib/blob/master/logging/fluentd-sidecar-gcp/README.md) and sending them to the Google Cloud Logging service.
|
||||
This page has touched briefly on the underlying mechanisms that support gathering cluster-level logs on a Kubernetes deployment. The approach here only works for gathering the standard output and standard error output of the processes running in the pod's containers. To gather other logs that are stored in files one can use a sidecar container to gather the required files as described at the page [Collecting log files within containers with Fluentd](https://github.com/kubernetes/contrib/blob/master/logging/fluentd-sidecar-gcp/README.md) and sending them to the Stackdriver Logging service.
Some of the material in this section also appears in the blog article [Cluster Level Logging with Kubernetes](http://blog.kubernetes.io/2015/06/cluster-level-logging-with-kubernetes.html).
Some of the material in this section also appears in the blog article [Cluster-level Logging with Kubernetes](http://blog.kubernetes.io/2015/06/cluster-level-logging-with-kubernetes.html).
@ -1,8 +1,9 @@
---
assignees:
- erictune

title: Pod Templates
---
Pod templates are [pod](/docs/user-guide/pods/) specifications which are included in other objects, such as
[Replication Controllers](/docs/user-guide/replication-controller/), [Jobs](/docs/user-guide/jobs/), and
[DaemonSets](/docs/admin/daemons/). Controllers use Pod Templates to make actual pods.
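For example, a Job embeds a pod template under its `spec.template` field, and the controller stamps out pods from it. A minimal sketch (the names and image are illustrative):

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: pi
spec:
  template:                # the pod template
    metadata:
      name: pi
    spec:
      containers:
      - name: pi
        image: perl
        command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      restartPolicy: Never
```

Changing a pod template has no effect on pods that a controller has already created; it only affects pods created afterwards.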
@ -1,7 +1,7 @@
---
assignees:
- erictune

title: Init Containers
---
* TOC
@ -220,7 +220,7 @@ The specification of a pre-stop hook is similar to that of probes, but without t
## Termination message
In order to achieve a reasonably high level of availability, especially for actively developed applications, it's important to debug failures quickly. Kubernetes can speed debugging by surfacing causes of fatal errors in a way that can be displayed using [`kubectl`](/docs/user-guide/kubectl/) or the [UI](/docs/user-guide/ui), in addition to general [log collection](/docs/user-guide/logging). It is possible to specify a `terminationMessagePath` where a container will write its 'death rattle', such as assertion failure messages, stack traces, exceptions, and so on. The default path is `/dev/termination-log`.
In order to achieve a reasonably high level of availability, especially for actively developed applications, it's important to debug failures quickly. Kubernetes can speed debugging by surfacing causes of fatal errors in a way that can be displayed using [`kubectl`](/docs/user-guide/kubectl/) or the [UI](/docs/user-guide/ui), in addition to general [log collection](/docs/user-guide/logging/overview). It is possible to specify a `terminationMessagePath` where a container will write its 'death rattle', such as assertion failure messages, stack traces, exceptions, and so on. The default path is `/dev/termination-log`.
Here is a toy example:
@ -37,7 +37,6 @@ their ReplicaSets.
A ReplicaSet ensures that a specified number of pod “replicas” are running at any given
time. However, a Deployment is a higher-level concept that manages ReplicaSets and
provides declarative updates to pods along with a lot of other useful features.
Therefore, we recommend using Deployments instead of directly using ReplicaSets, unless
you require custom update orchestration or don't require updates at all.
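For instance, a minimal Deployment that keeps three nginx replicas running might look like the sketch below (the API group and image tag are assumed for illustration); the embedded pod template is the same structure a bare ReplicaSet would use:

```yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 3
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.10
        ports:
        - containerPort: 80
```

The Deployment creates and manages the underlying ReplicaSet for you, so a rolling update can be expressed by editing this one object.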
@ -113,8 +113,8 @@ metadata:
  name: mysecret
type: Opaque
data:
  password: MWYyZDFlMmU2N2Rm
  username: YWRtaW4=
  password: MWYyZDFlMmU2N2Rm
```
The data field is a map. Its keys must match
@ -142,8 +142,8 @@ Get back the secret created in the previous section:
$ kubectl get secret mysecret -o yaml
apiVersion: v1
data:
  password: MWYyZDFlMmU2N2Rm
  username: YWRtaW4=
  password: MWYyZDFlMmU2N2Rm
kind: Secret
metadata:
  creationTimestamp: 2016-01-22T18:41:56Z
@ -531,6 +531,39 @@ before you can use it__
See the [Quobyte example](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/volumes/quobyte) for more details.
## Using subPath
Sometimes, it is useful to share one volume for multiple uses in a single pod. The `volumeMounts.subPath`
property can be used to specify a sub-path inside the referenced volume instead of its root.

Here is an example of a pod with a LAMP stack (Linux Apache MySQL PHP) using a single, shared volume.
The HTML contents are mapped to its `html` folder, and the databases will be stored in its `mysql` folder:
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: my-lamp-site
spec:
  containers:
  - name: mysql
    image: mysql
    volumeMounts:
    - mountPath: /var/lib/mysql
      name: site-data
      subPath: mysql
  - name: php
    image: php
    volumeMounts:
    - mountPath: /var/www/html
      name: site-data
      subPath: html
  volumes:
  - name: site-data
    persistentVolumeClaim:
      claimName: my-lamp-site-data
```
## Resources
The storage media (Disk, SSD, etc.) of an `emptyDir` volume is determined by the
@ -76,7 +76,7 @@ Kubernetes satisfies a number of common needs of applications running in product
* [load balancing](/docs/user-guide/services/),
* [rolling updates](/docs/user-guide/update-demo/),
* [resource monitoring](/docs/user-guide/monitoring/),
* [log access and ingestion](/docs/user-guide/logging/),
* [log access and ingestion](/docs/user-guide/logging/overview/),
* [support for introspection and debugging](/docs/user-guide/introspection-and-debugging/), and
* [identity and authorization](/docs/admin/authorization/).
@ -0,0 +1 @@
google-site-verification: googlead862a0628bec321.html
Binary file not shown.
After Width: | Height: | Size: 19 KiB
Binary file not shown.
After Width: | Height: | Size: 20 KiB
Binary file not shown.
After Width: | Height: | Size: 37 KiB
Binary file not shown.
After Width: | Height: | Size: 25 KiB
@ -0,0 +1,10 @@
# Put files you want to skip table of contents entry check here:
docs/search.md
docs/sitemap.md
docs/user-guide/pods/_viewing-a-pod.md
docs/user-guide/simple-yaml.md
docs/user-guide/walkthrough/index.md
docs/user-guide/walkthrough/k8s201.md
docs/user-guide/logging-demo/README.md
docs/user-guide/downward-api/README.md
docs/user-guide/configmap/README.md
@ -301,10 +301,6 @@ func TestExampleObjectSchemas(t *testing.T) {
		"namespace": {&api.Namespace{}},
		"valid-pod": {&api.Pod{}},
	},
	"../docs/user-guide/logging-demo": {
		"synthetic_0_25lps": {&api.Pod{}},
		"synthetic_10lps": {&api.Pod{}},
	},
	"../docs/user-guide/node-selection": {
		"pod": {&api.Pod{}},
		"pod-with-node-affinity": {&api.Pod{}},
@ -0,0 +1,22 @@
#!/bin/bash

no_entry=false

# Verify all docs/.../*.md files are referenced in at least one of _data/*.yml
# files. Skip checking files in skip_toc_check.txt
for file in `find docs -name "*.md" -type f`; do
  if ! grep -q "${file}" skip_toc_check.txt; then
    path=${file%.*}
    # abc/index.md should point to abc, not abc/index
    path=${path%%index}
    if ! grep -q "${path}" _data/*.yml; then
      echo "Error: ${file} doesn't have an entry in the table of contents under _data/*.yml"
      no_entry=true
    fi
  fi
done

if ${no_entry}; then
  echo "Found files without entries. For how to fix it, see http://kubernetes.io/docs/contribute/write-new-topic/#creating-an-entry-in-the-table-of-contents"
  exit 1
fi