Merge branch 'master' into enhance-cni
commit 0e5ded3a91
@@ -174,14 +174,7 @@ example. If creating an image for a doc, follow the
section on "Docker images" from the Kubernetes repository.

## Partners

Kubernetes partners refers to the companies who contribute to the Kubernetes core codebase and/or extend their platform to support Kubernetes. Partners can get their logos added to the partner section of the [community page](http://k8s.io/community) by following the below steps and meeting the below logo specifications. Partners will also need to have a URL that is specific to integrating with Kubernetes ready; this URL will be the destination when the logo is clicked.

* The partner product logo should be a transparent png image centered in a 215x125 px frame. (look at the existing logos for reference)
* The logo must link to a URL that is specific to integrating with Kubernetes, hosted on the partner's site.
* The logo should be named *product-name*_logo.png and placed in the `/images/community_logos` folder.
* The image reference (including the link to the partner URL) should be added in `community.html` under `<div class="partner-logos" > ...</div>`.
* Please do not change the order of the existing partner images. Append your logo to the end of the list.
* Once completed and tested the look and feel, submit the pull request.

Kubernetes partners refers to the companies who contribute to the Kubernetes core codebase, extend their platform to support Kubernetes or provide managed services to users centered around the Kubernetes platform. Partners can get their services and offerings added to the [partner page](https://k8s.io/partners) by completing and submitting the [partner request form](https://goo.gl/qcSnZF). Once the information and assets are verified, the partner product/services will be listed in the partner page. This would typically take 7-10 days.

## Thank you!
@@ -14,6 +14,8 @@ toc:
  path: /docs/getting-started-guides/kops/
- title: Hello World on Google Container Engine
  path: /docs/hellonode/
- title: Installing kubectl
  path: /docs/getting-started-guides/kubectl/
- title: Downloading or Building Kubernetes
  path: /docs/getting-started-guides/binary_release/
- title: Online Training Course
@@ -14,6 +14,8 @@ toc:
  path: /docs/contribute/stage-documentation-changes/
- title: Using Page Templates
  path: /docs/contribute/page-templates/
- title: Documentation Style Guide
  path: /docs/contribute/style-guide/

- title: Troubleshooting
  section:
@@ -15,6 +15,14 @@ toc:
  section:
  - title: Using Port Forwarding to Access Applications in a Cluster
    path: /docs/tasks/access-application-cluster/port-forward-access-application-cluster/

- title: Debugging Applications in a Cluster
  section:
  - title: Determining the Reason for Pod Failure
    path: /docs/tasks/debug-application-cluster/determine-reason-pod-failure/

- title: Accessing the Kubernetes API
  section:
  - title: Using an HTTP Proxy to Access the Kubernetes API
@@ -51,3 +51,7 @@ toc:
    path: /docs/tutorials/stateless-application/expose-external-ip-address-service/
  - title: Exposing an External IP Address to Access an Application in a Cluster
    path: /docs/tutorials/stateless-application/expose-external-ip-address/
- title: Stateful Applications
  section:
  - title: Running a Single-Instance Stateful Application
    path: /docs/tutorials/stateful-application/run-stateful-application/
@@ -20,6 +20,8 @@
    <a href="https://calendar.google.com/calendar/embed?src=nt2tcnbtbied3l6gi2h29slvc0%40group.calendar.google.com" class="calendar"><span>Events Calendar</span></a>
  </div>
  <div>
    <a href="//get.k8s.io" class="button">Download K8s</a>
    <a href="https://github.com/kubernetes/kubernetes" class="button">Contribute to the K8s codebase</a>
  </div>
</div>
<div id="miceType" class="center">© {{ 'now' | date: "%Y" }} Kubernetes</div>
@@ -175,6 +175,20 @@
    link: 'https://aporeto.com/trireme',
    blurb: 'Aporeto makes cloud-native applications secure by default without impacting developer velocity and works at any scale, on any cloud.'
  },
  {
    type: 0,
    name: 'Giant Swarm',
    logo: 'giant_swarm',
    link: 'https://giantswarm.io',
    blurb: 'Giant Swarm provides fully-managed Kubernetes Clusters in your location of choice, so you can focus on your product.'
  },
  {
    type: 0,
    name: 'Mirantis',
    logo: 'mirantis',
    link: 'https://content.mirantis.com/Containerizing-OpenStack-on-Kubernetes-Video-Landing-Page.html',
    blurb: 'Mirantis builds and manages private clouds with open source software such as OpenStack, deployed as containers orchestrated by Kubernetes.'
  },
  {
    type: 1,
    name: 'Apprenda',
@@ -237,7 +251,14 @@
    logo: 'skippbox',
    link: 'http://www.skippbox.com/services/',
    blurb: 'Skippbox brings its Kubernetes expertise to help companies embrace Kubernetes on their way to digital transformation. Skippbox offers both professional services and expert training.'
  }
  },
  {
    type: 1,
    name: 'Endocode',
    logo: 'endocode',
    link: 'https://endocode.com/kubernetes/',
    blurb: 'Endocode practices and teaches the open source way. Kernel to cluster - Dev to Ops. We offer Kubernetes trainings, services and support.'
  }
]

var isvContainer = document.getElementById('isvContainer')
@@ -52,8 +52,8 @@
(function(d,c,j){if(!document.getElementById(j)){var pd=d.createElement(c),s;pd.id=j;pd.src=('https:'==document.location.protocol)?'https://polldaddy.com/js/rating/rating.js':'http://i0.poll.fm/js/rating/rating.js';s=document.getElementsByTagName(c)[0];s.parentNode.insertBefore(pd,s);}}(document,'script','pd-rating-js'));
</script>
<a href="" onclick="window.open('https://github.com/kubernetes/kubernetes.github.io/issues/new?title=Issue%20with%20' +
window.location.pathname)" class="button issue">Create Issue</a>
<a href="/editdocs#{{ page.path }}" class="button issue">Edit This Page</a>
window.location.pathname)" class="button issue">Create an Issue</a>
<a href="/editdocs#{{ page.path }}" class="button issue">Edit this Page</a>
{% endif %}
</div>
</section>
@@ -80,6 +80,34 @@
})(window,document,'script','//www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-36037335-10', 'auto');
ga('send', 'pageview');

// hide docs nav area if no nav is present, or if nav only contains a link to the current page
(function () {
  window.addEventListener('DOMContentLoaded', init)

  // play nice with our neighbors
  function init() {
    window.removeEventListener('DOMContentLoaded', init)
    hideNav()
  }

  function hideNav(toc){
    if (!toc) toc = document.querySelector('#docsToc')
    var container = toc.querySelector('.container')

    // container is built dynamically, so it may not be present on the first runloop
    if (container) {
      if (container.childElementCount === 0 || toc.querySelectorAll('a.item').length === 1) {
        toc.style.display = 'none'
        document.getElementById('docsContent').style.width = '100%'
      }
    } else {
      requestAnimationFrame(function () {
        hideNav(toc)
      })
    }
  }
})();
</script>
<!-- Commenting out AnswerDash for now; we need to work on our list of questions/answers/design first
<!-- Start of AnswerDash script <script>var AnswerDash;!function(e,t,n,s,a){if(!t.getElementById(s)){var i,r=t.createElement(n),c=t.getElementsByTagName(n)[0];e[a]||(i=e[a]=function(){i.__oninit.push(arguments)},i.__oninit=[]),r.type="text/javascript",r.async=!0,r.src="https://p1.answerdash.com/answerdash.min.js?siteid=756",r.setAttribute("id",s),c.parentNode.insertBefore(r,c)}}(window,document,"script","answerdash-script","AnswerDash");</script> <!-- End of AnswerDash script -->
@@ -389,6 +389,14 @@ footer
  display: block
  height: 0
  overflow: hidden

  &.button
    background-image: none
    width: auto
    height: auto

    &:hover
      color: $blue

a.twitter
  background-position: 0 0
@@ -874,8 +882,19 @@ dd
img
  max-width: 100%

a
  //font-weight: 700
  text-decoration: underline

a:visited
  color: blueviolet

a.button
  border-radius: 2px
  text-decoration: none

  &:visited
    color: white

a.issue
  margin-left: 20px
@@ -15,7 +15,7 @@ ul, li
ul
  margin: 0
  padding: 0

a
  text-decoration: none
@@ -7,17 +7,20 @@ Add-ons extend the functionality of Kubernetes.

This page lists some of the available add-ons and links to their respective installation instructions.

Add-ons in each section are sorted alphabetically - the ordering does not imply any preferential status.

## Networking and Network Policy

* [Weave Net](https://github.com/weaveworks/weave-kube) provides networking and network policy, will carry on working on both sides of a network partition, and does not require an external database.
* [Calico](http://docs.projectcalico.org/v1.5/getting-started/kubernetes/installation/hosted/) is a secure L3 networking and network policy provider.
* [Calico](http://docs.projectcalico.org/v1.6/getting-started/kubernetes/installation/hosted/) is a secure L3 networking and network policy provider.
* [Canal](https://github.com/tigera/canal/tree/master/k8s-install/kubeadm) unites Flannel and Calico, providing networking and network policy.
* [Flannel](https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel.yml) is an overlay network provider that can be used with Kubernetes.
* [Romana](http://romana.io) is a Layer 3 networking solution for pod networks that also supports the [NetworkPolicy API](/docs/user-guide/networkpolicies/). Kubeadm add-on installation details available [here](https://github.com/romana/romana/tree/master/containerize).
* [Weave Net](https://github.com/weaveworks/weave-kube) provides networking and network policy, will carry on working on both sides of a network partition, and does not require an external database.

## Visualization & Control

* [Weave Scope](https://www.weave.works/documentation/scope-latest-installing/#k8s) is a tool for graphically visualizing your containers, pods, services etc. Use it in conjunction with a [Weave Cloud account](https://cloud.weave.works/) or host the UI yourself.
* [Dashboard](https://github.com/kubernetes/dashboard#kubernetes-dashboard) is a dashboard web interface for Kubernetes.
* [Weave Scope](https://www.weave.works/documentation/scope-latest-installing/#k8s) is a tool for graphically visualizing your containers, pods, services etc. Use it in conjunction with a [Weave Cloud account](https://cloud.weave.works/) or host the UI yourself.

## Legacy Add-ons
@@ -3,6 +3,7 @@ assignees:
- mikedanese
- luxas
- errordeveloper
- jbeda

---
@@ -104,17 +105,16 @@ and `--external-etcd-keyfile` flags.

- `--pod-network-cidr`

By default, `kubeadm init` does not set node CIDR's for pods and allows you to
bring your own networking configuration through a CNI compatible network
controller addon such as [Weave Net](https://github.com/weaveworks/weave-kube),
[Calico](https://github.com/projectcalico/calico-containers/tree/master/docs/cni/kubernetes/manifests/kubeadm)
or [Canal](https://github.com/tigera/canal/tree/master/k8s-install/kubeadm).
If you are using a compatible cloud provider or flannel, you can specify a
subnet to use for each pod on the cluster with the `--pod-network-cidr` flag.
This should be a minimum of a /16 so that kubeadm is able to assign /24 subnets
to each node in the cluster.
For certain networking solutions the Kubernetes master can also play a role in
allocating network ranges (CIDRs) to each node. This includes many cloud providers
and flannel. You can specify a subnet range that will be broken down and handed out
to each node with the `--pod-network-cidr` flag. This should be a minimum of a /16 so
controller-manager is able to assign /24 subnets to each node in the cluster.
If you are using flannel with [this manifest](https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel.yml)
you should use `--pod-network-cidr=10.244.0.0/16`. Most CNI based networking solutions
do not require this flag.
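As a sketch, the flannel case described above passes that CIDR explicitly at init time; most other CNI-based networking solutions can omit it:

```shell
# only needed for flannel (and for cloud providers that allocate node CIDRs)
kubeadm init --pod-network-cidr=10.244.0.0/16
```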

- `--service-cidr` (default '10.12.0.0/12')
- `--service-cidr` (default '10.96.0.0/12')

You can use the `--service-cidr` flag to override the subnet Kubernetes uses to
assign pods IP addresses. If you do, you will also need to update the
@@ -141,7 +141,7 @@ By default, `kubeadm init` automatically generates the token used to initialise
each new node. If you would like to manually specify this token, you can use the
`--token` flag. The token must be of the format `<6 character string>.<16 character string>`.
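The value below is only a hypothetical illustration of that `<6 characters>.<16 characters>` shape; generate your own token rather than reusing it:

```shell
kubeadm init --token=abcdef.0123456789abcdef
```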

- `--use-kubernetes-version` (default 'v1.4.1') the kubernetes version to initialise
- `--use-kubernetes-version` (default 'v1.4.4') the kubernetes version to initialise

`kubeadm` was originally built for Kubernetes version **v1.4.0**, older versions are not
supported. With this flag you can try any future version, e.g. **v1.5.0-beta.1**
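For instance, trying the pre-release mentioned above would look like this sketch:

```shell
kubeadm init --use-kubernetes-version v1.5.0-beta.1
```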

@@ -203,6 +203,27 @@ There are some environment variables that modify the way that `kubeadm` works.
| `KUBE_COMPONENT_LOGLEVEL` | `--v=4` | Logging configuration for all Kubernetes components |

## Releases and release notes

If you already have kubeadm installed and want to upgrade, run `apt-get update && apt-get upgrade` or `yum update` to get the latest version of kubeadm.

- Second release between v1.4 and v1.5: `v1.5.0-alpha.2.421+a6bea3d79b8bba`
  - Switch to the 10.96.0.0/12 subnet: [#35290](https://github.com/kubernetes/kubernetes/pull/35290)
  - Fix kubeadm on AWS by including /etc/ssl/certs in the controller-manager [#33681](https://github.com/kubernetes/kubernetes/pull/33681)
  - The API was refactored and is now componentconfig: [#33728](https://github.com/kubernetes/kubernetes/pull/33728), [#34147](https://github.com/kubernetes/kubernetes/pull/34147) and [#34555](https://github.com/kubernetes/kubernetes/pull/34555)
  - Allow kubeadm to get config options from a file: [#34501](https://github.com/kubernetes/kubernetes/pull/34501), [#34885](https://github.com/kubernetes/kubernetes/pull/34885) and [#34891](https://github.com/kubernetes/kubernetes/pull/34891)
  - Implement preflight checks: [#34341](https://github.com/kubernetes/kubernetes/pull/34341) and [#35843](https://github.com/kubernetes/kubernetes/pull/35843)
  - Using kubernetes v1.4.4 by default: [#34419](https://github.com/kubernetes/kubernetes/pull/34419) and [#35270](https://github.com/kubernetes/kubernetes/pull/35270)
  - Make api and discovery ports configurable and default to 6443: [#34719](https://github.com/kubernetes/kubernetes/pull/34719)
  - Implement kubeadm reset: [#34807](https://github.com/kubernetes/kubernetes/pull/34807)
  - Make kubeadm poll/wait for endpoints instead of directly fail when the master isn't available [#34703](https://github.com/kubernetes/kubernetes/pull/34703) and [#34718](https://github.com/kubernetes/kubernetes/pull/34718)
  - Allow empty directories in the directory preflight check: [#35632](https://github.com/kubernetes/kubernetes/pull/35632)
  - Started adding unit tests: [#35231](https://github.com/kubernetes/kubernetes/pull/35231), [#35326](https://github.com/kubernetes/kubernetes/pull/35326) and [#35332](https://github.com/kubernetes/kubernetes/pull/35332)
  - Various enhancements: [#35075](https://github.com/kubernetes/kubernetes/pull/35075), [#35111](https://github.com/kubernetes/kubernetes/pull/35111), [#35119](https://github.com/kubernetes/kubernetes/pull/35119), [#35124](https://github.com/kubernetes/kubernetes/pull/35124), [#35265](https://github.com/kubernetes/kubernetes/pull/35265) and [#35777](https://github.com/kubernetes/kubernetes/pull/35777)
  - Bug fixes: [#34352](https://github.com/kubernetes/kubernetes/pull/34352), [#34558](https://github.com/kubernetes/kubernetes/pull/34558), [#34573](https://github.com/kubernetes/kubernetes/pull/34573), [#34834](https://github.com/kubernetes/kubernetes/pull/34834), [#34607](https://github.com/kubernetes/kubernetes/pull/34607), [#34907](https://github.com/kubernetes/kubernetes/pull/34907) and [#35796](https://github.com/kubernetes/kubernetes/pull/35796)
- Initial v1.4 release: `v1.5.0-alpha.0.1534+cf7301f16c0363`

## Troubleshooting

* Some users on RHEL/CentOS 7 have reported issues with traffic being routed incorrectly due to iptables being bypassed. You should ensure `net.bridge.bridge-nf-call-iptables` is set to 1 in your sysctl config, eg.
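One way to apply that setting persistently is sketched below (the drop-in file name is illustrative; any sysctl.d file or an `/etc/sysctl.conf` entry works):

```shell
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
```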

@@ -83,7 +83,7 @@ talk to other VMs in your project. This is the same basic model.
Until now this document has talked about containers. In reality, Kubernetes
applies IP addresses at the `Pod` scope - containers within a `Pod` share their
network namespaces - including their IP address. This means that containers
within a `Pod` can all reach each other’s ports on `localhost`. This does imply
within a `Pod` can all reach each other's ports on `localhost`. This does imply
that containers within a `Pod` must coordinate port usage, but this is no
different than processes in a VM. We call this the "IP-per-pod" model. This
is implemented in Docker as a "pod container" which holds the network namespace
@@ -100,8 +100,19 @@ existence or non-existence of host ports.
There are a number of ways that this network model can be implemented. This
document is not an exhaustive study of the various methods, but hopefully serves
as an introduction to various technologies and serves as a jumping-off point.
If some techniques become vastly preferable to others, we might detail them more
here.

The following networking options are sorted alphabetically - the order does not
imply any preferential status.

### Contiv

[Contiv](https://github.com/contiv/netplugin) provides configurable networking (native l3 using BGP, overlay using vxlan, classic l2, or Cisco-SDN/ACI) for various use cases. [Contiv](http://contiv.io) is all open sourced.

### Flannel

[Flannel](https://github.com/coreos/flannel#flannel) is a very simple overlay
network that satisfies the Kubernetes requirements. Many
people have reported success with Flannel and Kubernetes.

### Google Compute Engine (GCE)
@@ -158,29 +169,12 @@ Follow the "With Linux Bridge devices" section of [this very nice
tutorial](http://blog.oddbit.com/2014/08/11/four-ways-to-connect-a-docker/) from
Lars Kellogg-Stedman.

### Weave Net from Weaveworks

[Weave Net](https://www.weave.works/products/weave-net/) is a
resilient and simple to use network for Kubernetes and its hosted applications.
Weave Net runs as a [CNI plug-in](https://www.weave.works/docs/net/latest/cni-plugin/)
or stand-alone. In either version, it doesn’t require any configuration or extra code
to run, and in both cases, the network provides one IP address per pod - as is standard for Kubernetes.

### Flannel

[Flannel](https://github.com/coreos/flannel#flannel) is a very simple overlay
network that satisfies the Kubernetes requirements. It installs in minutes and
should get you up and running if the above techniques are not working. Many
people have reported success with Flannel and Kubernetes.

### OpenVSwitch

[OpenVSwitch](/docs/admin/ovs-networking) is a somewhat more mature but also
complicated way to build an overlay network. This is endorsed by several of the
"Big Shops" for networking.

### Project Calico

[Project Calico](https://github.com/projectcalico/calico-containers/blob/master/docs/cni/kubernetes/README.md) is an open source container networking provider and network policy engine.
@@ -193,9 +187,13 @@ Calico can also be run in policy enforcement mode in conjunction with other netw

[Romana](http://romana.io) is an open source network and security automation solution that lets you deploy Kubernetes without an overlay network. Romana supports Kubernetes [Network Policy](/docs/user-guide/networkpolicies/) to provide isolation across network namespaces.

### Contiv
### Weave Net from Weaveworks

[Contiv](https://github.com/contiv/netplugin) provides configurable networking (native l3 using BGP, overlay using vxlan, classic l2, or Cisco-SDN/ACI) for various use cases. [Contiv](http://contiv.io) is all open sourced.
[Weave Net](https://www.weave.works/products/weave-net/) is a
resilient and simple to use network for Kubernetes and its hosted applications.
Weave Net runs as a [CNI plug-in](https://www.weave.works/docs/net/latest/cni-plugin/)
or stand-alone. In either version, it doesn't require any configuration or extra code
to run, and in both cases, the network provides one IP address per pod - as is standard for Kubernetes.

## Other reading
docs/api.md
@@ -95,46 +95,3 @@ DaemonSets, Deployments, HorizontalPodAutoscalers, Ingress, Jobs and ReplicaSets
Other extensions resources can be enabled by setting runtime-config on
apiserver. runtime-config accepts comma separated values. For ex: to disable deployments and jobs, set
`--runtime-config=extensions/v1beta1/deployments=false,extensions/v1beta1/jobs=false`

## v1beta1, v1beta2, and v1beta3 are deprecated; please move to v1 ASAP

As of June 4, 2015, the Kubernetes v1 API has been enabled by default. The v1beta1 and v1beta2 APIs were deleted on June 1, 2015. v1beta3 is planned to be deleted on July 6, 2015.

### v1 conversion tips (from v1beta3)

We're working to convert all documentation and examples to v1. Use `kubectl create --validate` in order to validate your json or yaml against our Swagger spec.

Changes to services are the most significant difference between v1beta3 and v1.

* The `service.spec.portalIP` property is renamed to `service.spec.clusterIP`.
* The `service.spec.createExternalLoadBalancer` property is removed. Specify `service.spec.type: "LoadBalancer"` to create an external load balancer instead.
* The `service.spec.publicIPs` property is deprecated and now called `service.spec.deprecatedPublicIPs`. This property will be removed entirely when v1beta3 is removed. The vast majority of users of this field were using it to expose services on ports on the node. Those users should specify `service.spec.type: "NodePort"` instead. Read [External Services](/docs/user-guide/services/#external-services) for more info. If this is not sufficient for your use case, please file an issue or contact @thockin.

Some other difference between v1beta3 and v1:

* The `pod.spec.containers[*].privileged` and `pod.spec.containers[*].capabilities` properties are now nested under the `pod.spec.containers[*].securityContext` property. See [Security Contexts](/docs/user-guide/security-context).
* The `pod.spec.host` property is renamed to `pod.spec.nodeName`.
* The `endpoints.subsets[*].addresses.IP` property is renamed to `endpoints.subsets[*].addresses.ip`.
* The `pod.status.containerStatuses[*].state.termination` and `pod.status.containerStatuses[*].lastState.termination` properties are renamed to `pod.status.containerStatuses[*].state.terminated` and `pod.status.containerStatuses[*].lastState.terminated` respectively.
* The `pod.status.Condition` property is renamed to `pod.status.conditions`.
* The `status.details.id` property is renamed to `status.details.name`.

### v1beta3 conversion tips (from v1beta1/2)

Some important differences between v1beta1/2 and v1beta3:

* The resource `id` is now called `name`.
* `name`, `labels`, `annotations`, and other metadata are now nested in a map called `metadata`
* `desiredState` is now called `spec`, and `currentState` is now called `status`
* `/minions` has been moved to `/nodes`, and the resource has kind `Node`
* The namespace is required (for all namespaced resources) and has moved from a URL parameter to the path: `/api/v1beta3/namespaces/{namespace}/{resource_collection}/{resource_name}`. If you were not using a namespace before, use `default` here.
* The names of all resource collections are now lower cased - instead of `replicationControllers`, use `replicationcontrollers`.
* To watch for changes to a resource, open an HTTP or Websocket connection to the collection query and provide the `?watch=true` query parameter along with the desired `resourceVersion` parameter to watch from.
* The `labels` query parameter has been renamed to `labelSelector`.
* The `fields` query parameter has been renamed to `fieldSelector`.
* The container `entrypoint` has been renamed to `command`, and `command` has been renamed to `args`.
* Container, volume, and node resources are expressed as nested maps (e.g., `resources{cpu:1}`) rather than as individual fields, and resource values support [scaling suffixes](/docs/user-guide/compute-resources/#specifying-resource-quantities) rather than fixed scales (e.g., milli-cores).
* Restart policy is represented simply as a string (e.g., `"Always"`) rather than as a nested map (`always{}`).
* Pull policies changed from `PullAlways`, `PullNever`, and `PullIfNotPresent` to `Always`, `Never`, and `IfNotPresent`.
* The volume `source` is inlined into `volume` rather than nested.
* Host volumes have been changed from `hostDir` to `hostPath` to better reflect that they can be files or directories.
@@ -1,6 +1,4 @@
---
redirect_from:
- /editdocs/
---

{% capture overview %}
@@ -46,7 +44,7 @@ choose the
[page type](/docs/contribute/page-templates/)
that is the best fit for your content.

### Submitting a pull request to the master branch
### Submitting a pull request to the master branch (Current Release)

If you want your change to be published in the released version Kubernetes docs,
create a pull request against the master branch of the Kubernetes
@@ -64,7 +62,7 @@ site where you can verify that your changes have rendered correctly.
If needed, revise your pull request by committing changes to your
new branch in your fork.

### Submitting a pull request to the <vnext> branch
### Submitting a pull request to the <vnext> branch (Upcoming Release)

If your documentation change should not be released until the next release of
the Kubernetes product, create a pull request against the <vnext> branch
@@ -0,0 +1,203 @@
---
---

{% capture overview %}
This page gives writing style guidelines for the Kubernetes documentation.
These are guidelines, not rules. Use your best judgment, and feel free to
propose changes to this document in a pull request.

For additional information on creating new content for the Kubernetes
docs, follow the instructions on
[using page templates](/docs/contribute/page-templates/) and
[creating a documentation pull request](/docs/contribute/create-pull-request/).
{% endcapture %}

{% capture body %}

## Documentation formatting standards

### Capitalize API objects

Capitalize the names of API objects. Refer to API objects without saying
"object."

<table>
  <tr><th>Do</th><th>Don't</th></tr>
  <tr><td>The Pod has two Containers.</td><td>The pod has two containers.</td></tr>
  <tr><td>The Deployment is responsible for ...</td><td>The Deployment object is responsible for ...</td></tr>
</table>

### Use angle brackets for placeholders

Use angle brackets for placeholders. Tell the reader what a placeholder
represents.

1. Display information about a pod:

        kubectl describe pod <pod-name>

    where `<pod-name>` is the name of one of your pods.

### Use bold for user interface elements

<table>
  <tr><th>Do</th><th>Don't</th></tr>
  <tr><td>Click <b>Fork</b>.</td><td>Click "Fork".</td></tr>
  <tr><td>Select <b>Other</b>.</td><td>Select 'Other'.</td></tr>
</table>

### Use italics to define or introduce new terms

<table>
  <tr><th>Do</th><th>Don't</th></tr>
  <tr><td>A <i>cluster</i> is a set of nodes ...</td><td>A "cluster" is a set of nodes ...</td></tr>
  <tr><td>These components form the <i>control plane.</i></td><td>These components form the <b>control plane.</b></td></tr>
</table>

### Use code style for filenames, directories, and paths

<table>
  <tr><th>Do</th><th>Don't</th></tr>
  <tr><td>Open the <code>envars.yaml</code> file.</td><td>Open the envars.yaml file.</td></tr>
  <tr><td>Go to the <code>/docs/tutorials</code> directory.</td><td>Go to the /docs/tutorials directory.</td></tr>
  <tr><td>Open the <code>/_data/concepts.yaml</code> file.</td><td>Open the /_data/concepts.yaml file.</td></tr>
</table>

## Code snippet formatting

### Use code style for inline code and commands

For inline code in an HTML document, use the `<code>` tag. In a Markdown
document, use the backtick (`).

<table>
  <tr><th>Do</th><th>Don't</th></tr>
  <tr><td>Set the value of the <code>replicas</code> field in the configuration file.</td><td>Set the value of the "replicas" field in the configuration file.</td></tr>
  <tr><td>The <code>kubectl run</code> command creates a Deployment.</td><td>The "kubectl run" command creates a Deployment.</td></tr>
</table>

### Don't include the command prompt

<table>
  <tr><th>Do</th><th>Don't</th></tr>
  <tr><td>kubectl get pods</td><td>$ kubectl get pods</td></tr>
</table>

### Separate commands from output

Verify that the pod is running on your chosen node:

    kubectl get pods --output=wide

The output is similar to this:

    NAME     READY     STATUS    RESTARTS   AGE    IP           NODE
    nginx    1/1       Running   0          13s    10.200.0.4   worker0

{% comment %}## Kubernetes.io word list

A list of Kubernetes-specific terms and words to be used consistently across the site.

<table>
  <tr><th>Term</th><th>Usage</th></tr>
  <tr><td>TBD</td><td>TBD</td></tr>
</table>{% endcomment %}

## Content best practices

This section contains suggested best practices for clear, concise, and consistent content.

### Use present tense

<table>
  <tr><th>Do</th><th>Don't</th></tr>
  <tr><td>This command starts a proxy.</td><td>This command will start a proxy.</td></tr>
</table>

Exception: Use future or past tense if it is required to convey the correct
meaning.

### Use active voice

<table>
  <tr><th>Do</th><th>Don't</th></tr>
  <tr><td>You can explore the API using a browser.</td><td>The API can be explored using a browser.</td></tr>
  <tr><td>The YAML file specifies the replica count.</td><td>The replica count is specified in the YAML file.</td></tr>
</table>

Exception: Use passive voice if active voice leads to an awkward construction.

### Use simple and direct language

Use simple and direct language. Avoid using unnecessary phrases, such as saying "please."

<table>
  <tr><th>Do</th><th>Don't</th></tr>
  <tr><td>To create a ReplicaSet, ...</td><td>In order to create a ReplicaSet, ...</td></tr>
  <tr><td>See the configuration file.</td><td>Please see the configuration file.</td></tr>
  <tr><td>View the Pods.</td><td>With this next command, we'll view the Pods.</td></tr>
</table>

### Address the reader as "you"

<table>
  <tr><th>Do</th><th>Don't</th></tr>
  <tr><td>You can create a Deployment by ...</td><td>We'll create a Deployment by ...</td></tr>
  <tr><td>In the preceding output, you can see...</td><td>In the preceding output, we can see ...</td></tr>
</table>

## Patterns to avoid

### Avoid using "we"

Using "we" in a sentence can be confusing, because the reader might not know
whether they're part of the "we" you're describing.

<table>
  <tr><th>Do</th><th>Don't</th></tr>
  <tr><td>Version 1.4 includes ...</td><td>In version 1.4, we have added ...</td></tr>
  <tr><td>Kubernetes provides a new feature for ...</td><td>We provide a new feature ...</td></tr>
  <tr><td>This page teaches you how to use pods.</td><td>In this page, we are going to learn about pods.</td></tr>
</table>

### Avoid jargon and idioms

Some readers speak English as a second language. Avoid jargon and idioms to help make their understanding easier.

<table>
  <tr><th>Do</th><th>Don't</th></tr>
  <tr><td>Internally, ...</td><td>Under the hood, ...</td></tr>
  <tr><td>Create a new cluster.</td><td>Turn up a new cluster.</td></tr>
</table>

### Avoid statements about the future

Avoid making promises or giving hints about the future. If you need to talk about
an alpha feature, put the text under a heading that identifies it as alpha
information.

### Avoid statements that will soon be out of date

Avoid words like "currently" and "new." A feature that is new today might not be
considered new in a few months.

<table>
  <tr><th>Do</th><th>Don't</th></tr>
  <tr><td>In version 1.4, ...</td><td>In the current version, ...</td></tr>
  <tr><td>The Federation feature provides ...</td><td>The new Federation feature provides ...</td></tr>
</table>

{% endcapture %}

{% capture whatsnext %}
* Learn about [writing a new topic](/docs/contribute/write-new-topic/).
* Learn about [using page templates](/docs/contribute/page-templates/).
* Learn about [staging your changes](/docs/contribute/stage-documentation-changes/)
* Learn about [creating a pull request](/docs/contribute/create-pull-request/).
{% endcapture %}

{% include templates/concept.md %}
@@ -13,7 +13,7 @@ li>.highlighter-rouge {position:relative; top:3px;}

## Overview

This quickstart shows you how to easily install a secure Kubernetes cluster on machines running Ubuntu 16.04 or CentOS 7.
This quickstart shows you how to easily install a secure Kubernetes cluster on machines running Ubuntu 16.04, CentOS 7 or HypriotOS v1.0.1+.
The installation uses a tool called `kubeadm` which is part of Kubernetes 1.4.

This process works with local VMs, physical servers and/or cloud servers.
@@ -23,7 +23,7 @@ See the full [`kubeadm` reference](/docs/admin/kubeadm) for information on all `

**The `kubeadm` tool is currently in alpha but please try it out and give us [feedback](/docs/getting-started-guides/kubeadm/#feedback)!
Be sure to read the [limitations](#limitations); in particular note that kubeadm doesn't have great support for
automatically configuring cloud providers. Please refer to the specific cloud provider documentation or
automatically configuring cloud providers. Please refer to the specific cloud provider documentation or
use another provisioning system.**

kubeadm assumes you have a set of machines (virtual or real) that are up and running. It is designed
@@ -38,7 +38,7 @@ If you are not constrained, other tools build on kubeadm to give you complete cl

## Prerequisites

1. One or more machines running Ubuntu 16.04, CentOS 7 or HypriotOS v1.0.1
1. One or more machines running Ubuntu 16.04, CentOS 7 or HypriotOS v1.0.1+
1. 1GB or more of RAM per machine (any less will leave little room for your apps)
1. Full network connectivity between all machines in the cluster (public or private network is fine)
@@ -61,6 +61,9 @@ You will install the following packages on all the machines:
You will only need this on the master, but it can be useful to have on the other nodes as well.
* `kubeadm`: the command to bootstrap the cluster.

NOTE: If you already have kubeadm installed, you should do an `apt-get update && apt-get upgrade` or `yum update` to get the latest version of kubeadm.
See the reference doc if you want to read about the different [kubeadm releases](/docs/admin/kubeadm)

For each host in turn:

* SSH into the machine and become `root` if you are not already (for example, run `sudo su -`).
@@ -94,7 +97,7 @@ For each host in turn:

The kubelet is now restarting every few seconds, as it waits in a crashloop for `kubeadm` to tell it what to do.

Note: `setenforce 0` will no longer be necessary on CentOS once [#33555](https://github.com/kubernetes/kubernetes/pull/33555) is included in a released version of `kubeadm`.
Note: Disabling SELinux by running `setenforce 0` is required in order to allow containers to access the host filesystem, which is required by pod networks for example. You have to do this until kubelet can handle SELinux better.

### (2/4) Initializing your master
@@ -103,6 +106,8 @@ All of these components run in pods started by `kubelet`.

Right now you can't run `kubeadm init` twice without tearing down the cluster in between, see [Tear down](#tear-down).

If you try to run `kubeadm init` and your machine is in a state that is incompatible with starting a Kubernetes cluster, `kubeadm` will warn you about things that might not work or it will error out for unsatisfied mandatory requirements.

To initialize the master, pick one of the machines you previously installed `kubelet` and `kubeadm` on, and run:

    # kubeadm init
@@ -110,7 +115,7 @@ To initialize the master, pick one of the machines you previously installed `kub
**Note:** this will autodetect the network interface to advertise the master on as the interface with the default gateway.
If you want to use a different interface, specify `--api-advertise-addresses=<ip-address>` argument to `kubeadm init`.
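For illustration, a sketch of that flag with a hypothetical address standing in for `<ip-address>`:

```shell
# 192.168.1.10 is a hypothetical address of the interface you want the master advertised on
kubeadm init --api-advertise-addresses=192.168.1.10
```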

If you want to use [flannel](https://github.com/coreos/flannel) as the pod network; specify `--pod-network-cidr=10.244.0.0/16` if you're using the daemonset manifest below. _However, please note that this is not required for any other networks, including Weave, which is the recommended pod network._
If you want to use [flannel](https://github.com/coreos/flannel) as the pod network, specify `--pod-network-cidr=10.244.0.0/16` if you're using the daemonset manifest below. _However, please note that this is not required for any other networks besides Flannel._

Please refer to the [kubeadm reference doc](/docs/admin/kubeadm/) if you want to read more about the flags `kubeadm init` provides.

@@ -201,16 +206,27 @@ For example:
A few seconds later, you should notice that running `kubectl get nodes` on the master shows a cluster with as many machines as you created.

### (Optional) Control your cluster from machines other than the master
Note that there currently isn't an out-of-the-box way of connecting to the Master's API Server via `kubectl` from a node. Read issue [#35729](https://github.com/kubernetes/kubernetes/issues/35729) for more details.

### (Optional) Controlling your cluster from machines other than the master

In order to get a kubectl on your laptop for example to talk to your cluster, you need to copy the `KubeConfig` file from your master to your laptop like this:

    # scp root@<master ip>:/etc/kubernetes/admin.conf .
    # kubectl --kubeconfig ./admin.conf get nodes

### (Optional) Connecting to the API Server

If you want to connect to the API Server for viewing the dashboard (note: not deployed by default) from outside the cluster for example, you can use `kubectl proxy`:

    # scp root@<master ip>:/etc/kubernetes/admin.conf .
    # kubectl --kubeconfig ./admin.conf proxy

You can now access the API Server locally at `http://localhost:8001/api/v1`
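As a quick check that the proxy works (a sketch; any API path will do), query it from the same machine:

```shell
# requires the `kubectl proxy` from the previous step to still be running
curl http://localhost:8001/api/v1/nodes
```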

### (Optional) Installing a sample application

As an example, install a sample microservices application, a socks shop, to put your cluster through its paces.
As an example, install a sample microservices application, a socks shop, to put your cluster through its paces. Note that this demo only works on `amd64`.
To learn more about the sample microservices app, see the [GitHub README](https://github.com/microservices-demo/microservices-demo).

    # kubectl create namespace sock-shop
@@ -242,17 +258,11 @@ If there is a firewall, make sure it exposes this port to the internet before yo

* To uninstall the socks shop, run `kubectl delete namespace sock-shop` on the master.

* To undo what `kubeadm` did, simply delete the machines you created for this tutorial, or run the script below and then start over or uninstall the packages.
* To undo what `kubeadm` did, simply run:

    # kubeadm reset

<br>
Reset local state:
<pre><code>systemctl stop kubelet;
docker rm -f -v $(docker ps -q);
find /var/lib/kubelet | xargs -n 1 findmnt -n -t tmpfs -o TARGET -T | uniq | xargs -r umount -v;
rm -r -f /etc/kubernetes /var/lib/kubelet /var/lib/etcd;
</code></pre>
If you wish to start over, run `systemctl start kubelet` followed by `kubeadm init` or `kubeadm join`.
<!-- *syntax-highlighting-hack -->

## Explore other add-ons
@@ -275,19 +285,22 @@ kubeadm deb packages and binaries are built for amd64, arm and arm64, following

deb-packages are released for ARM and ARM 64-bit, but not RPMs (yet, reach out if there's interest).

Anyway, ARM had some issues when making v1.4, see [#32517](https://github.com/kubernetes/kubernetes/pull/32517) [#33485](https://github.com/kubernetes/kubernetes/pull/33485), [#33117](https://github.com/kubernetes/kubernetes/pull/33117) and [#33376](https://github.com/kubernetes/kubernetes/pull/33376).
ARM had some issues when making v1.4, see [#32517](https://github.com/kubernetes/kubernetes/pull/32517) [#33485](https://github.com/kubernetes/kubernetes/pull/33485), [#33117](https://github.com/kubernetes/kubernetes/pull/33117) and [#33376](https://github.com/kubernetes/kubernetes/pull/33376).

However, thanks to the PRs above, `kube-apiserver` works on ARM from the `v1.4.1` release, so make sure you're at least using `v1.4.1` when running on ARM 32-bit

The multiarch flannel daemonset can be installed this way. Make sure you replace `ARCH=amd64` with `ARCH=arm` or `ARCH=arm64` if necessary.
The multiarch flannel daemonset can be installed this way.

    # ARCH=amd64 curl -sSL https://raw.githubusercontent.com/luxas/flannel/update-daemonset/Documentation/kube-flannel.yml | sed "s/amd64/${ARCH}/g" | kubectl create -f -
    # export ARCH=amd64
    # curl -sSL "https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel.yml?raw=true" | sed "s/amd64/${ARCH}/g" | kubectl create -f -

And obviously replace `ARCH=amd64` with `ARCH=arm` or `ARCH=arm64` depending on the platform you're running on.
Replace `ARCH=amd64` with `ARCH=arm` or `ARCH=arm64` depending on the platform you're running on.
Note that the Raspberry Pi 3 is in ARM 32-bit mode, so for RPi 3 you should set `ARCH` to `arm`, not `arm64`.

## Limitations

Please note: `kubeadm` is a work in progress and these limitations will be addressed in due course.
Also you can take a look at the troubleshooting section in the [reference document](/docs/admin/kubeadm/#troubleshooting)

1. The cluster created here doesn't have cloud-provider integrations by default, so for example it doesn't work automatically with (for example) [Load Balancers](/docs/user-guide/load-balancer/) (LBs) or [Persistent Volumes](/docs/user-guide/persistent-volumes/walkthrough/) (PVs).
   To set up kubeadm with CloudProvider integrations (it's experimental, but try), refer to the [kubeadm reference](/docs/admin/kubeadm/) document.
@@ -302,6 +315,15 @@ Please note: `kubeadm` is a work in progress and these limitations will be addre
1. `kubectl logs` is broken with `kubeadm` clusters due to [#22770](https://github.com/kubernetes/kubernetes/issues/22770).

   Workaround: use `docker logs` on the nodes where the containers are running as a workaround.
1. The HostPort functionality does not work with kubeadm because CNI networking is used, see issue [#31307](https://github.com/kubernetes/kubernetes/issues/31307).

   Workaround: use the [NodePort feature of services](/docs/user-guide/services/#type-nodeport) instead, or use HostNetwork.
1. A running `firewalld` service may conflict with kubeadm, so if you want to run `kubeadm`, you should disable `firewalld` until issue [#35535](https://github.com/kubernetes/kubernetes/issues/35535) is resolved.

   Workaround: Disable `firewalld` or configure it to allow Kubernetes the pod and service cidrs.
1. If you see errors like `etcd cluster unavailable or misconfigured`, it's because of high load on the machine which makes the `etcd` container a bit unresponsive (it might miss some requests) and therefore kubelet will restart it. This will get better with `etcd3`.

   Workaround: Set `failureThreshold` in `/etc/kubernetes/manifests/etcd.json` to a larger value.

1. If you are using VirtualBox (directly or via Vagrant), you will need to ensure that `hostname -i` returns a routable IP address (i.e. one on the second network interface, not the first one).
   By default, it doesn't do this and kubelet ends-up using first non-loopback network interface, which is usually NATed.
@@ -0,0 +1,110 @@
---
---

<style>
li>.highlighter-rouge {position:relative; top:3px;}
</style>

## Overview

kubectl is the command line tool you use to interact with Kubernetes clusters.

You should use a version of kubectl that is at least as new as your server.
`kubectl version` will print the server and client versions. Using the same version of kubectl
as your server naturally works; using a newer kubectl than your server also works; but if you use
an older kubectl with a newer server you may see odd validation errors.

## Download a release

Download kubectl from the [official Kubernetes releases](https://console.cloud.google.com/storage/browser/kubernetes-release/release/):

On MacOS:

```shell
wget https://storage.googleapis.com/kubernetes-release/release/v1.4.4/bin/darwin/amd64/kubectl
chmod +x kubectl
mv kubectl /usr/local/bin/kubectl
```

On Linux:

```shell
wget https://storage.googleapis.com/kubernetes-release/release/v1.4.4/bin/linux/amd64/kubectl
chmod +x kubectl
mv kubectl /usr/local/bin/kubectl
```

You may need to `sudo` the `mv`; you can put it anywhere in your `PATH` - some people prefer to install to `~/bin`.
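A sketch of the `~/bin` variant mentioned above (the directory is only a common convention):

```shell
mkdir -p ~/bin
mv kubectl ~/bin/kubectl
export PATH="$PATH:$HOME/bin"   # add this line to your shell profile to make it permanent
```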

## Alternatives

### Download as part of the Google Cloud SDK

kubectl can be installed as part of the Google Cloud SDK:

First install the [Google Cloud SDK](https://cloud.google.com/sdk/).

After Google Cloud SDK installs, run the following command to install `kubectl`:

```shell
gcloud components install kubectl
```

Do check that the version is sufficiently up-to-date using `kubectl version`.

### Install with brew

If you are on MacOS and using brew, you can install with:

```shell
brew install kubectl
```

The homebrew project is independent from kubernetes, so do check that the version is
sufficiently up-to-date using `kubectl version`.

# Enabling shell autocompletion

kubectl includes autocompletion support, which can save a lot of typing!

The completion script itself is generated by kubectl, so you typically just need to invoke it from your profile.

Common examples are provided here, but for more details please consult `kubectl completion -h`

## On Linux, using bash

To add it to your current shell: `source <(kubectl completion bash)`

To add kubectl autocompletion to your profile (so it is automatically loaded in future shells):

```shell
echo "source <(kubectl completion bash)" >> ~/.bashrc
```

## On MacOS, using bash

On MacOS, you will need to install the bash-completion support first:

```shell
brew install bash-completion
```

To add it to your current shell:

```shell
source $(brew --prefix)/etc/bash_completion
source <(kubectl completion bash)
```

To add kubectl autocompletion to your profile (so it is automatically loaded in future shells):

```shell
echo "source $(brew --prefix)/etc/bash_completion" >> ~/.bash_profile
echo "source <(kubectl completion bash)" >> ~/.bash_profile
```

Please note that this only appears to work currently if you install using `brew install kubectl`,
and not if you downloaded kubectl directly.
@@ -81,12 +81,12 @@ to implement one of the above options:

- **Use a network plugin which is called by Kubernetes**
  - Kubernetes supports the [CNI](https://github.com/containernetworking/cni) network plugin interface.
  - There are a number of solutions which provide plugins for Kubernetes:
  - There are a number of solutions which provide plugins for Kubernetes (listed alphabetically):
    - [Calico](http://docs.projectcalico.org/)
    - [Flannel](https://github.com/coreos/flannel)
    - [Calico](https://github.com/projectcalico/calico-containers)
    - [Weave](https://weave.works/)
    - [Romana](http://romana.io/)
    - [Open vSwitch (OVS)](http://openvswitch.org/)
    - [Romana](http://romana.io/)
    - [Weave](http://weave.works/)
    - [More found here](/docs/admin/networking#how-to-achieve-this)
  - You can also write your own.
- **Compile support directly into Kubernetes**
@@ -381,7 +381,7 @@ The minimum version required is [v0.5.6](https://github.com/coreos/rkt/releases/
minimum version required to match rkt v0.5.6 is
[systemd 215](http://lists.freedesktop.org/archives/systemd-devel/2014-July/020903.html).

[rkt metadata service](https://github.com/coreos/rkt/blob/master/Documentation/networking.md) is also required
[rkt metadata service](https://github.com/coreos/rkt/blob/master/Documentation/networking/overview.md) is also required
for rkt networking support. You can start rkt metadata service by using command like
`sudo systemd-run rkt metadata-service`
@@ -65,6 +65,7 @@ export GOVC_DATACENTER='ha-datacenter' # The datacenter to be used by vSphere cl
```

Sample environment

```shell
export GOVC_URL='10.161.236.217'
export GOVC_USERNAME='administrator'
@@ -79,6 +80,7 @@ export GOVC_DATACENTER='Datacenter'
```

Import this VMDK into your vSphere datastore:

```shell
govc import.vmdk kube.vmdk ./kube/
```
@@ -129,7 +129,7 @@ Let’s now stop the container. You can list the docker containers with:
docker ps
```

You should something like see:
You should see something like this:

```shell
CONTAINER ID        IMAGE               COMMAND             NAMES
@@ -5,9 +5,7 @@ assignees:

---

<p>The Kubernetes documentation can help you set up Kubernetes, learn about the system, or get your applications and workloads running on Kubernetes.</p>

<p><a href="/docs/whatisk8s/" class="button">Read the Kubernetes Overview</a></p>
<p>Kubernetes documentation can help you set up Kubernetes, learn about the system, or get your applications and workloads running on Kubernetes. To learn the basics of what Kubernetes is and how it works, read "<a href="/docs/whatisk8s/">What is Kubernetes</a>". </p>

<h2>Interactive Tutorial</h2>
@@ -40,4 +38,4 @@ assignees:

<h2>Tools</h2>

<p>The <a href="/docs/tools/">tools</a> page contains a list of native and third-party tools for Kubernetes.</p>
<p>The <a href="/docs/tools/">tools</a> page contains a list of native and third-party tools for Kubernetes.</p>
@@ -0,0 +1,110 @@
---
---

{% capture overview %}

This page shows how to write and read a Container
termination message.

Termination messages provide a way for containers to write
information about fatal events to a location where it can
be easily retrieved and surfaced by tools like dashboards
and monitoring software. In most cases, information that you
put in a termination message should also be written to
the general
[Kubernetes logs](/docs/user-guide/logging/).

{% endcapture %}


{% capture prerequisites %}

{% include task-tutorial-prereqs.md %}

{% endcapture %}


{% capture steps %}

### Writing and reading a termination message

In this exercise, you create a Pod that runs one container.
The configuration file specifies a command that runs when
the container starts.

{% include code.html language="yaml" file="termination.yaml" ghlink="/docs/tasks/debug-pod-container/termination.yaml" %}

1. Create a Pod based on the YAML configuration file:

        export REPO=https://raw.githubusercontent.com/kubernetes/kubernetes.github.io/master
        kubectl create -f $REPO/docs/tasks/debug-pod-container/termination.yaml

   In the YAML file, in the `command` and `args` fields, you can see that the
   container sleeps for 10 seconds and then writes "Sleep expired" to
   the `/dev/termination-log` file. After the container writes
   the "Sleep expired" message, it terminates.

1. Display information about the Pod:

        kubectl get pod termination-demo

   Repeat the preceding command until the Pod is no longer running.

1. Display detailed information about the Pod:

        kubectl get pod termination-demo --output=yaml

   The output includes the "Sleep expired" message:

        apiVersion: v1
        kind: Pod
        ...
        lastState:
          terminated:
            containerID: ...
            exitCode: 0
            finishedAt: ...
            message: |
              Sleep expired
        ...

1. Use a Go template to filter the output so that it includes
   only the termination message:

   ```
   {% raw %} kubectl get pod termination-demo -o go-template="{{range .status.containerStatuses}}{{.lastState.terminated.message}}{{end}}"{% endraw %}
   ```

### Setting the termination log file

By default, Kubernetes retrieves termination messages from
`/dev/termination-log`. To change this to a different file,
specify a `terminationMessagePath` field for your Container.

For example, suppose your Container writes termination messages to
`/tmp/my-log`, and you want Kubernetes to retrieve those messages.
Set `terminationMessagePath` as shown here:

    apiVersion: v1
    kind: Pod
    metadata:
      name: msg-path-demo
    spec:
      containers:
      - name: msg-path-demo-container
        image: debian
        terminationMessagePath: "/tmp/my-log"
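
Once a container in that Pod writes to `/tmp/my-log` and then exits, the message
should surface in the container status just as it does with the default path.
A rough check, assuming the `msg-path-demo` Pod has already run and its container
actually wrote the file before terminating:

```shell
# Same Go template as above; only the file the kubelet reads has changed.
{% raw %}kubectl get pod msg-path-demo -o go-template="{{range .status.containerStatuses}}{{.lastState.terminated.message}}{{end}}"{% endraw %}
```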

{% endcapture %}


{% capture whatsnext %}

* See the `terminationMessagePath` field in
  [Container](/docs/api-reference/v1/definitions#_v1_container).
* Learn about [retrieving logs](/docs/user-guide/logging/).
* Learn about [Go templates](https://golang.org/pkg/text/template/).

{% endcapture %}


{% include templates/task.md %}

@@ -0,0 +1,10 @@
apiVersion: v1
kind: Pod
metadata:
  name: termination-demo
spec:
  containers:
  - name: termination-demo-container
    image: debian
    command: ["/bin/sh"]
    args: ["-c", "sleep 10 && echo Sleep expired > /dev/termination-log"]

@@ -15,6 +15,10 @@ The Tutorials section of the Kubernetes documentation is a work in progress.

* [Exposing an External IP Address to Access an Application in a Cluster](/docs/tutorials/stateless-application/expose-external-ip-address/)

#### Stateful Applications

* [Running a Single-Instance Stateful Application](/docs/tutorials/stateful-application/run-stateful-application/)

### What's next

If you would like to write a tutorial, see

@@ -0,0 +1,12 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-pv
spec:
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteOnce
  gcePersistentDisk:
    pdName: mysql-disk
    fsType: ext4

@@ -0,0 +1,51 @@
apiVersion: v1
kind: Service
metadata:
  name: mysql
spec:
  ports:
  - port: 3306
  selector:
    app: mysql
  clusterIP: None
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pv-claim
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 20Gi
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: mysql
spec:
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
      - image: mysql:5.6
        name: mysql
        env:
          # Use secret in real usage
        - name: MYSQL_ROOT_PASSWORD
          value: password
        ports:
        - containerPort: 3306
          name: mysql
        volumeMounts:
        - name: mysql-persistent-storage
          mountPath: /var/lib/mysql
      volumes:
      - name: mysql-persistent-storage
        persistentVolumeClaim:
          claimName: mysql-pv-claim

@@ -0,0 +1,220 @@
---
---

{% capture overview %}

This page shows you how to run a single-instance stateful application
in Kubernetes using a PersistentVolume and a Deployment. The
application is MySQL.

{% endcapture %}


{% capture objectives %}

* Create a PersistentVolume referencing a disk in your environment.
* Create a MySQL Deployment.
* Expose MySQL to other pods in the cluster at a known DNS name.

{% endcapture %}


{% capture prerequisites %}

* {% include task-tutorial-prereqs.md %}

* For data persistence, we will create a PersistentVolume that
  references a disk in your environment. See
  [here](/docs/user-guide/persistent-volumes/#types-of-persistent-volumes) for
  the types of environments supported. This tutorial will demonstrate
  `GCEPersistentDisk`, but any type will work. `GCEPersistentDisk`
  volumes only work on Google Compute Engine.

{% endcapture %}


{% capture lessoncontent %}

### Set up a disk in your environment

You can use any type of persistent volume for your stateful app. See
[Types of Persistent Volumes](/docs/user-guide/persistent-volumes/#types-of-persistent-volumes)
for a list of supported environment disks. For Google Compute Engine, run:

```
gcloud compute disks create --size=20GB mysql-disk
```
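
To double-check that the disk exists before moving on, you can list it. This is
a quick sanity check rather than part of the tutorial proper, and the `--filter`
value assumes the disk name used above:

```shell
# Should print a single row for the disk created above.
gcloud compute disks list --filter="name=mysql-disk"
```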

Next create a PersistentVolume that points to the `mysql-disk`
disk just created. Here is a configuration file for a PersistentVolume
that points to the Compute Engine disk above:

{% include code.html language="yaml" file="gce-volume.yaml" ghlink="/docs/tutorials/stateful-application/gce-volume.yaml" %}

Notice that the `pdName: mysql-disk` line matches the name of the disk
in the Compute Engine environment. See
[Persistent Volumes](/docs/user-guide/persistent-volumes/)
for details on writing a PersistentVolume configuration file for other
environments.

Create the persistent volume:

```
kubectl create -f http://k8s.io/docs/tutorials/stateful-application/gce-volume.yaml
```
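
The PersistentVolume should appear almost immediately. A quick way to confirm,
not strictly required by the tutorial:

```shell
# STATUS is typically "Available" until a claim binds to the volume.
kubectl get pv mysql-pv
```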


### Deploy MySQL

You can run a stateful application by creating a Kubernetes Deployment
and connecting it to an existing PersistentVolume using a
PersistentVolumeClaim. For example, this YAML file describes a
Deployment that runs MySQL and references the PersistentVolumeClaim. The file
defines a volume mount for /var/lib/mysql, and then creates a
PersistentVolumeClaim that looks for a 20G volume. This claim is
satisfied by any volume that meets the requirements, in this case, the
volume created above.

Note: The password is defined in the config yaml, and this is insecure. See
[Kubernetes Secrets](/docs/user-guide/secrets/)
for a secure solution.

{% include code.html language="yaml" file="mysql-deployment.yaml" ghlink="/docs/tutorials/stateful-application/mysql-deployment.yaml" %}

1. Deploy the contents of the YAML file:

        kubectl create -f http://k8s.io/docs/tutorials/stateful-application/mysql-deployment.yaml

1. Display information about the Deployment:

        kubectl describe deployment mysql

        Name:                 mysql
        Namespace:            default
        CreationTimestamp:    Tue, 01 Nov 2016 11:18:45 -0700
        Labels:               app=mysql
        Selector:             app=mysql
        Replicas:             1 updated | 1 total | 0 available | 1 unavailable
        StrategyType:         Recreate
        MinReadySeconds:      0
        OldReplicaSets:       <none>
        NewReplicaSet:        mysql-63082529 (1/1 replicas created)
        Events:
          FirstSeen   LastSeen   Count   From                      SubobjectPath   Type     Reason             Message
          ---------   --------   -----   ----                      -------------   -------- ------             -------
          33s         33s        1       {deployment-controller }                  Normal   ScalingReplicaSet  Scaled up replica set mysql-63082529 to 1

1. List the pods created by the Deployment:

        kubectl get pods -l app=mysql

        NAME                   READY     STATUS    RESTARTS   AGE
        mysql-63082529-2z3ki   1/1       Running   0          3m

1. Inspect the PersistentVolume:

        kubectl describe pv mysql-pv

        Name:            mysql-pv
        Labels:          <none>
        Status:          Bound
        Claim:           default/mysql-pv-claim
        Reclaim Policy:  Retain
        Access Modes:    RWO
        Capacity:        20Gi
        Message:
        Source:
            Type:        GCEPersistentDisk (a Persistent Disk resource in Google Compute Engine)
            PDName:      mysql-disk
            FSType:      ext4
            Partition:   0
            ReadOnly:    false
        No events.

1. Inspect the PersistentVolumeClaim:

        kubectl describe pvc mysql-pv-claim

        Name:          mysql-pv-claim
        Namespace:     default
        Status:        Bound
        Volume:        mysql-pv
        Labels:        <none>
        Capacity:      20Gi
        Access Modes:  RWO
        No events.

### Accessing the MySQL instance

The preceding YAML file creates a service that
allows other Pods in the cluster to access the database. The Service option
`clusterIP: None` lets the Service DNS name resolve directly to the
Pod's IP address. This is optimal when you have only one Pod
behind a Service and you don't intend to increase the number of Pods.

Run a MySQL client to connect to the server:

```
kubectl run -it --rm --image=mysql:5.6 mysql-client -- mysql -h mysql -ppassword
```

This command creates a new Pod in the cluster running a MySQL client
and connects it to the server through the Service. If it connects, you
know your stateful MySQL database is up and running.

```
Waiting for pod default/mysql-client-274442439-zyp6i to be running, status is Pending, pod ready: false
If you don't see a command prompt, try pressing enter.

mysql>
```

### Updating

The image or any other part of the Deployment can be updated as usual
with the `kubectl apply` command (see the sketch after this list). Here are
some precautions that are specific to stateful apps:

* Don't scale the app. This setup is for single-instance apps
  only. The underlying PersistentVolume can only be mounted to one
  Pod. For clustered stateful apps, see the
  [StatefulSet documentation](/docs/user-guide/petset/).
* Use `strategy:` `type: Recreate` in the Deployment configuration
  YAML file. This instructs Kubernetes to _not_ use rolling
  updates. Rolling updates will not work, as you cannot have more than
  one Pod running at a time. The `Recreate` strategy will stop the
  first pod before creating a new one with the updated configuration.
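
A minimal sketch of such an update, assuming you keep a local copy of the
Deployment file used above and make your edit there:

```shell
# Fetch the Deployment file, edit it locally (for example, the mysql image tag),
# then apply it. Because of the Recreate strategy, the existing Pod is stopped
# before the updated one is created.
curl -sLO http://k8s.io/docs/tutorials/stateful-application/mysql-deployment.yaml
kubectl apply -f mysql-deployment.yaml
```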

### Deleting a deployment

Delete the deployed objects by name:

```
kubectl delete deployment,svc mysql
kubectl delete pvc mysql-pv-claim
kubectl delete pv mysql-pv
```

Also, if you are using Compute Engine disks:

```
gcloud compute disks delete mysql-disk
```

{% endcapture %}


{% capture whatsnext %}

* Learn more about [Deployment objects](/docs/user-guide/deployments/).
* Learn more about [Deploying applications](/docs/user-guide/deploying-applications/).
* [kubectl run documentation](/docs/user-guide/kubectl/kubectl_run/)
* [Volumes](/docs/user-guide/volumes/) and [Persistent Volumes](/docs/user-guide/persistent-volumes/)

{% endcapture %}

{% include templates/tutorial.md %}

@@ -129,7 +129,7 @@ To use it,
* Write an application atop of the client-go clients. Note that client-go defines its own API objects, so if needed, please import API definitions from client-go rather than from the main repository, e.g., `import "k8s.io/client-go/1.4/pkg/api/v1"` is correct.

The Go client can use the same [kubeconfig file](/docs/user-guide/kubeconfig-file)
as the kubectl CLI does to locate and authenticate to the apiserver. See this [example](https://github.com/kubernetes/client-go/examples/out-of-cluster.go):
as the kubectl CLI does to locate and authenticate to the apiserver. See this [example](https://github.com/kubernetes/client-go/blob/master/examples/out-of-cluster/main.go):

```golang
import (

@@ -183,7 +183,8 @@ From within a pod the recommended ways to connect to API are:
    in any container of the pod can access it. See this [example of using kubectl proxy
    in a pod](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/kubectl-container/).
  - use the Go client library, and create a client using the `client.NewInCluster()` factory.
    This handles locating and authenticating to the apiserver. [example](https://github.com/kubernetes/client-go/examples/in-cluster.go)
    This handles locating and authenticating to the apiserver. See this [example of using Go client
    library in a pod](https://github.com/kubernetes/client-go/blob/master/examples/in-cluster/main.go).

In each case, the credentials of the pod are used to communicate securely with the apiserver.
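
Those pod credentials are ordinary files and environment variables, so you can also
inspect them directly from a shell inside any container. This is shown only to
illustrate what the two approaches above handle for you; the paths are the standard
service account mount:

```shell
# Run inside a container in the pod.
TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
CACERT=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
curl --cacert $CACERT -H "Authorization: Bearer $TOKEN" \
  https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT/api/v1/namespaces/default/pods
```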
@@ -10,9 +10,9 @@ The Kubernetes **Guides** can help you work with various aspects of the Kubernet
* The [Cluster Admin Guide](/docs/admin/) can help you set up and administrate your own Kubernetes cluster.
* The [Developer Guide](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/docs/devel) can help you either write code to directly access the Kubernetes API, or to contribute directly to the Kubernetes project.

## <a name="user-guide-internal"></a>Kuberentes User Guide
## <a name="user-guide-internal"></a>Kubernetes User Guide

The following topics in the Kubernets User Guide can help you run applications and services on a Kubernetes cluster:
The following topics in the Kubernetes User Guide can help you run applications and services on a Kubernetes cluster:

1. [Quick start: launch and expose an application](/docs/user-guide/quick-start/)
1. [Configuring and launching containers: configuring common container parameters](/docs/user-guide/configuring-containers/)

@@ -3,7 +3,7 @@ kind: Pod
metadata:
  name: nginx
  annotations:
    pod.alpha.kubernetes.io/init-containers: '[
    pod.beta.kubernetes.io/init-containers: '[
        {
            "name": "install",
            "image": "busybox",

@@ -173,7 +173,7 @@ on node N if node N has a label with key `failure-domain.beta.kubernetes.io/zone`
such that there is at least one node in the cluster with key `failure-domain.beta.kubernetes.io/zone` and
value V that is running a pod that has a label with key "security" and value "S1".) The pod anti-affinity
rule says that the pod cannot schedule onto a node if that node is already running a pod with label
having key "security" and value "S2". (If the `topologyKey` were `failure-domain.beta.kuberntes.io/zone` then
having key "security" and value "S2". (If the `topologyKey` were `failure-domain.beta.kubernetes.io/zone` then
it would mean that the pod cannot schedule onto a node if that node is in the same zone as a pod with
label having key "security" and value "S2".) See the [design doc](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/podaffinity.md)
for many more examples of pod affinity and anti-affinity, both the `requiredDuringSchedulingIgnoredDuringExecution`

@@ -66,8 +66,8 @@ The possible values for RestartPolicy are `Always`, `OnFailure`, or `Never`. If
Three types of controllers are currently available:

- Use a [`Job`](/docs/user-guide/jobs/) for pods which are expected to terminate (e.g. batch computations).
- Use a [`ReplicationController`](/docs/user-guide/replication-controller/) for pods which are not expected to
  terminate (e.g. web servers).
- Use a [`ReplicationController`](/docs/user-guide/replication-controller/) or [`Deployment`](/docs/user-guide/deployments/)
  for pods which are not expected to terminate (e.g. web servers).
- Use a [`DaemonSet`](/docs/admin/daemons/): Use for pods which need to run 1 per machine because they provide a
  machine-specific system service.
If you are unsure whether to use ReplicationController or Daemon, then see [Daemon Set versus

@@ -94,7 +94,7 @@ of the replicated pods.
kubectl create -f hpa-rs.yaml
```

Alternatively, you can just use the `kubectl autoscale` command to acommplish the same
Alternatively, you can just use the `kubectl autoscale` command to accomplish the same
(and it's easier!)

```shell

@@ -46,7 +46,7 @@ The system adds fields in several ways:

- Some fields are added synchronously with creation of the resource and some are set asynchronously.
  - For example: `metadata.uid` is set synchronously. (Read more about [metadata](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/docs/devel/api-conventions.md#metadata)).
  - For example, `status.hostIP` is set only after the pod has been scheduled. This often happens fast, but you may notice pods which do not have this set yet. This is called Late Initialization. (Read mode about [status](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/docs/devel/api-conventions.md#spec-and-status) and [late initialization](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/docs/devel/api-conventions.md#late-initialization) ).
  - For example, `status.hostIP` is set only after the pod has been scheduled. This often happens fast, but you may notice pods which do not have this set yet. This is called Late Initialization. (Read more about [status](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/docs/devel/api-conventions.md#spec-and-status) and [late initialization](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/docs/devel/api-conventions.md#late-initialization)).
- Some fields are set to default values. Some defaults vary by cluster and some are fixed for the API at a certain version. (Read more about [defaulting](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/docs/devel/api-conventions.md#defaulting)).
  - For example, `spec.containers[0].imagePullPolicy` always defaults to `IfNotPresent` in api v1.
  - For example, `spec.containers[0].resources.limits.cpu` may be defaulted to `100m` on some clusters, to some other value on others, and not defaulted at all on others.

@@ -0,0 +1,42 @@
---
layout: docwithnav
---

<!-- BEGIN: Gotta keep this section JS/HTML because it swaps out content dynamically -->
<p> </p>
<script language="JavaScript">
var forwarding=window.location.hash.replace("#","");
$( document ).ready(function() {
    if(forwarding) {
        $("#generalInstructions").hide();
        $("#continueEdit").show();
        $("#continueEditButton").text("Edit " + forwarding);
        $("#continueEditButton").attr("href", "https://github.com/kubernetes/kubernetes.github.io/edit/master/" + forwarding)
    } else {
        $("#generalInstructions").show();
        $("#continueEdit").hide();
    }
});
</script>
<div id="continueEdit">

<h2>Continue your edit</h2>

<p>Click the link below to edit the page you were just on. When you are done, press "Commit Changes" at the bottom of the screen. This will create a copy of our site on your GitHub account called a "fork." You can make other changes in your fork after it is created, if you want. When you are ready to send us all your changes, go to the index page for your fork and click "New Pull Request" to let us know about it.</p>

<p><a id="continueEditButton" class="button"></a></p>

</div>
<div id="generalInstructions">

<h2>Edit our site in the cloud</h2>

<p>Click the button below to visit the repo for our site. You can then click the "Fork" button in the upper-right area of the screen to create a copy of our site on your GitHub account called a "fork." Make any changes you want in your fork, and when you are ready to send those changes to us, go to the index page for your fork and click "New Pull Request" to let us know about it.</p>

<p><a class="button" href="https://github.com/kubernetes/kubernetes.github.io/">Browse this site's source code</a></p>

</div>
<!-- END: Dynamic section -->


{% include_relative README.md %}

Binary file not shown. (Before: 13 KiB, After: 12 KiB)
Binary file not shown. (After: 5.1 KiB)
Binary file not shown. (After: 9.5 KiB)
Binary file not shown. (After: 17 KiB)

@@ -127,11 +127,11 @@ func validateObject(obj runtime.Object) (errors field.ErrorList) {
            t.Namespace = api.NamespaceDefault
        }
        errors = expvalidation.ValidateDaemonSet(t)
    case *batch.ScheduledJob:
    case *batch.CronJob:
        if t.Namespace == "" {
            t.Namespace = api.NamespaceDefault
        }
        errors = batch_validation.ValidateScheduledJob(t)
        errors = batch_validation.ValidateCronJob(t)
    default:
        errors = field.ErrorList{}
        errors = append(errors, field.InternalError(field.NewPath(""), fmt.Errorf("no validation defined for %#v", obj)))

@@ -242,7 +242,7 @@ func TestExampleObjectSchemas(t *testing.T) {
        "redis-resource-deployment": &extensions.Deployment{},
        "redis-secret-deployment":   &extensions.Deployment{},
        "run-my-nginx":              &extensions.Deployment{},
        "sj":                        &batch.ScheduledJob{},
        "sj":                        &batch.CronJob{},
    },
    "../docs/admin": {
        "daemon": &extensions.DaemonSet{},

@@ -272,7 +272,7 @@ func TestExampleObjectSchemas(t *testing.T) {
    "../docs/user-guide/node-selection": {
        "pod":                    &api.Pod{},
        "pod-with-node-affinity": &api.Pod{},
        "pod-with-pod-affinity":  &api.Pod{},
        "pod-with-pod-affinity":  &api.Pod{},
    },
    "../docs/admin/resourcequota": {
        "best-effort": &api.ResourceQuota{},