Merge remote-tracking branch 'upstream/master' into lperkins/issue-8639-cassandra-pods

This commit is contained in:
lucperkins 2018-07-27 16:03:49 -07:00
commit 5dc61780fe
310 changed files with 53439 additions and 116397 deletions

.gitignore

@ -29,3 +29,6 @@ nohup.out
# Hugo output
public/
# User-specific editorconfig files
.editorconfig


@ -7,16 +7,16 @@ install:
- export PATH=$GOPATH/bin:$PATH
- mkdir -p $HOME/gopath/src/k8s.io
- mv $TRAVIS_BUILD_DIR $HOME/gopath/src/k8s.io/website && cd $HOME/gopath/src/k8s.io/website
# Fetch dependencies for us to run the tests in test/examples_test.go
- go get -t -v k8s.io/website/test
# Make sure we are testing against the correct branch
- pushd $GOPATH/src/k8s.io/kubernetes && git checkout release-1.11 && popd
# Simplified deduplication of dependencies.
# Make sure we are testing against the correct branch
- pushd $GOPATH/src/k8s.io && git clone https://github.com/kubernetes/kubernetes && popd
- pushd $GOPATH/src/k8s.io/kubernetes && git checkout release-1.11 && popd
- cp -L -R $GOPATH/src/k8s.io/kubernetes/vendor/ $GOPATH/src/
- rm -r $GOPATH/src/k8s.io/kubernetes/vendor/
# Fetch additional dependencies to run the tests in examples/examples_test.go
- go get -t -v k8s.io/website/content/en/examples
script:
# TODO(bep)
- go test -v k8s.io/website/test #fixed by https://github.com/kubernetes/website/pull/8388
- go test -v k8s.io/website/content/en/examples
#- ./verify-docs-format.sh


@ -1,5 +1,5 @@
DOCKER = docker
HUGO_VERSION = 0.40.3
HUGO_VERSION = 0.44
DOCKER_IMAGE = kubernetes-hugo
DOCKER_RUN = $(DOCKER) run --rm --interactive --tty --volume $(PWD):/src
@ -10,10 +10,6 @@ help: ## Show this help.
all: build ## Build site with production settings and put deliverables in _site.
sass: # Rebuild the SASS source into CSS
node-sass --output-style compact ./src/sass/styles.sass ./static/css/styles.css
node-sass --output-style compact ./src/sass/case_study_styles.sass ./static/css/case_study_styles.css
build: ## Build site with production settings and put deliverables in _site.
hugo
@ -21,7 +17,7 @@ build-preview: ## Build site with drafts and future posts enabled.
hugo -D -F
serve: ## Boot the development server.
hugo server
hugo server --ignoreCache --disableFastRender
docker-image:
$(DOCKER) build . --tag $(DOCKER_IMAGE) --build-arg HUGO_VERSION=$(HUGO_VERSION)
@ -29,5 +25,5 @@ docker-image:
docker-build:
$(DOCKER_RUN) $(DOCKER_IMAGE) hugo
stage:
docker-serve:
$(DOCKER_RUN) -p 1313:1313 $(DOCKER_IMAGE) hugo server --watch --bind 0.0.0.0

OWNERS

@ -1,19 +1,22 @@
# Reviewers can /lgtm and /approve, but that alone is not sufficient for
# auto-merge without an approver
reviewers:
- zhangxiaoyu-zidif
- xiangpengzhao
- Rajakavitha1
- stewart-yu
- Rajakavitha1
- xiangpengzhao
- zhangxiaoyu-zidif
# Approvers have all the ability of reviewers but their /approve makes
# auto-merge happen if a /lgtm exists, or vice versa, or they can do both
# No need for approvers to also be listed as reviewers
approvers:
- heckj
- bradamant3
- bradtopol
- steveperry-53
- zacharysarah
- chenopis
- kbarnard10
- mistyhacks
- ryanmcginnis
- steveperry-53
- tengqm
- zacharysarah
- zparnold


@ -61,11 +61,25 @@ aliases:
- smarterclayton
- soltysh
- sttts
sig-cluster-lifecycle: #GH: sig-cluster-lifecycle-pr-reviews
sig-cluster-lifecycle-kubeadm-approvers: # Approving changes to kubeadm documentation
- timothysc
- lukemarsden
- luxas
- roberthbailey
- fabriziopandini
- fabriziopandini
sig-cluster-lifecycle-kubeadm-reviewers: # Reviewing kubeadm documentation
- timothysc
- lukemarsden
- luxas
- roberthbailey
- fabriziopandini
- kad
- xiangpengzhao
- stealthybox
- liztio
- chuckha
- detiber
- dixudx
sig-cluster-ops:
- zehicle
- jdumars
@ -83,10 +97,19 @@ aliases:
- spxtr
sig-docs: #Team: documentation; GH: sig-docs-pr-reviews
- bradamant3
- steveperry-53
- zacharysarah
- bradtopol
- heckj
- chenopis
- kbarnard10
- mistyhacks
- rajakavitha1
- ryanmcginnis
- steveperry-53
- stewart-yu
- tengqm
- xiangpengzhao
- zacharysarah
- zhangxiaoyu-zidif
- zparnold
sig-federation: #Team: Federation; e.g. Federated Clusters
- csbell
sig-gcp: #Google Cloud Platform; GH: sig-gcp-pr-reviews


@ -21,10 +21,10 @@ For more information about contributing to the Kubernetes documentation, see:
If you'd like, you can build the Kubernetes docs using Docker. To get started, build the image locally:
```bash
$ make docker-image
make docker-image
# The underlying command:
$ docker build . \
docker build . \
--tag kubernetes-hugo \
--build-arg HUGO_VERSION=0.40.3
```
@ -33,16 +33,19 @@ You can create an image for a different version of Hugo by changing the value of
Once the `kubernetes-hugo` image has been built locally, you can build the site:
```bash
$ make docker-serve
make stage
# The underlying command:
$ docker run \
docker run \
--rm \
--interactive \
--tty \
--volume $(PWD):/src \
kubernetes-hugo:latest \
hugo
-p 1313:1313 \
kubernetes-hugo \
hugo server \
--watch \
--bind 0.0.0.0
```
As when building without using a Docker container, the results of the build will be published to the `public` directory (the default output directory for [Hugo](https://gohugo.io), the static site generator used to build this site).
@ -50,4 +53,4 @@ As when building without using a Docker container, the results of the build will
## Thank you!
Kubernetes thrives on community participation, and we really appreciate your
contributions to our site and our documentation!
contributions to our site and our documentation!


@ -7,7 +7,7 @@ enableRobotsTXT = true
disableKinds = ["taxonomy", "taxonomyTerm"]
ignoreFiles = [ "^OWNERS$", "README.md", "^node_modules$" ]
ignoreFiles = [ "^OWNERS$", "README.md", "^node_modules$", "content/en/docs/doc-contributor-tools" ]
contentDir = "content/en"
@ -34,14 +34,27 @@ blog = "/:section/:year/:month/:day/:slug/"
# Be explicit about the output formats. We (currently) only want an RSS feed for the home page.
[outputs]
home = [ "HTML", "RSS"]
home = [ "HTML", "RSS", "HEADERS" ]
page = [ "HTML"]
section = [ "HTML"]
# Add a "text/netlify" media type for auto-generating the _headers file
[mediaTypes]
[mediaTypes."text/netlify"]
suffix = ""
delimiter = ""
[outputFormats]
[outputFormats.RSS]
baseName = "feed"
# _headers file output (uses the template at layouts/index.headers)
[outputFormats.HEADERS]
mediatype = "text/netlify"
baseName = "_headers"
isPlainText = true
notAlternative = true
[params]
time_format_blog = "Monday, January 02, 2006"
@ -67,6 +80,17 @@ githubbranch = "v1.11.0"
docsbranch = "release-1.11"
url = "https://kubernetes.io"
[params.pushAssets]
css = [
"callouts",
"styles",
"custom-jekyll/tags"
]
js = [
"custom-jekyll/tags",
"script"
]
[[params.versions]]
fullversion = "v1.10.3"
version = "v1.10"
@ -95,7 +119,6 @@ githubbranch = "v1.7.6"
docsbranch = "release-1.7"
url = "https://v1-7.docs.kubernetes.io"
# Language definitions.
[languages]
@ -112,4 +135,3 @@ languageName ="Chinese"
weight = 2
contentDir = "content/cn"


@ -104,7 +104,7 @@ $ kubectl create -f ./secret.yaml
secret "mysecret" created
```
**Encoding note:** The serialized JSON and YAML values of Secret data are encoded as base64 strings. Newlines are not valid within these strings and must be omitted. When using the `base64` utility on Darwin/OS X, users should avoid using the `-b` option to split long lines. Also, Linux users should add the option `-w 0` to the `base64` command, or use the pipeline `base64 | tr -d '\n'` if the `-w` option is not available.
**Encoding note:** The serialized JSON and YAML values of Secret data are encoded as base64 strings. Newlines are not valid within these strings and must be omitted. When using the `base64` utility on Darwin/macOS, users should avoid using the `-b` option to split long lines. Also, Linux users should add the option `-w 0` to the `base64` command, or use the pipeline `base64 | tr -d '\n'` if the `-w` option is not available.
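A quick illustration of the note above (the value `admin` is only a placeholder):
```
# Produce a single-line base64 string for a Secret manifest (placeholder value "admin")
echo -n 'admin' | base64 -w 0          # GNU coreutils (Linux)
echo -n 'admin' | base64 | tr -d '\n'  # portable fallback when -w is unavailable
```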
#### Decoding a Secret


@ -4,7 +4,7 @@ metadata:
name: default-mem-demo-2
spec:
containers:
- name: defalt-mem-demo-2-ctr
- name: default-mem-demo-2-ctr
image: nginx
resources:
limits:


@ -47,6 +47,7 @@ cid: home
<button id="desktopShowVideoButton" onclick="kub.showVideo()">Watch Video</button>
<br>
<br>
<br>
<a href="https://www.lfasiallc.com/events/kubecon-cloudnativecon-china-2018/" button id= "desktopKCButton">Attend KubeCon in Shanghai on Nov. 14-15, 2018</a>
<br>
<br>
@ -121,21 +122,23 @@ cid: home
<h3>Case Studies</h3>
<div id="caseStudiesWrapper">
<div>
<p>Cloud Native at Northwestern Mutual</p>
<a href="/case-studies/northwestern-mutual/">Read more</a>
<p>Pinning Its Past, Present, and Future on Cloud Native</p>
<a href="/case-studies/pinterest">Read more</a>
</div>
<div>
<p>Launching and Scaling Up Experiments, Made Simple</p>
<a href="/case-studies/openai/">Read more</a>
<p>Reinventing the World's Largest Education Company With Kubernetes</p>
<a href="/case-studies/pearson">Read more</a>
</div>
<div>
<p>Supporting Fast Decisioning Applications with Kubernetes</p>
<a href="/case-studies/capital-one">Read more</a>
</div>
<div>
<p>The New York Times: From Print to the Web to Cloud Native</p>
<a href="/case-studies/newyorktimes/">Read more</a>
</div>
<div>
<p>Finding Millions in Potential Savings in a Tough Retail Climate</p>
<a href="/case-studies/nordstrom/">Read more</a>
<p>Driving Banking Innovation with Cloud Native</p>
<a href="/case-studies/ing">Read more</a>
</div>
</div>
<!--<div id="bigSocial">-->
@ -161,8 +164,16 @@ cid: home
<!--</div>-->
<!--</div>-->
<h5 style="text-align: center"><a href="/case-studies/" style="color: #3371E3; font-weight: 400">View all case studies</a></h5>
</main>
</section>
</main>
</section>
<section id="cncf">
<main>
<center>
<p>We are a <a href="https://cncf.io/">CNCF</a> graduated project</p>
</center>
</main>
</section>


@ -8,7 +8,7 @@ At Devoxx Belgium and Devoxx Morocco, Ray Tsang and I showed a Raspberry Pi clus
### Wait! Why the heck build a Raspberry Pi cluster running Kubernetes?&nbsp;
We had two big reasons to build the Pi cluster at Quintor. First of all we wanted to experiment with container technology at scale on real hardware. You can try out container technology using virtual machines, but Kubernetes runs great on on bare metal too. To explore what that'd be like, we built a Raspberry Pi cluster just like we would build a cluster of machines in a production datacenter. This allowed us to understand and simulate how Kubernetes would work when we move it to our data centers.
We had two big reasons to build the Pi cluster at Quintor. First of all we wanted to experiment with container technology at scale on real hardware. You can try out container technology using virtual machines, but Kubernetes runs great on bare metal too. To explore what that'd be like, we built a Raspberry Pi cluster just like we would build a cluster of machines in a production datacenter. This allowed us to understand and simulate how Kubernetes would work when we move it to our data centers.
Secondly, we did not want to blow the budget to do this exploration. And what is cheaper than a Raspberry Pi! If you want to build a cluster comprising many nodes, each node should have a good cost to performance ratio. Our Pi cluster has 20 CPU cores, which is more than many servers, yet cost us less than $400. Additionally, the total power consumption is low and the form factor is small, which is great for these kind of demo systems.


@ -57,7 +57,7 @@ While we could have decreased the “pod startup time” substantially by exclud
### Metrics from Kubernetes 1.2&nbsp;
So what was the result?We run our tests on Google Compute Engine, setting the size of the master VM based on on the size of the Kubernetes cluster. In particular for 1000-node clusters we use a n1-standard-32 VM for the master (32 cores, 120GB RAM).
So what was the result? We run our tests on Google Compute Engine, setting the size of the master VM based on the size of the Kubernetes cluster. In particular, for 1000-node clusters we use an n1-standard-32 VM for the master (32 cores, 120GB RAM).
#### API responsiveness&nbsp;


@ -10,7 +10,7 @@ While Kubernetes is one of the best tools for managing containerized application
For the past several months, several of us from the Kubernetes community have been working to fix this in the [Minikube](http://github.com/kubernetes/minikube) repository on GitHub. Our goal is to build an easy-to-use, high-fidelity Kubernetes distribution that can be run locally on Mac, Linux and Windows workstations and laptops with a single command.
Thanks to lots of help from members of the community, we're proud to announce the official release of Minikube. This release comes with support for [Kubernetes 1.3](https://kubernetes.io/blog/2016/07/kubernetes-1.3-bridging-cloud-native-and-enterprise-workloads), new commands to make interacting with your local cluster easier and experimental drivers for xhyve (on Mac OSX) and KVM (on Linux).
Thanks to lots of help from members of the community, we're proud to announce the official release of Minikube. This release comes with support for [Kubernetes 1.3](https://kubernetes.io/blog/2016/07/kubernetes-1.3-bridging-cloud-native-and-enterprise-workloads), new commands to make interacting with your local cluster easier and experimental drivers for xhyve (on macOS) and KVM (on Linux).
**Using Minikube**
@ -107,8 +107,8 @@ Here's a list of some of the things we're hoping to add to Minikube soon:
- Native hypervisor support for OSX and Windows
- We're planning to remove the dependency on Virtualbox, and integrate with the native hypervisors included in OSX and Windows (Hypervisor.framework and Hyper-v, respectively).
- Native hypervisor support for macOS and Windows
- We're planning to remove the dependency on Virtualbox, and integrate with the native hypervisors included in macOS and Windows (Hypervisor.framework and Hyper-v, respectively).
- Improved support for Kubernetes features
- We're planning to increase the range of supported Kubernetes features, to include things like Ingress.
- Configurable versions of Kubernetes


@ -30,7 +30,7 @@ There is work in progress being done in Kubernetes for image authorization plugi
**Limit Direct Access to Kubernetes Nodes**
You should limit SSH access to Kubernetes nodes, reducing the risk for unauthorized access to host resource. Instead you should ask users to use "kubectl exec", which will provide direct access to the container environment without the ability to access the host.
You can use Kubernetes [Authorization Plugins](http://kubernetes.io/docs/admin/authorization/) to further control user access to resources. This allows defining fine-grained-access control rules for specific namespace, containers and operations.
You can use Kubernetes [Authorization Plugins](http://kubernetes.io/docs/reference/access-authn-authz/authorization/) to further control user access to resources. This allows defining fine-grained-access control rules for specific namespace, containers and operations.
**Create Administrative Boundaries between Resources**
Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in Kubernetes cluster runs in a default namespace, called default. You can create additional namespaces and attach resources and users to them. You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.
@ -205,7 +205,7 @@ Reference [here](http://kubernetes.io/docs/api-reference/v1/definitions/#_v1_pod
In case you are running containers with elevated privileges (--privileged) you should consider using the “DenyEscalatingExec” admission control. This control denies exec and attach commands to pods that run with escalated privileges that allow host access. This includes pods that run as privileged, have access to the host IPC namespace, and have access to the host PID namespace. For more details on admission controls, see the Kubernetes [documentation](http://kubernetes.io/docs/admin/admission-controllers/).
In case you are running containers with elevated privileges (--privileged) you should consider using the “DenyEscalatingExec” admission control. This control denies exec and attach commands to pods that run with escalated privileges that allow host access. This includes pods that run as privileged, have access to the host IPC namespace, and have access to the host PID namespace. For more details on admission controls, see the Kubernetes [documentation](http://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/).


@ -6,7 +6,7 @@ url: /blog/2017/03/Kubernetes-1.6-Multi-User-Multi-Workloads-At-Scale
---
Today we're announcing the release of Kubernetes 1.6.
In this release the communitys focus is on scale and automation, to help you deploy multiple workloads to multiple users on a cluster. We are announcing that 5,000 node clusters are supported. We moved dynamic storage provisioning to _stable_. Role-based access control ([RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)), [kubefed](https://kubernetes.io/docs/tutorials/federation/set-up-cluster-federation-kubefed/), [kubeadm](https://kubernetes.io/docs/getting-started-guides/kubeadm/), and several scheduling features are moving to _beta_. We have also added intelligent defaults throughout to enable greater automation out of the box.
In this release the communitys focus is on scale and automation, to help you deploy multiple workloads to multiple users on a cluster. We are announcing that 5,000 node clusters are supported. We moved dynamic storage provisioning to _stable_. Role-based access control ([RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)), [kubefed](https://kubernetes.io/docs/tutorials/federation/set-up-cluster-federation-kubefed/), [kubeadm](https://kubernetes.io/docs/getting-started-guides/kubeadm/), and several scheduling features are moving to _beta_. We have also added intelligent defaults throughout to enable greater automation out of the box.
**What's New**
@ -14,9 +14,9 @@ In this release the communitys focus is on scale and automation, to help you
For users who want to scale beyond 5,000 nodes or spread across multiple regions or clouds, [federation](https://kubernetes.io/docs/concepts/cluster-administration/federation/) lets you combine multiple Kubernetes clusters and address them through a single API endpoint. In this release, the [kubefed](https://kubernetes.io//docs/tutorials/federation/set-up-cluster-federation-kubefed) command line utility graduated to _beta_ - with improved support for on-premise clusters. kubefed now [automatically configures](https://kubernetes.io//docs/tutorials/federation/set-up-cluster-federation-kubefed.md#kube-dns-configuration) kube-dns on joining clusters and can pass arguments to federated components.
**Security and Setup** : Users concerned with security will find that [RBAC](https://kubernetes.io//docs/admin/authorization/rbac), now _beta_ adds a significant security benefit through more tightly scoped default roles for system components. The default RBAC policies in 1.6 grant scoped permissions to control-plane components, nodes, and controllers. RBAC allows cluster administrators to selectively grant particular users or service accounts fine-grained access to specific resources on a per-namespace basis. RBAC users upgrading from 1.5 to 1.6 should view the guidance [here](https://kubernetes.io//docs/admin/authorization/rbac.md#upgrading-from-15).&nbsp;
**Security and Setup** : Users concerned with security will find that [RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac), now _beta_ adds a significant security benefit through more tightly scoped default roles for system components. The default RBAC policies in 1.6 grant scoped permissions to control-plane components, nodes, and controllers. RBAC allows cluster administrators to selectively grant particular users or service accounts fine-grained access to specific resources on a per-namespace basis. RBAC users upgrading from 1.5 to 1.6 should view the guidance [here](https://kubernetes.io/docs/reference/access-authn-authz/rbac#upgrading-from-1-5).&nbsp;
Users looking for an easy way to provision a secure cluster on physical or cloud servers can use [kubeadm](https://kubernetes.io/docs/getting-started-guides/kubeadm/), which is now _beta_. kubeadm has been enhanced with a set of command line flags and a base feature set that includes RBAC setup, use of the [Bootstrap Token system](http://kubernetes.io/docs/admin/bootstrap-tokens/) and an enhanced [Certificates API](https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/).
Users looking for an easy way to provision a secure cluster on physical or cloud servers can use [kubeadm](https://kubernetes.io/docs/getting-started-guides/kubeadm/), which is now _beta_. kubeadm has been enhanced with a set of command line flags and a base feature set that includes RBAC setup, use of the [Bootstrap Token system](http://kubernetes.io/docs/reference/access-authn-authz/bootstrap-tokens/) and an enhanced [Certificates API](https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/).
**Advanced Scheduling** : This release adds a set of [powerful and versatile scheduling constructs](https://kubernetes.io/docs/user-guide/node-selection/) to give you greater control over how pods are scheduled, including rules to restrict pods to particular nodes in heterogeneous clusters, and rules to spread or pack pods across failure domains such as nodes, racks, and zones.


@ -13,7 +13,7 @@ The focus of this post is to highlight some of the interesting new capabilities
**RBAC vs ABAC**
Currently there are several [authorization mechanisms](https://kubernetes.io/docs/admin/authorization/) available for use with Kubernetes. Authorizers are the mechanisms that decide who is permitted to make what changes to the cluster using the Kubernetes API. This affects things like kubectl, system components, and also certain applications that run in the cluster and manipulate the state of the cluster, like Jenkins with the Kubernetes plugin, or [Helm](https://github.com/kubernetes/helm) that runs in the cluster and uses the Kubernetes API to install applications in the cluster. Out of the available authorization mechanisms, ABAC and RBAC are the mechanisms local to a Kubernetes cluster that allow configurable permissions policies.
Currently there are several [authorization mechanisms](https://kubernetes.io/docs/reference/access-authn-authz/authorization/) available for use with Kubernetes. Authorizers are the mechanisms that decide who is permitted to make what changes to the cluster using the Kubernetes API. This affects things like kubectl, system components, and also certain applications that run in the cluster and manipulate the state of the cluster, like Jenkins with the Kubernetes plugin, or [Helm](https://github.com/kubernetes/helm) that runs in the cluster and uses the Kubernetes API to install applications in the cluster. Out of the available authorization mechanisms, ABAC and RBAC are the mechanisms local to a Kubernetes cluster that allow configurable permissions policies.
ABAC, Attribute Based Access Control, is a powerful concept. However, as implemented in Kubernetes, ABAC is difficult to manage and understand. It requires ssh and root filesystem access on the master VM of the cluster to make authorization policy changes. For permission changes to take effect the cluster API server must be restarted.
@ -42,11 +42,11 @@ A RoleBinding maps a Role to a user or set of users, granting that Role's permis
[![](https://1.bp.blogspot.com/-ixDe91-cnqw/WOa0auxC0mI/AAAAAAAABBs/4LxVsr6shEgTYqUapt5QPISUeuTuztVwwCEw/s640/rbac2.png)](https://1.bp.blogspot.com/-ixDe91-cnqw/WOa0auxC0mI/AAAAAAAABBs/4LxVsr6shEgTYqUapt5QPISUeuTuztVwwCEw/s1600/rbac2.png)
Additionally there are cluster roles and cluster role bindings to consider. Cluster roles and cluster role bindings function like roles and role bindings except they have wider scope. The exact differences and how cluster roles and cluster role bindings interact with roles and role bindings are covered in the [Kubernetes documentation](https://kubernetes.io/docs/admin/authorization/rbac/#rolebinding-and-clusterrolebinding).
Additionally there are cluster roles and cluster role bindings to consider. Cluster roles and cluster role bindings function like roles and role bindings except they have wider scope. The exact differences and how cluster roles and cluster role bindings interact with roles and role bindings are covered in the [Kubernetes documentation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#rolebinding-and-clusterrolebinding).
**RBAC in Kubernetes**
RBAC is now deeply integrated into Kubernetes and used by the system components to grant the permissions necessary for them to function. [System roles](https://kubernetes.io/docs/admin/authorization/rbac/#default-roles-and-role-bindings) are typically prefixed with system: so they can be easily recognized.
RBAC is now deeply integrated into Kubernetes and used by the system components to grant the permissions necessary for them to function. [System roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#default-roles-and-role-bindings) are typically prefixed with system: so they can be easily recognized.
```
@ -76,7 +76,7 @@ system:controller:certificate-controller ClusterRole.v1beta1.rbac.authorization.
The RBAC system roles have been expanded to cover the necessary permissions for running a Kubernetes cluster with RBAC only.
During the permission translation from ABAC to RBAC, some of the permissions that were enabled by default in many deployments of ABAC authorized clusters were identified as unnecessarily broad and were [scoped down](https://kubernetes.io/docs/admin/authorization/rbac/#upgrading-from-15) in RBAC. The area most likely to impact workloads on a cluster is the permissions available to service accounts. With the permissive ABAC configuration, requests from a pod using the pod mounted token to authenticate to the API server have broad authorization. As a concrete example, the curl command at the end of this sequence will return a JSON formatted result when ABAC is enabled and an error when only RBAC is enabled.
During the permission translation from ABAC to RBAC, some of the permissions that were enabled by default in many deployments of ABAC authorized clusters were identified as unnecessarily broad and were [scoped down](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#upgrading-from-1-5) in RBAC. The area most likely to impact workloads on a cluster is the permissions available to service accounts. With the permissive ABAC configuration, requests from a pod using the pod mounted token to authenticate to the API server have broad authorization. As a concrete example, the curl command at the end of this sequence will return a JSON formatted result when ABAC is enabled and an error when only RBAC is enabled.
```
@ -96,13 +96,13 @@ During the permission translation from ABAC to RBAC, some of the permissions tha
Any applications you run in your Kubernetes cluster that interact with the Kubernetes API have the potential to be affected by the permissions changes when transitioning from ABAC to RBAC.
To smooth the transition from ABAC to RBAC, you can create Kubernetes 1.6 clusters with both [ABAC and RBAC authorizers](https://kubernetes.io/docs/admin/authorization/rbac/#parallel-authorizers) enabled. When both ABAC and RBAC are enabled, authorization for a resource is granted if either authorization policy grants access. However, under that configuration the most permissive authorizer is used and it will not be possible to use RBAC to fully control permissions.
To smooth the transition from ABAC to RBAC, you can create Kubernetes 1.6 clusters with both [ABAC and RBAC authorizers](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#parallel-authorizers) enabled. When both ABAC and RBAC are enabled, authorization for a resource is granted if either authorization policy grants access. However, under that configuration the most permissive authorizer is used and it will not be possible to use RBAC to fully control permissions.
At this point, RBAC is complete enough that ABAC support should be considered deprecated going forward. It will still remain in Kubernetes for the foreseeable future but development attention is focused on RBAC.
Two different talks at the at the Google Cloud Next conference touched on RBAC related changes in Kubernetes 1.6, jump to the relevant parts [here](https://www.youtube.com/watch?v=Cd4JU7qzYbE#t=8m01s) and [here](https://www.youtube.com/watch?v=18P7cFc6nTU#t=41m06s). For more detailed information about using RBAC in Kubernetes 1.6 read the full [RBAC documentation](https://kubernetes.io/docs/admin/authorization/rbac/).
Two different talks at the at the Google Cloud Next conference touched on RBAC related changes in Kubernetes 1.6, jump to the relevant parts [here](https://www.youtube.com/watch?v=Cd4JU7qzYbE#t=8m01s) and [here](https://www.youtube.com/watch?v=18P7cFc6nTU#t=41m06s). For more detailed information about using RBAC in Kubernetes 1.6 read the full [RBAC documentation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/).
**Get Involved**


@ -16,7 +16,7 @@ Also, for power users, API aggregation in this release allows user-provided apis
Security:
- [The Network Policy API](https://kubernetes.io/docs/concepts/services-networking/network-policies/) is promoted to stable. Network policy, implemented through a network plug-in, allows users to set and enforce rules governing which pods can communicate with each other.&nbsp;
- [Node authorizer](https://kubernetes.io/docs/admin/authorization/node/) and admission control plugin are new additions that restrict kubelet's access to secrets, pods and other objects based on its node.
- [Node authorizer](https://kubernetes.io/docs/reference/access-authn-authz/node/) and admission control plugin are new additions that restrict kubelet's access to secrets, pods and other objects based on its node.
- [Encryption for Secrets](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/), and other resources in etcd, is now available as alpha.&nbsp;
- [Kubelet TLS bootstrapping](https://kubernetes.io/docs/admin/kubelet-tls-bootstrapping/) now supports client and server certificate rotation.
- [Audit logs](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/) stored by the API server are now more customizable and extensible with support for event filtering and webhooks. They also provide richer data for system audit.
@ -36,7 +36,7 @@ Extensibility:
Additional Features:
- Alpha support for [external admission controllers](https://kubernetes.io/docs/admin/extensible-admission-controllers/) is introduced, providing two options for adding custom business logic to the API server for modifying objects as they are created and validating policy.&nbsp;
- Alpha support for [external admission controllers](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/) is introduced, providing two options for adding custom business logic to the API server for modifying objects as they are created and validating policy.&nbsp;
- [Policy-based Federated Resource Placement](https://kubernetes.io/docs/tasks/federation/set-up-placement-policies-federation/) is introduced as Alpha providing placement policies for the federated clusters, based on custom requirements such as regulation, pricing or performance.
Deprecation:&nbsp;


@ -12,7 +12,7 @@ Were pleased to announce the delivery of Kubernetes 1.8, our third release th
## Spotlight on security
Kubernetes 1.8 graduates support for [role based access control](https://en.wikipedia.org/wiki/Role-based_access_control) (RBAC) to stable. RBAC allows cluster administrators to [dynamically define roles](https://kubernetes.io/docs/admin/authorization/rbac/) to enforce access policies through the Kubernetes API. Beta support for filtering outbound traffic through [network policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) augments existing support for filtering inbound traffic to a pod. RBAC and Network Policies are two powerful tools for enforcing organizational and regulatory security requirements within Kubernetes.
Kubernetes 1.8 graduates support for [role based access control](https://en.wikipedia.org/wiki/Role-based_access_control) (RBAC) to stable. RBAC allows cluster administrators to [dynamically define roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) to enforce access policies through the Kubernetes API. Beta support for filtering outbound traffic through [network policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) augments existing support for filtering inbound traffic to a pod. RBAC and Network Policies are two powerful tools for enforcing organizational and regulatory security requirements within Kubernetes.
Transport Layer Security (TLS) [certificate rotation](https://kubernetes.io/docs/admin/kubelet-tls-bootstrapping/) for the Kubelet graduates to beta. Automatic certificate rotation eases secure cluster operation.


@ -6,7 +6,7 @@ url: /blog/2017/10/Using-Rbac-Generally-Available-18
---
**_Editor's note: this post is part of a [series of in-depth articles](https://kubernetes.io/blog/2017/10/five-days-of-kubernetes-18) on what's new in Kubernetes 1.8. Today's post comes from Eric Chiang, software engineer, CoreOS, and SIG-Auth co-lead._**
Kubernetes 1.8 represents a significant milestone for the [role-based access control (RBAC) authorizer](https://kubernetes.io/docs/admin/authorization/rbac/), which was promoted to GA in this release. RBAC is a mechanism for controlling access to the Kubernetes API, and since its [beta in 1.6](https://kubernetes.io/blog/2017/04/rbac-support-in-kubernetes), many Kubernetes clusters and provisioning strategies have enabled it by default.
Kubernetes 1.8 represents a significant milestone for the [role-based access control (RBAC) authorizer](https://kubernetes.io/docs/reference/access-authn-authz/rbac/), which was promoted to GA in this release. RBAC is a mechanism for controlling access to the Kubernetes API, and since its [beta in 1.6](https://kubernetes.io/blog/2017/04/rbac-support-in-kubernetes), many Kubernetes clusters and provisioning strategies have enabled it by default.
Going forward, we expect to see RBAC become a fundamental building block for securing Kubernetes clusters. This post explores using RBAC to manage user and application access to the Kubernetes API.


@ -71,7 +71,7 @@ Once you log in, all of your clusters are available within Codefresh.
### Add Cluster
To add your cluster, click the down arrow, and then click **add cluste** r, select the project and cluster name. You can now deploy images!
To add your cluster, click the down arrow, and then click **add cluster**, select the project and cluster name. You can now deploy images!


@ -11,7 +11,7 @@ The admission stage of API server processing is one of the most powerful tools f
## What is Admission?
[Admission](https://kubernetes.io/docs/admin/admission-controllers/#what-are-they) is the phase of [handling an API server request](https://blog.openshift.com/kubernetes-deep-dive-api-server-part-1/) that happens before a resource is persisted, but after authorization. Admission gets access to the same information as authorization (user, URL, etc) and the complete body of an API request (for most requests).
[Admission](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#what-are-they) is the phase of [handling an API server request](https://blog.openshift.com/kubernetes-deep-dive-api-server-part-1/) that happens before a resource is persisted, but after authorization. Admission gets access to the same information as authorization (user, URL, etc) and the complete body of an API request (for most requests).
[![](https://2.bp.blogspot.com/-p8WGg2BATsY/WlfywbD_tAI/AAAAAAAAAJw/mDqZV0dB4_Y0gXXQp_1tQ7CtMRSd6lHVwCK4BGAYYCw/s640/Screen%2BShot%2B2018-01-11%2Bat%2B3.22.07%2BPM.png)](http://2.bp.blogspot.com/-p8WGg2BATsY/WlfywbD_tAI/AAAAAAAAAJw/mDqZV0dB4_Y0gXXQp_1tQ7CtMRSd6lHVwCK4BGAYYCw/s1600/Screen%2BShot%2B2018-01-11%2Bat%2B3.22.07%2BPM.png)


@ -17,7 +17,7 @@ This blog post is one of a number of efforts to make client-go more accessible t
The following API group promotions are part of Kubernetes 1.9:
- Workload objects (Deployments, DaemonSets, ReplicaSets, and StatefulSets) have been [promoted to the apps/v1 API group in Kubernetes 1.9](https://kubernetes.io/docs/reference/workloads-18-19/). client-go follows this transition and allows developers to use the latest version by importing the k8s.io/api/apps/v1 package instead of k8s.io/api/apps/v1beta1 and by using Clientset.AppsV1().
- Admission Webhook Registration has been promoted to the admissionregistration.k8s.io/v1beta1 API group in Kubernetes 1.9. The former ExternalAdmissionHookConfiguration type has been replaced by the incompatible ValidatingWebhookConfiguration and MutatingWebhookConfiguration types. Moreover, the webhook admission payload type AdmissionReview in admission.k8s.io has been promoted to v1beta1. Note that versioned objects are now passed to webhooks. Refer to the admission webhook [documentation](https://kubernetes.io/docs/admin/extensible-admission-controllers/#external-admission-webhooks) for details.
- Admission Webhook Registration has been promoted to the admissionregistration.k8s.io/v1beta1 API group in Kubernetes 1.9. The former ExternalAdmissionHookConfiguration type has been replaced by the incompatible ValidatingWebhookConfiguration and MutatingWebhookConfiguration types. Moreover, the webhook admission payload type AdmissionReview in admission.k8s.io has been promoted to v1beta1. Note that versioned objects are now passed to webhooks. Refer to the admission webhook [documentation](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#admission-webhooks) for details.
@ -84,12 +84,12 @@ spec.version in body should be one of [v1.0.0 v1.0.1]
Note that with [Admission Webhooks](https://kubernetes.io/docs/admin/extensible-admission-controllers/#external-admission-webhooks), Kubernetes 1.9 provides another beta feature to validate objects before they are created or updated. Starting with 1.9, these webhooks also allow mutation of objects (for example, to set defaults or to inject values). Of course, webhooks work with CRDs as well. Moreover, webhooks can be used to implement validations that are not easily expressible with CRD validation. Note that webhooks are harder to implement than CRD validation, so for many purposes, CRD validation is the right tool.
Note that with [Admission Webhooks](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#admission-webhooks), Kubernetes 1.9 provides another beta feature to validate objects before they are created or updated. Starting with 1.9, these webhooks also allow mutation of objects (for example, to set defaults or to inject values). Of course, webhooks work with CRDs as well. Moreover, webhooks can be used to implement validations that are not easily expressible with CRD validation. Note that webhooks are harder to implement than CRD validation, so for many purposes, CRD validation is the right tool.
## Creating namespaced informers
Often objects in one namespace or only with certain labels are to be processed in a controller. Informers [now allow](https://github.com/kubernetes/kubernetes/pull/54660) you to tweak the ListOptions used to query the API server to list and watch objects. Uninitialized objects (for consumption by [initializers](https://kubernetes.io/docs/admin/extensible-admission-controllers/#what-are-initializers)) can be made visible by setting IncludeUninitialized to true. All this can be done using the new NewFilteredSharedInformerFactory constructor for shared informers:
Often objects in one namespace or only with certain labels are to be processed in a controller. Informers [now allow](https://github.com/kubernetes/kubernetes/pull/54660) you to tweak the ListOptions used to query the API server to list and watch objects. Uninitialized objects (for consumption by [initializers](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#what-are-initializers)) can be made visible by setting IncludeUninitialized to true. All this can be done using the new NewFilteredSharedInformerFactory constructor for shared informers:
```


@ -26,7 +26,7 @@ The number of respondents in the past year and 10 months increased at a rate of
![Minikube operating system usage](/images/blog/survey-results/2018-application-survey/minikube-os-usage.png)
Minikube is used primarily be people on MacOS and Linux. Yet, according to the 2018 Stack Overflow survey, [almost half of developers use Windows as their primary operating system](https://insights.stackoverflow.com/survey/2018/#technology-developers-primary-operating-systems). This is where Minikube would run.
Minikube is used primarily by people on macOS and Linux. Yet, according to the 2018 Stack Overflow survey, [almost half of developers use Windows as their primary operating system](https://insights.stackoverflow.com/survey/2018/#technology-developers-primary-operating-systems). This is where Minikube would run.
Differences from other data sets are worth examining more closely to better understand our audience, where Kubernetes is today, and where it is headed.


@ -0,0 +1,24 @@
---
layout: blog
title: Meet Our Contributors - Monthly Streaming YouTube Mentoring Series
date: 2018-07-10
---
**Author**: Paris Pittman (Google)
![meet_our_contributors](/images/blog/2018-06-05-meet-our-contributors-youtube-mentoring-series/meet-our-contributors.png)
July 11th at 2:30pm and 8pm UTC kicks off our next installment of the Meet Our Contributors YouTube series. This month is special: members of the steering committee will be on hand to answer any and all questions from the community during the first 30 minutes of the 8pm UTC session. More on submitting questions below.
[Meet Our Contributors](https://github.com/kubernetes/community/blob/master/mentoring/meet-our-contributors.md) was created to give new and current contributors alike an opportunity to get time in front of our upstream community to ask the questions you would typically ask a mentor. We have 3-6 contributors on each session (an AM and a PM session, depending on where you are in the world!) answer questions [live on a YouTube stream](https://www.youtube.com/c/KubernetesCommunity/live). If you miss it, don't stress, the recording is up after it's over. Check out a past episode [here](https://www.youtube.com/watch?v=EVsXi3Zhlo0&list=PL69nYSiGNLP3QpQrhZq_sLYo77BVKv09F).
As you can imagine, the questions span broadly from introductory - “what's a SIG?” to more advanced - “why's my test flaking?” You'll also hear growth-related advice questions such as “what's my best path to becoming an approver?” We're happy to do a live code/docs review or explain part of the codebase as long as we have a few days' notice.
We answer at least 10 questions per session and have helped 500+ people to date. This is a scalable mentoring initiative that makes it easy for all parties to share information, get advice, and get going with what they are trying to accomplish. We encourage you to submit questions for our next session:
- Join the Kubernetes Slack channel - #meet-our-contributors - to ask your question or for more detailed information. DM paris@ if you would like to remain anonymous.
- Twitter works, too, with the hashtag #k8smoc
If you are a contributor reading this who has wanted to mentor but just can't find the time - this is for you! [Reach out to us](https://goo.gl/forms/ZcnFiqNR5EQH03zm2).
You can join us live on June 6th at 2:30pm and 8pm UTC, and every first Wednesday of the month, on the [Kubernetes Community live stream](https://www.youtube.com/c/KubernetesCommunity/live). We look forward to seeing you there!


@ -49,19 +49,18 @@ Each Special Interest Group (SIG) within the community continues to deliver the
## Availability
Kubernetes 1.11 is available for [download on GitHub](https://github.com/kubernetes/kubernetes/releases/tag/v1.11.0). To get started with Kubernetes, check out these [interactive tutorials](https://kubernetes.io/docs/tutorials/).
Kubernetes 1.11 is available for [download on GitHub](https://github.com/kubernetes/kubernetes/releases/tag/v1.11.0). To get started with Kubernetes, check out these [interactive tutorials](https://kubernetes.io/docs/tutorials/).
You can also install 1.11 using Kubeadm. Version 1.11.0 will be available as Deb and RPM packages, installable using the [Kubeadm cluster installer](https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/) sometime on June 28th.
## 5 Day Features Blog Series
## 4 Day Features Blog Series
If you're interested in exploring these features more in depth, check back in two weeks for our 5 Days of Kubernetes series where we'll highlight detailed walkthroughs of the following features:
If you're interested in exploring these features more in depth, check back in two weeks for our 4 Days of Kubernetes series where we'll highlight detailed walkthroughs of the following features:
* Day 1: IPVS-Based In-Cluster Service Load Balancing Graduates to General Availability
* Day 2: CoreDNS Promoted to General Availability
* Day 3: Dynamic Kubelet Configuration Moves to Beta
* Day 4: Custom Resource Definitions Can Now Define Multiple Versions
* Day 5: Overview of CSI Enhancements
* Day 1: [IPVS-Based In-Cluster Service Load Balancing Graduates to General Availability](/blog/2018/07/09/ipvs-based-in-cluster-load-balancing-deep-dive/)
* Day 2: [CoreDNS Promoted to General Availability](/blog/2018/07/10/coredns-ga-for-kubernetes-cluster-dns/)
* Day 3: [Dynamic Kubelet Configuration Moves to Beta](/blog/2018/07/11/dynamic-kubelet-configuration/)
* Day 4: [Resizing Persistent Volumes using Kubernetes](/blog/2018/07/11/resizing-persistent-volumes-using-kubernetes/)
## Release team


@ -114,7 +114,7 @@ Use Travis or Jenkins to run unit and integration tests, bribe your favorite tea
### 2: CI/CD via Jenkins -> Docker Image
[Generate your Docker images and bump release version within your Jenkins build](https://getintodevops.com/blog/building-your-first-Docker-image-with-jenkins-2-guide-for-developers).
[Generate your Docker images and bump release version within your Jenkins build](https://getintodevops.com/blog/building-your-first-docker-image-with-jenkins-2-guide-for-developers).
### 3: Airflow launches task


@ -0,0 +1,181 @@
---
layout: blog
title: 'IPVS-Based In-Cluster Load Balancing Deep Dive'
date: 2018-07-09
---
**Author**: Jun Du(Huawei), Haibin Xie(Huawei), Wei Liang(Huawei)
**Editor's note: this post is part of a [series of in-depth articles](https://kubernetes.io/blog/2018/06/27/kubernetes-1.11-release-announcement/) on what's new in Kubernetes 1.11**
## Introduction
Per [the Kubernetes 1.11 release blog post ](https://kubernetes.io/blog/2018/06/27/kubernetes-1.11-release-announcement/), we announced that IPVS-Based In-Cluster Service Load Balancing graduates to General Availability. In this blog, we will take you through a deep dive of the feature.
## What Is IPVS?
**IPVS** (**IP Virtual Server**) is built on top of Netfilter and implements transport-layer load balancing as part of the Linux kernel.
IPVS is incorporated into the LVS (Linux Virtual Server), where it runs on a host and acts as a load balancer in front of a cluster of real servers. IPVS can direct requests for TCP- and UDP-based services to the real servers, and make services of the real servers appear as virtual services on a single IP address. Therefore, IPVS naturally supports Kubernetes Service.
## Why IPVS for Kubernetes?
As Kubernetes grows in usage, the scalability of its resources becomes more and more important. In particular, the scalability of services is paramount to the adoption of Kubernetes by developers/companies running large workloads.
Kube-proxy, the building block of service routing, has relied on the battle-hardened iptables to implement the core supported Service types such as ClusterIP and NodePort. However, iptables struggles to scale to tens of thousands of Services because it is designed purely for firewalling purposes and is based on in-kernel rule lists.
Even though Kubernetes already supports 5000 nodes as of release v1.6, kube-proxy with iptables is actually a bottleneck to scaling the cluster to 5000 nodes. One example: with NodePort Services in a 5000-node cluster, if we have 2000 services and each service has 10 pods, this causes at least 20000 iptables records on each worker node, which can make the kernel pretty busy.
On the other hand, using IPVS-based in-cluster service load balancing can help a lot for such cases. IPVS is specifically designed for load balancing and uses more efficient data structures (hash tables) allowing for almost unlimited scale under the hood.
## IPVS-based Kube-proxy
### Parameter Changes
**Parameter: --proxy-mode** In addition to existing userspace and iptables modes, IPVS mode is configured via `--proxy-mode=ipvs`. It implicitly uses IPVS NAT mode for service port mapping.
**Parameter: --ipvs-scheduler**
A new kube-proxy parameter has been added to specify the IPVS load balancing algorithm, with the parameter being `--ipvs-scheduler`. If it's not configured, then round-robin (rr) is the default value.
- rr: round-robin
- lc: least connection
- dh: destination hashing
- sh: source hashing
- sed: shortest expected delay
- nq: never queue
In the future, we can implement a Service-specific scheduler (potentially via annotation), which would have higher priority and override this value.
**Parameter: `--cleanup-ipvs`** Similar to the `--cleanup-iptables` parameter, if true, clean up the IPVS configuration and iptables rules that were created in IPVS mode.
**Parameter: `--ipvs-sync-period`** Maximum interval of how often IPVS rules are refreshed (e.g. '5s', '1m'). Must be greater than 0.
**Parameter: `--ipvs-min-sync-period`** Minimum interval of how often the IPVS rules are refreshed (e.g. '5s', '1m'). Must be greater than 0.
**Parameter: `--ipvs-exclude-cidrs`** A comma-separated list of CIDRs which the IPVS proxier should not touch when cleaning up IPVS rules, because the IPVS proxier can't distinguish IPVS rules created by kube-proxy from original user IPVS rules. If you are using the IPVS proxier with your own IPVS rules in the environment, this parameter should be specified, otherwise your original rules will be cleaned up.
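Putting the flags above together, a minimal sketch of a kube-proxy invocation in IPVS mode (the values are illustrative only; `--cleanup-ipvs` is omitted since it is used for teardown rather than normal operation):
```
kube-proxy --proxy-mode=ipvs \
  --ipvs-scheduler=rr \
  --ipvs-sync-period=30s \
  --ipvs-min-sync-period=5s \
  --ipvs-exclude-cidrs=10.240.0.0/16
```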
### Design Considerations
#### IPVS Service Network Topology
When creating a ClusterIP type Service, IPVS proxier will do the following three things:
* Make sure a dummy interface exists in the node, defaults to kube-ipvs0
* Bind Service IP addresses to the dummy interface
* Create IPVS virtual servers for each Service IP address respectively
Here is an example:
```
# kubectl describe svc nginx-service
Name: nginx-service
...
Type: ClusterIP
IP: 10.102.128.4
Port: http 3080/TCP
Endpoints: 10.244.0.235:8080,10.244.1.237:8080
Session Affinity: None
# ip addr
...
73: kube-ipvs0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN qlen 1000
link/ether 1a:ce:f5:5f:c1:4d brd ff:ff:ff:ff:ff:ff
inet 10.102.128.4/32 scope global kube-ipvs0
valid_lft forever preferred_lft forever
# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 10.102.128.4:3080 rr
-> 10.244.0.235:8080 Masq 1 0 0
-> 10.244.1.237:8080 Masq 1 0 0
```
Please note that the relationship between a Kubernetes Service and IPVS virtual servers is `1:N`. For example, consider a Kubernetes Service that has more than one IP address. An External IP type Service has two IP addresses - ClusterIP and External IP. Then the IPVS proxier will create 2 IPVS virtual servers - one for Cluster IP and another one for External IP. The relationship between a Kubernetes Endpoint (each IP+Port pair) and an IPVS virtual server is `1:1`.
Deleting a Kubernetes Service triggers deletion of the corresponding IPVS virtual server, its IPVS real servers, and the IP addresses bound to the dummy interface.
#### Port Mapping
There are three proxy modes in IPVS: NAT (masq), IPIP and DR. Only NAT mode supports port mapping. Kube-proxy leverages NAT mode for port mapping. The following example shows IPVS mapping Service port 3080 to Pod port 8080.
```
TCP 10.102.128.4:3080 rr
-> 10.244.0.235:8080 Masq 1 0 0
-> 10.244.1.237:8080 Masq 1 0 0
```
#### Session Affinity
IPVS supports client IP session affinity (persistent connection). When a Service specifies session affinity, the IPVS proxier will set a timeout value (180min=10800s by default) in the IPVS virtual server. For example:
```
# kubectl describe svc nginx-service
Name: nginx-service
...
IP: 10.102.128.4
Port: http 3080/TCP
Session Affinity: ClientIP
# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 10.102.128.4:3080 rr persistent 10800
```
#### Iptables & Ipset in IPVS Proxier
IPVS is for load balancing and it can't handle other workarounds in kube-proxy, e.g. packet filtering, hairpin-masquerade tricks, SNAT, etc.
The IPVS proxier leverages iptables in those cases. Specifically, the IPVS proxier falls back on iptables in the following 4 scenarios:
- kube-proxy starts with --masquerade-all=true
- Specify cluster CIDR in kube-proxy startup
- Support Loadbalancer type service
- Support NodePort type service
However, we don't want to create too many iptables rules. So we adopt ipset for the sake of decreasing iptables rules. The following is the table of ipset sets that IPVS proxier maintains:
| set name | members | usage |
| ------------------------------ | ---------------------------------------- | ---------------------------------------- |
| KUBE-CLUSTER-IP | All Service IP + port | masquerade for cases that `masquerade-all=true` or `clusterCIDR` specified |
| KUBE-LOOP-BACK | All Service IP + port + IP | masquerade for resolving hairpin issue |
| KUBE-EXTERNAL-IP | Service External IP + port | masquerade for packets to external IPs |
| KUBE-LOAD-BALANCER | Load Balancer ingress IP + port | masquerade for packets to Load Balancer type service |
| KUBE-LOAD-BALANCER-LOCAL | Load Balancer ingress IP + port with `externalTrafficPolicy=local` | accept packets to Load Balancer with `externalTrafficPolicy=local` |
| KUBE-LOAD-BALANCER-FW | Load Balancer ingress IP + port with `loadBalancerSourceRanges` | Drop packets for Load Balancer type Service with `loadBalancerSourceRanges` specified |
| KUBE-LOAD-BALANCER-SOURCE-CIDR | Load Balancer ingress IP + port + source CIDR | accept packets for Load Balancer type Service with `loadBalancerSourceRanges` specified |
| KUBE-NODE-PORT-TCP | NodePort type Service TCP port | masquerade for packets to NodePort(TCP) |
| KUBE-NODE-PORT-LOCAL-TCP | NodePort type Service TCP port with `externalTrafficPolicy=local` | accept packets to NodePort Service with `externalTrafficPolicy=local` |
| KUBE-NODE-PORT-UDP | NodePort type Service UDP port | masquerade for packets to NodePort(UDP) |
| KUBE-NODE-PORT-LOCAL-UDP | NodePort type service UDP port with `externalTrafficPolicy=local` | accept packets to NodePort Service with `externalTrafficPolicy=local` |
In general, for the IPVS proxier, the number of iptables rules is static, no matter how many Services or Pods we have.
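If you want to verify this on a node running in IPVS mode, `ipset` can show the sets from the table above directly (a sketch; run as root, and set names may vary slightly by version):
```
ipset -n list | grep KUBE-          # names of the sets kube-proxy maintains
ipset list KUBE-CLUSTER-IP | head   # header and members of one set
```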
### Run kube-proxy in IPVS Mode
Currently, the local-up scripts, GCE scripts, and kubeadm support switching to IPVS proxy mode by exporting an environment variable (`KUBE_PROXY_MODE=ipvs`) or specifying the flag (`--proxy-mode=ipvs`). Before running the IPVS proxier, please ensure that the required IPVS kernel modules are already installed.
```
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
```
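A short sketch of loading and verifying those modules on a typical Linux node (run as root; module names are the ones listed above):
```
for mod in ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack_ipv4; do modprobe -- "$mod"; done
lsmod | grep -e ip_vs -e nf_conntrack_ipv4
```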
Finally, for Kubernetes v1.10, feature gate `SupportIPVSProxyMode` is set to `true` by default. For Kubernetes v1.11, the feature gate is entirely removed. However, you need to enable `--feature-gates=SupportIPVSProxyMode=true` explicitly for Kubernetes before v1.10.
## Get Involved
The simplest way to get involved with Kubernetes is by joining one of the many [Special Interest Groups](https://github.com/kubernetes/community/blob/master/sig-list.md) (SIGs) that align with your interests. Have something you'd like to broadcast to the Kubernetes community? Share your voice at our weekly [community meeting](https://github.com/kubernetes/community/blob/master/communication.md#weekly-meeting), and through the channels below.
Thank you for your continued feedback and support.
Post questions (or answer questions) on [Stack Overflow](http://stackoverflow.com/questions/tagged/kubernetes)
Join the community portal for advocates on [K8sPort](http://k8sport.org/)
Follow us on Twitter [@Kubernetesio](https://twitter.com/kubernetesio) for latest updates
Chat with the community on [Slack](http://slack.k8s.io/)
Share your Kubernetes [story](https://docs.google.com/a/linuxfoundation.org/forms/d/e/1FAIpQLScuI7Ye3VQHQTwBASrgkjQDSS5TP0g3AXfFhwSM9YpHgxRKFA/viewform)
View File
@ -0,0 +1,180 @@
---
layout: blog
title: "CoreDNS GA for Kubernetes Cluster DNS"
date: 2018-07-10
---
**Author**: John Belamaric (Infoblox)
**Editor's note: this post is part of a [series of in-depth articles](https://kubernetes.io/blog/2018/06/27/kubernetes-1.11-release-announcement/) on what's new in Kubernetes 1.11**
## Introduction
In Kubernetes 1.11, [CoreDNS](https://coredns.io) has reached General Availability (GA) for DNS-based service discovery, as an alternative to the kube-dns addon. This means that CoreDNS will be offered as an option in upcoming versions of the various installation tools. In fact, the kubeadm team chose to make it the default option starting with Kubernetes 1.11.
DNS-based service discovery has been part of Kubernetes for a long time with the kube-dns cluster addon. This has generally worked pretty well, but there have been some concerns around the reliability, flexibility and security of the implementation.
CoreDNS is a general-purpose, authoritative DNS server that provides a backwards-compatible, but extensible, integration with Kubernetes. It resolves the issues seen with kube-dns, and offers a number of unique features that solve a wider variety of use cases.
In this article, you will learn about the differences in the implementations of kube-dns and CoreDNS, and some of the helpful extensions offered by CoreDNS.
## Implementation differences
In kube-dns, several containers are used within a single pod: `kubedns`, `dnsmasq`, and `sidecar`. The `kubedns`
container watches the Kubernetes API and serves DNS records based on the [Kubernetes DNS specification](https://github.com/kubernetes/dns/blob/master/docs/specification.md), `dnsmasq` provides caching and stub domain support, and `sidecar` provides metrics and health checks.
This setup leads to a few issues that have been seen over time. For one, security vulnerabilities in `dnsmasq` have led to the need
for a security-patch release of Kubernetes in the past. Additionally, because `dnsmasq` handles the stub domains,
but `kubedns` handles the External Services, you cannot use a stub domain in an external service, which is very
limiting to that functionality (see [dns#131](https://github.com/kubernetes/dns/issues/131)).
All of these functions are done in a single container in CoreDNS, which is running a process written in Go. The
different plugins that are enabled replicate (and enhance) the functionality found in kube-dns.
## Configuring CoreDNS
In kube-dns, you can [modify a ConfigMap](https://kubernetes.io/blog/2017/04/configuring-private-dns-zones-upstream-nameservers-kubernetes/) to change the behavior of your service discovery. This allows the addition of
features such as serving stub domains, modifying upstream nameservers, and enabling federation.
In CoreDNS, you similarly can modify the ConfigMap for the CoreDNS [Corefile](https://coredns.io/2017/07/23/corefile-explained/) to change how service discovery
works. This Corefile configuration offers many more options than you will find in kube-dns, since it is the primary configuration file that CoreDNS uses for all of its features, even those that are not Kubernetes related.
When upgrading from kube-dns to CoreDNS using `kubeadm`, your existing ConfigMap will be used to generate the
customized Corefile for you, including all of the configuration for stub domains, federation, and upstream nameservers. See [Using CoreDNS for Service Discovery](https://kubernetes.io/docs/tasks/administer-cluster/coredns/) for more details.
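For orientation, a minimal Corefile for cluster DNS looks roughly like the sketch below. This is illustrative rather than the exact file your installer generates; the plugin names are real CoreDNS plugins, but the cluster domain and upstream resolver path are assumptions:
```
.:53 {
    errors
    health
    kubernetes cluster.local in-addr.arpa ip6.arpa {
        pods insecure
        upstream
        fallthrough in-addr.arpa ip6.arpa
    }
    prometheus :9153
    proxy . /etc/resolv.conf
    cache 30
    reload
}
```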
## Bug fixes and enhancements
There are several open issues with kube-dns that are resolved in CoreDNS, either in default configuration or with some customized configurations.
* [dns#55 - Custom DNS entries for kube-dns](https://github.com/kubernetes/dns/issues/55) may be handled using the "fallthrough" mechanism in the [kubernetes plugin](https://coredns.io/plugins/kubernetes), using the [rewrite plugin](https://coredns.io/plugins/rewrite), or simply serving a subzone with a different plugin such as the [file plugin](https://coredns.io/plugins/file).
* [dns#116 - Only one A record set for headless service with pods having single hostname](https://github.com/kubernetes/dns/issues/116). This issue is fixed without any additional configuration.
* [dns#131 - externalName not using stubDomains settings](https://github.com/kubernetes/dns/issues/131). This issue is fixed without any additional configuration.
* [dns#167 - enable skyDNS round robin A/AAAA records](https://github.com/kubernetes/dns/issues/167). The equivalent functionality can be configured using the [load balance plugin](https://coredns.io/plugins/loadbalance).
* [dns#190 - kube-dns cannot run as non-root user](https://github.com/kubernetes/dns/issues/190). This issue is solved today by using a non-default image, but it will be made the default CoreDNS behavior in a future release.
* [dns#232 - fix pod hostname to be podname for dns srv records](https://github.com/kubernetes/dns/issues/232) is an enhancement that is supported through the "endpoint_pod_names" feature described below.
## Metrics
The functional behavior of the default CoreDNS configuration is the same as kube-dns. However,
one difference you need to be aware of is that the published metrics are not the same. In kube-dns,
you get separate metrics for `dnsmasq` and `kubedns` (skydns). In CoreDNS there is a completely
different set of metrics, since it is all a single process. You can find more details on these
metrics on the CoreDNS [Prometheus plugin](https://coredns.io/plugins/metrics/) page.
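If you want a quick look at these metrics, you can scrape the endpoint exposed by the Prometheus plugin directly. The sketch below assumes a kubeadm-style deployment, where CoreDNS runs as a Deployment named `coredns` in `kube-system` and serves metrics on port 9153:
```
# Find the CoreDNS pods (kubeadm keeps the kube-dns label for compatibility)
kubectl -n kube-system get pods -l k8s-app=kube-dns -o wide

# Port-forward to the metrics port and fetch the Prometheus metrics
kubectl -n kube-system port-forward deployment/coredns 9153:9153 &
sleep 2
curl -s http://localhost:9153/metrics | head
```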
## Some special features
The standard CoreDNS Kubernetes configuration is designed to be backwards compatible with the prior
kube-dns behavior. But with some configuration changes, CoreDNS can allow you to modify how the
DNS service discovery works in your cluster. A number of these features are intended to still be
compliant with the [Kubernetes DNS specification](https://github.com/kubernetes/dns/blob/master/docs/specification.md);
they enhance functionality but remain backward compatible. Since CoreDNS is not
*only* made for Kubernetes, but is instead a general-purpose DNS server, there are many things you
can do beyond that specification.
### Pods verified mode
In kube-dns, pod name records are "fake". That is, any "a-b-c-d.namespace.pod.cluster.local" query will
return the IP address "a.b.c.d". In some cases, this can weaken the identity guarantees offered by TLS. So,
CoreDNS offers a "pods verified" mode, which will only return the IP address if there is a pod in the
specified namespace with that IP address.
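Enabling it is a matter of switching the `pods` option of the kubernetes plugin from its default to `verified`; a minimal sketch of the relevant Corefile stanza, assuming the standard `cluster.local` domain:
```
kubernetes cluster.local in-addr.arpa ip6.arpa {
    pods verified
    fallthrough in-addr.arpa ip6.arpa
}
```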
### Endpoint names based on pod names
In kube-dns, when using a headless service, you can use an SRV request to get a list of
all endpoints for the service:
```
dnstools# host -t srv headless
headless.default.svc.cluster.local has SRV record 10 33 0 6234396237313665.headless.default.svc.cluster.local.
headless.default.svc.cluster.local has SRV record 10 33 0 6662363165353239.headless.default.svc.cluster.local.
headless.default.svc.cluster.local has SRV record 10 33 0 6338633437303230.headless.default.svc.cluster.local.
dnstools#
```
However, the endpoint DNS names are (for practical purposes) random. In CoreDNS, by default, you get endpoint
DNS names based upon the endpoint IP address:
```
dnstools# host -t srv headless
headless.default.svc.cluster.local has SRV record 0 25 443 172-17-0-14.headless.default.svc.cluster.local.
headless.default.svc.cluster.local has SRV record 0 25 443 172-17-0-18.headless.default.svc.cluster.local.
headless.default.svc.cluster.local has SRV record 0 25 443 172-17-0-4.headless.default.svc.cluster.local.
headless.default.svc.cluster.local has SRV record 0 25 443 172-17-0-9.headless.default.svc.cluster.local.
```
For some applications, it is desirable to have the pod name for this, rather than the pod IP
address (see for example [kubernetes#47992](https://github.com/kubernetes/kubernetes/issues/47992) and [coredns#1190](https://github.com/coredns/coredns/pull/1190)). To enable this in CoreDNS, you specify the "endpoint_pod_names" option in your Corefile, which results in this:
```
dnstools# host -t srv headless
headless.default.svc.cluster.local has SRV record 0 25 443 headless-65bb4c479f-qv84p.headless.default.svc.cluster.local.
headless.default.svc.cluster.local has SRV record 0 25 443 headless-65bb4c479f-zc8lx.headless.default.svc.cluster.local.
headless.default.svc.cluster.local has SRV record 0 25 443 headless-65bb4c479f-q7lf2.headless.default.svc.cluster.local.
headless.default.svc.cluster.local has SRV record 0 25 443 headless-65bb4c479f-566rt.headless.default.svc.cluster.local.
```
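A sketch of the Corefile stanza that would produce names like the above, again assuming the standard `cluster.local` domain:
```
kubernetes cluster.local in-addr.arpa ip6.arpa {
    endpoint_pod_names
    fallthrough in-addr.arpa ip6.arpa
}
```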
### Autopath
CoreDNS also has a special feature to improve latency in DNS requests for external names. In Kubernetes, the
DNS search path for pods specifies a long list of suffixes. This enables the use of short names when requesting
services in the cluster - for example, "headless" above, rather than "headless.default.svc.cluster.local". However,
when requesting an external name - "infoblox.com", for example - several invalid DNS queries are made by the client,
requiring a roundtrip from the client to kube-dns each time (actually to `dnsmasq` and then to `kubedns`, since [negative caching is disabled](https://github.com/kubernetes/dns/issues/121)):
* infoblox.com.default.svc.cluster.local -> NXDOMAIN
* infoblox.com.svc.cluster.local -> NXDOMAIN
* infoblox.com.cluster.local -> NXDOMAIN
* infoblox.com.your-internal-domain.com -> NXDOMAIN
* infoblox.com -> returns a valid record
In CoreDNS, an optional feature called [autopath](https://coredns.io/plugins/autopath) can be enabled that will cause this search path to be followed
*in the server*. That is, CoreDNS will figure out from the source IP address which namespace the client pod is in,
and it will walk this search list until it gets a valid answer. Since the first 3 of these are resolved internally
within CoreDNS itself, it cuts out all of the back and forth between the client and server, reducing latency.
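Autopath needs the kubernetes plugin to answer pod queries authoritatively, so it is normally paired with `pods verified`; a hedged sketch of the Corefile fragment:
```
kubernetes cluster.local in-addr.arpa ip6.arpa {
    pods verified
    fallthrough in-addr.arpa ip6.arpa
}
autopath @kubernetes
```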
### A few other Kubernetes specific features
In CoreDNS, you can use standard DNS zone transfer to export the entire DNS record set. This is useful for
debugging your services as well as importing the cluster zone into other DNS servers.
You can also filter by namespaces or by a label selector. This allows you to run specific CoreDNS instances that will only serve records matching the filters, exposing only a limited set of your services via DNS.
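The filtering is done with the `namespaces` and `labels` options of the kubernetes plugin; the namespace names and label selector below are hypothetical:
```
kubernetes cluster.local in-addr.arpa ip6.arpa {
    # only expose records for these namespaces
    namespaces default team-a
    # only expose objects whose labels match this selector
    labels environment=prod
}
```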
## Extensibility
In addition to the features described above, CoreDNS is easily extended. It is possible to build custom versions
of CoreDNS that include your own features. For example, this ability has been used to extend CoreDNS to do recursive resolution
with the [unbound plugin](https://coredns.io/explugins/unbound), to serve records directly from a database with the [pdsql plugin](https://coredns.io/explugins/pdsql), and to allow multiple CoreDNS instances to share a common level 2 cache with the [redisc plugin](https://coredns.io/explugins/redisc).
Many other interesting extensions have been added, which you will find on the [External Plugins](https://coredns.io/explugins/) page of the CoreDNS site. One that is really interesting for Kubernetes and Istio users is the [kubernetai plugin](https://coredns.io/explugins/kubernetai), which allows a single CoreDNS instance to connect to multiple Kubernetes clusters and provide service discovery across all of them.
## What's Next?
CoreDNS is an independent project, and as such is developing many features that are not directly
related to Kubernetes. However, a number of these will have applications within Kubernetes. For example,
the upcoming integration with policy engines will allow CoreDNS to make intelligent choices about which endpoint
to return when a headless service is requested. This could be used to route traffic to a local pod, or
to a more responsive pod. Many other features are in development, and of course as an open source project, we welcome you to suggest and contribute your own features!
The features and differences described above are a few examples. There is much more you can do with CoreDNS.
You can find out more on the [CoreDNS Blog](https://coredns.io/blog).
### Get involved with CoreDNS
CoreDNS is an incubated [CNCF](https://cncf.io) project.
We're most active on Slack (and Github):
- Slack: #coredns on <https://slack.cncf.io>
- Github: <https://github.com/coredns/coredns>
More resources can be found:
- Website: <https://coredns.io>
- Blog: <https://blog.coredns.io>
- Twitter: [@corednsio](https://twitter.com/corednsio)
- Mailing list/group: <coredns-discuss@googlegroups.com>
View File
@ -0,0 +1,48 @@
---
layout: blog
title: 'Dynamic Kubelet Configuration'
date: 2018-07-11
---
**Author**: Michael Taufen (Google)
**Editor's note: this post is part of a [series of in-depth articles](https://kubernetes.io/blog/2018/06/27/kubernetes-1.11-release-announcement/) on what's new in Kubernetes 1.11**
## Why Dynamic Kubelet Configuration?
Kubernetes provides API-centric tooling that significantly improves workflows for managing applications and infrastructure. Most Kubernetes installations, however, run the Kubelet as a native process on each host, outside the scope of standard Kubernetes APIs.
In the past, this meant that cluster administrators and service providers could not rely on Kubernetes APIs to reconfigure Kubelets in a live cluster. In practice, this required operators to either ssh into machines to perform manual reconfigurations, use third-party configuration management automation tools, or create new VMs with the desired configuration already installed, then migrate work to the new machines. These approaches are environment-specific and can be expensive.
Dynamic Kubelet configuration gives cluster administrators and service providers the ability to reconfigure Kubelets in a live cluster via Kubernetes APIs.
## What is Dynamic Kubelet Configuration?
Kubernetes v1.10 made it possible to configure the Kubelet via a beta [config file](https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/) API. Kubernetes already provides the ConfigMap abstraction for storing arbitrary file data in the API server.
Dynamic Kubelet configuration extends the Node object so that a Node can refer to a ConfigMap that contains the same type of config file. When a Node is updated to refer to a new ConfigMap, the associated Kubelet will attempt to use the new configuration.
## How does it work?
Dynamic Kubelet configuration provides the following core features:
* Kubelet attempts to use the dynamically assigned configuration.
* Kubelet "checkpoints" configuration to local disk, enabling restarts without API server access.
* Kubelet reports assigned, active, and last-known-good configuration sources in the Node status.
* When invalid configuration is dynamically assigned, Kubelet automatically falls back to a last-known-good configuration and reports errors in the Node status.
To use the dynamic Kubelet configuration feature, a cluster administrator or service provider will first post a ConfigMap containing the desired configuration, then set each Node.Spec.ConfigSource.ConfigMap reference to refer to the new ConfigMap. Operators can update these references at their preferred rate, giving them the ability to perform controlled rollouts of new configurations.
Each Kubelet watches its associated Node object for changes. When the Node.Spec.ConfigSource.ConfigMap reference is updated, the Kubelet will "checkpoint" the new ConfigMap by writing the files it contains to local disk. The Kubelet will then exit, and the OS-level process manager will restart it. Note that if the Node.Spec.ConfigSource.ConfigMap reference is not set, the Kubelet uses the set of flags and config files local to the machine it is running on.
Once restarted, the Kubelet will attempt to use the configuration from the new checkpoint. If the new configuration passes the Kubelet's internal validation, the Kubelet will update Node.Status.Config to reflect that it is using the new configuration. If the new configuration is invalid, the Kubelet will fall back to its last-known-good configuration and report an error in Node.Status.Config.
Note that the default last-known-good configuration is the combination of Kubelet command-line flags with the Kubelet's local configuration file. Command-line flags that overlap with the config file always take precedence over both the local configuration file and dynamic configurations, for backwards-compatibility.
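As a minimal sketch of that workflow (the node, ConfigMap, and file names are hypothetical, and the exact fields should be checked against the tutorial linked below for your cluster version):
```
# Store a Kubelet configuration file in a ConfigMap (the key name is arbitrary)
kubectl -n kube-system create configmap my-kubelet-config --from-file=kubelet=kubelet-config.yaml

# Point a Node at the new ConfigMap; the Kubelet on node-1 will checkpoint it and restart
kubectl patch node node-1 --patch \
  '{"spec":{"configSource":{"configMap":{"name":"my-kubelet-config","namespace":"kube-system","kubeletConfigKey":"kubelet"}}}}'

# Inspect the assigned, active, and last-known-good configuration in the Node status
kubectl get node node-1 -o jsonpath='{.status.config}'
```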
See the following diagram for a high-level overview of a configuration update for a single Node:
![kubelet-diagram](/images/blog/2018-07-11-dynamic-kubelet-configuration/kubelet-diagram.png)
## How can I learn more?
Please see the official tutorial at https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/, which contains more in-depth details on user workflow, how a configuration becomes "last-known-good," how the Kubelet "checkpoints" config, and possible failure modes.
View File
@ -0,0 +1,86 @@
---
layout: blog
title: 'Resizing Persistent Volumes using Kubernetes'
date: 2018-07-12
---
**Author**: Hemant Kumar (Red Hat)
**Editor's note: this post is part of a [series of in-depth articles](https://kubernetes.io/blog/2018/06/27/kubernetes-1.11-release-announcement/) on what's new in Kubernetes 1.11**
In Kubernetes v1.11 the persistent volume expansion feature is being promoted to beta. This feature allows users to easily resize an existing volume by editing the `PersistentVolumeClaim` (PVC) object. Users no longer have to manually interact with the storage backend or delete and recreate PV and PVC objects to increase the size of a volume. Shrinking persistent volumes is not supported.
Volume expansion was introduced in v1.8 as an Alpha feature, and versions prior to v1.11 required enabling the feature gate, `ExpandPersistentVolumes`, as well as the admission controller, `PersistentVolumeClaimResize` (which prevents expansion of PVCs whose underlying storage provider does not support resizing). In Kubernetes v1.11+, both the feature gate and admission controller are enabled by default.
Although the feature is enabled by default, a cluster admin must opt-in to allow users to resize their volumes. Kubernetes v1.11 ships with volume expansion support for the following in-tree volume plugins: AWS-EBS, GCE-PD, Azure Disk, Azure File, Glusterfs, Cinder, Portworx, and Ceph RBD. Once the admin has determined that volume expansion is supported for the underlying provider, they can make the feature available to users by setting the `allowVolumeExpansion` field to `true` in their `StorageClass` object(s). Only PVCs created from that `StorageClass` will be allowed to trigger volume expansion.
```
~> cat standard.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: standard
parameters:
  type: pd-standard
provisioner: kubernetes.io/gce-pd
allowVolumeExpansion: true
reclaimPolicy: Delete
```
Any PVC created from this `StorageClass` can be edited (as illustrated below) to request more space. Kubernetes will interpret a change to the storage field as a request for more space, and will trigger automatic volume resizing.
![PVC StorageClass](/images/blog/2018-07-12-resizing-persistent-volumes-using-kubernetes/pvc-storageclass.png)
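Besides editing the PVC interactively, the same change can be requested imperatively; a minimal sketch, assuming a PVC named `myclaim` created from this StorageClass:
```
kubectl patch pvc myclaim --patch '{"spec":{"resources":{"requests":{"storage":"14Gi"}}}}'
```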
## File System Expansion
Block storage volume types such as GCE-PD, AWS-EBS, Azure Disk, Cinder, and Ceph RBD typically require a file system expansion before the additional space of an expanded volume is usable by pods. Kubernetes takes care of this automatically whenever the pod(s) referencing your volume are restarted.
Network attached file systems (like Glusterfs and Azure File) can be expanded without having to restart the referencing Pod, because these systems do not require special file system expansion.
For these block storage volume types, file system expansion must be triggered by terminating the pod that uses the volume. More specifically:
* Edit the PVC to request more space.
* Once the underlying volume has been expanded by the storage provider, the PersistentVolume object will reflect the updated size and the PVC will have the `FileSystemResizePending` condition.
You can verify this by running `kubectl get pvc <pvc_name> -o yaml`
```
~> kubectl get pvc myclaim -o yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: myclaim
  namespace: default
  uid: 02d4aa83-83cd-11e8-909d-42010af00004
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 14Gi
  storageClassName: standard
  volumeName: pvc-xxx
status:
  capacity:
    storage: 9G
  conditions:
  - lastProbeTime: null
    lastTransitionTime: 2018-07-11T14:51:10Z
    message: Waiting for user to (re-)start a pod to finish file system resize of
      volume on node.
    status: "True"
    type: FileSystemResizePending
  phase: Bound
```
* Once the PVC has the condition `FileSystemResizePending`, the pod that uses the PVC can be restarted to finish file system resizing on the node. The restart can be achieved by deleting and recreating the pod, or by scaling the deployment down and then up again (see the sketch below).
* Once file system resizing is done, the PVC will automatically be updated to reflect the new size.
Any errors encountered while expanding the file system should be available as events on the pod.
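A minimal sketch of the scale-down/scale-up restart, assuming a hypothetical Deployment named `my-app` whose pod mounts the PVC:
```
kubectl scale deployment my-app --replicas=0
kubectl scale deployment my-app --replicas=1
```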
## Online File System Expansion
Kubernetes v1.11 also introduces an alpha feature called online file system expansion. This feature enables file system expansion while a volume is still in use by a pod. Because this feature is alpha, it requires enabling the feature gate, `ExpandInUsePersistentVolumes`. It is supported by the in-tree volume plugins GCE-PD, AWS-EBS, Cinder, and Ceph RBD. When this feature is enabled, pods referencing the resized volume do not need to be restarted. Instead, the file system will automatically be resized while in use as part of volume expansion. File system expansion does not happen until a pod references the resized volume, so if no pods referencing the volume are running, file system expansion will not happen.
## How can I learn more?
Check out additional documentation on this feature here: http://k8s.io/docs/concepts/storage/persistent-volumes.
View File
@ -0,0 +1,275 @@
---
layout: blog
title: "How the sausage is made: the Kubernetes 1.11 release interview, from the Kubernetes Podcast"
date: 2018-07-16
---
<b>Author</b>: Craig Box (Google)
At KubeCon EU, my colleague Adam Glick and I were pleased to announce the [Kubernetes Podcast from Google](https://kubernetespodcast.com/). In this weekly conversation, we focus on all the great things that are happening in the world of Kubernetes and Cloud Native. From the news of the week, to interviews with people in the community, we help you stay up to date on everything Kubernetes.
We [recently had the pleasure of speaking](https://kubernetespodcast.com/episode/010-kubernetes-1.11/) to the release manager for Kubernetes 1.11, Josh Berkus from Red Hat, and the release manager for the upcoming 1.12, Tim Pepper from VMware.
In this conversation we learned about the release process, the impact of quarterly releases on end users, and how Kubernetes is like baking.
I encourage you to listen to [the podcast version](https://kubernetespodcast.com/episode/010-kubernetes-1.11/) if you have a commute, or a dog to walk. If you like what you hear, [we encourage you to subscribe](https://kubernetespodcast.com/subscribe)! In case you're short on time, or just want to browse quickly, we are delighted to share the transcript with you.
<hr/>
<b>CRAIG BOX: First of all, congratulations both, and thank you.</b>
JOSH BERKUS: Well, thank you. Congratulations for me, because my job is done.
[LAUGHTER]
Congratulations and sympathy for Tim.
[LAUGH]
TIM PEPPER: Thank you, and I guess thank you?
[LAUGH]
<b>ADAM GLICK: For those that don't know a lot about the process, why don't you help people understand — what is it like to be the release manager? What's the process that a release goes through to get to the point when everyone just sees, OK, it's released — 1.11 is available? What does it take to get up to that?</b>
JOSH BERKUS: We have a quarterly release cycle. So every three months, we're releasing. And ideally and fortunately, this is actually now how we are doing things. Somewhere around two, three weeks before the previous release, somebody volunteers to be the release lead. That person is confirmed by [SIG Release](https://github.com/kubernetes/sig-release). So far, we've never had more than one volunteer, so there hasn't been really a fight about it.
And then that person starts working with others to put together [a team](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.11/release_team.md) called the release team. Tim's just gone through this with Stephen Augustus and [picking out a whole bunch of people](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.12/release_team.md). And then after or a little before— probably after, because we want to wait for the retrospective from the previous release— the release lead then sets [a schedule](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.11/release-1.11.md) for the upcoming release, as in when all the deadlines will be.
And this is a thing, because we're still tinkering with relative deadlines, and how long should code freeze be, and how should we track features? Because we don't feel that we've gotten down that sort of cadence perfectly yet. I mean, like, we've done pretty well, but we don't feel like we want to actually set [in stone], this is the schedule for each and every release.
Also, we have to adjust the schedule because of holidays, right? Because you can't have the code freeze deadline starting on July 4 or in the middle of design or sometime else when we're going to have a large group of contributors who are out on vacation.
TIM PEPPER: This is something I've had to spend some time looking at, thinking about 1.12. Going back to early June as we were tinkering with the code freeze date, starting to think about, well, what are the implications going to be on 1.12? When would these things start falling on the calendar? And then also for 1.11, we had one complexity. If we slipped the release past this week, we start running into the US 4th of July holiday, and we're not likely to get a lot done.
So much of a slip would mean slipping into the middle of July before we'd really know that we were successfully triaging things. And worst case maybe, we're quite a bit later into July.
So instead of quarterly with a three-month sort of cadence, well, maybe we've accidentally ended up chopping out one month out of the next release or pushing it quite a bit into the end of the year. And that made the deliberation around things quite complex, but thankfully this week, everything's gone smoothly in the end.
<b>CRAIG BOX: All the releases so far have been one quarter — they've been a 12-week release cycle, give or take. Is that something that you think will continue going forward, or is the release team thinking about different ways they could run releases?</b>
TIM PEPPER: The whole community is thinking about this. There are voices who'd like the cadence to be faster, and there are voices who'd like it to be shorter. And there's good arguments for both.
<b>ADAM GLICK: Because it's interesting. It sounds like it is a date-driven release cycle versus a feature-driven release cycle.</b>
JOSH BERKUS: Yeah, certainly. I really honestly think everybody in the world of software recognizes that feature-driven release cycles just don't work. And a big part of the duties of the release team collectively— several members of the team do this— is yanking things out of the release that are not ready. And the hard part of that is figuring out which things aren't ready, right? Because the person who's working on it tends to be super optimistic about what they can get done and what they can get fixed before the deadline.
<b>ADAM GLICK: Of course.</b>
TIM PEPPER: And this is one of the things I think that's useful about the process we have in place on the release team for having shadows who spend some time on the release team, working their way up into more of a lead position and gaining some experience, starting to get some exposure to see that optimism and see the processes for vetting.
And it's even an overstatement to say the process. Just see the way that we build the intuition for how to vet and understand and manage the risk, and really go after and chase things down proactively and early to get resolution in a timely way versus continuing to just all be optimistic and letting things maybe languish and put a release at risk.
<b>CRAIG BOX: I've been reading this week about the introduction of [feature branches](https://github.com/kubernetes/community/issues/566) to Kubernetes. The new server-side apply feature, for example, is being built in a branch so that it didn't have to be half-built in master and then ripped out again as the release approached, if the feature wasn't ready. That seems to me like something that's a normal part of software development? Is there a reason it's taken so long to bring that to core Kubernetes?</b>
JOSH BERKUS: I don't actually know the history of why we're not using feature branches. I mean, the reason why we're not using feature branches pervasively now is that we have to transition from a different system. And I'm not really clear on how we adopted that linear development system. But it's certainly something we discussed on the release team, because there were issues of features that we thought were going to be ready, and then developed major problems. And we're like, if we have to back this out, that's going to be painful. And we did actually have to back one feature out, which involved not pulling out a Git commit, but literally reversing the line changes, which is really not how you want to be doing things.
<b>CRAIG BOX: No.</b>
TIM PEPPER: The other big benefit, I think, to the release branches if they are well integrated with the CI system for continuous integration and testing, you really get the feedback, and you can demonstrate, this set of stuff is ready. And then you can do deferred commitment on the master branch. And what comes in to a particular release on the timely cadence that users are expecting is stuff that's ready. You don't have potentially destabilizing things, because you can get a lot more proof and evidence of readiness.
<b>ADAM GLICK: What are you looking at in terms of the tool chain that you're using to do this? You mentioned a couple of things, and I know it's obviously run through GitHub. But I imagine you have a number of other tools that you're using in order to manage the release, to make sure that you understand what's ready, what's not. You mentioned balancing between people who are very optimistic about the feature they're working on making it in versus the time-driven deadline, and balancing those two. Is that just a manual process, or do you have a set of tools that help you do that?</b>
JOSH BERKUS: Well, there's code review, obviously. So just first of all, process was somebody wants to actually put in a feature, commit, or any kind of merge really, right? So that has to be assigned to one of the SIGs, one of these Special Interest Groups. Possibly more than one, depending on what areas it touches.
And then two generally overlapping groups of people have to approve that. One would be the SIG that it's assigned to, and the second would be anybody represented in the OWNERS files in the code tree of the directories which get touched.
Now sometimes those are the same group of people. I'd say often, actually. But sometimes they're not completely the same group of people, because sometimes you're making a change to the network, but that also happens to touch GCP support and OpenStack support, and so they need to review it as well.
So the first part is the human part, which is a bunch of other people need to look at this. And possibly they're going to comment "Hey. This is a really weird way to do this. Do you have a reason for it?"
Then the second part of it is the automated testing that happens, the automated acceptance testing that happens via webhook on there. And actually, one of the things that we did that was a significant advancement in this release cycle— and by we, I actually mean not me, but the great folks at [SIG Scalability](https://github.com/kubernetes/community/tree/master/sig-scalability) did— was add an [additional acceptance test](https://k8s-testgrid.appspot.com/sig-release-master-blocking#gce-scale-performance) that does a mini performance test.
Because one of the problems we've had historically is our major performance tests are large and take a long time to run, and so by the time we find out that we're failing the performance tests, we've already accumulated, you know, 40, 50 commits. And so now we're having to do git bisect to find out which of those commits actually caused the performance regression, which can make them very slow to address.
And so adding that performance pre-submit, the performance acceptance test really has helped stabilize performance in terms of new commits. So then we have that level of testing that you have to get past.
And then when we're done with that level of testing, we run a whole large battery of larger tests— end-to-end tests, performance tests, upgrade and downgrade tests. And one of the things that we've added recently and we're integrating to the process something called conformance tests. And the conformance test is we're testing whether or not you broke backwards compatibility, because it's obviously a big deal for Kubernetes users if you do that when you weren't intending to.
One of the busiest roles in the release team is a role called [CI Signal](https://github.com/kubernetes/sig-release#ci-signal-lead). And it's that person's job just to watch all of the tests for new things going red and then to try to figure out why they went red and bring it to people's attention.
<b>ADAM GLICK: I've often heard what you're referring to kind of called a breaking change, because it breaks the existing systems that are running. How do you identify those to people so when they see, hey, there's a new version of Kubernetes out there, I want to try it out, is that just going to release notes? Or is there a special way that you identify breaking changes as opposed to new features?</b>
JOSH BERKUS: That goes into release notes. I mean, keep in mind that one of the things that happens with Kubernetes' features is we go through this alpha, beta, general availability phase, right? So a feature's alpha for a couple of releases and then becomes beta for a release or two, and then it becomes generally available. And part of the idea of having this that may require a feature to go through that cycle for a year or more before its general availability is by the time it's general availability, we really want it to be, we are not going to change the API for this.
However, stuff happens, and we do occasionally have to do those. And so far, our main way to identify that to people actually is in the release notes. If you look at [the current release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.11.md#no-really-you-must-do-this-before-you-upgrade), there are actually two things in there right now that are sort of breaking changes.
One of them is the bit with [priority and preemption](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/) in that preemption being on by default now allows badly behaved users of the system to cause trouble in new ways. I'd actually have to look at the release notes to see what the second one was...
TIM PEPPER: The [JSON capitalization case sensitivity](https://github.com/kubernetes/kubernetes/issues/64612).
JOSH BERKUS: Right. Yeah. And that was one of those cases where you have to break backwards compatibility, because due to a library switch, we accidentally enabled people using JSON in a case-insensitive way in certain APIs, which was never supposed to be the case. But because we didn't have a specific test for that, we didn't notice that we'd done it.
And so for three releases, people could actually shove in malformed JSON, and Kubernetes would accept it. Well, we have to fix that now. But that does mean that there are going to be users out in the field who have malformed JSON in their configuration management that is now going to break.
<b>CRAIG BOX: But at least the good news is Kubernetes was always outputting correct formatted JSON during this period, I understand.</b>
JOSH BERKUS: Mm-hmm.
TIM PEPPER: I think that also kind of reminds of one of the other areas— so kind of going back to the question of, well, how do you share word of breaking changes? Well, one of the ways you do that is to have as much quality CI that you can to catch these things that are important. Give the feedback to the developer who's making the breaking change, such that they don't make the breaking change. And then you don't actually have to communicate it out to users.
So some of this is bound to happen, because you always have test escapes. But it's also a reminder of the need to ensure that you're also really building and maintaining your test cases and the quality and coverage of your CI system over time.
<b>ADAM GLICK: What do you mean when you say test escapes?</b>
TIM PEPPER: So I guess it's a term in the art, but for those who aren't familiar with it, you have intended behavior that wasn't covered by test, and as a result, an unintended change happens to that. And instead of your intended behavior being shipped, you're shipping something else.
JOSH BERKUS: The JSON change is a textbook example of this, which is we were testing that the API would continue to accept correct JSON. We were not testing adequately that it wouldn't accept incorrect JSON.
TIM PEPPER: A test escape, another way to think of it as you shipped a bug because there was not a test case highlighting the possibility of the bug.
<b>ADAM GLICK: It's the classic, we tested to make sure the feature worked. We didn't test to make sure that breaking things didn't work.</b>
TIM PEPPER: It's common for us to focus on "I've created this feature and I'm testing the positive cases". And this also comes to thinking about things like secure by default and having a really robust system. A harder piece of engineering often is to think about the failure cases and really actively manage those well.
JOSH BERKUS: I had a conversation with a contributor recently where it became apparent that that contributor had never worked on a support team, because their conception of a badly behaved user was, like, a hacker, right? An attacker who comes from outside.
And I'm like, no, no, no. Your stable of badly behaved users is your own staff. You know, they will do bad things, not necessarily intending to do bad things, but because they're trying to take a shortcut. And that is actually your primary concern in terms of preventing breaking the system.
<b>CRAIG BOX: Josh, what was your preparation to be release manager for 1.11?</b>
JOSH BERKUS: I was on the release team for two cycles, plus I was kind of auditing the release team for half a cycle before that. So in 1.9, I originally joined to be the shadow for bug triage, except I ended up not being the shadow, because the person who was supposed to be the lead for bug triage then dropped out. Then I ended up being the bug triage lead, and had to kind of improvise it because there wasn't documentation on what was involved in the role at the time.
And then I was [bug triage lead](https://github.com/kubernetes/sig-release/blob/master/README.md#bug-triage-lead) for a second cycle, for the 1.10 cycle, and then took over as release lead for the cycle. And one of the things on my to-do list is to update the requirements to be release lead, because we actually do have written requirements, and to say that the expectation now is that you spend at least two cycles on the release team, one of them either as a lead or as a shadow to the release lead.
<b>CRAIG BOX: And is bug triage lead just what it sounds like?</b>
JOSH BERKUS: Yeah. Pretty much. There's more tracking involved than triage. Part of it is just deficiencies in tooling, something we're looking to address. But things like GitHub API limitations make it challenging to build automated tools that help us intelligently track issues. And we are actually working with GitHub on that. Like, they've been helpful. It's just, they have their own scaling problems.
But then beyond that, you know, a lot of that, it's what you would expect it to be in terms of what triage says, right? Which is looking at every issue and saying, first of all, is this a real issue? Second, is it a serious issue? Third, who needs to address this?
And that's a lot of the work, because for anybody who is a regular contributor to Kubernetes, the number of GitHub notifications that they receive per day means that most of us turn our GitHub notifications off.
<b>CRAIG BOX: Indeed.</b>
JOSH BERKUS: Because it's just this fire hose. And as a result, when somebody really needs to pay attention to something right now, that generally requires a human to go and track them down by email or Slack or whatever they prefer. Twitter in some cases. I've done that. And say, hey. We really need you to look at this issue, because it's about to hold up the beta release.
<b>ADAM GLICK: When you look at the process that you're doing now, what are the changes that are coming in the future that will make the release process even better and easier?</b>
JOSH BERKUS: Well, we just went through this whole retro, and I put in some recommendations for things. Obviously, some additional automation, which I'm going to be looking at doing now that I'm cycling off of the release team for a quarter and can actually look at more longer term goals, will help, particularly now that we've addressed actually some of our GitHub data flow issues.
Beyond that, I put in a whole bunch of recommendations in the retro, but it's actually up to Tim which recommendations he's going to try to implement. So I'll let him [comment].
TIM PEPPER: I think one of the biggest changes that happened in the 1.11 cycle is this emphasis on trying to keep our continuous integration test status always green. That is huge for software development and keeping velocity. If you have this more, I guess at this point antiquated notion of waterfall development, where you do feature development for a while and are accepting of destabilization, and somehow later you're going to come back and spend a period on stabilization and fixing, that really elongates the feedback loop for developers.
And they don't realize what was broken, and the problems become much more complex to sort out as time goes by. One, developers aren't thinking about what it was that they'd been working on anymore. They've lost the context to be able to efficiently solve the problem.
But then you start also getting interactions. Maybe a bug was introduced, and other people started working around it or depending on it, and you get complex dependencies then that are harder to fix. And when you're trying to do that type of complex resolution late in the cycle, it becomes untenable over time. So I think continuing on that and building on it, I'm seeing a little bit more focus on test cases and meaningful test coverage. I think that's a great cultural change to have happening.
And maybe because I'm following Josh into this role from a bug triage position and in his mentions earlier of just the communications and tracking involved with that versus triage, I do have a bit of a concern that at times, email and Slack are relatively quiet. Some of the SIG meeting notes are a bit sparse or YouTube videos slow to upload. So the general artifacts around choice making I think is an area where we need a little more rigor. So I'm hoping to see some of that.
And that can be just as subtle as commenting on issues like, hey, this commit doesn't say what it's doing. And for that reason on the release team, we can't assess its risk versus value. So could you give a little more information here? Things like that that give more information both to the release team and the development community as well, because this is open source. And to collaborate, you really do need to communicate in depth.
<b>CRAIG BOX: Speaking of cultural changes, professional baker to Kubernetes' release lead sounds like quite a journey.</b>
JOSH BERKUS: There was a lot of stuff in between.
<b>CRAIG BOX: Would you say there are a lot of similarities?</b>
JOSH BERKUS: You know, believe it or not, there actually are similarities. And here's where it's similar, because I was actually thinking about this earlier. So when I was a professional baker, one of the things that I had to do was morning pastry. Like, I was actually in charge of doing several other things for custom orders, but since I had to come to work at 3:00 AM anyway— which also distressingly has similarities with some of this process. Because I had to come to work at 3:00 AM anyway, one of my secondary responsibilities was traying the morning pastry.
And one of the parts of that is you have this great big gas-fired oven with 10 rotating racks in it that are constantly rotating. Like, you get things in and out in the oven by popping them in and out while the racks are moving. That takes a certain amount of skill. You get burn marks on your wrists for your first couple of weeks of work. And then different pastries require a certain number of rotations to be done.
And there's a lot of similarities to the release cadence, because what you're doing is you're popping something in the oven or you're seeing something get kicked off, and then you have a certain amount of time before you need to check on it or you need to pull it out. And you're doing that in parallel with a whole bunch of other things. You know, with 40 other trays.
<b>CRAIG BOX: And with presumably a bunch of colleagues who are all there at the same time.</b>
JOSH BERKUS: Yeah. And the other thing is that these deadlines are kind of absolute, right? You can't say, oh, well, I was reading a magazine article, and I didn't have time to pull that tray out. It's too late. The pastry is burned, and you're going to have to throw it away, and they're not going to have enough pastry in the front case for the morning rush. And the customers are not interested in your excuses for that.
So from that perspective, from the perspective of saying, hey, we have a bunch of things that need to happen in parallel, they have deadlines and those deadlines are hard deadlines, there it's actually fairly similar.
<b>CRAIG BOX: Tim, do you have any other history that helped get you to where you are today?</b>
TIM PEPPER: I think in some ways I'm more of a traditional journey. I've got a computer engineering bachelor's degree. But I'm also maybe a bit of an outlier. In the late '90s, I found a passion for open source and Linux. Maybe kind of an early adopter, early believer in that.
And was working in the industry in the Bay Area for a while. Got involved in the Silicon Valley and Bay Area Linux users groups a bit, and managed to find work as a Linux sysadmin, and then doing device driver and kernel work and on up into distro. So that was all kind of standard in a way. And then I also did some other work around hardware enablement, high-performance computing, non-uniform memory access. Things that are really, really systems work.
And then about three years ago, my boss was really bending my ear and trying to get me to work on this cloud-related project. And that just felt so abstract and different from the low-level bits type of stuff that I'd been doing.
But kind of grudgingly, I eventually came around to the realization that the cloud is interesting, and it's so much more complex than local machine-only systems work, the type of things that I'd been doing before. It's massively distributed and you have a high-latency, low-reliability interconnect on all the nodes in the distributed network. So it's wildly complex engineering problems that need solved.
And so that got me interested. Started working then on this open source orchestrator for virtual machines and containers. It was written in Go and was having a lot of fun. But it wasn't Kubernetes, and it was becoming clear that Kubernetes was taking off. So about a year ago, I made the deliberate choice to move over to Kubernetes work.
<b>ADAM GLICK: Previously, Josh, you spoke a little bit about your preparation for becoming a release manager. For other folks that are interested in getting involved in the community and maybe getting involved in release management, should they follow the same path that you did? Or what are ways that would be good for them to get involved? And for you, Tim, how you've approached the preparation for taking on the next release.</b>
JOSH BERKUS: The great thing with the release team is that we have this formal mentorship path. And it's fast, right? That's the advantage of releasing quarterly, right? Is that within six months, you can go from joining the team as a shadow to being the release lead if you have the time. And you know, by the time you work your way up to release time, you better have support from your boss about this, because you're going to end up spending a majority of your work time towards the end of the release on release management.
So the answer is to sign up to look when we're getting into the latter half of release cycle, to sign up as a shadow. Or at the beginning of a release cycle, to sign up as a shadow. Some positions actually can reasonably use more than one shadow. There's some position that just require a whole ton of legwork like release notes. And as a result, could actually use more than one shadow meaningfully. So there's probably still places where people could sign up for 1.12. Is that true, Tim?
TIM PEPPER: Definitely. I think— gosh, right now we have 34 volunteers on the release team, which is—
<b>ADAM GLICK: Wow.</b>
JOSH BERKUS: OK. OK. Maybe not then.
[LAUGH]
TIM PEPPER: It's potentially becoming a lot of cats to herd. But I think even outside of that formal volunteering to be a named shadow, anybody is welcome to show up to the release team meetings, follow the release team activities on [Slack](http://slack.k8s.io), start understanding how the process works. And really, this is the case all across open source. It doesn't even have to be the release team. If you're passionate about networking, start following what SIG Network is doing. It's the same sort of path, I think, into any area on the project.
Each of the SIGs [has] a channel. So it would be #SIG-whatever the name is. [In our] case, #SIG-Release.
I'd also maybe give a plug for a [talk I did at KubeCon](https://youtu.be/goAph8A20gQ) in Copenhagen this spring, talking about how the release team specifically can be a path for new contributors coming in. And had some ideas and suggestions there for newcomers.
<b>CRAIG BOX: There's three questions in the Google SRE postmortem template that I really like. And I'm sure you will have gone through these in the retrospective process as you released 1.11, so I'd like to ask them now one at a time.
First of all, what went well?</b>
JOSH BERKUS: Two things, I think, really improved things, both for contributors and for the release team. Thing number one was putting a strong emphasis on getting the test grid green well ahead of code freeze.
TIM PEPPER: Definitely.
JOSH BERKUS: Now partly that went well because we had a spectacular CI lead, [Aish Sundar](https://github.com/aishsundar), who's now in training to become the release lead.
TIM PEPPER: And I'd count that partly as one of the "Where were you lucky?" areas. We happened upon a wonderful person who just popped up and volunteered.
JOSH BERKUS: Yes. And then but part of that was also that we said, hey. You know, we're not going to do what we've done before which is not really care about these tests until code slush. We're going to care about these tests now.
And importantly— this is really important to the Kubernetes community— when we went to the various SIGs, the SIG Cluster Lifecycle and SIG Scalability and SIG Node and the other ones who were having test failures, and we said this to them. They didn't say, get lost. I'm busy. They said, what's failing?
<b>CRAIG BOX: Great.</b>
JOSH BERKUS: And so that made a big difference. And the second thing that was pretty much allowed by the first thing was to shorten the code freeze period. Because the code freeze period is frustrating for developers, because if they don't happen to be working on a 1.11 feature, even if they worked on one before, and they delivered it early in the cycle, and it's completely done, they're kind of paralyzed, and they can't do anything during code freeze. And so it's very frustrating for them, and we want to make that period as short as possible. And we did that this time, and I think it helped everybody.
<b>CRAIG BOX: What went poorly?</b>
JOSH BERKUS: We had a lot of problems with flaky tests. We have a lot of old tests that are not all that well maintained, and they're testing very complicated things like upgrading a cluster that has 40 nodes. And as a result, these tests have high failure rates that have very little to do with any change in the code.
And so one of the things that happened, and the reason we had a one-day delay in the release is, you know, we're a week out from release, and just by random luck of the draw, a bunch of these tests all at once got a run of failures. And it turned out that that run of failures didn't actually mean anything, having anything to do with Kubernetes. But there was no way for us to tell that without a lot of research, and we were not going to have enough time for that research without delaying the release.
So one of the things we're looking to address in the 1.12 cycle is to actually move some of those flaky tests out. Either fix them or move them out of the release blocking category.
TIM PEPPER: In a way, I think this also highlights one of the things that Josh mentioned that went well, the emphasis early on getting the test results green, it allows us to see the extent to which these flakes are such a problem. And then the unlucky occurrence of them all happening to overlap on a failure, again, highlights that these flakes have been called out in the community for quite some time. I mean, at least a year. I know one contributor who was really concerned about them.
But they became a second order concern versus just getting things done in the short term, getting features and proving that the features worked, and kind of accepting in a risk management way on the release team that, yes, those are flakes. We don't have time to do something about them, and it's OK. But because of the emphasis on keeping the test always green now, we have the luxury maybe to focus on improving these flakes, and really get to where we have truly high quality CI signal, and can really believe in the results that we have on an ongoing basis.
JOSH BERKUS: And having solved some of the more basic problems, we're now seeing some of the other problems like coordination between related features. Like we right now have a feature where— and this is one of the sort of backwards compatibility release notes— where the feature went into beta, and is on by default.
And the second feature that was supposed to provide access control for the first feature did not go in as beta, and is not on by default. And the team for the first feature did not realize the second feature was being held up until two days before the release. So it's going to result in us actually patching something in 11.1.
And so like, we put that into something that didn't go well. But on the other hand, as Tim points out, a few release cycles ago, we wouldn't even have identified that as a problem, because we were still struggling with just individual features having a clear idea well ahead of the release of what was going in and what wasn't going in.
TIM PEPPER: I think something like this also is a case that maybe advocates for the use of feature branches. If these things are related, we might have seen it and done more pre-testing within that branch and pre-integration, and decide maybe to merge a couple of what initially had been disjoint features into a single feature branch, and really convince ourselves that together they were good. And cross all the Ts, dot all the Is on them, and not have something that's gated on an alpha feature that's possibly falling away.
<b>CRAIG BOX: And then the final question, which I think you've both touched on a little. Where did you get lucky, or unlucky perhaps?</b>
JOSH BERKUS: I would say number one where I got lucky is truly having a fantastic team. I mean, we just had a lot of terrific people who were very good and very energetic and very enthusiastic about taking on their release responsibilities including Aish and Tim and Ben and Nick and Misty who took over Docs four weeks into the release. And then went crazy with it and said, well, I'm new here, so I'm going to actually change a bunch of things we've been doing that didn't work in the first place. So that was number one. I mean, that really made honestly all the difference.
And then the second thing, like I said, is that we didn't have sort of major, unexpected monkey wrenches thrown at us. So in the 1.10 cycle, we actually had two of those, which is why I still count Jace as heroic for pulling off a release that was only a week late.
You know, number one was having the scalability tests start failing for unrelated reasons for a long period, which then masked the fact that they were actually failing for real reasons when we actually got them working again. And as a result, ending up debugging a major and super complicated scalability issue within days of what was supposed to be the original release date. So that was monkey wrench number one for the 1.10 cycle.
Monkey wrench number two for the 1.10 cycle was we got a security hole that needed to be patched. And so again, a week out from what was supposed to be the original release date, we were releasing a security update, and that security update required patching the release branch. And it turns out that that patch against the release branch broke a bunch of incoming features. And we didn't get anything of that magnitude in the 1.11 release, and I'm thankful for that.
TIM PEPPER: Also, I would maybe argue in a way that a portion of that wasn't just luck. The extent to which this community has a good team, not just the release team but beyond, some of this goes to active work that folks all across the project, but especially in the contributor experience SIG are doing to cultivate a positive and inclusive culture here. And you really see that. When problems crop up, you're seeing people jump on and really try to constructively tackle them. And it's really fun to be a part of that.
<hr/>
<i>Thanks to Josh Berkus and Tim Pepper for talking to the Kubernetes Podcast from Google.
[Josh Berkus](https://github.com/jberkus) hangs out in #sig-release on the [Kubernetes Slack](http://slack.k8s.io/). He maintains a newsletter called "[Last Week in Kubernetes Development](http://lwkd.info/)", with Noah Kantrowitz. You can read him on Twitter at [@fuzzychef](https://twitter.com/fuzzychef), but he does warn you that there's a lot of politics there as well.
[Tim Pepper](https://github.com/tpepper) is also on Slack - he's always open to folks reaching out with a question, looking for help or advice. On Twitter you'll find him at [@pythomit](https://twitter.com/pythomit), which is "Timothy P" backwards. Tim is an avid soccer fan and season ticket holder for the [Portland Timbers](https://portlandtimbers.com/) and the [Portland Thorns](https://portlandthorns.com/), so you'll get all sorts of opinions on soccer in addition to technology!
You can find the [Kubernetes Podcast from Google](http://www.kubernetespodcast.com/) at [@kubernetespod](https://twitter.com/KubernetesPod) on Twitter, and you can [subscribe](https://kubernetespodcast.com/subscribe/) so you never miss an episode.</i>

View File

@ -0,0 +1,310 @@
---
layout: blog
title: "11 Ways (Not) to Get Hacked"
date: 2018-07-18
---
**Author**: Andrew Martin (ControlPlane)
Kubernetes security has come a long way since the project's inception, but still contains some gotchas. Starting with the control plane, building up through workload and network security, and finishing with a projection into the future of security, here is a list of handy tips to help harden your clusters and increase their resilience if compromised.
- [Part One: The Control Plane](#part-one-the-control-plane)
* [1. TLS Everywhere](#1-tls-everywhere)
* [2. Enable RBAC with Least Privilege, Disable ABAC, and Monitor Logs](#2-enable-rbac-with-least-privilege-disable-abac-and-monitor-logs)
* [3. Use Third Party Auth for API Server](#3-use-third-party-auth-for-api-server)
* [4. Separate and Firewall your etcd Cluster](#4-separate-and-firewall-your-etcd-cluster)
* [5. Rotate Encryption Keys](#5-rotate-encryption-keys)
- [Part Two: Workloads](#part-two-workloads)
* [6. Use Linux Security Features and PodSecurityPolicies](#6-use-linux-security-features-and-podsecuritypolicies)
* [7. Statically Analyse YAML](#7-statically-analyse-yaml)
* [8. Run Containers as a Non-Root User](#8-run-containers-as-a-non-root-user)
* [9. Use Network Policies](#9-use-network-policies)
* [10. Scan Images and Run IDS](#10-scan-images-and-run-ids)
- [Part Three: The Future](#part-three-the-future)
* [11. Run a Service Mesh](#11-run-a-service-mesh)
- [Conclusion](#conclusion)
# Part One: The Control Plane
The control plane is Kubernetes' brain. It has an overall view of every container and pod running on the cluster, can schedule new pods (which can include containers with root access to their parent node), and can read all the secrets stored in the cluster. This valuable cargo needs protecting from accidental leakage and malicious intent: when it's accessed, when it's at rest, and when it's being transported across the network.
## 1. TLS Everywhere
**TLS should be enabled for every component that supports it to prevent traffic sniffing, verify the identity of the server, and (for mutual TLS) verify the identity of the client.**
> Note that some components and installation methods may enable local ports over HTTP and administrators should familiarize themselves with the settings of each component to identify potentially unsecured traffic.
[Source](https://kubernetes.io/docs/tasks/administer-cluster/securing-a-cluster/#use-transport-level-security-tls-for-all-api-traffic)
This network diagram by [Lucas Käldström](https://docs.google.com/presentation/d/1Gp-2blk5WExI_QR59EUZdwfO2BWLJqa626mK2ej-huo/edit#slide=id.g1e639c415b_0_56) demonstrates some of the places TLS should ideally be applied: between every component on the master, and between the Kubelet and API server. [Kelsey Hightower](https://twitter.com/kelseyhightower/)'s canonical [Kubernetes The Hard Way](https://github.com/kelseyhightower/kubernetes-the-hard-way/blob/1.9.0/docs/04-certificate-authority.md) provides detailed manual instructions, as does [etcd's security model](https://coreos.com/etcd/docs/latest/op-guide/security.html) documentation.
<img src="/images/blog/2018-06-05-11-ways-not-to-get-hacked/kubernetes-control-plane.png" width="800" />
Autoscaling Kubernetes nodes was historically difficult, as each node requires a TLS key to connect to the master, and baking secrets into base images is not good practice. [Kubelet TLS bootstrapping](https://medium.com/@toddrosner/kubernetes-tls-bootstrapping-cf203776abc7) provides the ability for a new kubelet to create a certificate signing request so that certificates are generated at boot time.
<img src="/images/blog/2018-06-05-11-ways-not-to-get-hacked/node-tls-bootstrap.png" width="800" />
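One common way to wire this up (a sketch only, and not the only option: this is the bootstrap token Secret format used by kubeadm-style clusters, and every token value below is a placeholder) is to create a short-lived bootstrap token that authenticates a new node's first certificate signing request:

```yaml
apiVersion: v1
kind: Secret
metadata:
  # Bootstrap token Secrets are named bootstrap-token-<token-id>
  name: bootstrap-token-07401b
  namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
  token-id: "07401b"                      # placeholder value
  token-secret: "f395accd246ae52d"        # placeholder value
  usage-bootstrap-authentication: "true"  # lets the token authenticate new kubelets
  usage-bootstrap-signing: "true"
  expiration: "2018-08-31T00:00:00Z"      # keep bootstrap tokens short-lived
```

The kubelet presents the token, submits a CSR, and receives a signed certificate at boot time, instead of long-lived keys being shipped in the node image.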
## 2. Enable RBAC with Least Privilege, Disable ABAC, and Monitor Logs
**Role-based access control provides fine-grained policy management for user access to resources, such as access to namespaces.**
<img src="/images/blog/2018-06-05-11-ways-not-to-get-hacked/rbac2.png" width="800" />
Kubernetes' ABAC (Attribute Based Access Control) has been [superseded by RBAC](http://blog.kubernetes.io/2017/04/rbac-support-in-kubernetes.html) since release 1.6, and should not be enabled on the API server. Use RBAC instead:
```
--authorization-mode=RBAC
```
Or use this flag to disable it in GKE:
```
--no-enable-legacy-authorization
```
There are plenty of [good examples](https://docs.bitnami.com/kubernetes/how-to/configure-rbac-in-your-kubernetes-cluster/) of [RBAC policies for cluster services](https://github.com/uruddarraju/kubernetes-rbac-policies), as well as [the docs](https://kubernetes.io/docs/admin/authorization/rbac/#role-binding-examples). And it doesn't have to stop there - fine-grained RBAC policies can be extracted from audit logs with [audit2rbac](https://github.com/liggitt/audit2rbac).
Incorrect or excessively permissive RBAC policies are a security threat in case of a compromised pod. Maintaining least privilege, and continuously reviewing and improving RBAC rules, should be considered part of the "technical debt hygiene" that teams build into their development lifecycle.
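As a minimal sketch of what least privilege looks like in practice, the Role and RoleBinding below grant read-only access to pods in a single namespace (the namespace, role, and user names are illustrative):

```yaml
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: app-team        # illustrative namespace
  name: pod-reader
rules:
- apiGroups: [""]            # "" is the core API group
  resources: ["pods"]
  verbs: ["get", "list", "watch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: read-pods
  namespace: app-team
subjects:
- kind: User
  name: jane                 # illustrative user
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: pod-reader
  apiGroup: rbac.authorization.k8s.io
```

Where possible, prefer namespaced Roles over ClusterRoles, and bind them to groups rather than individual users if your identity provider allows it.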
[Audit Logging](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/) (beta in 1.10) provides customisable API audit logging at the payload level (e.g. request and response bodies) as well as at the metadata level. Log levels can be tuned to your organisation's security policy - [GKE](https://cloud.google.com/kubernetes-engine/docs/how-to/audit-logging#audit_policy) provides sane defaults to get you started.
For read requests such as get, list, and watch, only the request object is saved in the audit logs; the response object is not. For requests involving sensitive data such as Secret and ConfigMap, only the metadata is exported. For all other requests, both request and response objects are saved in audit logs.
Don't forget: keeping these logs inside the cluster is a security threat in case of compromise. These, like all other security-sensitive logs, should be transported outside the cluster to prevent tampering in the event of a breach.
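A minimal audit policy along the lines described above might look like the sketch below (the rules and levels are assumptions to be tuned to your own requirements):

```yaml
apiVersion: audit.k8s.io/v1beta1
kind: Policy
rules:
# Sensitive payloads: record only metadata, never the object contents.
- level: Metadata
  resources:
  - group: ""
    resources: ["secrets", "configmaps"]
# Read-only verbs: record the request but not the response.
- level: Request
  verbs: ["get", "list", "watch"]
# Everything else: record both request and response objects.
- level: RequestResponse
```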
## 3. Use Third Party Auth for API Server
**Centralising authentication and authorisation across an organisation (aka Single Sign On) simplifies onboarding and offboarding and ensures consistent permissions for users**.
Integrating Kubernetes with third party auth providers (like Google or GitHub) uses the remote platform's identity guarantees (backed up by things like 2FA) and prevents administrators from having to reconfigure the Kubernetes API server to add or remove users.
[Dex](https://github.com/coreos/dex) is an OpenID Connect Identity (OIDC) and OAuth 2.0 provider with pluggable connectors. Pusher takes this a stage further with [some custom tooling](https://thenewstack.io/kubernetes-single-sign-one-less-identity/), and there are some [other](https://github.com/negz/kuberos) [helpers](https://github.com/micahhausler/k8s-oidc-helper) available with slightly different use cases.
## 4. Separate and Firewall your etcd Cluster
**etcd stores information on state and secrets, and is a critical Kubernetes component - it should be protected differently from the rest of your cluster.**
Write access to the API server's etcd is equivalent to gaining root on the entire cluster, and even read access can be used to escalate privileges fairly easily.
The Kubernetes scheduler will search etcd for pod definitions that do not have a node. It then sends the pods it finds to an available kubelet for scheduling. Validation for submitted pods is performed by the API server before it writes them to etcd, so malicious users writing directly to etcd can bypass many security mechanisms - e.g. PodSecurityPolicies.
etcd should be configured with [peer and client TLS certificates](https://github.com/coreos/etcd/blob/master/Documentation/op-guide/security.md), and deployed on dedicated nodes. To mitigate against private keys being stolen and used from worker nodes, the etcd cluster can also be firewalled so that only the API server can reach it.
## 5. Rotate Encryption Keys
**A security best practice is to regularly rotate encryption keys and certificates, in order to limit the "blast radius" of a key compromise.**
Kubernetes will [rotate some certificates automatically](https://kubernetes.io/docs/tasks/tls/certificate-rotation/) (notably, the kubelet client and server certs) by creating new CSRs as its existing credentials expire.
However, the [symmetric encryption keys](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/) that the API server uses to encrypt etcd values are not automatically rotated - they must be [rotated manually](https://www.twistlock.com/2017/08/02/kubernetes-secrets-encryption/). Master access is required to do this, so managed services (such as GKE or AKS) handle this on the operator's behalf.
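Rotation is driven by the encryption provider configuration passed to the API server: the first key in the list encrypts new writes, older keys remain so that existing data can still be read, and old keys are removed once everything has been re-encrypted. A sketch, assuming the `EncryptionConfig` format of this release series (the key material below is a placeholder, not a real secret):

```yaml
kind: EncryptionConfig
apiVersion: v1
resources:
- resources:
  - secrets
  providers:
  - aescbc:
      keys:
      - name: key2                    # new key: used to encrypt new writes
        secret: <BASE64_32_BYTE_KEY>  # placeholder
      - name: key1                    # old key: kept until data is re-encrypted
        secret: <BASE64_32_BYTE_KEY>  # placeholder
  - identity: {}                      # fallback for reading data that was never encrypted
```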
# Part Two: Workloads
With minimum viable security on the control plane the cluster is able to operate securely. But, like a ship carrying potentially dangerous cargo, the ship's containers must be protected to contain that cargo in the event of an unexpected accident or breach. The same is true for Kubernetes workloads (pods, deployments, jobs, sets, etc.) - they may be trusted at deployment time, but if they're internet-facing there's always a risk of later exploitation. Running workloads with minimal privileges and hardening their runtime configuration can help to mitigate this risk.
## 6. Use Linux Security Features and PodSecurityPolicies
**The Linux kernel has a number of overlapping security extensions (capabilities, SELinux, AppArmor, seccomp-bpf) that can be configured to provide least privilege to applications**.
Tools like [bane](https://github.com/genuinetools/bane) can help to generate AppArmor profiles, and [docker-slim](https://github.com/docker-slim/docker-slim#quick-seccomp-example) for seccomp profiles, but beware - a comprehensive test suite is required to exercise all code paths in your application when verifying the side effects of applying these policies.
[PodSecurityPolicies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) can be used to mandate the use of security extensions and other Kubernetes security directives. They provide a minimum contract that a pod must fulfil to be submitted to the API server - including security profiles, the privileged flag, and the sharing of host network, process, or IPC namespaces.
These directives are important, as they help to prevent containerised processes from escaping their isolation boundaries, and [Tim Allclair](https://twitter.com/tallclair)'s [example PodSecurityPolicy](https://gist.github.com/tallclair/11981031b6bfa829bb1fb9dcb7e026b0) is a comprehensive resource that you can customise to your use case.
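As a compact sketch of the idea (far less complete than the example linked above, and with an illustrative policy name), a restrictive policy might look like this:

```yaml
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: restricted               # illustrative name
spec:
  privileged: false              # no privileged containers
  allowPrivilegeEscalation: false
  hostNetwork: false             # no sharing of host namespaces
  hostPID: false
  hostIPC: false
  requiredDropCapabilities: ["ALL"]
  runAsUser:
    rule: MustRunAsNonRoot
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: MustRunAs
    ranges: [{min: 1, max: 65535}]
  fsGroup:
    rule: MustRunAs
    ranges: [{min: 1, max: 65535}]
  volumes: ["configMap", "secret", "emptyDir", "persistentVolumeClaim"]
```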
## 7. Statically Analyse YAML
**Where PodSecurityPolicies deny non-conforming workloads at the API server, static analysis can also be used in the development workflow to model an organisation's compliance requirements or risk appetite.**
Sensitive information should not be stored in pod-type YAML resources (deployments, pods, sets, etc.), and sensitive configmaps and secrets should be encrypted with tools such as [vault](https://github.com/coreos/vault-operator) (with CoreOS's operator), [git-crypt](https://github.com/AGWA/git-crypt), [sealed secrets](https://github.com/bitnami-labs/sealed-secrets), or [cloud provider KMS](https://cloud.google.com/kms/).
Static analysis of YAML configuration can be used to establish a baseline for runtime security. [kubesec](https://kubesec.io/) generates risk scores for resources:
```json
{
  "score": -30,
  "scoring": {
    "critical": [{
      "selector": "containers[] .securityContext .privileged == true",
      "reason": "Privileged containers can allow almost completely unrestricted host access"
    }],
    "advise": [{
      "selector": "containers[] .securityContext .runAsNonRoot == true",
      "reason": "Force the running image to run as a non-root user to ensure least privilege"
    }, {
      "selector": "containers[] .securityContext .capabilities .drop",
      "reason": "Reducing kernel capabilities available to a container limits its attack surface",
      "href": "https://kubernetes.io/docs/tasks/configure-pod-container/security-context/"
    }]
  }
}
```
And [kubetest](https://github.com/garethr/kubetest) is a unit test framework for Kubernetes configurations:
```python
#// vim: set ft=python:
def test_for_team_label():
    if spec["kind"] == "Deployment":
        labels = spec["spec"]["template"]["metadata"]["labels"]
        assert_contains(labels, "team", "should indicate which team owns the deployment")

test_for_team_label()
```
These tools "[shift left](https://en.wikipedia.org/wiki/Shift_left_testing)" (moving checks and verification earlier in the development cycle). Security testing in the development phase gives users fast feedback about code and configuration that may be rejected by a later manual or automated check, and can reduce the friction of introducing more secure practices.
## 8. Run Containers as a Non-Root User
**Containers that run as root frequently have far more permissions than their workload requires, which, in case of compromise, could help an attacker further their attack.**
Containers still rely on the traditional Unix security model (called [discretionary access control](https://www.linux.com/learn/overview-linux-kernel-security-features) or DAC) - everything is a file, and permissions are granted to users and groups.
User namespaces are not enabled in Kubernetes. This means that a container's user ID table maps to the host's user table, and running a process as the root user inside a container runs it as root on the host. Although we have layered security mechanisms to prevent container breakouts, running as root inside the container is still not recommended.
Many container images use the root user to run PID 1 - if that process is compromised, the attacker has root in the container, and any mis-configurations become much easier to exploit.
[Bitnami has done a lot of work](https://engineering.bitnami.com/articles/running-non-root-containers-on-openshift.html) moving their container images to [non-root users](https://github.com/bitnami/bitnami-docker-nginx/blob/b068b8bd01eb2f5a7314c09466724f86aa4548f9/1.12/Dockerfile#L28) (especially as OpenShift requires this by default), which may ease a migration to non-root container images.
This PodSecurityPolicy snippet prevents running processes as root inside a container, and also escalation to root:
```yaml
# Required to prevent escalations to root.
allowPrivilegeEscalation: false
runAsUser:
  # Require the container to run without root privileges.
  rule: 'MustRunAsNonRoot'
```
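The same intent can also be expressed per workload in the pod spec itself; a sketch (the pod name and image are illustrative):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: non-root-example           # illustrative name
spec:
  securityContext:
    runAsNonRoot: true             # refuse to start if the image resolves to UID 0
    runAsUser: 1000
  containers:
  - name: app
    image: example.com/myapp:1.0   # illustrative image
    securityContext:
      allowPrivilegeEscalation: false
      capabilities:
        drop: ["ALL"]
```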
Non-root containers cannot bind to the privileged ports under 1024 (this is gated by the CAP\_NET\_BIND\_SERVICE kernel capability), but services can be used to disguise this fact. In this example the fictional MyApp application is bound to port 8443 in its container, but the service exposes it on 443 by proxying the request to the targetPort:
```yaml
kind: Service
apiVersion: v1
metadata:
  name: my-service
spec:
  selector:
    app: MyApp
  ports:
  - protocol: TCP
    port: 443
    targetPort: 8443
```
This need to run workloads as a non-root user is not going to change until user namespaces are usable, or until the ongoing work to [run containers without root](https://rootlesscontaine.rs/) lands in container runtimes.
## 9. Use Network Policies
**By default, Kubernetes networking allows all pod-to-pod traffic; this can be restricted using a [Network Policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/).**
<img src="/images/blog/2018-06-05-11-ways-not-to-get-hacked/kubernetes-networking.png" width="800" />
Traditional services are restricted with firewalls, which use static IP and port ranges for each service. As these IPs very rarely change they have historically been used as a form of identity. Containers rarely have static IPs - they are built to fail fast, be rescheduled quickly, and use service discovery instead of static IP addresses. These properties mean that firewalls become much more difficult to configure and review.
As Kubernetes stores all its system state in etcd it can configure dynamic firewalling - if it is supported by the CNI networking plugin. Calico, Cilium, kube-router, Romana, and Weave Net all support network policy.
It should be noted that these policies fail closed: the absence of a podSelector here defaults to a wildcard, so the policy below selects every pod in the namespace and, with no ingress rules specified, denies all inbound traffic to them:
```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny
spec:
  podSelector:
```
Here's an example NetworkPolicy that denies all egress except UDP 53 (DNS), which also prevents inbound connections to your application. [NetworkPolicies are stateful](https://www.weave.works/blog/securing-microservices-kubernetes/), so the replies to outbound requests still reach the application.
```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: myapp-deny-external-egress
spec:
  podSelector:
    matchLabels:
      app: myapp
  policyTypes:
  - Egress
  egress:
  - ports:
    - port: 53
      protocol: UDP
  - to:
    - namespaceSelector: {}
```
Kubernetes network policies cannot be applied to DNS names. This is because DNS can resolve round-robin to many IPs, or dynamically based on the calling IP, so network policies can be applied only to a fixed IP or a podSelector (for dynamic Kubernetes IPs).
Best practice is to start by denying all traffic for a namespace and incrementally add routes to allow an application to pass its acceptance test suite. This can become complex, so ControlPlane hacked together [netassert](https://github.com/controlplaneio/netassert) - network security testing for DevSecOps workflows with highly parallelised nmap:
```yaml
k8s: # used for Kubernetes pods
  deployment: # only deployments currently supported
    test-frontend: # pod name, defaults to `default` namespace
      test-microservice: 80 # `test-microservice` is the DNS name of the target service
      test-database: -80 # `test-frontend` should not be able to access test-databases port 80
      169.254.169.254: -80, -443 # AWS metadata API
      metadata.google.internal: -80, -443 # GCP metadata API
    new-namespace:test-microservice: # `new-namespace` is the namespace name
      test-database.new-namespace: 80 # longer DNS names can be used for other namespaces
      test-frontend.default: 80
      169.254.169.254: -80, -443 # AWS metadata API
      metadata.google.internal: -80, -443 # GCP metadata API
```
Cloud provider metadata APIs are a constant source of escalation (as the recent [Shopify](https://hackerone.com/reports/341876) [bug bounty](https://hackerone.com/reports/341876) demonstrates), so specific tests to confirm that the APIs are blocked on the container network helps to guard against accidental misconfiguration.
## 10. Scan Images and Run IDS
**Web servers present an attack surface to the network they're attached to: scanning an image's installed files ensures the absence of known vulnerabilities that an attacker could exploit to gain remote access to the container. An IDS (Intrusion Detection System) detects intruders if they get in anyway.**
Kubernetes admits pods into the cluster through a series of [admission controller](https://kubernetes.io/docs/admin/admission-controllers/) gates, which are applied to pods and other resources like deployments. These gates can validate each pod for admission or change its contents, and they now support backend webhooks.
<img src="/images/blog/2018-06-05-11-ways-not-to-get-hacked/admission-controllers.png" width="800" />
These webhooks can be used by container image scanning tools to validate images before they are deployed to the cluster. Images that have failed checks can be refused admission.
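For illustration, a scanner might register itself with a configuration along these lines (a sketch only: the webhook name, service, and path are assumptions, not any particular product's API):

```yaml
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
  name: image-scan-policy
webhooks:
- name: image-scan.example.com          # illustrative name
  rules:
  - apiGroups: [""]
    apiVersions: ["v1"]
    operations: ["CREATE"]
    resources: ["pods"]
  clientConfig:
    service:
      namespace: image-scanner          # illustrative service
      name: image-scanner
      path: /validate
    caBundle: <BASE64_CA_CERT>          # placeholder
  failurePolicy: Fail                   # reject pods when the scanner is unreachable
```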
Scanning container images for known vulnerabilities can reduce the window of time that an attacker can exploit a disclosed CVE. Free tools such as CoreOS's [Clair](https://github.com/coreos/clair) and Aqua's [Micro Scanner](https://github.com/aquasecurity/microscanner) should be used in a deployment pipeline to prevent the deployment of images with critical, exploitable vulnerabilities.
Tools such as [Grafeas](https://grafeas.io/) can store image metadata for constant compliance and vulnerability checks against a container&#39;s unique signature (a [content addressable](https://en.wikipedia.org/wiki/Content-addressable_storage) hash). This means that scanning a container image with that hash is the same as scanning the images deployed in production, and can be done continually without requiring access to production environments.
Unknown Zero Day vulnerabilities will always exist, and so intrusion detection tools such as [Twistlock](https://www.twistlock.com/), [Aqua](https://www.aquasec.com/), and [Sysdig Secure](https://sysdig.com/product/secure/) should be deployed in Kubernetes. IDS detects unusual behaviours in a container and pauses or kills it - [Sysdig's Falco](https://github.com/draios/falco) is an Open Source rules engine, and an entrypoint to this ecosystem.
# Part Three: The Future
The next stage of security's "cloud native evolution" looks to be the service mesh, although adoption may take time - migration involves shifting complexity from applications to the mesh infrastructure, and organisations will be keen to understand best-practice.
<img src="/images/blog/2018-06-05-11-ways-not-to-get-hacked/service-mesh-@sebiwicb.png" width="800" />
## 11. Run a Service Mesh
**A service mesh is a web of encrypted persistent connections, made between high performance "sidecar" proxy servers like Envoy and Linkerd. It adds traffic management, monitoring, and policy - all without microservice changes.**
Offloading microservice security and networking code to a shared, battle tested set of libraries was already possible with [Linkerd](https://linkerd.io/), and the introduction of [Istio](https://istio.io/) by Google, IBM, and Lyft, has added an alternative in this space. With the addition of [SPIFFE](https://spiffe.io) for per-pod cryptographic identity and a plethora of [other features](https://istio.io/docs/concepts/what-is-istio/overview.html), Istio could simplify the deployment of the next generation of network security.
In "Zero Trust" networks there may be no need for traditional firewalling or Kubernetes network policy, as every interaction occurs over mTLS (mutual TLS), ensuring that both parties are not only communicating securely, but that the identity of both services is known.
This shift from traditional networking to Cloud Native security principles is not one we expect to be easy for those with a traditional security mindset, and the [Zero Trust Networking book](https://amzn.to/2Gg6Pav) from SPIFFE's [Evan Gilman](https://twitter.com/evan2645) is a highly recommended introduction to this brave new world.
Istio [0.8 LTS](https://istio.io/about/notes/0.8/) is out, and the project is rapidly approaching a 1.0 release. Its stability versioning is the same as the Kubernetes model: a stable core, with individual APIs identifying themselves under their own alpha/beta stability namespace. Expect to see an uptick in Istio adoption over the coming months.
# Conclusion
Cloud Native applications have a more fine-grained set of lightweight security primitives to lock down workloads and infrastructure. The power and flexibility of these tools is both a blessing and a curse - with insufficient automation it has become easier to expose insecure workloads which permit breakouts from the container or its isolation model.
There are more defensive tools available than ever, but caution must be taken to reduce attack surfaces and the potential for misconfiguration.
However, if security slows down an organisation's pace of feature delivery it will never be a first-class citizen. Applying Continuous Delivery principles to the software supply chain allows an organisation to achieve compliance, continuous audit, and enforced governance without impacting the business's bottom line.
Iterating quickly on security is easiest when supported by a comprehensive test suite. This is achieved with Continuous Security - an alternative to point-in-time penetration tests, with constant pipeline validation ensuring an organisation's attack surface is known, and the risk constantly understood and managed.
This is ControlPlane's modus operandi: if we can help kickstart a Continuous Security discipline, deliver Kubernetes security and operations training, or co-implement a secure cloud native evolution for you, please [get in touch](https://control-plane.io).
---
Andrew Martin is a co-founder at [@controlplaneio](https://twitter.com/controlplaneio) and tweets about cloud native security at [@sublimino](https://twitter.com/sublimino)

View File

@ -0,0 +1,28 @@
---
layout: blog
title: "Kubernetes Wins the 2018 OSCON Most Impact Award"
date: 2018-07-19
slug: kubernetes-wins-2018-oscon-most-impact-award
---
**Authors**: Brian Grant (Principal Engineer, Google) and Tim Hockin (Principal Engineer, Google)
We are humbled to be recognized by the community with this award.
We had high hopes when we created Kubernetes. We wanted to change the way cloud applications were deployed and managed. Whether we'd succeed or not was very uncertain. And look how far we've come in such a short time.
The core technology behind Kubernetes was informed by [lessons learned from Google's internal infrastructure](https://ai.google/research/pubs/pub44843), but nobody can deny the enormous role of the Kubernetes community in the success of the project. [The community, of which Google is a part](https://k8s.devstats.cncf.io/d/8/company-statistics-by-repository-group?orgId=1), now drives every aspect of the project: the design, development, testing, documentation, releases, and more. That is what makes Kubernetes fly.
While we actively sought partnerships and community engagement, none of us anticipated just how important the open-source community would be, how fast it would grow, or how large it would become. Honestly, we really didn't have much of a plan.
We looked to other open-source projects for inspiration and advice: Docker (now Moby), other open-source projects at Google such as Angular and Go, the Apache Software Foundation, OpenStack, Node.js, Linux, and others. But it became clear that there was no clear-cut recipe we could follow. So we winged it.
Rather than rehashing history, we thought we'd share two high-level lessons we learned along the way.
First, in order to succeed, community health and growth needs to be treated as a top priority. It's hard, and it is time-consuming. It requires attention to both internal project dynamics and outreach, as well as constant vigilance to build and sustain relationships, be inclusive, maintain open communication, and remain responsive to contributors and users. Growing existing contributors and onboarding new ones is critical to sustaining project growth, but that takes time and energy that might otherwise be spent on development. These things have to become core values in order for contributors to keep them going.
Second, start simple with how the project is organized and operated, but be ready to adopt more scalable approaches as it grows. Over time, Kubernetes has transitioned from what was effectively a single team and git repository to many subgroups (Special Interest Groups and Working Groups), sub-projects, and repositories. From manual processes to fully automated ones. From informal policies to formal governance.
We certainly didn't get everything right or always adapt quickly enough, and we constantly struggle with scale. [At this point](https://k8s.devstats.cncf.io/d/24/overall-project-statistics?orgId=1), Kubernetes has more than 20,000 contributors and is approaching one million comments on its issues and pull requests, [making it one of the fastest moving projects in the history of open source](https://www.cncf.io/blog/2017/02/27/measuring-popularity-kubernetes-using-bigquery/).
Thank you to all our contributors and to all the users who've stuck with us on the sometimes bumpy journey. This project would not be what it is today without the community.

View File

@ -0,0 +1,62 @@
---
layout: blog
title: "The History of Kubernetes & the Community Behind It"
date: 2018-07-20
---
**Author**: Brendan Burns (Distinguished Engineer, Microsoft)
![oscon award](/images/blog/2018-07-20-history-kubernetes-community.png)
It is remarkable to me to return to Portland and OSCON to stand on stage with members of the Kubernetes community and accept this award for Most Impactful Open Source Project. It was scarcely three years ago that, on this very same stage, we declared Kubernetes 1.0 and the project was added to the newly formed Cloud Native Computing Foundation.
To think about how far we have come in that short period of time and to see the ways in which this project has shaped the cloud computing landscape is nothing short of amazing. The success is a testament to the power and contributions of this amazing open source community. And the daily passion and quality contributions of our endlessly engaged, world-wide community is nothing short of humbling.
<blockquote class="twitter-tweet" data-lang="en"><p lang="en" dir="ltr">Congratulations <a href="https://twitter.com/kubernetesio?ref_src=twsrc%5Etfw">@kubernetesio</a> for winning the &quot;most impact&quot; award at <a href="https://twitter.com/hashtag/OSCON?src=hash&amp;ref_src=twsrc%5Etfw">#OSCON</a> I&#39;m so proud to be a part of this amazing community! <a href="https://twitter.com/CloudNativeFdn?ref_src=twsrc%5Etfw">@CloudNativeFdn</a> <a href="https://t.co/5sRUYyefAK">pic.twitter.com/5sRUYyefAK</a></p>&mdash; Jaice Singer DuMars (@jaydumars) <a href="https://twitter.com/jaydumars/status/1019993233487613952?ref_src=twsrc%5Etfw">July 19, 2018</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet" data-lang="en"><p lang="en" dir="ltr">👏 congrats <a href="https://twitter.com/kubernetesio?ref_src=twsrc%5Etfw">@kubernetesio</a> community on winning the <a href="https://twitter.com/hashtag/oscon?src=hash&amp;ref_src=twsrc%5Etfw">#oscon</a> Most Impact Award, we are proud of you! <a href="https://t.co/5ezDphi6J6">pic.twitter.com/5ezDphi6J6</a></p>&mdash; CNCF (@CloudNativeFdn) <a href="https://twitter.com/CloudNativeFdn/status/1019996928296095744?ref_src=twsrc%5Etfw">July 19, 2018</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
At a meetup in Portland this week, I had a chance to tell the story of Kubernetes' past, its present and some thoughts about its future, so I thought I would write down some pieces of what I said for those of you who couldn't be there in person.
It all began in the fall of 2013, with three of us: Craig McLuckie, Joe Beda and I were working on public cloud infrastructure. If you cast your mind back to the world of cloud in 2013, it was a vastly different place than it is today. Imperative bash scripts were only just starting to give way to declarative configuration of IaaS with systems. Netflix was popularizing the idea of immutable infrastructure but doing it with heavy-weight full VM images. The notion of orchestration, and certainly container orchestration existed in a few internet scale companies, but not in cloud and certainly not in the enterprise.
Docker changed all of that. By popularizing a lightweight container runtime and providing a simple way to package, distribute and deploy applications onto a machine, the Docker tooling and experience popularized a brand-new cloud native approach to application packaging and maintenance. Were it not for Docker's shifting of the cloud developer's perspective, Kubernetes simply would not exist.
I think that it was Joe who first suggested that we look at Docker in the summer of 2013, when Craig, Joe and I were all thinking about how we could bring a cloud native application experience to a broader audience. And for all three of us, the implications of this new tool were immediately obvious. We knew it was a critical component in the development of cloud native infrastructure.
But as we thought about it, it was equally obvious that Docker, with its focus on a single machine, was not the complete solution. While Docker was great at building and packaging individual containers and running them on individual machines, there was a clear need for an orchestrator that could deploy and manage large numbers of containers across a fleet of machines.
As we thought about it some more, it became increasingly obvious to Joe, Craig and I, that not only was such an orchestrator necessary, it was also inevitable, and it was equally inevitable that this orchestrator would be open source. This realization crystallized for us in the late fall of 2013, and thus began the rapid development of first a prototype, and then the system that would eventually become known as Kubernetes. As 2013 turned into 2014 we were lucky to be joined by some incredibly talented developers including Ville Aikas, Tim Hockin, Dawn Chen, Brian Grant and Daniel Smith.
<blockquote class="twitter-tweet" data-lang="en"><p lang="en" dir="ltr">Happy to see k8s team members winning the “most impact” award. <a href="https://twitter.com/hashtag/oscon?src=hash&amp;ref_src=twsrc%5Etfw">#oscon</a> <a href="https://t.co/D6mSIiDvsU">pic.twitter.com/D6mSIiDvsU</a></p>&mdash; Bridget Kromhout (@bridgetkromhout) <a href="https://twitter.com/bridgetkromhout/status/1019992441825341440?ref_src=twsrc%5Etfw">July 19, 2018</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
<blockquote class="twitter-tweet" data-lang="en"><p lang="en" dir="ltr">Kubernetes won the O&#39;Reilly Most Impact Award. Thanks to our contributors and users! <a href="https://t.co/T6Co1wpsAh">pic.twitter.com/T6Co1wpsAh</a></p>&mdash; Brian Grant (@bgrant0607) <a href="https://twitter.com/bgrant0607/status/1019995276235325440?ref_src=twsrc%5Etfw">July 19, 2018</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
The initial goal of this small team was to develop a “minimally viable orchestrator.” From experience we knew that the basic feature set for such an orchestrator was:
* Replication to deploy multiple instances of an application
* Load balancing and service discovery to route traffic to these replicated containers
* Basic health checking and repair to ensure a self-healing system
* Scheduling to group many machines into a single pool and distribute work to them
Along the way, we also spent a significant chunk of our time convincing executive leadership that open sourcing this project was a good idea. I'm endlessly grateful to Craig for writing numerous whitepapers and to Eric Brewer, for the early and vocal support that he lent us to ensure that Kubernetes could see the light of day.
In June of 2014 when Kubernetes was released to the world, the list above was the sum total of its basic feature set. As an early stage open source community, we then spent a year building, expanding, polishing and fixing this initial minimally viable orchestrator into the product that we released as a 1.0 in OSCON in 2015. We were very lucky to be joined early on by the very capable OpenShift team which lent significant engineering and real world enterprise expertise to the project. Without their perspective and contributions, I don't think we would be standing here today.
Three years later, the Kubernetes community has grown exponentially, and Kubernetes has become synonymous with cloud native container orchestration. There are more than 1700 people who have contributed to Kubernetes, there are more than 500 Kubernetes meetups worldwide and more than 42000 users have joined the #kubernetes-dev channel. What's more, the community that we have built works successfully across geographic, language and corporate boundaries. It is a truly open, engaged and collaborative community, and in and of itself an amazing achievement. Many thanks to everyone who has helped make it what it is today. Kubernetes is a commodity in the public cloud because of you.
But if Kubernetes is a commodity, then what is the future? Certainly, there is an endless array of tweaks, adjustments and improvements to the core codebase that will occupy us for years to come, but the true future of Kubernetes is the applications and experiences that are being built on top of this new, ubiquitous platform.
Kubernetes has dramatically reduced the complexity to build new developer experiences, and a myriad of new experiences have been developed or are in the works that provide simplified or targeted developer experiences like Functions-as-a-Service, on top of core Kubernetes-as-a-Service.
The Kubernetes cluster itself is being extended with custom resource definitions (which I first described to Kelsey Hightower on a walk from OSCON to a nearby restaurant in 2015); these new resources allow cluster operators to enable new plugin functionality that extends and enhances the APIs that their users have access to.
By embedding core functionality like logging and monitoring in the cluster itself and enabling developers to take advantage of such services simply by deploying their application into the cluster, Kubernetes has reduced the learning necessary for developers to build scalable reliable applications.
Finally, Kubernetes has provided a new, common vocabulary for expressing the patterns and paradigms of distributed system development. This common vocabulary means that we can more easily describe and discuss the common ways in which our distributed systems are built, and furthermore we can build standardized, re-usable implementations of such systems. The net effect of this is the development of higher quality, reliable distributed systems, more quickly.
It's truly amazing to see how far Kubernetes has come, from a rough idea in the minds of three people in Seattle to a phenomenon that has redirected the way we think about cloud native development across the world. It has been an amazing journey, but what's truly amazing to me is that I think we're only just now scratching the surface of the impact that Kubernetes will have. Thank you to everyone who has enabled us to get this far, and thanks to everyone who will take us further.
Brendan

View File

@ -0,0 +1,160 @@
---
layout: blog
title: "Feature Highlight: CPU Manager"
date: 2018-07-24
---
**Authors**: Balaji Subramaniam ([Intel](mailto:balaji.subramaniam@intel.com)), Connor Doyle ([Intel](mailto:connor.p.doyle@intel.com))
This blog post describes the [CPU Manager](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/), a beta feature in [Kubernetes](https://kubernetes.io/). The CPU manager feature enables better placement of workloads in the [Kubelet](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/), the Kubernetes node agent, by allocating exclusive CPUs to certain pod containers.
![cpu manager](/images/blog/2018-07-24-cpu-manager/cpu-manager.png)
## Sounds Good! But Does the CPU Manager Help Me?
It depends on your workload. A single compute node in a Kubernetes cluster can run many [pods](https://kubernetes.io/docs/concepts/workloads/pods/pod/) and some of these pods could be running CPU-intensive workloads. In such a scenario, the pods might contend for the CPU resources available in that compute node. When this contention intensifies, the workload can move to different CPUs depending on whether the pod is throttled and the availability of CPUs at scheduling time. There might also be cases where the workload could be sensitive to context switches. In all the above scenarios, the performance of the workload might be affected.
If your workload is sensitive to such scenarios, then CPU Manager can be enabled to provide better performance isolation by allocating exclusive CPUs for your workload.
CPU manager might help workloads with the following characteristics:
* Sensitive to CPU throttling effects.
* Sensitive to context switches.
* Sensitive to processor cache misses.
* Benefits from sharing processor resources (e.g., data and instruction caches).
* Sensitive to cross-socket memory traffic.
* Sensitive to or requires hyperthreads from the same physical CPU core.
## Ok! How Do I use it?
Using the CPU manager is simple. First, [enable CPU manager with the Static policy](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#cpu-management-policies) in the Kubelet running on the compute nodes of your cluster. Then configure your pod to be in the [Guaranteed Quality of Service (QoS) class](https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#create-a-pod-that-gets-assigned-a-qos-class-of-guaranteed). Request whole numbers of CPU cores (e.g., `1000m`, `4000m`) for containers that need exclusive cores. Create your pod in the same way as before (e.g., `kubectl create -f pod.yaml`). And _voilà_, the CPU manager will assign exclusive CPUs to each container in the pod according to their CPU requests.
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: exclusive-2
spec:
  containers:
  - image: quay.io/connordoyle/cpuset-visualizer
    name: exclusive-2
    resources:
      # Pod is in the Guaranteed QoS class because requests == limits
      requests:
        # CPU request is an integer
        cpu: 2
        memory: "256M"
      limits:
        cpu: 2
        memory: "256M"
```
_Pod specification requesting two exclusive CPUs._
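On the node side, enabling the static policy can be done through the kubelet's configuration file; a sketch assuming the `kubelet.config.k8s.io/v1beta1` `KubeletConfiguration` fields (the reservation values are illustrative - the static policy needs a non-zero CPU reservation so that some CPUs stay out of the exclusive pool):

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cpuManagerPolicy: static      # the default policy is "none"
kubeReserved:
  cpu: "500m"                 # illustrative reservation for Kubernetes daemons
systemReserved:
  cpu: "500m"                 # illustrative reservation for system daemons
```

Clusters that configure the kubelet with command-line flags can pass the equivalent `--cpu-manager-policy=static` flag instead.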
## Hmm … How Does the CPU Manager Work?
For Kubernetes, and the purposes of this blog post, we will discuss three kinds of CPU resource controls available in most Linux distributions. The first two are CFS shares (what's my weighted fair share of CPU time on this system) and CFS quota (what's my hard cap of CPU time over a period). The CPU manager uses a third control called CPU affinity (on what logical CPUs am I allowed to execute).
By default, all the pods and the containers running on a compute node of your Kubernetes cluster can execute on any available cores in the system. The total amount of allocatable shares and quota are limited by the CPU resources explicitly [reserved for kubernetes and system daemons](https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/). However, limits on the CPU time being used can be specified using [CPU limits in the pod spec](https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource/#specify-a-cpu-request-and-a-cpu-limit). Kubernetes uses [CFS quota](https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt) to enforce CPU limits on pod containers.
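To make the mapping concrete, a container's CPU requests become its CFS shares and its CPU limits become its CFS quota; a sketch of a Burstable container where the two differ (the pod name and image are illustrative):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: burstable-example            # illustrative name
spec:
  containers:
  - name: app
    image: k8s.gcr.io/pause:3.1      # illustrative image
    resources:
      requests:
        cpu: 500m                    # becomes CFS shares (weighted fair share)
      limits:
        cpu: "1"                     # becomes CFS quota (hard cap per period)
```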
When CPU manager is enabled with the "static" policy, it manages a shared pool of CPUs. Initially this shared pool contains all the CPUs in the compute node. When a container with integer CPU request in a Guaranteed pod is created by the Kubelet, CPUs for that container are removed from the shared pool and assigned exclusively for the lifetime of the container. Other containers are migrated off these exclusively allocated CPUs.
All non-exclusive-CPU containers (Burstable, BestEffort and Guaranteed with non-integer CPU) run on the CPUs remaining in the shared pool. When a container with exclusive CPUs terminates, its CPUs are added back to the shared CPU pool.
## More Details Please ...
![cpu manager](/images/blog/2018-07-24-cpu-manager/cpu-manager-anatomy.png)
The figure above shows the anatomy of the CPU manager. The CPU Manager uses the Container Runtime Interface's `UpdateContainerResources` method to modify the CPUs on which containers can run. The Manager periodically reconciles the current State of the CPU resources of each running container with `cgroupfs`.
The CPU Manager uses [Policies](https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/cm/cpumanager/policy.go#L25) to decide the allocation of CPUs. There are two policies implemented: None and Static. By default, the CPU manager is enabled with the None policy from Kubernetes version 1.10.
The Static policy allocates exclusive CPUs to pod containers in the Guaranteed QoS class which request integer CPUs. On a best-effort basis, the Static policy tries to allocate CPUs topologically in the following order:
1. Allocate all the CPUs in the same processor socket if available and the container requests at least an entire socket worth of CPUs.
2. Allocate all the logical CPUs (hyperthreads) from the same physical CPU core if available and the container requests an entire core worth of CPUs.
3. Allocate any available logical CPU, preferring to acquire CPUs from the same socket.
## How is Performance Isolation Improved by CPU Manager?
With CPU manager static policy enabled, the workloads might perform better due to one of the following reasons:
1. Exclusive CPUs can be allocated for the workload container but not the other containers. These containers do not share the CPU resources. As a result, we expect better performance due to isolation when an aggressor or a co-located workload is involved.
2. There is a reduction in interference between the resources used by the workload since we can partition the CPUs among workloads. These resources might also include the cache hierarchies and memory bandwidth and not just the CPUs. This helps improve the performance of workloads in general.
3. CPU Manager allocates CPUs in a topological order on a best-effort basis. If a whole socket is free, the CPU Manager will exclusively allocate the CPUs from the free socket to the workload. This boosts the performance of the workload by avoiding any cross-socket traffic.
4. Containers in Guaranteed QoS pods are subject to CFS quota. Very bursty workloads may get scheduled, burn through their quota before the end of the period, and get throttled. During this time, there may or may not be meaningful work to do with those CPUs. Because of how the resource math lines up between CPU quota and number of exclusive CPUs allocated by the static policy, these containers are not subject to CFS throttling (quota is equal to the maximum possible cpu-time over the quota period).
## Ok! Ok! Do You Have Any Results?
Glad you asked! To understand the performance improvement and isolation provided by enabling the CPU Manager feature in the Kubelet, we ran experiments on a dual-socket compute node (Intel Xeon CPU E5-2680 v3) with hyperthreading enabled. The node consists of 48 logical CPUs (24 physical cores each with 2-way hyperthreading). Here we demonstrate the performance benefits and isolation provided by the CPU Manager feature using benchmarks and real-world workloads for three different scenarios.
### How Do I Interpret the Plots?
For each scenario, we show box plots that illustrate the normalized execution time and its variability of running a benchmark or real-world workload with and without CPU Manager enabled. The execution time of the runs is normalized to the best-performing run (1.00 on the y-axis represents the best performing run and lower is better). The height of the box plot shows the variation in performance. For example, if the box plot is a line, then there is no variation in performance across runs. In the box, the middle line is the median, the upper line is the 75th percentile and the lower line is the 25th percentile. The height of the box (i.e., the difference between the 75th and 25th percentiles) is defined as the interquartile range (IQR). Whiskers show data outside that range and the points show outliers. The outliers are defined as any data 1.5x IQR below or above the lower or upper quartile respectively. Every experiment is run ten times.
### Protection from Aggressor Workloads
We ran six benchmarks from the [PARSEC benchmark suite](http://parsec.cs.princeton.edu/) (the victim workloads) co-located with a CPU stress container (the aggressor workload) with and without the CPU Manager feature enabled. The CPU stress container is run [as a pod](https://gist.github.com/balajismaniam/7c2d57b2f526a56bb79cf870c122a34c) in the Burstable QoS class requesting 23 CPUs with `--cpus 48` flag. [The benchmarks are run as pods](https://gist.github.com/balajismaniam/fac7923f6ee44f1f36969c29354e3902) in the Guaranteed QoS class requesting a full socket worth of CPUs (24 CPUs on this system). The figure below plots the normalized execution time of running a benchmark pod co-located with the stress pod, with and without the CPU Manager static policy enabled. We see improved performance and reduced performance variability when static policy is enabled for all test cases.
![execution time](/images/blog/2018-07-24-cpu-manager/execution-time.png)
### Performance Isolation for Co-located Workloads
In this section, we demonstrate how CPU manager can be beneficial to multiple workloads in a co-located workload scenario. In the box plots below we show the performance of two benchmarks (Blackscholes and Canneal) from the PARSEC benchmark suite run in the Guaranteed (Gu) and Burstable (Bu) QoS classes co-located with each other, with and without the CPU manager static policy enabled.
Starting from the top left and proceeding clockwise, we show the performance of Blackscholes in the Bu QoS class (top left), Canneal in the Bu QoS class (top right), Canneal in the Gu QoS class (bottom right) and Blackscholes in the Gu QoS class (bottom left), respectively. In each case, they are co-located with Canneal in the Gu QoS class (top left), Blackscholes in the Gu QoS class (top right), Blackscholes in the Bu QoS class (bottom right) and Canneal in the Bu QoS class (bottom left) going clockwise from top left, respectively. For example, the Bu-blackscholes-Gu-canneal plot (top left) shows the performance of Blackscholes running in the Bu QoS class when co-located with Canneal running in the Gu QoS class. In each case, the pod in the Gu QoS class requests cores worth a whole socket (i.e., 24 CPUs) and the pod in the Bu QoS class requests 23 CPUs.
There is better performance and less performance variation for both the co-located workloads in all the tests. For example, consider the case of Bu-blackscholes-Gu-canneal (top left) and Gu-canneal-Bu-blackscholes (bottom right). They show the performance of Blackscholes and Canneal run simultaneously with and without the CPU manager enabled. In this particular case, Canneal gets exclusive cores due to CPU manager since it is in the Gu QoS class and requesting integer number of CPU cores. But Blackscholes also gets exclusive set of CPUs as it is the only workload in the shared pool. As a result, both Blackscholes and Canneal get some performance isolation benefits due to the CPU manager.
![performance comparison](/images/blog/2018-07-24-cpu-manager/performance-comparison.png)
### Performance Isolation for Stand-Alone Workloads
This section shows the performance improvement and isolation provided by the CPU manager for stand-alone real-world workloads. We use two workloads from the [TensorFlow official models](https://github.com/tensorflow/models/tree/master/official): [wide and deep](https://github.com/tensorflow/models/tree/master/official/wide_deep) and [ResNet](https://github.com/tensorflow/models/tree/master/official/resnet). We use the census and CIFAR10 datasets for the wide and deep and ResNet models respectively. In each case the [pods](https://gist.github.com/balajismaniam/941db0d0ec14e2bc93b7dfe04d1f6c58) ([wide and deep](https://gist.github.com/balajismaniam/9953b54dd240ecf085b35ab1bc283f3c), [ResNet](https://gist.github.com/balajismaniam/a1919010fe9081ca37a6e1e7b01f02e3)) request 24 CPUs, which corresponds to a whole socket worth of cores. As shown in the plots, CPU manager enables better performance isolation in both cases.
![performance comparison](/images/blog/2018-07-24-cpu-manager/performance-comparison-2.png)
## Limitations
Users might want to get CPUs allocated on the socket near to the bus which connects to an external device, such as an accelerator or high-performance network card, in order to avoid cross-socket traffic. This type of alignment is not yet supported by CPU manager.
Since the CPU manager provides a best-effort allocation of CPUs belonging to a socket and physical core, it is susceptible to corner cases and might lead to fragmentation.
The CPU manager does not take the isolcpus Linux kernel boot parameter into account, although this is reportedly common practice for some low-jitter use cases.
## Acknowledgements
We thank the members of the community who have contributed to this feature or given feedback including members of WG-Resource-Management and SIG-Node.
Thanks also to cmx.io for the fun drawing tool.
#### Notices and Disclaimers
Software and workloads used in performance tests may have been optimized for performance only on Intel microprocessors. Performance tests, such as SYSmark and MobileMark, are measured using specific computer systems, components, software, operations and functions. Any change to any of those factors may cause the results to vary. You should consult other information and performance tests to assist you in fully evaluating your contemplated purchases, including the performance of that product when combined with other products. For more information go to www.intel.com/benchmarks.
Intel technologies features and benefits depend on system configuration and may require enabled hardware, software or service activation. Performance varies depending on system configuration. No computer system can be absolutely secure. Check with your system manufacturer or retailer or learn more at intel.com.
Workload Configuration:

* https://gist.github.com/balajismaniam/fac7923f6ee44f1f36969c29354e3902
* https://gist.github.com/balajismaniam/7c2d57b2f526a56bb79cf870c122a34c
* https://gist.github.com/balajismaniam/941db0d0ec14e2bc93b7dfe04d1f6c58
* https://gist.github.com/balajismaniam/a1919010fe9081ca37a6e1e7b01f02e3
* https://gist.github.com/balajismaniam/9953b54dd240ecf085b35ab1bc283f3c
System Configuration:

* CPU
  * Architecture: x86_64
  * CPU op-mode(s): 32-bit, 64-bit
  * Byte Order: Little Endian
  * CPU(s): 48
  * On-line CPU(s) list: 0-47
  * Thread(s) per core: 2
  * Core(s) per socket: 12
  * Socket(s): 2
  * NUMA node(s): 2
  * Vendor ID: GenuineIntel
  * Model name: Intel(R) Xeon(R) CPU E5-2680 v3
* Memory: 256 GB
* OS/Kernel: Linux 3.10.0-693.21.1.el7.x86_64
Intel, the Intel logo, Xeon are trademarks of Intel Corporation or its subsidiaries in the U.S. and/or other countries.
*Other names and brands may be claimed as the property of others.
© Intel Corporation.

View File

@ -0,0 +1,107 @@
---
layout: blog
title: 'KubeVirt: Extending Kubernetes with CRDs for Virtualized Workloads'
date: 2018-07-27
---
**Author**: David Vossel (Red Hat)
## What is KubeVirt?
[KubeVirt](https://github.com/kubevirt/kubevirt) is a Kubernetes addon that provides users the ability to schedule traditional virtual machine workloads side by side with container workloads. Through the use of [Custom Resource Definitions](https://Kubernetes.io/docs/concepts/extend-Kubernetes/api-extension/custom-resources/) (CRDs) and other Kubernetes features, KubeVirt seamlessly extends existing Kubernetes clusters to provide a set of virtualization APIs that can be used to manage virtual machines.
## Why Use CRDs Over an Aggregated API Server?
Back in the middle of 2017, those of us working on KubeVirt were at a crossroads. We had to decide whether to extend Kubernetes using an aggregated API server or to make use of the new Custom Resource Definitions (CRDs) feature.
At the time, CRDs lacked much of the functionality we needed to deliver our feature set. The ability to create our own aggregated API server gave us all the flexibility we needed, but it had one major flaw. **An aggregated API server significantly increased the complexity involved with installing and operating KubeVirt.**
The crux of the issue for us was that aggregated API servers required access to etcd for object persistence. This meant that cluster admins would have to either accept that KubeVirt needs a separate etcd deployment, which increases complexity, or provide KubeVirt with shared access to the Kubernetes etcd store, which introduces risk.
We werent okay with this tradeoff. Our goal wasnt to just extend Kubernetes to run virtualization workloads, it was to do it in the most seamless and effortless way possible. We felt that the added complexity involved with an aggregated API server sacrificed the part of the user experience involved with installing and operating KubeVirt.
**Ultimately we chose to go with CRDs and trust that the Kubernetes ecosystem would grow with us to meet the needs of our use case.** Our bets were well placed. At this point there are either solutions in place or solutions under discussion that solve every feature gap we encountered back in 2017 when we were evaluating CRDs vs an aggregated API server.
## Building Layered “Kubernetes like” APIs with CRDs
We designed KubeVirts API to follow the same patterns users are already familiar with in the Kubernetes core API.
For example, in Kubernetes the lowest level unit that users create to perform work is a Pod. Yes, Pods do have multiple containers but logically the Pod is the unit at the bottom of the stack. A Pod represents a mortal workload. The Pod gets scheduled, eventually the Pods workload terminates, and thats the end of the Pods lifecycle.
Workload controllers such as the ReplicaSet and StatefulSet are layered on top of the Pod abstraction to help manage scale out and stateful applications. From there we have an even higher level controller called a Deployment which is layered on top of ReplicaSets to help manage things like rolling updates.
In KubeVirt, this concept of layering controllers is at the very center of our design. The KubeVirt VirtualMachineInstance (VMI) object is the lowest level unit at the very bottom of the KubeVirt stack. Similar in concept to a Pod, a VMI represents a single mortal virtualized workload that executes once until completion (powered off).
Layered on top of VMIs we have a workload controller called a VirtualMachine (VM). The VM controller is where we really begin to see the differences between how users manage virtualized workloads vs containerized workloads. Within the context of existing Kubernetes functionality, the best way to describe the VM controllers behavior is to compare it to a StatefulSet of size one. This is because the VM controller represents a single stateful (immortal) virtual machine capable of persisting state across both node failures and multiple restarts of its underlying VMI. This object behaves in the way that is familiar to users who have managed virtual machines in AWS, GCE, OpenStack or any other similar IaaS cloud platform. The user can shutdown a VM, then choose to start that exact same VM up again at a later time.
In addition to VMs, we also have a VirtualMachineInstanceReplicaSet (VMIRS) workload controller which manages scale out of identical VMI objects. This controller behaves nearly identically to the Kubernetes ReplicaSet controller, the primary difference being that the VMIRS manages VMI objects while the ReplicaSet manages Pods. Wouldnt it be nice if we could come up with a way to [use the Kubernetes ReplicaSet controller to scale out CRDs?](https://github.com/kubernetes/kubernetes/issues/65622)
Each of these KubeVirt objects (VMI, VM, VMIRS) is registered with Kubernetes as a CRD when the KubeVirt install manifest is posted to the cluster. By registering our APIs as CRDs with Kubernetes, all the tooling involved with managing Kubernetes clusters (like kubectl) has access to the KubeVirt APIs just as if they were native Kubernetes objects.
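As a rough illustration of the lowest layer, a minimal VMI manifest from this era looks something like the sketch below. The field names follow the v1alpha2-generation API and the demo disk image is an assumption, so treat this as illustrative and consult the KubeVirt documentation for the authoritative schema.

```
# Rough sketch of a minimal VirtualMachineInstance (VMI), the lowest-level
# KubeVirt object. Field names follow the v1alpha2-era API and are
# illustrative rather than authoritative.
apiVersion: kubevirt.io/v1alpha2
kind: VirtualMachineInstance
metadata:
  name: my-vmi
spec:
  domain:
    resources:
      requests:
        memory: 64Mi
    devices:
      disks:
      - name: registrydisk
        volumeName: registryvolume   # must reference a volume defined below
        disk:
          bus: virtio
  volumes:
  - name: registryvolume
    registryDisk:
      image: kubevirt/cirros-registry-disk-demo   # demo image, illustrative
```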
## Dynamic Webhooks for API Validation
One of the responsibilities of the Kubernetes API server is to intercept and validate requests prior to allowing objects to be persisted into etcd. For example, if someone tries to create a Pod using a malformed Pod specification, the Kubernetes API server immediately catches the error and rejects the POST request. This all occurs before the object is persisted into etcd, preventing the malformed Pod specification from making its way into the cluster.
This validation occurs during a process called admission control. Until recently, it was not possible to extend the default Kubernetes admission controllers without altering code and compiling/deploying an entirely new Kubernetes API server. This meant that if we wanted to perform admission control on KubeVirts CRD objects while they are posted to the cluster, wed have to build our own version of the Kubernetes API server and convince our users to use that instead. That was not a viable solution for us.
Using the new [Dynamic Admission Control](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/) feature that first landed in Kubernetes 1.9, we now have a path for performing custom validation on the KubeVirt API through the use of a [ValidatingAdmissionWebhook](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#external-admission-webhooks). This feature allows KubeVirt to dynamically register an HTTPS webhook with Kubernetes at KubeVirt install time. After registering the custom webhook, all requests related to KubeVirt API objects are forwarded from the Kubernetes API server to our HTTPS endpoint for validation. If our endpoint rejects a request for any reason, the object will not be persisted into etcd and the client receives our response outlining the reason for the rejection.
For example, if someone posts a malformed VirtualMachine object, theyll receive an error indicating what the problem is.
```
$ kubectl create -f my-vm.yaml
Error from server: error when creating "my-vm.yaml": admission webhook "virtualmachine-validator.kubevirt.io" denied the request: spec.template.spec.domain.devices.disks[0].volumeName 'registryvolume' not found.
```
In the example output above, that error response is coming directly from KubeVirts admission control webhook.
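The webhook itself is registered with the cluster through a ValidatingWebhookConfiguration object. The sketch below is a simplified, illustrative registration rather than KubeVirt's actual manifest; the service name, namespace, path, and CA bundle are placeholders and assumptions.

```
# Simplified sketch of registering a validating webhook for KubeVirt objects
# (admissionregistration.k8s.io/v1beta1, the API available in Kubernetes
# 1.9-1.11). Service details and the CA bundle are placeholders.
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
  name: virtualmachine-validator.kubevirt.io
webhooks:
- name: virtualmachine-validator.kubevirt.io
  clientConfig:
    service:
      namespace: kubevirt                # illustrative
      name: virt-api                     # illustrative
      path: /virtualmachines-validate    # illustrative
    caBundle: "<base64-encoded-CA-certificate>"  # placeholder
  rules:
  - apiGroups: ["kubevirt.io"]
    apiVersions: ["v1alpha2"]
    operations: ["CREATE", "UPDATE"]
    resources: ["virtualmachines"]
  failurePolicy: Fail
```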
## CRD OpenAPIv3 Validation
In addition to the validating webhook, KubeVirt also uses the ability to provide an [OpenAPIv3 validation schema](https://kubernetes.io/docs/tasks/access-kubernetes-API/extend-api-custom-resource-definitions/#advanced-topics) when registering a CRD with the cluster. While the OpenAPIv3 schema does not let us express some of the more advanced validation checks that the validation webhook provides, it does offer the ability to enforce simple validation checks involving things like required fields, max/min value lengths, and verifying that values are formatted in a way that matches a regular expression string.
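A simplified, hypothetical sketch of what such a schema looks like when attached to a CRD registration is shown below; it is not KubeVirt's actual CRD definition, and the constraints shown (a required field plus a regular-expression and length check on a field) are purely illustrative.

```
# Illustrative CRD registration with an OpenAPIv3 validation schema
# (apiextensions.k8s.io/v1beta1 style). The schema enforces a required field
# and a simple regex/length check; it is a sketch, not KubeVirt's real manifest.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: virtualmachineinstances.kubevirt.io
spec:
  group: kubevirt.io
  version: v1alpha2
  scope: Namespaced
  names:
    plural: virtualmachineinstances
    singular: virtualmachineinstance
    kind: VirtualMachineInstance
    shortNames:
    - vmi
  validation:
    openAPIV3Schema:
      properties:
        spec:
          required:
          - domain
          properties:
            domain:
              type: object
            hostname:
              type: string
              pattern: '^[a-z0-9][a-z0-9-]*$'  # illustrative regex check
              maxLength: 63                    # illustrative length limit
```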
## Dynamic Webhooks for “PodPreset Like” Behavior
The Kubernetes Dynamic Admission Control feature is not only limited to validation logic, it also provides the ability for applications like KubeVirt to both intercept and mutate requests as they enter the cluster. This is achieved through the use of a **MutatingAdmissionWebhook** object. In KubeVirt, we are looking to use a mutating webhook to support our VirtualMachinePreset (VMPreset) feature.
A VMPreset acts in a similar way to a PodPreset. Just like a PodPreset allows users to define values that should automatically be injected into pods at creation time, a VMPreset allows users to define values that should be injected into VMs at creation time. Through the use of a mutating webhook, KubeVirt can intercept a request to create a VM, apply VMPresets to the VM spec, and then validate the resulting VM object. This all occurs before the VM object is persisted into etcd, which allows KubeVirt to immediately notify the user of any conflicts at the time the request is made.
## Subresources for CRDs
When comparing the use of CRDs to an aggregated API server, one of the features CRDs lack is the ability to support subresources. Subresources are used to provide additional resource functionality. For example, the `pod/logs` and `pod/exec` subresource endpoints are used behind the scenes to provide the `kubectl logs` and `kubectl exec` command functionality.
Just like Kubernetes uses the `pod/exec` subresource to provide access to a pods environment, in KubeVirt we want subresources to provide serial-console, VNC, and SPICE access to a virtual machine. By adding virtual machine guest access through subresources, we can leverage RBAC to provide access control for these features.
So, given that the KubeVirt team decided to use CRDs instead of an aggregated API server for custom resource support, how can we have subresources for CRDs when the CRD feature explicitly does not support subresources?
We created a workaround for this limitation by implementing a stateless aggregated API server that exists only to serve subresource requests. With no state, we dont have to worry about any of the issues we identified earlier with regards to access to etcd. This means the KubeVirt API is actually supported through a combination of both CRDs for resources and an aggregated API server for stateless subresources.
This isnt a perfect solution for us. Both aggregated API servers and CRDs require us to register an API GroupName with Kubernetes. This API GroupName field essentially namespaces the APIs REST path in a way that prevents API naming conflicts between other third party applications. Because CRDs and aggregated API servers cant share the same GroupName, we have to register two separate GroupNames. One is used by our CRDs and the other is used by the aggregated API server for subresource requests.
Having two GroupNames in our API is slightly inconvenient because it means the REST path for the endpoints that serve the KubeVirt subresource requests has a slightly different base path than the resources.
For example, the endpoint to create a VMI object is as follows.
**/apis/kubevirt.io/v1alpha2/namespaces/my-namespace/virtualmachineinstances/my-vm**
However, the subresource endpoint to access graphical VNC looks like this.
**/apis/subresources.kubevirt.io/v1alpha2/namespaces/my-namespace/virtualmachineinstances/my-vm/vnc**
Notice that the first request uses **kubevirt.io** and the second request uses **subresources.kubevirt.io**. We dont like that, but thats how weve managed to combine CRDs with a stateless aggregated API server for subresources.
One thing worth noting is that in Kubernetes 1.10 a very basic form of CRD subresource support was added in the form of the `/status` and `/scale` subresources. This support does not help us deliver the virtualization features we want subresources for. However, there have been discussions about exposing custom CRD subresources as webhooks in a future Kubernetes version. If this functionality lands, we will gladly transition away from our stateless aggregated API server workaround to use a subresource webhook feature.
## CRD Finalizers
A [CRD finalizer](https://kubernetes.io/docs/tasks/access-kubernetes-API/extend-api-custom-resource-definitions/#advanced-topics) is a feature that lets us provide a pre-delete hook in order to perform actions before allowing a CRD object to be removed from persistent storage. In KubeVirt, we use finalizers to guarantee a virtual machine has completely terminated before we allow the corresponding VMI object to be removed from etcd.
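Mechanically, a finalizer is just an entry in the object's metadata.finalizers list; the API server blocks actual deletion until the controller that owns the finalizer removes the entry. The sketch below is a minimal illustration, and the finalizer string is an assumption rather than necessarily the exact one KubeVirt uses.

```
# Illustrative only: a finalizer is a plain string in metadata.finalizers.
# On delete, the API server sets deletionTimestamp but keeps the object until
# the owning controller finishes cleanup and removes the finalizer entry,
# at which point the object is removed from etcd.
apiVersion: kubevirt.io/v1alpha2
kind: VirtualMachineInstance
metadata:
  name: my-vmi
  finalizers:
  - foregroundDeleteVirtualMachine   # finalizer name is an assumption
# spec omitted for brevity
```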
## API Versioning for CRDs
The Kubernetes core APIs have the ability to support multiple versions for a single object type and perform conversions between those versions. This gives the Kubernetes core APIs a path for advancing the `v1alpha1` version of an object to a `v1beta1` version and so forth.
Prior to Kubernetes 1.11, CRDs did not have support for multiple versions. This meant when we wanted to progress a CRD from `kubevirt.io/v1alpha1` to `kubevirt.io/v1beta1`, the only path available to us was to back up our CRD objects, delete the registered CRD from Kubernetes, register a new CRD with the updated version, convert the backed up CRD objects to the new version, and finally post the migrated CRD objects back to the cluster.
That strategy was not exactly a viable option for us.
Fortunately, thanks to some recent [work to rectify this issue in Kubernetes](https://github.com/kubernetes/features/issues/544), the latest Kubernetes release, v1.11, now supports [CRDs with multiple versions](https://github.com/kubernetes/kubernetes/pull/63830). Note however that this initial multi-version support is limited. While a CRD can now have multiple versions, the feature does not currently contain a path for performing conversions between versions. In KubeVirt, the lack of conversion makes it difficult for us to evolve our API as we progress versions. Luckily, support for conversions between versions is underway, and we look forward to taking advantage of that feature once it lands in a future Kubernetes release.
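For illustration, a CRD registered with the Kubernetes 1.11 multi-version support (with no conversion yet) looks roughly like the sketch below; the group, kind, and version names mirror the KubeVirt examples in this post, but the manifest itself is an assumption, not KubeVirt's actual registration.

```
# Illustrative multi-version CRD as supported in Kubernetes 1.11: several
# versions can be served, exactly one is marked as the storage version, and
# no conversion between versions is performed yet.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: virtualmachineinstances.kubevirt.io
spec:
  group: kubevirt.io
  scope: Namespaced
  names:
    plural: virtualmachineinstances
    singular: virtualmachineinstance
    kind: VirtualMachineInstance
  versions:
  - name: v1alpha2
    served: true
    storage: true    # objects are persisted in etcd at this version
  - name: v1beta1
    served: true
    storage: false
```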

View File

@ -12,36 +12,33 @@ cid: caseStudies
<div class="content">
<div class="case-studies">
<div class="case-study">
<img src="/images/case_studies/northwestern_feature.png" alt="Northwestern Mutual">
<p class="quote">"No one would think a company thats 160-plus years old is foraying this deep into the cloud and infrastructure stack."</p>
<!--<p class="attrib">— APP PLATFORM TEAMS MANAGER, BRYAN PFREMMER</p>-->
<a href="/case-studies/northwestern-mutual/">Read about Northwestern Mutual</a>
<div class="case-study">
<img src="/images/case_studies/pinterest_feature.png" alt="Pinterest">
<p class="quote"> "We are in the position to run things at scale, in a public cloud environment, and test things out in way that a lot of people might not be able to do."</p>
<!--<p class="attrib">— MICHEAL BENEDICT, PRODUCT MANAGER FOR THE CLOUD AND THE DATA INFRASTRUCTURE GROUP AT PINTEREST</p>-->
<a href="/case-studies/pinterest/">Read about Pinterest</a>
</div>
<div class="case-study">
<img src="/images/case_studies/pearson_feature.png" alt="Pearson">
<p class="quote">"Were already seeing tremendous benefits with Kubernetes—improved engineering productivity, faster delivery of applications and a simplified infrastructure. But this is just the beginning. Kubernetes will help transform the way that educational content is delivered online."</p>
<!--<p class="attrib">— CHRIS JACKSON, DIRECTOR FOR CLOUD PLATFORMS & SRE AT PEARSON</p>-->
<a href="/case-studies/pearson/">Read about Pearson</a>
</div>
<div class="case-study">
<img src="/images/case_studies/ing_feature.png" alt="ING">
<p class="quote">"The big cloud native promise to our business is the ability to go from idea to production within 48 hours. We are some years away from this, but thats quite feasible to us."</p>
<!--<p class="attrib">— Thijs Ebbers, Infrastructure Architect, ING</p>-->
<a href="/case-studies/ing/">Read about ING</a>
</div>
<div class="case-study">
<img src="/images/case_studies/openai_feature.png" alt="openAI">
<p class="quote">"Research teams can now take advantage of the frameworks weve built on top of Kubernetes, which make it easy to launch experiments, scale them by 10x or 50x, and take little effort to manage."</p>
<!--<p class="attrib">— Christopher Berner, Head of Infrastructure for OpenAI</p>-->
<a href="/case-studies/openai/">Read about OpenAI</a>
<div class="case-study">
<img src="/images/case_studies/capitalone_feature.png" alt="Capital One">
<p class="quote">"With the scalability, the management, the coordination, Kubernetes really empowers us and gives us more time back than we had before."</p>
<!--<p class="attrib">— Jamil Jadallah, Scrum Master</p>-->
<a href="/case-studies/capital-one/">Read about Capital One</a>
</div>
<div class="case-study">
<img src="/images/case_studies/newyorktimes_feature.png" alt="the new york times">
<p class="quote">"I think once you get over the initial hump, things get a lot easier and actually a lot faster."</p>
<!--<p class="attrib">— Deep Kapadia, Executive Director, Engineering at The New York Times</p>-->
<a href="/case-studies/newyorktimes/">Read about The New York Times</a>
</div>
<div class="case-study">
<img src="/images/case_studies/nordstrom_feature.png" alt="nordstrom">
<p class="quote">"We are always looking for ways to optimize and provide more value through technology. With Kubernetes we are showcasing two types of efficiency that we can bring: Dev efficiency and Ops efficiency. Its a win-win."</p>
<!--<p class="attrib">— Dhawal Patel, senior engineer at Nordstrom</p>-->
<a href="/case-studies/nordstrom/">Read about Nordstrom</a>
</div>
</div>
</div>
</main>
@ -63,46 +60,90 @@ cid: caseStudies
<main>
<h3>Kubernetes Users</h3>
<div id="usersGrid">
<a target="_blank" href="/case-studies/northwestern-mutual/"><img src="/images/case_studies/northwestern_feature.png" alt="Northwestern Mutual"></a>
<a target="_blank" href="/case-studies/openai/"><img src="/images/case_studies/openai_feature.png" alt="OpenAI"></a>
<a target="_blank" href="/case-studies/newyorktimes/"><img src="/images/case_studies/newyorktimes_feature.png" alt="The New York Times"></a>
<a target="_blank" href="/case-studies/nordstrom/"><img src="/images/case_studies/nordstrom_feature.png" alt="Nordstrom"></a>
<a target="_blank" href="/case-studies/crowdfire/"><img src="/images/case_studies/crowdfire_feature.png" alt="Crowdfire"></a>
<a target="_blank" href="/case-studies/squarespace/"><img src="/images/case_studies/squarespace_feature.png" alt="Squarespace"></a>
<a target="_blank" href="/case-studies/zalando/"><img src="/images/case_studies/zalando_feature.png" alt="Zalando"></a>
<a target="_blank" href="/case-studies/amadeus/"><img src="/images/case_studies/amadeus.png" alt="Amadeus"></a>
<a target="_blank" href="/case-studies/ancestry/"><img src="/images/case_studies/ancestry.png" alt="Ancestry.com"></a>
<a target="_blank" href="/case-studies/blablacar/"><img src="/images/case_studies/blablacar.png" alt="BlaBlaCar"></a>
<a target="_blank" href="/case-studies/blackrock/"><img src="/images/case_studies/blackrock2.png" alt="BlackRock"></a>
<a target="_blank" href="/case-studies/box/"><img src="/images/case_studies/box_logo.png" alt="box"></a>
<a target="_blank" href="/case-studies/buffer/"><img src="/images/case_studies/buffer_logo.png" alt="Buffer"></a>
<a target="_blank" href="https://cloud.google.com/customers/ccp-games/"><img src="/images/case_studies/ccp.png" alt="CCP Games"></a>
<a target="_blank" href="https://youtu.be/lmeFkH-rHII"><img src="/images/case_studies/comcast_logo.png" alt="Comcast"></a>
<a target="_blank" href="http://searchitoperations.techtarget.com/news/450297178/Tech-firms-roll-out-Kubernetes-in-production"><img src="/images/case_studies/concur.png" alt="Concur"></a>
<a target="_blank" href="http://searchitoperations.techtarget.com/news/450297178/Tech-firms-roll-out-Kubernetes-in-production"><img src="/images/case_studies/concur.png" alt="Concur"></a>
<a target="_blank" href="/case-studies/crowdfire/"><img src="/images/case_studies/crowdfire_feature.png" alt="Crowdfire"></a>
<a target="_blank" href="http://www.nextplatform.com/2015/11/12/inside-ebays-shift-to-kubernetes-and-containers-atop-openstack/"><img src="/images/case_studies/ebay_logo.png" alt="Ebay"></a>
<a target="_blank" href="http://blogs.wsj.com/cio/2016/02/24/big-changes-in-goldmans-software-emerge-from-small-containers/"><img src="/images/case_studies/gs.png" alt="Goldman Sachs"></a>
<a target="_blank" href="http://blogs.wsj.com/cio/2016/02/24/big-changes-in-goldmans-software-emerge-from-small-containers/"><img src="/images/case_studies/gs.png" alt="Goldman Sachs"></a>
<a target="_blank" href="/case-studies/golfnow/"><img src="/images/case_studies/golfnow_logo.png" alt="GolfNow"></a>
<a target="_blank" href="/case-studies/haufegroup/"><img src="/images/case_studies/haufegroup_logo.png" alt="Haufe Group"></a>
<a target="_blank" href="https://www.youtube.com/watch?v=F3iMkz_NSvU"><img src="/images/case_studies/homeoffice.png" alt="UK Home Office"></a>
<a target="_blank" href="/case-studies/huawei/"><img src="/images/case_studies/huawei.png" alt="Huawei"></a>
<a target="_blank" href="/case-studies/ing/"><img src="/images/case_studies/ing_feature.png" alt="ING"></a>
<a target="_blank" href="https://kubernetes.io/blog/2017/02/inside-jd-com-shift-to-kubernetes-from-openstack"><img src="/images/case_studies/jd.png" alt="JD.COM"></a>
<a target="_blank" href="https://www.openstack.org/videos/video/running-kubernetes-on-openstack-at-liveperson"><img src="/images/case_studies/liveperson.png" alt="LivePerson"></a>
<a target="_blank" href="https://youtu.be/YkOY7DgXKyw"><img src="/images/case_studies/monzo_logo.png" alt="monzo"></a>
<a target="_blank" href="https://www.youtube.com/watch?v=P5qfyv_zGcU"><img src="/images/case_studies/nyt.png" alt="New York Times"></a>
<a target="_blank" href="https://openai.com/blog/infrastructure-for-deep-learning"><img src="/images/case_studies/openai.png" alt="OpenAI"></a>
<a target="_blank" href="/case-studies/newyorktimes/"><img src="/images/case_studies/newyorktimes_feature.png" alt="The New York Times"></a>
<a target="_blank" href="/case-studies/nordstrom/"><img src="/images/case_studies/nordstrom_feature.png" alt="Nordstrom"></a>
<a target="_blank" href="/case-studies/northwestern-mutual/"><img src="/images/case_studies/northwestern_feature.png" alt="Northwestern Mutual"></a>
<a target="_blank" href="/case-studies/openai/"><img src="/images/case_studies/openai_feature.png" alt="OpenAI"></a>
<a target="_blank" href="/case-studies/peardeck/"><img src="/images/case_studies/peardeck_logo.png" alt="peardeck"></a>
<a href="/case-studies/pearson/"><img src="/images/case_studies/pearson_logo.png" alt="Pearson"></a>
<a target="_blank" href="https://cloud.google.com/customers/philips/"><img src="/images/case_studies/philips_logo.png" alt="Philips"></a>
<a target="_blank" href="/case-studies/pinterest/"><img src="/images/case_studies/pinterest.png" alt="Pinterest"></a>
<a target="_blank" href="https://cloudplatform.googleblog.com/2016/09/bringing-Pokemon-GO-to-life-on-Google-Cloud.html"><img src="/images/case_studies/pokemon_go_logo.png" alt="Pokemon GO"></a>
<a target="_blank" href="http://www.nextplatform.com/2016/05/24/samsung-experts-put-kubernetes-paces/"><img src="/images/case_studies/sds.png" alt="Samsung SDS"></a>
<a target="_blank" href="https://youtu.be/4gyeixJLabo"><img src="/images/case_studies/sap.png" alt="SAP"></a>
<a target="_blank" href="http://www.nextplatform.com/2016/05/24/samsung-experts-put-kubernetes-paces/"><img src="/images/case_studies/sds.png" alt="Samsung SDS"></a>
<a target="_blank" href="/case-studies/squarespace/"><img src="/images/case_studies/squarespace_feature.png" alt="Squarespace"></a>
<a target="_blank" href="https://www.youtube.com/watch?v=5378N5iLb2Q"><img src="/images/case_studies/soundcloud.png" alt="SoundCloud"></a>
<a target="_blank" href="https://www.youtube.com/watch?v=F3iMkz_NSvU"><img src="/images/case_studies/homeoffice.png" alt="UK Home Office"></a>
<a target="_blank" href="http://thenewstack.io/wepay-kubernetes-changed-business/"><img src="/images/case_studies/wepay.png" alt="WePay"></a>
<a target="_blank" href="/case-studies/wink/"><img src="/images/case_studies/wink.png" alt="Wink"></a>
<a href="/case-studies/wikimedia/"><img src="/images/case_studies/wikimedia_logo.png" alt="Wikimedia"></a>
<a target="_blank" href="/case-studies/wink/"><img src="/images/case_studies/wink.png" alt="Wink"></a>
<a target="_blank" href="https://kubernetes.io/blog/2016/10/kubernetes-and-openstack-at-yahoo-japan"><img src="/images/case_studies/yahooJapan_logo.png" alt="Yahoo! Japan"></a>
<a target="_blank" href="/case-studies/zalando/"><img src="/images/case_studies/zalando_feature.png" alt="Zalando"></a>
<a target="_blank" href="#" onclick="event.preventDefault(); kub.showVideo()"><img src="/images/case_studies/zulily_logo.png" alt="zulily"></a>
<a target="_blank" href="https://docs.google.com/a/google.com/forms/d/e/1FAIpQLScuI7Ye3VQHQTwBASrgkjQDSS5TP0g3AXfFhwSM9YpHgxRKFA/viewform" class="tell-your-story"><img src="/images/case_studies/story.png" alt="Tell your story"></a>
</div>
</main>
</section>

View File

@ -0,0 +1,96 @@
---
title: Capital One Case Study
case_study_styles: true
cid: caseStudies
css: /css/style_case_studies.css
---
<div class="banner1 desktop" style="background-image: url('/images/CaseStudy_capitalone_banner1.jpg')">
<h1> CASE STUDY:<img src="/images/capitalone-logo.png" style="margin-bottom:-2%" class="header_logo"><br> <div class="subhead">Supporting Fast Decisioning Applications with Kubernetes
</div></h1>
</div>
<div class="details">
Company &nbsp;<b>Capital One</b>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Location &nbsp;<b>McLean, Virginia</b>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Industry &nbsp;<b>Retail banking</b>
</div>
<hr>
<section class="section1">
<div class="cols">
<div class="col1">
<h2>Challenge</h2>
The team set out to build a provisioning platform for <a href="https://www.capitalone.com/">Capital One</a> applications deployed on AWS that use streaming, big-data decisioning, and machine learning. One of these applications handles millions of transactions a day; some deal with critical functions like fraud detection and credit decisioning. The key considerations: resilience and speed—as well as full rehydration of the cluster from base AMIs.
<br>
<h2>Solution</h2>
The decision to run <a href="https://kubernetes.io/">Kubernetes</a> "is very strategic for us," says John Swift, Senior Director Software Engineering. "We use Kubernetes as a substrate or an operating system, if you will. Theres a degree of affinity in our product development."
</div>
<div class="col2">
<h2>Impact</h2>
"Kubernetes is a significant productivity multiplier," says Lead Software Engineer Keith Gasser, adding that to run the platform without Kubernetes would "easily see our costs triple, quadruple what they are now for the amount of pure AWS expense." Time to market has been improved as well: "Now, a team can come to us and we can have them up and running with a basic decisioning app in a fortnight, which before would have taken a whole quarter, if not longer." Deployments increased by several orders of magnitude. Plus, the rehydration/cluster-rebuild process, which took a significant part of a day to do manually, now takes a couple hours with Kubernetes automation and declarative configuration.
</div>
</div>
</section>
<div class="banner2">
<div class="banner2text">
<iframe width="560" height="315" src="https://www.youtube.com/embed/UHVW01ksg-s" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe><br><br>
"With the scalability, the management, the coordination, Kubernetes really empowers us and gives us more time back than we had before." <span style="font-size:16px;text-transform:uppercase">— Jamil Jadallah, Scrum Master</span>
</div>
</div>
</div>
<section class="section2">
<div class="fullcol">
<h2></h2>
As a top 10 U.S. retail bank, Capital One has applications that handle millions of transactions a day. Big-data decisioning—for fraud detection, credit approvals and beyond—is core to the business. To support the teams that build applications with those functions for the bank, the cloud team led by Senior Director Software Engineering John Swift embraced Kubernetes for its provisioning platform. "Kubernetes and its entire ecosystem are very strategic for us," says Swift. "We use Kubernetes as a substrate or an operating system, if you will. Theres a degree of affinity in our product development."<br><br>
Almost two years ago, the team embarked on this journey by first working with Docker. Then came Kubernetes. "We wanted to put streaming services into Kubernetes as one feature of the workloads for fast decisioning, and to be able to do batch alongside it," says Lead Software Engineer Keith Gasser. "Once the data is streamed and batched, there are so many tool sets in <a href="https://flink.apache.org/">Flink</a> that we use for decisioning. We want to provide the tools in the same ecosystem, in a consistent way, rather than have a large custom snowflake ecosystem where every tool needs its own custom deployment. Kubernetes gives us the ability to bring all of these together, so the richness of the open source and even the license community dealing with big data can be corralled."
</div>
</section>
<div class="banner3" style="background-image: url('/images/CaseStudy_capitalone_banner3.jpg')">
<div class="banner3text">
"We want to provide the tools in the same ecosystem, in a consistent way, rather than have a large custom snowflake ecosystem where every tool needs its own custom deployment. Kubernetes gives us the ability to bring all of these together, so the richness of the open source and even the license community dealing with big data can be corralled."
</div>
</div>
<section class="section3">
<div class="fullcol">
In this first year, the impact has already been great. "Time to market is really huge for us," says Gasser. "Especially with fraud, you have to be very nimble in the way you respond to threats in the marketplace—being able to add and push new rules, detect new patterns of behavior, detect anomalies in account and transaction flows." With Kubernetes, "a team can come to us and we can have them up and running with a basic decisioning app in a fortnight, which before would have taken a whole quarter, if not longer. Kubernetes is a manifold productivity multiplier."<br><br>
Teams now have the tools to be autonomous in their deployments, and as a result, deployments have increased by two orders of magnitude. "And that was with just seven dedicated resources, without needing a whole group sitting there watching everything," says Scrum Master Jamil Jadallah. "Thats a huge cost savings. With the scalability, the management, the coordination, Kubernetes really empowers us and gives us more time back than we had before."
</div>
</section>
<div class="banner4" style="background-image: url('/images/CaseStudy_capitalone_banner4.jpg')">
<div class="banner4text">
With Kubernetes, "a team can come to us and we can have them up and running with a basic decisioning app in a fortnight, which before would have taken a whole quarter, if not longer. Kubernetes is a manifold productivity multiplier."
</div>
</div>
<section class="section5" style="padding:0px !important">
<div class="fullcol">
Kubernetes has also been a great time-saver for Capital Ones required periodic "rehydration" of clusters from base AMIs. To minimize the attack vulnerability profile for applications in the cloud, "Our entire clusters get rebuilt from scratch periodically, with new fresh instances and virtual server images that are patched with the latest and greatest security patches," says Gasser. This process used to take the better part of a day, and personnel, to do manually. Its now a quick Kubernetes job.<br><br>
Savings extend to both capital and operating expenses. "It takes very little to get into Kubernetes because its all open source," Gasser points out. "We went the DIY route for building our cluster, and we definitely like the flexibility of being able to embrace the latest from the community immediately without waiting for a downstream company to do it. Theres capex related to those licenses that we dont have to pay for. Moreover, theres capex savings for us from some of the proprietary software that we get to sunset in our particular domain. So that goes onto our ledger in a positive way as well." (Some of those open source technologies include Prometheus, Fluentd, gRPC, Istio, CNI, and Envoy.)
</div>
<div class="banner5">
<div class="banner5text">
"If we had to do all of this without Kubernetes, on underlying cloud services, I could easily see our costs triple, quadruple what they are now for the amount of pure AWS expense. That doesnt account for personnel to deploy and maintain all the additional infrastructure."
</div>
</div>
<div class="fullcol">
And on the opex side, Gasser says, the savings are high. "We run dozens of services, we have scores of pods, many daemon sets, and since were data-driven, we take advantage of EBS-backed volume claims for all of our stateful services. If we had to do all of this without Kubernetes, on underlying cloud services, I could easily see our costs triple, quadruple what they are now for the amount of pure AWS expense. That doesnt account for personnel to deploy and maintain all the additional infrastructure."<br><br>
The team is confident that the benefits will continue to multiply—without a steep learning curve for the engineers being exposed to the new technology. "As we onboard additional tenants in this ecosystem, I think the need for folks to understand Kubernetes may not necessarily go up. In fact, I think it goes down, and thats good," says Gasser. "Because that really demonstrates the scalability of the technology. You start to reap the benefits, and they can concentrate on all the features they need to build for great decisioning in the business— fraud decisions, credit decisions—and not have to worry about, Is my AWS server broken? Is my pod not running?"
</div>
</section>

View File

@ -0,0 +1,94 @@
---
title: ING Case Study
case_study_styles: true
cid: caseStudies
css: /css/style_case_studies.css
---
<div class="banner1" style="background-image: url('/images/CaseStudy_ing_banner1.jpg')">
<h1> CASE STUDY:<img src="/images/ing_logo.png" style="margin-bottom:-1.5%;" class="header_logo"><br> <div class="subhead"> Driving Banking Innovation with Cloud Native
</div></h1>
</div>
<div class="details">
Company &nbsp;<b>ING</b>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Location &nbsp;<b>Amsterdam, Netherlands
</b>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Industry &nbsp;<b>Finance</b>
</div>
<hr>
<section class="section1">
<div class="cols" style="width:85% !important;margin-left:10% !important">
<div class="col1" style="width:100% !important;">
<h2>Challenge</h2>
After undergoing an agile transformation, <a href="https://www.ing.com/">ING</a> realized it needed a standardized platform to support the work their developers were doing. "Our DevOps teams got empowered to be autonomous," says Infrastructure Architect Thijs Ebbers. "It has benefits; you get all kinds of ideas. But a lot of teams are going to devise the same wheel. Teams started tinkering with <a href="https://www.docker.com/">Docker</a>, Docker Swarm, <a href="https://kubernetes.io/">Kubernetes</a>, <a href="https://mesosphere.com/">Mesos</a>. Well, its not really useful for a company to have one hundred wheels, instead of one good wheel."
<br>
<br>
<h2>Solution</h2>
Using Kubernetes for container orchestration and Docker for containerization, the ING team began building an internal public cloud for its CI/CD pipeline and green-field applications. The pipeline, which has been built on Mesos Marathon, will be migrated onto Kubernetes. The bank-account management app <a href="https://www.yolt.com/">Yolt</a>, serving the U.K. (and soon France and Italy) market, is already live, hosted on a Kubernetes framework. At least two greenfield projects currently on the Kubernetes framework will be going into production later this year. By the end of 2018, the company plans to have converted a number of APIs used in the banking customer experience to cloud native APIs and host these on the Kubernetes-based&nbsp;platform.
<br>
</div>
<div class="col2" style="width:100% !important;padding-right:8%">
<br>
<h2>Impact</h2>
"Cloud native technologies are helping our speed, from getting an application to test to acceptance to production," says Infrastructure Architect Onno Van der Voort. "If you walk around ING now, you see all these DevOps teams, doing stand-ups, demoing. They try to get new functionality out there really fast. We held a hackathon for one of our existing components and basically converted it to cloud native within 2.5 days, though of course the tail takes more time before code is fully production ready."
<br>
</div>
</div>
</section>
<div class="banner2" style="padding-top:% !important">
<div class="banner2text" style="width:70% !important">
"The big cloud native promise to our business is the ability to go from idea to production within 48 hours. We are some years away from this, but thats quite feasible to us."
<span style="font-size:16px;text-transform:uppercase;letter-spacing:0.1em;"><br><br>— Thijs Ebbers, Infrastructure Architect, ING</span>
</div>
</div>
<section class="section2">
<div class="fullcol">
<h2>ING has long embraced innovation in banking, launching the internet-based ING Direct in&nbsp;1997. </h2>In that same spirit, the company underwent an agile transformation a few years ago. "Our DevOps teams got empowered to be autonomous," says Infrastructure Architect Thijs Ebbers. "It has benefits; you get all kinds of ideas. But a lot of teams are going to devise the same wheel. Teams started tinkering with Docker, Docker Swarm, Kubernetes, Mesos. Well, its not really useful for a company to have one hundred wheels, instead of one good wheel." <br><br>
Looking to standardize the deployment process within the companys strict security guidelines, the team looked at several solutions and found that in the past year, "Kubernetes won the container management framework wars," says Ebbers. "We decided to standardize ING on a Kubernetes framework." Everything is run on premise due to banking regulations, he adds, but "we will be building an internal public cloud. We are trying to get on par with what public clouds are doing. Thats one of the reasons we got Kubernetes."<br><br>
They also embraced Docker to address a major pain point in INGs CI/CD pipeline. Before containerization, "Every development team had to order a VM, and it was quite a heavy delivery model for them," says Infrastructure Architect Onno Van der Voort. "Another use case for containerization is when the application travels through the pipeline, they fire up Docker containers to do test work against the applications and after theyve done the work, the containers get killed again."
</div>
</section>
<div class="banner3" style="background-image: url('/images/CaseStudy_ing_banner3.jpg')">
<div class="banner3text">
"We decided to standardize ING on a Kubernetes framework." Everything is run on premise due to banking regulations, he adds, but "we will be building an internal public cloud. We are trying to get on par with what public clouds are doing. Thats one of the reasons we got Kubernetes."
<span style="font-size:16px;text-transform:uppercase;letter-spacing:0.1em;"><br><br>— Thijs Ebbers, Infrastructure Architect, ING</span>
</div>
</div>
<section class="section3">
<div class="fullcol">
Because of industry regulations, applications are only allowed to go through the pipeline, where compliance is enforced, rather than be deployed directly into a container. "We have to run the complete platform of services we need, many routing from different places," says Van der Voort. "We need this Kubernetes framework for deploying the containers, with all those components, monitoring, logging. Its complex." For that reason, ING has chosen to start on the <a href="https://www.openshift.org/">OpenShift Origin</a> Kubernetes distribution. <br><br>
Already, "cloud native technologies are helping our speed, from getting an application to test to acceptance to production," says Van der Voort. "If you walk around ING now, you see all these DevOps teams, doing stand-ups, demoing. They try to get new functionality out there really fast. We held a hackathon for one of our existing components and basically converted it to cloud native within 2.5 days, though of course the tail takes more time before code is fully production ready."<br><br>
The pipeline, which has been built on Mesos Marathon, will be migrated onto Kubernetes. Some legacy applications are also being rewritten as cloud native in order to run on the framework. At least two smaller greenfield projects built on Kubernetes will go into production this year. By the end of 2018, the company plans to have converted a number of APIs used in the banking customer experience to cloud native APIs and host these on the Kubernetes-based platform.
</div>
</section>
<div class="banner4" style="background-image: url('/images/CaseStudy_ing_banner4.jpg')">
<div class="banner4text">
"We have to run the complete platform of services we need, many routing from different places. We need this Kubernetes framework for deploying the containers, with all those components, monitoring, logging. Its complex." <span style="font-size:16px;text-transform:uppercase;letter-spacing:0.1em;"><br><br>— Onno Van der Voort, Infrastructure Architect, ING</span>
</div>
</div>
<div class="fullcol">
The team, however, doesnt see the banks back-end systems going onto the Kubernetes platform. "Our philosophy is it only makes sense to move things to cloud if they are cloud native," says Van der Voort. "If you have traditional architecture, build traditional patterns, it doesnt hold any value to go to the cloud." Adds Cloud Platform Architect Alfonso Fernandez-Barandiaran: "ING has a strategy about where we will go, in order to improve our agility. So its not about how cool this technology is, its about finding the right technology and the right approach."<br><br>
The Kubernetes framework will be hosting some greenfield projects that are high priority for ING: applications the company is developing in response to <a href="https://ec.europa.eu/info/law/payment-services-psd-2-directive-eu-2015-2366_en">PSD2</a>, the European Commission directive requiring more innovative online and mobile payments that went into effect at the beginning of 2018. For example, a bank-account management app called <a href="https://www.yolt.com/">Yolt</a>, serving the U.K. market (and soon France and Italy), was built on a Kubernetes platform and has gone into production. ING is also developing blockchain-enabled applications that will live on the Kubernetes platform. "Weve been contacted by a lot of development teams that have ideas with what they want to do with containers," says Ebbers.
</div>
<section class="section5" style="padding:0px !important">
<div class="banner5">
<div class="banner5text">
Even with the particular requirements that come in banking, ING has managed to take a lead in technology and innovation. "Every time we have constraints, we look for maybe a better way that we can use this technology." <span style="font-size:16px;text-transform:uppercase;letter-spacing:0.1em;"><br><br>— Alfonso Fernandez-Barandiaran, Cloud Platform Architect, ING</span></div>
</div>
<div class="fullcol">
Even with the particular requirements that come in banking, ING has managed to take a lead in technology and innovation. "Every time we have constraints, we look for maybe a better way that we can use this technology," says Fernandez-Barandiaran. <br><br>
The results, after all, are worth the effort. "The big cloud native promise to our business is the ability to go from idea to production within 48 hours," says Ebbers. "That would require all these projects to be mature. We are some years away from this, but thats quite feasible to us."
</div>
</section>

View File

@ -35,7 +35,7 @@ css: /css/style_case_studies.css
<div class="col2">
<h2>Impact</h2>
With the platform, there has been substantial improvements in productivity and speed of delivery. "In some cases, weve gone from nine months to provision physical assets in a data center to just a few minutes to provision and to get a new idea in front of a customer," says John Shirley, Lead Site Reliability Engineer for the Cloud Platform Team. Jackson estimates theyve achieved 15-20% developer productivity savings. Before, outages were an issue during their busiest time of year, the back-to-school period. Now, theres high confidence in their ability to meet aggressive customer SLAs.
With the platform, there has been substantial improvements in productivity and speed of delivery. "In some cases, weve gone from nine months to provision physical assets in a data center to just a few minutes to provision and get a new idea in front of a customer," says John Shirley, Lead Site Reliability Engineer for the Cloud Platform Team. Jackson estimates theyve achieved 15-20% developer productivity savings. Before, outages were an issue during their busiest time of year, the back-to-school period. Now, theres high confidence in their ability to meet aggressive customer SLAs.
<br>
</div>
@ -45,57 +45,60 @@ css: /css/style_case_studies.css
<div class="banner2" style="padding-top:% !important">
<div class="banner2text" style="width:70% !important">
"Were already seeing tremendous benefits with Kubernetes—improved engineering productivity, faster delivery of applications and a simplified infrastructure. But this is just the beginning. Kubernetes will help transform the way that educational content is delivered online."<span style="font-size:16px;text-transform:uppercase;letter-spacing:0.1em;">— Chris Jackson, Director for Cloud Platforms & SRE at Pearson</span>
"Were already seeing tremendous benefits with Kubernetes—improved engineering productivity, faster delivery of applications and a simplified infrastructure. But this is just the beginning. Kubernetes will help transform the way that educational content is delivered online."<br><br><span style="font-size:16px;text-transform:uppercase;letter-spacing:0.1em;">— Chris Jackson, Director for Cloud Platforms & SRE at Pearson</span>
</div>
</div>
<section class="section2">
<div class="fullcol">
In 2015, Pearson was already serving 75 million learners as the worlds largest education company, offering curriculum and assessment tools for Pre-K through college and beyond. Understanding that innovating the digital education experience was the key to the future of all forms of education, the company set out to increase its reach to 200 million people by 2025.<br><br>
The team adopted Kubernetes when it was still version 1.2 and are still going strong now on 1.7; they use Terraform and Ansible to deploy it on to basic AWS primitives. "We were trying to understand how we can create value for Pearson from this technology," says Ben Somogyi, Principal Architect for the Cloud Platforms. "It turned out that Kubernetes benefits are huge. Were trying to help our applications development teams that use our platform go faster, so we filled that gap with a CI/CD pipeline that builds their images for them, standardizes them, patches everything up, allows them to deploy their different environments onto the cluster, and obfuscates the details of how difficult the work underneath the covers is."
That goal would require a transformation of its existing infrastructure, which was in data centers. In some cases, it took nine months to provision physical assets. In order to adapt to the demands of its growing online audience, Pearson needed an infrastructure platform that would be able to scale quickly and deliver business-critical products to market faster. "We had to think beyond simply enabling automated provisioning," says Chris Jackson, Director for Cloud Platforms & SRE at Pearson. "We realized we had to build a platform that would allow Pearson developers to build, manage and deploy applications in a completely different way." <br><br>
With 400 development groups and diverse brands with varying business and technical needs, Pearson embraced Docker container technology so that each brand could experiment with building new types of content using their preferred technologies, and then deliver it using containers. Jackson chose Kubernetes orchestration "because of its flexibility, ease of management and the way it would improve our engineers productivity," he says.<br><br>
The team adopted Kubernetes when it was still version 1.2 and are still going strong now on 1.7; they use Terraform and Ansible to deploy it on to basic AWS primitives. "We were trying to understand how we can create value for Pearson from this technology," says Ben Somogyi, Principal Architect for the Cloud Platforms. "It turned out that Kubernetes benefits are huge. Were trying to help our applications development teams that use our platform go faster, so we filled that gap with a CI/CD pipeline that builds their images for them, standardizes them, patches everything up, allows them to deploy their different environments onto the cluster, and obfuscating the details of how difficult the work underneath the covers is."
</div>
</section>
<div class="banner3" style="background-image: url('/images/CaseStudy_pearson_banner3.jpg')">
<div class="banner3text">
"Your internal customers need to feel like they are choosing the very best option for them. We are experiencing this first hand in the growth of adoption. We are seeing triple-digit, year-on-year growth of the service."
"Your internal customers need to feel like they are choosing the very best option for them. We are experiencing this first hand in the growth of adoption. We are seeing triple-digit, year-on-year growth of the service."<span style="font-size:16px;text-transform:uppercase;letter-spacing:0.1em;"><br><br>— Chris Jackson, Director for Cloud Platforms & SRE at Pearson</span>
</div>
</div>
<section class="section3">
<div class="fullcol">
That work resulted in two tools for building and deploying applications in the cluster that Pearson has open sourced. "Were an education company, so we want to share what we can," says Somogyi.<br><br>
Now that development teams no longer have to worry about infrastructure, there have been substantial improvements in productivity and speed of delivery. "In some cases, weve gone from nine months to provision physical assets in a data center to just a few minutes to provision and to get a new idea in front of a customer," says John Shirley, Lead Site Reliability Engineer for the Cloud Platform Team. <br><br>
According to Jackson, the Cloud Platforms team can "provision a new proof-of-concept environment for a development team in minutes, and then they can take that to production as quickly as they are able to. This is the value proposition of all major technology services, and we had to compete like one to become our developers preferred choice. Just because you work for the same company, you do not have the right to force people into a mediocre service. Your internal customers need to feel like they are choosing the very best option for them. We are experiencing this first hand in the growth of adoption. We are seeing triple-digit, year-on-year growth of the service."<br><br>
Jackson estimates theyve achieved a 15-20% boost in productivity for developer teams who adopt the platform. They also see a reduction in the number of customer-impacting incidents. Plus, says Jackson, "Teams who were previously limited to 1-2 releases per academic year can now ship code multiple times per day!"
</div>
</section>
<div class="banner4" style="background-image: url('/images/CaseStudy_pearson_banner4.jpg')">
<div class="banner4text">
"Teams who were previously limited to 1-2 releases per academic year can now ship code multiple times per day!"
"Teams who were previously limited to 1-2 releases per academic year can now ship code multiple times per day!" <span style="font-size:16px;text-transform:uppercase;letter-spacing:0.1em;"><br><br>— Chris Jackson, Director for Cloud Platforms & SRE at Pearson</span>
</div>
</div>
<section class="section5" style="padding:0px !important">
<div class="fullcol">
Availability has also been positively impacted. The back-to-school period is the companys busiest time of year, and "you have to keep applications up," says Somogyi. Before, this was a pain point for the legacy infrastructure. Now, for the applications that have been migrated to the Kubernetes platform, "We have 100% uptime. Were not worried about 9s. There arent any. Its 100%, which is pretty astonishing for us, compared to some of the existing platforms that have legacy challenges," says Shirley.
<br><br>
"You cant even begin to put a price on how much that saves the company," Jackson explains. "A reduction in the number of support cases takes load out of our operations. The customer sentiment of having a reliable product drives customer retention and growth. It frees us to think about investing more into our digital transformation and taking a better quality of education to a global scale."
"You cant even begin to put a price on how much that saves the company," Jackson explains. "A reduction in the number of support cases takes load out of our operations. The customer sentiment of having a reliable product drives customer retention and growth. It frees us to think about investing more into our digital transformation and taking a better quality of education to a global scale."
<br><br>
The platform itself is also being broken down, "so we can quickly release smaller pieces of the platform, like upgrading our Kubernetes or all the different modules that make up our platform," says Somogyi. "One of the big focuses in 2018 is this scheme of delivery to update the platform itself."
<br><br>
Guided by Pearson's overarching goal of getting to 200 million users, the team has run internal tests of the platform's scalability. "We had a challenge: 28 million requests within a 10 minute period," says Shirley. "And we demonstrated that we can hit that, with an acceptable latency. We saw that we could actually get that pretty readily, and we scaled up in just a few seconds, using open source tools entirely. Shout out to <a href="https://locust.io/">Locust</a> for that one. So that's amazing."
</div>
<div class="banner5">
<div class="banner5text">
"We have 100% uptime. Were not worried about 9s. There arent any. Its 100%, which is pretty astonishing for us, compared to some of the existing platforms that have legacy challenges. You cant even begin to put a price on how much that saves the company."</div>
"We have 100% uptime. Were not worried about 9s. There arent any. Its 100%, which is pretty astonishing for us, compared to some of the existing platforms that have legacy challenges. You cant even begin to put a price on how much that saves the company." <span style="font-size:16px;text-transform:uppercase;letter-spacing:0.1em;"><br><br>— Benjamin Somogyi, Principal Systems Architect at Pearson</span></div>
</div>
<div class="fullcol">
In just two years, "We're already seeing tremendous benefits with Kubernetes—improved engineering productivity, faster delivery of applications and a simplified infrastructure," says Jackson. "But this is just the beginning. Kubernetes will help transform the way that educational content is delivered online."<br><br>
So far, about 15 production products are running on the new platform, including Pearson's new flagship digital education service, the Global Learning Platform. The Cloud Platform team continues to prepare, onboard and support customers that are a good fit for the platform. Some existing products will be refactored into 12-factor apps, while others are being developed so that they can live on the platform from the get-go. "There are challenges with bringing in new customers of course, because we have to help them to see a different way of developing, a different way of building," says Shirley. <br><br>
But, he adds, "It is our corporate motto: Always Learning. We encourage those teams that haven't started a cloud native journey, to see the future of technology, to learn, to explore. It will pique your interest. Keep learning."
</div>
</section>

View File

@ -0,0 +1,104 @@
---
title: Pinterest Case Study
case_study_styles: true
cid: caseStudies
css: /css/style_case_studies.css
---
<div class="banner1 desktop" style="background-image: url('/images/CaseStudy_pinterest_banner1.jpg')">
<h1> CASE STUDY:<img src="/images/pinterest_logo.png" style="margin-bottom:-1%" class="header_logo"><br> <div class="subhead">Pinning Its Past, Present, and Future on Cloud Native
</div></h1>
</div>
<div class="details">
Company &nbsp;<b>Pinterest</b>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Location &nbsp;<b>San Francisco, California</b>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Industry &nbsp;<b>Web and Mobile App</b>
</div>
<hr>
<section class="section1">
<div class="cols">
<div class="col1">
<h2>Challenge</h2>
After eight years in existence, Pinterest had grown to 1,000 microservices, with multiple layers of infrastructure and a diverse set of setup tools and platforms. In 2016, the company launched a roadmap towards a new compute platform, led by the vision of creating the fastest path from an idea to production, without making engineers worry about the underlying infrastructure.
<br>
<h2>Solution</h2>
The first phase involved moving services to Docker containers. Once these services went into production in early 2017, the team began looking at orchestration to help create efficiencies and manage them in a decentralized way. After an evaluation of various solutions, Pinterest went with Kubernetes.
</div>
<div class="col2">
<h2>Impact</h2>
"By moving to Kubernetes the team was able to build on-demand scaling and new failover policies, in addition to simplifying the overall deployment and management of a complicated piece of infrastructure such as Jenkins," says Micheal Benedict, Product Manager for the Cloud and the Data Infrastructure Group at Pinterest. "We not only saw reduced build times but also huge efficiency wins. For instance, the team reclaimed over 80 percent of capacity during non-peak hours. As a result, the Jenkins Kubernetes cluster now uses 30 percent less instance-hours per-day when compared to the previous static cluster."
</div>
</div>
</section>
<div class="banner2">
<div class="banner2text">
"So far its been good, especially the elasticity around how we can configure our Jenkins workloads on that Kubernetes shared cluster. That is the win we&nbsp;were&nbsp;pushing&nbsp;for." <span style="font-size:14px;letter-spacing:0.12em;padding-top:20px;text-transform:uppercase;line-height:14px"><br><br>— Micheal Benedict, Product Manager for the Cloud and the Data Infrastructure Group at Pinterest</span>
</div>
</div>
<section class="section2">
<div class="fullcol">
<h2>Pinterest was born on the cloud—running on <a href="https://aws.amazon.com/">AWS</a> since day one in 2010—but even cloud native companies can experience some growing pains.</h2> Since its launch, Pinterest has become a household name, with more than 200 million active monthly users and 100 billion objects saved. Underneath the hood, there are 1,000 microservices running and hundreds of thousands of data jobs.<br><br>
With such growth came layers of infrastructure and diverse set-up tools and platforms for the different workloads, resulting in an inconsistent and complex end-to-end developer experience, and ultimately less velocity to get to production.
So in 2016, the company launched a roadmap toward a new compute platform, led by the vision of having the fastest path from an idea to production, without making engineers worry about the underlying infrastructure. <br><br>
The first phase involved moving to Docker. "Pinterest has been heavily running on virtual machines, on EC2 instances directly, for the longest time," says Micheal Benedict, Product Manager for the Cloud and the Data Infrastructure Group. "To solve the problem around packaging software and not make engineers own portions of the fleet and those kinds of challenges, we standardized the packaging mechanism and then moved that to the container on top of the VM. Not many drastic changes. We didn't want to boil the ocean at that point."
</div>
</section>
<div class="banner3" style="background-image: url('/images/CaseStudy_pinterest_banner3.jpg')">
<div class="banner3text">
"Though Kubernetes lacked certain things we wanted, we realized that by the time we get to productionizing many of those things, well be able to leverage what the community is doing." <span style="font-size:14px;letter-spacing:0.12em;padding-top:20px;text-transform:uppercase;line-height:14px"><br><br>— MICHEAL BENEDICT, PRODUCT MANAGER FOR THE CLOUD AND THE DATA INFRASTRUCTURE GROUP AT PINTEREST</span>
</div>
</div>
<section class="section3">
<div class="fullcol">
The first service that was migrated was the monolith API fleet that powers most of Pinterest. At the same time, Benedict's infrastructure governance team built chargeback and capacity planning systems to analyze how the company uses its virtual machines on AWS. "It became clear that running on VMs is just not sustainable with what we're doing," says Benedict. "A lot of resources were underutilized. There were efficiency efforts, which worked fine at a certain scale, but now you have to move to a more decentralized way of managing that. So orchestration was something we thought could help solve that piece."<br><br>
That led to the second phase of the roadmap. In July 2017, after an eight-week evaluation period, the team chose Kubernetes over other orchestration platforms. "Kubernetes lacked certain things at the time—for example, we wanted Spark on Kubernetes," says Benedict. "But we realized that the dev cycles we would put in to even try building that is well worth the outcome, both for Pinterest as well as the community. We've been in those conversations in the Big Data SIG. We realized that by the time we get to productionizing many of those things, we'll be able to leverage what the community is doing."<br><br>
At the beginning of 2018, the team began onboarding its first use case into the Kubernetes system: Jenkins workloads. "Although we have builds happening during a certain period of the day, we always need to allocate peak capacity," says Benedict. "They don't have any auto-scaling capabilities, so that capacity stays constant. It is difficult to speed up builds because ramping up takes more time. So given those kind of concerns, we thought that would be a perfect use case for us to work on."
</div>
</section>
<div class="banner4" style="background-image: url('/images/CaseStudy_pinterest_banner4.jpg')">
<div class="banner4text">
"So far its been good, especially the elasticity around how we can configure our Jenkins workloads on Kubernetes shared cluster. That is the win we were pushing for." <span style="font-size:14px;letter-spacing:0.12em;padding-top:20px;text-transform:uppercase;line-height:14px"><br><br>— MICHEAL BENEDICT, PRODUCT MANAGER FOR THE CLOUD AND THE DATA INFRASTRUCTURE GROUP AT PINTEREST</span>
</div>
</div>
<section class="section5" style="padding:0px !important">
<div class="fullcol">
They ramped up the cluster, and working with a team of four people, got the Jenkins Kubernetes cluster ready for production. "We still have our static Jenkins cluster," says Benedict, "but on Kubernetes, we are doing similar builds, testing the entire pipeline, getting the artifact ready and just doing the comparison to see, how much time did it take to build over here. Is the SLA okay, is the artifact generated correct, are there issues there?" <br><br>
"So far its been good," he adds, "especially the elasticity around how we can configure our Jenkins workloads on Kubernetes shared cluster. That is the win we were pushing for."<br><br>
By the end of Q1 2018, the team successfully migrated Jenkins Master to run natively on Kubernetes and also collaborated on the <a href="https://github.com/jenkinsci/kubernetes-plugin">Jenkins Kubernetes Plugin</a> to manage the lifecycle of workers. "Were currently building the entire Pinterest JVM stack (one of the larger monorepos at Pinterest which was recently bazelized) on this new cluster," says Benedict. "At peak, we run thousands of pods on a few hundred nodes. Overall, by moving to Kubernetes the team was able to build on-demand scaling and new failover policies, in addition to simplifying the overall deployment and management of a complicated piece of infrastructure such as Jenkins. We not only saw reduced build times but also huge efficiency wins. For instance, the team reclaimed over 80 percent of capacity during non-peak hours. As a result, the Jenkins Kubernetes cluster now uses 30 percent less instance-hours per-day when compared to the previous static cluster."
</div>
<div class="banner5">
<div class="banner5text">
"We are in the position to run things at scale, in a public cloud environment, and test things out in way that a lot of people might not be able to do." <span style="font-size:14px;letter-spacing:0.12em;padding-top:20px;text-transform:uppercase;line-height:14px"><br><br>— MICHEAL BENEDICT, PRODUCT MANAGER FOR THE CLOUD AND THE DATA INFRASTRUCTURE GROUP AT PINTEREST</span>
</div>
</div>
<div class="fullcol">
Benedict points to a "pretty robust roadmap" going forward. In addition to the Pinterest big data team's experiments with Spark on Kubernetes, the company collaborated with Amazon's EKS team on an ENI/CNI plug-in. <br><br>
Once the Jenkins cluster is up and running out of dark mode, Benedict hopes to establish best practices, including having governance primitives established—including integration with the chargeback system—before moving on to migrating the next service. "We have a healthy pipeline of use-cases to be on-boarded. After Jenkins, we want to enable support for Tensorflow and Apache Spark. At some point, we aim to move the company's monolithic API service. If we move that and understand the complexity around that, it builds our confidence," says Benedict. "It sets us up for migration of all our other services."<br><br>
After years of being a cloud native pioneer, Pinterest is eager to share its ongoing journey. "We are in the position to run things at scale, in a public cloud environment, and test things out in a way that a lot of people might not be able to do," says Benedict. "We're in a great position to contribute back some of those learnings."
</div>
</section>

View File

@ -1,3 +0,0 @@
# Kubernetes Community Code of Conduct
Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)

View File

@ -38,6 +38,17 @@ cid: community
frameborder="0" scrolling="no"></iframe>
</div>
</div> -->
<div class="content">
<h3>Code of Conduct</h3>
<p>The Kubernetes community values respect and inclusiveness, and
enforces a <a href="code-of-conduct/">Code of Conduct</a> in all
interactions. If you notice a violation of the Code of Conduct at
an event or meeting, in Slack, or in another communication
mechanism, reach out to the
<a href="mailto:steering-private@kubernetes.io">steering committee</a>.
Your anonymity will be protected.</p>
</div>
</main>
</section>

View File

@ -0,0 +1,28 @@
---
title: Community
layout: basic
cid: community
css: /css/community.css
---
<div class="community_main">
<h1>Kubernetes Community Code of Conduct</h1>
Kubernetes follows the
<a href="https://github.com/cncf/foundation/blob/master/code-of-conduct.md">CNCF Code of Conduct</a>.
The text of the CNCF CoC is replicated below, as of
<a href="https://github.com/cncf/foundation/blob/0ce4694e5103c0c24ca90c189da81e5408a46632/code-of-conduct.md">commit 0ce4694</a>.
If you notice that this is out of date, please
<a href="https://github.com/kubernetes/website/issues/new">file an issue</a>.
If you notice a violation of the Code of Conduct at an event or meeting, in
Slack, or in another communication mechanism, reach out to the
<a href="mailto:steering-private@kubernetes.io">steering committee</a>. Your
anonymity will be protected.
<div class="cncf_coc_container">
{{< include "/static/cncf-code-of-conduct.md" >}}
</div>
</div>

View File

@ -0,0 +1,2 @@
The files in this directory have been imported from other sources. Do not
edit them directly, except by replacing them with new versions.

View File

@ -0,0 +1,45 @@
<!-- Do not edit this file directly. Get the latest from
https://github.com/cncf/foundation/blob/master/code-of-conduct.md -->
## CNCF Community Code of Conduct v1.0
### Contributor Code of Conduct
As contributors and maintainers of this project, and in the interest of fostering
an open and welcoming community, we pledge to respect all people who contribute
through reporting issues, posting feature requests, updating documentation,
submitting pull requests or patches, and other activities.
We are committed to making participation in this project a harassment-free experience for
everyone, regardless of level of experience, gender, gender identity and expression,
sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
religion, or nationality.
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery
* Personal attacks
* Trolling or insulting/derogatory comments
* Public or private harassment
* Publishing other's private information, such as physical or electronic addresses,
without explicit permission
* Other unethical or unprofessional conduct.
Project maintainers have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are not
aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers
commit themselves to fairly and consistently applying these principles to every aspect
of managing this project. Project maintainers who do not follow or enforce the Code of
Conduct may be permanently removed from the project team.
This code of conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community.
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting a CNCF project maintainer, Sarah Novotny <sarahnovotny@google.com>, and/or Dan Kohn <dan@linuxfoundation.org>.
This Code of Conduct is adapted from the Contributor Covenant
(http://contributor-covenant.org), version 1.2.0, available at
http://contributor-covenant.org/version/1/2/0/
### CNCF Events Code of Conduct
CNCF events are governed by the Linux Foundation [Code of Conduct](http://events.linuxfoundation.org/events/cloudnativecon/attend/code-of-conduct) available on the event page. This is designed to be compatible with the above policy and also includes more details on responding to incidents.

View File

@ -1,11 +1,18 @@
---
title: Concepts
main_menu: true
content_template: templates/concept
weight: 40
---
{{% capture overview %}}
The Concepts section helps you learn about the parts of the Kubernetes system and the abstractions Kubernetes uses to represent your cluster, and helps you obtain a deeper understanding of how Kubernetes works.
{{% /capture %}}
{{% capture body %}}
## Overview
To work with Kubernetes, you use *Kubernetes API objects* to describe your cluster's *desired state*: what applications or other workloads you want to run, what container images they use, the number of replicas, what network and disk resources you want to make available, and more. You set your desired state by creating objects using the Kubernetes API, typically via the command-line interface, `kubectl`. You can also use the Kubernetes API directly to interact with the cluster and set or modify your desired state.
@ -57,9 +64,12 @@ The nodes in a cluster are the machines (VMs, physical servers, etc) that run yo
* [Annotations](/docs/concepts/overview/working-with-objects/annotations/)
{{% /capture %}}
### What's next
{{% capture whatsnext %}}
If you would like to write a concept page, see
[Using Page Templates](/docs/home/contribute/page-templates/)
for information about the concept page type and the concept template.
{{% /capture %}}

View File

@ -28,15 +28,15 @@ All communication paths from the cluster to the master terminate at the
apiserver (none of the other master components are designed to expose remote
services). In a typical deployment, the apiserver is configured to listen for
remote connections on a secure HTTPS port (443) with one or more forms of
client [authentication](/docs/admin/authentication/) enabled. One or more forms
of [authorization](/docs/admin/authorization/) should be enabled, especially
if [anonymous requests](/docs/admin/authentication/#anonymous-requests) or
[service account tokens](/docs/admin/authentication/#service-account-tokens)
client [authentication](/docs/reference/access-authn-authz/authentication/) enabled.
One or more forms of [authorization](/docs/reference/access-authn-authz/authorization/)
should be enabled, especially if [anonymous requests](/docs/reference/access-authn-authz/authentication/#anonymous-requests)
or [service account tokens](/docs/reference/access-authn-authz/authentication/#service-account-tokens)
are allowed.
Nodes should be provisioned with the public root certificate for the cluster
such that they can connect securely to the apiserver along with valid client
credentials. For example, on a default GCE deployment, the client credentials
credentials. For example, on a default GKE deployment, the client credentials
provided to the kubelet are in the form of a client certificate. See
[kubelet TLS bootstrapping](/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/)
for automated provisioning of kubelet client certificates.

View File

@ -32,7 +32,7 @@ Before choosing a guide, here are some considerations:
Note: Not all distros are actively maintained. Choose distros which have been tested with a recent version of Kubernetes.
If you are using a guide involving Salt, see [Configuring Kubernetes with Salt](/docs/admin/salt/).
-If you are using a guide involving Salt, see [Configuring Kubernetes with Salt](/docs/setup/salt/).
## Managing a cluster
@ -48,13 +48,13 @@ If you are using a guide involving Salt, see [Configuring Kubernetes with Salt](
* [Kubernetes Container Environment](/docs/concepts/containers/container-environment-variables/) describes the environment for Kubelet managed containers on a Kubernetes node.
* [Controlling Access to the Kubernetes API](/docs/admin/accessing-the-api/) describes how to set up permissions for users and service accounts.
* [Controlling Access to the Kubernetes API](/docs/reference/access-authn-authz/controlling-access/) describes how to set up permissions for users and service accounts.
* [Authenticating](/docs/admin/authentication/) explains authentication in Kubernetes, including the various authentication options.
* [Authenticating](/docs/reference/access-authn-authz/authentication/) explains authentication in Kubernetes, including the various authentication options.
* [Authorization](/docs/admin/authorization/) is separate from authentication, and controls how HTTP calls are handled.
* [Authorization](/docs/reference/access-authn-authz/authorization/) is separate from authentication, and controls how HTTP calls are handled.
* [Using Admission Controllers](/docs/admin/admission-controllers/) explains plug-ins which intercept requests to the Kubernetes API server after authentication and authorization.
* [Using Admission Controllers](/docs/reference/access-authn-authz/admission-controllers/) explains plug-ins which intercept requests to the Kubernetes API server after authentication and authorization.
* [Using Sysctls in a Kubernetes Cluster](/docs/concepts/cluster-administration/sysctl-cluster/) describes to an administrator how to use the `sysctl` command-line tool to set kernel parameters.

View File

@ -178,6 +178,10 @@ clusters up to 5000 nodes. See [Building Large Clusters](/docs/setup/cluster-lar
* See this [setup guide](/docs/tutorials/federation/set-up-cluster-federation-kubefed/) for cluster federation.
* See this [Kubecon2016 talk on federation](https://www.youtube.com/watch?v=pq9lbkmxpS8)
* See this [Kubecon2017 Europe update on federation](https://www.youtube.com/watch?v=kwOvOLnFYck)
* See this [Kubecon2018 Europe update on sig-multicluster](https://www.youtube.com/watch?v=vGZo5DaThQU)
* See this [Kubecon2018 Europe Federation-v2 prototype presentation](https://youtu.be/q27rbaX5Jis?t=7m20s)
* See this [Federation-v2 Userguide](https://github.com/kubernetes-sigs/federation-v2/blob/master/docs/userguide.md)
{{% /capture %}}

View File

@ -160,7 +160,7 @@ Consider the following example. A pod runs a single container, and the container
writes to two different log files, using two different formats. Here's a
configuration file for the Pod:
{{< code file="two-files-counter-pod.yaml" >}}
{{< codenew file="admin/logging/two-files-counter-pod.yaml" >}}
It would be a mess to have log entries of different formats in the same log
stream, even if you managed to redirect both components to the `stdout` stream of
@ -170,7 +170,7 @@ the logs to its own `stdout` stream.
Here's a configuration file for a pod that has two sidecar containers:
{{< code file="two-files-counter-pod-streaming-sidecar.yaml" >}}
{{< codenew file="admin/logging/two-files-counter-pod-streaming-sidecar.yaml" >}}
Now when you run this pod, you can access each log stream separately by
running the following commands:
@ -226,7 +226,7 @@ which uses fluentd as a logging agent. Here are two configuration files that
you can use to implement this approach. The first file contains
a [ConfigMap](/docs/tasks/configure-pod-container/configure-pod-configmap/) to configure fluentd.
{{< code file="fluentd-sidecar-config.yaml" >}}
{{< codenew file="admin/logging/fluentd-sidecar-config.yaml" >}}
**Note**: The configuration of fluentd is beyond the scope of this article. For
information about configuring fluentd, see the
@ -235,7 +235,7 @@ information about configuring fluentd, see the
The second file describes a pod that has a sidecar container running fluentd.
The pod mounts a volume where fluentd can pick up its configuration data.
{{< code file="two-files-counter-pod-agent-sidecar.yaml" >}}
{{< codenew file="admin/logging/two-files-counter-pod-agent-sidecar.yaml" >}}
After some time you can find log messages in the Stackdriver interface.

View File

@ -22,12 +22,12 @@ You've deployed your application and exposed it via a service. Now what? Kuberne
Many applications require multiple resources to be created, such as a Deployment and a Service. Management of multiple resources can be simplified by grouping them together in the same file (separated by `---` in YAML). For example:
{{< code file="nginx-app.yaml" >}}
{{< codenew file="application/nginx-app.yaml" >}}
Multiple resources can be created the same way as a single resource:
```shell
$ kubectl create -f https://k8s.io/docs/concepts/cluster-administration/nginx-app.yaml
$ kubectl create -f https://k8s.io/examples/application/nginx-app.yaml
service "my-nginx-svc" created
deployment "my-nginx" created
```
@ -37,13 +37,13 @@ The resources will be created in the order they appear in the file. Therefore, i
`kubectl create` also accepts multiple `-f` arguments:
```shell
$ kubectl create -f https://k8s.io/docs/concepts/cluster-administration/nginx/nginx-svc.yaml -f https://k8s.io/docs/concepts/cluster-administration/nginx/nginx-deployment.yaml
$ kubectl create -f https://k8s.io/examples/application/nginx/nginx-svc.yaml -f https://k8s.io/examples/application/nginx/nginx-deployment.yaml
```
And a directory can be specified rather than or in addition to individual files:
```shell
$ kubectl create -f https://k8s.io/docs/concepts/cluster-administration/nginx/
$ kubectl create -f https://k8s.io/examples/application/nginx/
```
`kubectl` will read any files with suffixes `.yaml`, `.yml`, or `.json`.
@ -53,8 +53,8 @@ It is a recommended practice to put resources related to the same microservice o
A URL can also be specified as a configuration source, which is handy for deploying directly from configuration files checked into github:
```shell
$ kubectl create -f https://raw.githubusercontent.com/kubernetes/website/master/docs/concepts/cluster-administration/nginx-deployment.yaml
deployment "nginx-deployment" created
$ kubectl create -f https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/application/nginx/nginx-deployment.yaml
deployment "my-nginx" created
```
## Bulk operations in kubectl
@ -62,7 +62,7 @@ deployment "nginx-deployment" created
Resource creation isn't the only operation that `kubectl` can perform in bulk. It can also extract resource names from configuration files in order to perform other operations, in particular to delete the same resources you created:
```shell
$ kubectl delete -f https://k8s.io/docs/concepts/cluster-administration/nginx-app.yaml
$ kubectl delete -f https://k8s.io/examples/application/nginx-app.yaml
deployment "my-nginx" deleted
service "my-nginx-svc" deleted
```
@ -89,7 +89,7 @@ NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
my-nginx-svc 10.0.0.208 <pending> 80/TCP 0s
```
With the above commands, we first create resources under `docs/concepts/cluster-administration/nginx/` and print the resources created with `-o name` output format
With the above commands, we first create resources under `examples/application/nginx/` and print the resources created with `-o name` output format
(print each resource as resource/name). Then we `grep` only the "service", and then print it with `kubectl get`.
If you happen to organize your resources across several subdirectories within a particular directory, you can recursively perform the operations on the subdirectories also, by specifying `--recursive` or `-R` alongside the `--filename,-f` flag.
@ -321,7 +321,7 @@ Then, you can use [`kubectl apply`](/docs/reference/generated/kubectl/kubectl-co
This command will compare the version of the configuration that you're pushing with the previous version and apply the changes you've made, without overwriting any automated changes to properties you haven't specified.
```shell
$ kubectl apply -f docs/concepts/cluster-administration/nginx/nginx-deployment.yaml
$ kubectl apply -f https://k8s.io/examples/application/nginx/nginx-deployment.yaml
deployment "my-nginx" configured
```
@ -371,7 +371,7 @@ and
In some cases, you may need to update resource fields that cannot be updated once initialized, or you may just want to make a recursive change immediately, such as to fix broken pods created by a Deployment. To change such fields, use `replace --force`, which deletes and re-creates the resource. In this case, you can simply modify your original configuration file:
```shell
$ kubectl replace -f docs/concepts/cluster-administration/nginx/nginx-deployment.yaml --force
$ kubectl replace -f https://k8s.io/examples/application/nginx/nginx-deployment.yaml --force
deployment "my-nginx" deleted
deployment "my-nginx" replaced
```
@ -405,4 +405,4 @@ That's it! The Deployment will declaratively update the deployed nginx applicati
- [Learn about how to use `kubectl` for application introspection and debugging.](/docs/tasks/debug-application-cluster/debug-application-introspection/)
- [Configuration Best Practices and Tips](/docs/concepts/configuration/overview/)
{{% /capture %}}
{{% /capture %}}

View File

@ -1,19 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deployment
spec:
selector:
matchLabels:
app: nginx
replicas: 3
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.7.9
ports:
- containerPort: 80

View File

@ -178,6 +178,8 @@ in the section [Interlude: built-in node labels](#interlude-built-in-node-labels
processing which can slow down scheduling in large clusters significantly. We do
not recommend using them in clusters larger than several hundred nodes.
**Note:** Pod anti-affinity requires nodes to be consistently labelled, i.e. every node in the cluster must have an appropriate label matching `topologyKey`. If some or all nodes are missing the specified `topologyKey` label, it can lead to unintended behavior.
As with node affinity, there are currently two types of pod affinity and anti-affinity, called `requiredDuringSchedulingIgnoredDuringExecution` and
`preferredDuringSchedulingIgnoredDuringExecution` which denote "hard" vs. "soft" requirements.
See the description in the node affinity section earlier.
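To make the note above concrete, here is a minimal sketch (names, labels, and image are placeholders) of a Deployment that uses the "hard" `requiredDuringSchedulingIgnoredDuringExecution` form of pod anti-affinity with `kubernetes.io/hostname` as the `topologyKey`, so that no two replicas should land on the same node. As noted, this only behaves as intended if every node carries that label.

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web
spec:
  replicas: 3
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:   # the "hard" variant
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values: ["web"]
            topologyKey: kubernetes.io/hostname   # every node must carry this label
      containers:
      - name: nginx
        image: nginx:1.7.9      # illustrative image
```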

View File

@ -54,6 +54,7 @@ One cpu, in Kubernetes, is equivalent to:
- 1 AWS vCPU
- 1 GCP Core
- 1 Azure vCore
- 1 IBM vCPU
- 1 *Hyperthread* on a bare-metal Intel processor with Hyperthreading
Fractional requests are allowed. A Container with
@ -296,14 +297,13 @@ Container in the Pod was terminated and restarted five times.
You can call `kubectl get pod` with the `-o go-template=...` option to fetch the status
of previously terminated Containers:
```shell{% raw %}
```shell
[13:59:01] $ kubectl get pod -o go-template='{{range.status.containerStatuses}}{{"Container Name: "}}{{.name}}{{"\r\nLastState: "}}{{.lastState}}{{end}}' simmemleak-hra99
Container Name: simmemleak
LastState: map[terminated:map[exitCode:137 reason:OOM Killed startedAt:2015-07-07T20:58:43Z finishedAt:2015-07-07T20:58:43Z containerID:docker://0e4095bba1feccdfe7ef9fb6ebffe972b4b14285d5acdec6f0d3ae8a22fad8b2]]{% endraw %}
LastState: map[terminated:map[exitCode:137 reason:OOM Killed startedAt:2015-07-07T20:58:43Z finishedAt:2015-07-07T20:58:43Z containerID:docker://0e4095bba1feccdfe7ef9fb6ebffe972b4b14285d5acdec6f0d3ae8a22fad8b2]]
```
You can see that the Container was terminated because of `reason:OOM Killed`,
where `OOM` stands for Out Of Memory.
You can see that the Container was terminated because of `reason:OOM Killed`, where `OOM` stands for Out Of Memory.
## Local ephemeral storage
{{< feature-state state="beta" >}}

View File

@ -12,36 +12,37 @@ weight: 70
{{< feature-state for_k8s_version="1.8" state="alpha" >}}
{{< feature-state for_k8s_version="1.11" state="beta" >}}
[Pods](/docs/user-guide/pods) can have _priority_. Priority
indicates the importance of a Pod relative to other Pods. If a Pod cannot be scheduled,
the scheduler tries to preempt (evict) lower priority Pods to make scheduling of the
[Pods](/docs/user-guide/pods) can have _priority_. Priority indicates the
importance of a Pod relative to other Pods. If a Pod cannot be scheduled, the
scheduler tries to preempt (evict) lower priority Pods to make scheduling of the
pending Pod possible.
In Kubernetes 1.9 and later, Priority also affects scheduling
order of Pods and out-of-resource eviction ordering on the Node.
In Kubernetes 1.9 and later, Priority also affects scheduling order of Pods and
out-of-resource eviction ordering on the Node.
Pod priority and preemption are moved to beta since Kubernetes 1.11 and are enabled by default in
this release and later.
Pod priority and preemption are moved to beta since Kubernetes 1.11 and are
enabled by default in this release and later.
In Kubernetes versions where Pod priority and preemption is still an alpha-level
feature, you need to explicitly enable it. To use these features in the older versions of
Kubernetes, follow the instructions in the documentation for your Kubernetes version, by
going to the documentation archive version for your Kubernetes version.
feature, you need to explicitly enable it. To use these features in the older
versions of Kubernetes, follow the instructions in the documentation for your
Kubernetes version, by going to the documentation archive version for your
Kubernetes version.
| Kubernetes Version | Priority and Preemption State | Enabled by default |
| -------- |:-----:|:----:|
| 1.8 | alpha | no |
| 1.9 | alpha | no |
| 1.10 | alpha | no |
| 1.11 | beta | yes |
Kubernetes Version | Priority and Preemption State | Enabled by default
------------------ | :---------------------------: | :----------------:
1.8 | alpha | no
1.9 | alpha | no
1.10 | alpha | no
1.11 | beta | yes
{{< warning >}}
**Warning**: In a cluster where not all users are trusted, a malicious
user could create pods at the highest possible priorities, causing
{{< warning >}} **Warning**: In a cluster where not all users are trusted, a
malicious user could create pods at the highest possible priorities, causing
other pods to be evicted/not get scheduled. To resolve this issue,
[ResourceQuota](https://kubernetes.io/docs/concepts/policy/resource-quotas/) is augmented to support
Pod priority. An admin can create ResourceQuota for users at specific priority levels, preventing
them from creating pods at high priorities. However, this feature is in alpha as of Kubernetes 1.11.
[ResourceQuota](https://kubernetes.io/docs/concepts/policy/resource-quotas/) is
augmented to support Pod priority. An admin can create ResourceQuota for users
at specific priority levels, preventing them from creating pods at high
priorities. However, this feature is in alpha as of Kubernetes 1.11.
{{< /warning >}}
{{% /capture %}}
@ -49,37 +50,52 @@ them from creating pods at high priorities. However, this feature is in alpha as
{{% capture body %}}
## How to use priority and preemption
To use priority and preemption in Kubernetes 1.11 and later, follow these steps:
1. Add one or more [PriorityClasses](#priorityclass).
1. Create Pods with [`priorityClassName`](#pod-priority) set to one of the added PriorityClasses.
Of course you do not need to create the Pods directly; normally you would add
`priorityClassName` to the Pod template of a collection object like a Deployment.
1. Create Pods with [`priorityClassName`](#pod-priority) set to one of the added
PriorityClasses. Of course you do not need to create the Pods directly;
normally you would add `priorityClassName` to the Pod template of a
collection object like a Deployment.
Keep reading for more information about these steps.
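As a rough sketch of those two steps, something like the following could be used; the class name, priority value, and Pod spec are illustrative, and the PriorityClass `apiVersion` shown is an assumption for the Kubernetes 1.11 timeframe.

```yaml
apiVersion: scheduling.k8s.io/v1beta1   # assumed group/version for Kubernetes 1.11
kind: PriorityClass
metadata:
  name: high-priority                   # illustrative class name
value: 1000000                          # higher value means higher priority
globalDefault: false
description: "For a critical service only."
---
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  priorityClassName: high-priority      # must reference an existing PriorityClass
  containers:
  - name: nginx
    image: nginx
```

The priority admission controller then resolves `priorityClassName` to the integer priority on the Pod, as described later on this page.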
If you try the feature and then decide to disable it, you must remove the PodPriority
command-line flag or set it to `false`, and then restart the API server and
scheduler. After the feature is disabled, the existing Pods keep their priority
fields, but preemption is disabled, and priority fields are ignored. If the feature
is disabled, you cannot set `priorityClassName` in new Pods.
If you try the feature and then decide to disable it, you must remove the
PodPriority command-line flag or set it to `false`, and then restart the API
server and scheduler. After the feature is disabled, the existing Pods keep
their priority fields, but preemption is disabled, and priority fields are
ignored. If the feature is disabled, you cannot set `priorityClassName` in new
Pods.
## How to disable preemption
{{< note >}}
**Note**: In Kubernetes 1.11, critical pods (except DaemonSet pods, which are
still scheduled by the DaemonSet controller) rely on scheduler preemption to be
scheduled when a cluster is under resource pressure. For this reason, we do not
recommend disabling this feature. If you still have to disable this feature,
follow the instructions below.
{{< /note >}}
{{< note >}} **Note**: In Kubernetes 1.11, critical pods (except DaemonSet pods,
which are still scheduled by the DaemonSet controller) rely on scheduler
preemption to be scheduled when a cluster is under resource pressure. For this
reason, you will need to run an older version of Rescheduler if you decide to
disable preemption. More on this is provided below. {{< /note >}}
#### Option 1: Disable both Pod priority and preemption
Disabling Pod priority disables preemption as well. In order to disable Pod
Priority, set the feature to false for API server, Scheduler, and Kubelet.
Disabling the feature on Kubelets is not vital. You can leave the feature on for
Kubelets if rolling out is hard.
```
--feature-gates=PodPriority=false
```
#### Option 2: Disable Preemption only
In Kubernetes 1.11 and later, preemption is controlled by a kube-scheduler flag
`disablePreemption`, which is set to `false` by default.
To disable preemption, set `disablePreemption` to true. This keeps pod priority
enabled but disables preemption. Here is a sample configuration:
This option is available in component configs only and is not available in
old-style command line options. Below is a sample component config to disable
preemption:
```yaml
apiVersion: componentconfig/v1alpha1
@ -90,44 +106,57 @@ algorithmSource:
...
disablePreemption: true
```
Although preemption of the scheduler is enabled by default, it is disabled if the `PodPriority`
feature is disabled.
### Start an older version of Rescheduler in the cluster
When priority or preemption is disabled, we must run Rescheduler v0.3.1 (instead
of v0.4.0) to ensure that critical Pods are scheduled when nodes or cluster are
under resource pressure. Since critical Pod annotation is still supported in
this release, running Rescheduler should be enough and no other changes to the
configuration of Pods should be needed.
Rescheduler images can be found at:
[gcr.io/k8s-image-staging/rescheduler](http://gcr.io/k8s-image-staging/rescheduler).
In the code, changing the Rescheduler version back to v0.3.1 is the reverse of
[this PR](https://github.com/kubernetes/kubernetes/pull/65454).
## PriorityClass
A PriorityClass is a non-namespaced object that defines a mapping from a priority
class name to the integer value of the priority. The name is specified in the `name`
field of the PriorityClass object's metadata. The value is specified in the required
`value` field. The higher the value, the higher the priority.
A PriorityClass is a non-namespaced object that defines a mapping from a
priority class name to the integer value of the priority. The name is specified
in the `name` field of the PriorityClass object's metadata. The value is
specified in the required `value` field. The higher the value, the higher the
priority.
A PriorityClass object can have any 32-bit integer value smaller than or equal to
1 billion. Larger numbers are reserved for critical system Pods that should not
normally be preempted or evicted. A cluster admin should create one PriorityClass
object for each such mapping that they want.
A PriorityClass object can have any 32-bit integer value smaller than or equal
to 1 billion. Larger numbers are reserved for critical system Pods that should
not normally be preempted or evicted. A cluster admin should create one
PriorityClass object for each such mapping that they want.
PriorityClass also has two optional fields: `globalDefault` and `description`.
The `globalDefault` field indicates that the value of this PriorityClass should
be used for Pods without a `priorityClassName`. Only one PriorityClass with
`globalDefault` set to true can exist in the system. If there is no PriorityClass
with `globalDefault` set, the priority of Pods with no `priorityClassName` is zero.
`globalDefault` set to true can exist in the system. If there is no
PriorityClass with `globalDefault` set, the priority of Pods with no
`priorityClassName` is zero.
The `description` field is an arbitrary string. It is meant to tell users of
the cluster when they should use this PriorityClass.
The `description` field is an arbitrary string. It is meant to tell users of the
cluster when they should use this PriorityClass.
### Notes about PodPriority and existing clusters
- If you upgrade your existing cluster and enable this feature, the priority
of your existing Pods is effectively zero.
- Addition of a PriorityClass with `globalDefault` set to `true` does not
change the priorities of existing Pods. The value of such a PriorityClass is used only
for Pods created after the PriorityClass is added.
- If you upgrade your existing cluster and enable this feature, the priority
of your existing Pods is effectively zero.
- If you delete a PriorityClass, existing Pods that use the name of the
deleted PriorityClass remain unchanged, but you cannot create more Pods
that use the name of the deleted PriorityClass.
- Addition of a PriorityClass with `globalDefault` set to `true` does not
change the priorities of existing Pods. The value of such a PriorityClass is
used only for Pods created after the PriorityClass is added.
- If you delete a PriorityClass, existing Pods that use the name of the
deleted PriorityClass remain unchanged, but you cannot create more Pods that
use the name of the deleted PriorityClass.
### Example PriorityClass
@ -145,13 +174,13 @@ description: "This priority class should be used for XYZ service pods only."
After you have one or more PriorityClasses, you can create Pods that specify one
of those PriorityClass names in their specifications. The priority admission
controller uses the `priorityClassName` field and populates the integer value
of the priority. If the priority class is not found, the Pod is rejected.
The following YAML is an example of a Pod configuration that uses the PriorityClass
created in the preceding example. The priority admission controller checks the
specification and resolves the priority of the Pod to 1000000.
controller uses the `priorityClassName` field and populates the integer value of
the priority. If the priority class is not found, the Pod is rejected.
The following YAML is an example of a Pod configuration that uses the
PriorityClass created in the preceding example. The priority admission
controller checks the specification and resolves the priority of the Pod to
1000000.
```yaml
apiVersion: v1
@ -170,36 +199,40 @@ spec:
### Effect of Pod priority on scheduling order
In Kubernetes 1.9 and later, when Pod priority is enabled, scheduler orders pending
Pods by their priority and a pending Pod is placed ahead of other pending Pods with
lower priority in the scheduling queue. As a result, the higher priority Pod may
be scheduled sooner than Pods with lower priority if its scheduling requirements
are met. If such a Pod cannot be scheduled, the scheduler will continue and try to
schedule other lower priority Pods.
In Kubernetes 1.9 and later, when Pod priority is enabled, scheduler orders
pending Pods by their priority and a pending Pod is placed ahead of other
pending Pods with lower priority in the scheduling queue. As a result, the
higher priority Pod may be scheduled sooner than Pods with lower priority if its
scheduling requirements are met. If such a Pod cannot be scheduled, the scheduler will
continue and try to schedule other lower priority Pods.
## Preemption
When Pods are created, they go to a queue and wait to be scheduled. The scheduler
picks a Pod from the queue and tries to schedule it on a Node. If no Node is found
that satisfies all the specified requirements of the Pod, preemption logic is triggered
for the pending Pod. Let's call the pending Pod P. Preemption logic tries to find a Node
where removal of one or more Pods with lower priority than P would enable P to be scheduled
on that Node. If such a Node is found, one or more lower priority Pods get
deleted from the Node. After the Pods are gone, P can be scheduled on the Node.
When Pods are created, they go to a queue and wait to be scheduled. The
scheduler picks a Pod from the queue and tries to schedule it on a Node. If no
Node is found that satisfies all the specified requirements of the Pod,
preemption logic is triggered for the pending Pod. Let's call the pending Pod P.
Preemption logic tries to find a Node where removal of one or more Pods with
lower priority than P would enable P to be scheduled on that Node. If such a
Node is found, one or more lower priority Pods get deleted from the Node. After
the Pods are gone, P can be scheduled on the Node.
### User exposed information
When Pod P preempts one or more Pods on Node N, `nominatedNodeName` field of Pod P's status is set to
the name of Node N. This field helps scheduler track resources reserved for Pod P and also gives
users information about preemptions in their clusters.
When Pod P preempts one or more Pods on Node N, `nominatedNodeName` field of Pod
P's status is set to the name of Node N. This field helps scheduler track
resources reserved for Pod P and also gives users information about preemptions
in their clusters.
Please note that Pod P is not necessarily scheduled to the "nominated Node". After victim Pods are
preempted, they get their graceful termination period. If another node becomes available while
scheduler is waiting for the victim Pods to terminate, scheduler will use the other node to schedule
Pod P. As a result `nominatedNodeName` and `nodeName` of Pod spec are not always the same. Also, if
scheduler preempts Pods on Node N, but then a higher priority Pod than Pod P arrives, scheduler may
give Node N to the new higher priority Pod. In such a case, scheduler clears `nominatedNodeName` of
Pod P. By doing this, scheduler makes Pod P eligible to preempt Pods on another Node.
Please note that Pod P is not necessarily scheduled to the "nominated Node".
After victim Pods are preempted, they get their graceful termination period. If
another node becomes available while scheduler is waiting for the victim Pods to
terminate, scheduler will use the other node to schedule Pod P. As a result
`nominatedNodeName` and `nodeName` of Pod spec are not always the same. Also, if
scheduler preempts Pods on Node N, but then a higher priority Pod than Pod P
arrives, scheduler may give Node N to the new higher priority Pod. In such a
case, scheduler clears `nominatedNodeName` of Pod P. By doing this, scheduler
makes Pod P eligible to preempt Pods on another Node.
### Limitations of preemption
@ -212,67 +245,127 @@ killed. This graceful termination period creates a time gap between the point
that the scheduler preempts Pods and the time when the pending Pod (P) can be
scheduled on the Node (N). In the meantime, the scheduler keeps scheduling other
pending Pods. As victims exit or get terminated, the scheduler tries to schedule
Pods in the pending queue. Therefore, there is usually a time gap between the point
that scheduler preempts victims and the time that Pod P is scheduled. In order to
minimize this gap, one can set graceful termination period of lower priority Pods
to zero or a small number.
Pods in the pending queue. Therefore, there is usually a time gap between the
point that scheduler preempts victims and the time that Pod P is scheduled. In
order to minimize this gap, one can set graceful termination period of lower
priority Pods to zero or a small number.
#### PodDisruptionBudget is supported, but not guaranteed!
A [Pod Disruption Budget (PDB)](/docs/concepts/workloads/pods/disruptions/)
allows application owners to limit the number of Pods of a replicated application that
are down simultaneously from voluntary disruptions. Kubernetes 1.9 supports PDB
when preempting Pods, but respecting PDB is best effort. The Scheduler tries to
find victims whose PDB are not violated by preemption, but if no such victims are
found, preemption will still happen, and lower priority Pods will be removed
despite their PDBs being violated.
allows application owners to limit the number of Pods of a replicated application
that are down simultaneously from voluntary disruptions. Kubernetes 1.9 supports
PDB when preempting Pods, but respecting PDB is best effort. The Scheduler tries
to find victims whose PDB are not violated by preemption, but if no such victims
are found, preemption will still happen, and lower priority Pods will be removed
despite their PDBs being violated.
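For reference, a PDB is a small object of its own; a minimal sketch (the name, selector, and count are placeholders) might look like this.

```yaml
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: web-pdb                 # illustrative name
spec:
  minAvailable: 2               # try to keep at least two matching Pods running
  selector:
    matchLabels:
      app: web                  # illustrative label
```

Even with such a budget in place, preemption can still remove matching Pods when no other victims are available, as described above.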
#### Inter-Pod affinity on lower-priority Pods
A Node is considered for preemption only when
the answer to this question is yes: "If all the Pods with lower priority than
the pending Pod are removed from the Node, can the pending Pod be scheduled on
the Node?"
A Node is considered for preemption only when the answer to this question is
yes: "If all the Pods with lower priority than the pending Pod are removed from
the Node, can the pending Pod be scheduled on the Node?"
{{< note >}}
**Note:** Preemption does not necessarily remove all lower-priority Pods. If the
pending Pod can be scheduled by removing fewer than all lower-priority Pods, then
only a portion of the lower-priority Pods are removed. Even so, the answer to the
preceding question must be yes. If the answer is no, the Node is not considered
for preemption.
{{< /note >}}
{{< note >}} **Note:** Preemption does not necessarily remove all lower-priority
Pods. If the pending Pod can be scheduled by removing fewer than all
lower-priority Pods, then only a portion of the lower-priority Pods are removed.
Even so, the answer to the preceding question must be yes. If the answer is no,
the Node is not considered for preemption. {{< /note >}}
If a pending Pod has inter-pod affinity to one or more of the lower-priority Pods
on the Node, the inter-Pod affinity rule cannot be satisfied in the absence of those
lower-priority Pods. In this case, the scheduler does not preempt any Pods on the
Node. Instead, it looks for another Node. The scheduler might find a suitable Node
or it might not. There is no guarantee that the pending Pod can be scheduled.
If a pending Pod has inter-pod affinity to one or more of the lower-priority
Pods on the Node, the inter-Pod affinity rule cannot be satisfied in the absence
of those lower-priority Pods. In this case, the scheduler does not preempt any
Pods on the Node. Instead, it looks for another Node. The scheduler might find a
suitable Node or it might not. There is no guarantee that the pending Pod can be
scheduled.
Our recommended solution for this problem is to create inter-Pod affinity only towards
equal or higher priority Pods.
Our recommended solution for this problem is to create inter-Pod affinity only
towards equal or higher priority Pods.
#### Cross node preemption
Suppose a Node N is being considered for preemption so that a pending Pod P
can be scheduled on N. P might become feasible on N only if a Pod on another
Node is preempted. Here's an example:
Suppose a Node N is being considered for preemption so that a pending Pod P can
be scheduled on N. P might become feasible on N only if a Pod on another Node is
preempted. Here's an example:
* Pod P is being considered for Node N.
* Pod Q is running on another Node in the same Zone as Node N.
* Pod P has Zone-wide anti-affinity with Pod Q
(`topologyKey: failure-domain.beta.kubernetes.io/zone`).
* There are no other cases of anti-affinity between Pod P and other Pods in the Zone.
* In order to schedule Pod P on Node N, Pod Q can be preempted, but scheduler
does not perform cross-node preemption. So, Pod P will be deemed unschedulable
on Node N.
* Pod P is being considered for Node N.
* Pod Q is running on another Node in the same Zone as Node N.
* Pod P has Zone-wide anti-affinity with Pod Q (`topologyKey:
failure-domain.beta.kubernetes.io/zone`).
* There are no other cases of anti-affinity between Pod P and other Pods in
the Zone.
* In order to schedule Pod P on Node N, Pod Q can be preempted, but scheduler
does not perform cross-node preemption. So, Pod P will be deemed
unschedulable on Node N.
If Pod Q were removed from its Node, the Pod anti-affinity violation would be gone,
and Pod P could possibly be scheduled on Node N.
If Pod Q were removed from its Node, the Pod anti-affinity violation would be
gone, and Pod P could possibly be scheduled on Node N.
We may consider adding cross Node preemption in future versions if we find an
algorithm with reasonable performance. We cannot promise anything at this point,
and cross Node preemption will not be considered a blocker for Beta or GA.
## Debugging Pod Priority and Preemption
Pod Priority and Preemption is a major feature that could potentially disrupt
Pod scheduling if it has bugs.
### Potential problems caused by Priority and Preemption
The following are some of the potential problems that could be caused by bugs
in the implementation of the feature. This list is not exhaustive.
#### Pods are preempted unnecessarily
Preemption removes existing Pods from a cluster under resource pressure to make
room for higher priority pending Pods. If a user gives high priorities to
certain Pods by mistake, these unintentional high priority Pods may cause
preemption in the cluster. As mentioned above, Pod priority is specified by
setting the `priorityClassName` field of `podSpec`. The integer value of
priority is then resolved and populated to the `priority` field of `podSpec`.
To resolve the problem, change the `priorityClassName` of those Pods to use a
lower priority class, or leave the field empty. An empty `priorityClassName` is
resolved to zero by default.
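For example, a minimal sketch of a Pod that opts into a lower priority class
(the Pod name, image, and the `low-priority` class are hypothetical; the
PriorityClass must already exist in your cluster):
```shell
kubectl create -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: quiet-workload              # hypothetical name
spec:
  priorityClassName: low-priority   # hypothetical PriorityClass; omit to default to priority 0
  containers:
  - name: app
    image: nginx
EOF
```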
When a Pod is preempted, there will be events recorded for the preempted Pod.
Preemption should happen only when a cluster does not have enough resources for
a Pod. In such cases, preemption happens only when the priority of the pending
Pod (preemptor) is higher than the victim Pods. Preemption must not happen when
there is no pending Pod, or when the pending Pods have equal or higher priority
than the victims. If preemption happens in such scenarios, please file an issue.
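To look for such events, a minimal sketch (the `grep` pattern is illustrative;
exact event wording can vary between Kubernetes versions):
```shell
# List recent events and pick out preemption-related ones
kubectl get events --sort-by=.metadata.creationTimestamp | grep -i preempt
# Or inspect a specific Pod you believe was preempted
kubectl describe pod <preempted-pod-name> | grep -i preempt
```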
#### Pods are preempted, but the preemptor is not scheduled
When Pods are preempted, they receive their requested graceful termination
period, which defaults to 30 seconds but can be set to a different value in the
PodSpec. If the victim Pods do not terminate within this period, they are
force-terminated. Once all the victims go away, the preemptor Pod can be
scheduled.
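The grace period is taken from `terminationGracePeriodSeconds` in each victim's
PodSpec. A minimal sketch of a Pod that would receive only a 10-second grace
period if it were ever chosen as a victim (the name, image, and value are
illustrative):
```shell
kubectl create -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: fast-terminating-pod        # hypothetical name
spec:
  terminationGracePeriodSeconds: 10 # overrides the 30-second default
  containers:
  - name: app
    image: nginx
EOF
```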
While the preemptor Pod is waiting for the victims to go away, a higher priority
Pod may be created that fits on the same node. In this case, the scheduler will
schedule the higher priority Pod instead of the preemptor.
In the absence of such a higher priority Pod, we expect the preemptor Pod to be
scheduled after the graceful termination period of the victims is over.
#### Higher priority Pods are preempted before lower priority Pods
The scheduler tries to find nodes that can run a pending Pod. If no node is
found, it tries to remove Pods with lower priority from one node to make room
for the pending Pod. If a node with low priority Pods is not feasible to run the
pending Pod, the scheduler may choose another node with higher priority Pods
(compared to the Pods on the other node) for preemption. The victims must still
have lower priority than the preemptor Pod.
When there are multiple nodes available for preemption, the scheduler tries to
choose the node whose set of Pods has the lowest priority. However, if such Pods
have a PodDisruptionBudget that would be violated if they were preempted, the
scheduler may choose another node with higher priority Pods.
When multiple nodes exist for preemption and none of the above scenarios apply,
we expect the scheduler to choose the node with the lowest priority Pods. If
that is not the case, it may indicate a bug in the scheduler.
{{% /capture %}}

View File

@ -135,7 +135,7 @@ secret "mysecret" created
**Encoding Note:** The serialized JSON and YAML values of secret data are
encoded as base64 strings. Newlines are not valid within these strings and must
be omitted. When using the `base64` utility on Darwin/OS X users should avoid
be omitted. When using the `base64` utility on Darwin/macOS users should avoid
using the `-b` option to split long lines. Conversely, Linux users *should* add
the option `-w 0` to `base64` commands, or use the pipeline `base64 | tr -d '\n'`
if the `-w` option is not available.
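For example, a minimal sketch of producing a newline-free encoded value (the
value `admin` is illustrative):
```shell
# Linux: disable line wrapping explicitly
echo -n 'admin' | base64 -w 0
# Darwin/macOS, or wherever -w is unavailable: strip the trailing newline instead
echo -n 'admin' | base64 | tr -d '\n'
```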

View File

@ -160,7 +160,7 @@ pods that shouldn't be running. A few of the use cases are
a particular set of users, you can add a taint to those nodes (say,
`kubectl taint nodes nodename dedicated=groupName:NoSchedule`) and then add a corresponding
toleration to their pods (this would be done most easily by writing a custom
[admission controller](/docs/admin/admission-controllers/)).
[admission controller](/docs/reference/access-authn-authz/admission-controllers/)).
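For example, a minimal sketch of a Pod carrying a toleration that matches the
taint above (the Pod name and image are illustrative):
```shell
kubectl create -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: dedicated-workload          # hypothetical name
spec:
  containers:
  - name: app
    image: nginx
  tolerations:
  - key: "dedicated"
    operator: "Equal"
    value: "groupName"
    effect: "NoSchedule"
EOF
```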
The pods with the tolerations will then be allowed to use the tainted (dedicated) nodes as
well as any other nodes in the cluster. If you want to dedicate the nodes to them *and*
ensure they *only* use the dedicated nodes, then you should additionally add a label similar
@ -176,12 +176,12 @@ hardware (e.g. `kubectl taint nodes nodename special=true:NoSchedule` or
`kubectl taint nodes nodename special=true:PreferNoSchedule`) and adding a corresponding
toleration to pods that use the special hardware. As in the dedicated nodes use case,
it is probably easiest to apply the tolerations using a custom
[admission controller](/docs/admin/admission-controllers/)).
[admission controller](/docs/reference/access-authn-authz/admission-controllers/).
For example, it is recommended to use [Extended
Resources](/docs/concepts/configuration/manage-compute-resources-container/#extended-resources)
to represent the special hardware, taint your special hardware nodes with the
extended resource name and run the
[ExtendedResourceToleration](/docs/admin/admission-controllers/#extendedresourcetoleration)
[ExtendedResourceToleration](/docs/reference/access-authn-authz/admission-controllers/#extendedresourcetoleration)
admission controller. Now, because the nodes are tainted, no pods without the
toleration will schedule on them. But when you submit a pod that requests the
extended resource, the `ExtendedResourceToleration` admission controller will

View File

@ -27,7 +27,7 @@ you can do one of the following:
- set the `imagePullPolicy` of the container to `Always`;
- use `:latest` as the tag for the image to use;
- enable the [AlwaysPullImages](/docs/admin/admission-controllers/#alwayspullimages) admission controller.
- enable the [AlwaysPullImages](/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages) admission controller.
If you do not specify a tag for your image, it is assumed to be `:latest`, and
the image pull policy defaults to `Always` accordingly.
@ -130,17 +130,36 @@ Once you have those variables filled in you can
### Configuring Nodes to Authenticate to a Private Repository
{{< note >}}
**Note:** If you are running on Google Kubernetes Engine, there will already be a `.dockercfg` on each node with credentials for Google Container Registry. You cannot use this approach.
{{< /note >}}
{{< note >}}
**Note:** If you are running on AWS EC2 and are using the EC2 Container Registry (ECR), the kubelet on each node will
manage and update the ECR login credentials. You cannot use this approach.
{{< /note >}}
{{< note >}}
**Note:** This approach is suitable if you can control node configuration. It
will not work reliably on GCE, or on any other cloud provider that does
automatic node replacement.
{{< /note >}}
Docker stores keys for private registries in the `$HOME/.dockercfg` or `$HOME/.docker/config.json` file. If you put this
in the `$HOME` of user `root` on a kubelet, then docker will use it.
Docker stores keys for private registries in the `$HOME/.dockercfg` or `$HOME/.docker/config.json` file. If you put the same file
in the search paths list below, kubelet uses it as the credential provider when pulling images.
* `{--root-dir:-/var/lib/kubelet}/config.json`
* `{cwd of kubelet}/config.json`
* `${HOME}/.docker/config.json`
* `/.docker/config.json`
* `{--root-dir:-/var/lib/kubelet}/.dockercfg`
* `{cwd of kubelet}/.dockercfg`
* `${HOME}/.dockercfg`
* `/.dockercfg`
{{< note >}}
**Note**: You may have to set `HOME=/root` explicitly in your environment file for kubelet.
{{< /note >}}
Here are the recommended steps to configuring your nodes to use a private registry. In this
example, run these on your desktop/laptop:
@ -150,13 +169,13 @@ example, run these on your desktop/laptop:
1. Get a list of your nodes, for example:
- if you want the names: `nodes=$(kubectl get nodes -o jsonpath='{range.items[*].metadata}{.name} {end}')`
- if you want to get the IPs: `nodes=$(kubectl get nodes -o jsonpath='{range .items[*].status.addresses[?(@.type=="ExternalIP")]}{.address} {end}')`
1. Copy your local `.docker/config.json` to the home directory of root on each node.
- for example: `for n in $nodes; do scp ~/.docker/config.json root@$n:/root/.docker/config.json; done`
1. Copy your local `.docker/config.json` to one of the search paths list above.
- for example: `for n in $nodes; do scp ~/.docker/config.json root@$n:/var/lib/kubelet/config.json; done`
Verify by creating a pod that uses a private image, e.g.:
```yaml
$ cat <<EOF > /tmp/private-image-test-1.yaml
kubectl create -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
@ -168,22 +187,20 @@ spec:
imagePullPolicy: Always
command: [ "echo", "SUCCESS" ]
EOF
$ kubectl create -f /tmp/private-image-test-1.yaml
pod "private-image-test-1" created
$
```
If everything is working, then, after a few moments, you should see:
```shell
$ kubectl logs private-image-test-1
kubectl logs private-image-test-1
SUCCESS
```
If it failed, then you will see:
```shell
$ kubectl describe pods/private-image-test-1 | grep "Failed"
kubectl describe pods/private-image-test-1 | grep "Failed"
Fri, 26 Jun 2015 15:36:13 -0700 Fri, 26 Jun 2015 15:39:13 -0700 19 {kubelet node-i2hq} spec.containers{uses-private-image} failed Failed to pull image "user/privaterepo:v1": Error: image user/privaterepo:v1 not found
```
@ -197,11 +214,15 @@ registry keys are added to the `.docker/config.json`.
### Pre-pulling Images
{{< note >}}
**Note:** If you are running on Google Kubernetes Engine, there will already be a `.dockercfg` on each node with credentials for Google Container Registry. You cannot use this approach.
{{< /note >}}
{{< note >}}
**Note:** This approach is suitable if you can control node configuration. It
will not work reliably on GCE, or on any other cloud provider that does
automatic node replacement.
{{< /note >}}
By default, the kubelet will try to pull each image from the specified registry.
However, if the `imagePullPolicy` property of the container is set to `IfNotPresent` or `Never`,
@ -216,8 +237,10 @@ All pods will have read access to any pre-pulled images.
### Specifying ImagePullSecrets on a Pod
{{< note >}}
**Note:** This approach is currently the recommended approach for Google Kubernetes Engine, GCE, and any cloud-providers
where node creation is automated.
{{< /note >}}
Kubernetes supports specifying registry keys on a pod.
@ -226,7 +249,7 @@ Kubernetes supports specifying registry keys on a pod.
Run the following command, substituting the appropriate uppercase values:
```shell
$ kubectl create secret docker-registry myregistrykey --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL
kubectl create secret docker-registry myregistrykey --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL
secret "myregistrykey" created.
```
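A Pod can then reference that secret through `imagePullSecrets`; a minimal
sketch (the Pod name is hypothetical and the image reference is a placeholder to
substitute):
```shell
kubectl create -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: private-image-pod           # hypothetical name
spec:
  containers:
  - name: app
    image: DOCKER_REGISTRY_SERVER/IMAGE:TAG   # substitute your private image
  imagePullSecrets:
  - name: myregistrykey
EOF
```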
@ -315,10 +338,10 @@ common use cases and suggested solutions.
- It will work better with cluster autoscaling than manual node configuration.
- Or, on a cluster where changing the node configuration is inconvenient, use `imagePullSecrets`.
1. Cluster with proprietary images, a few of which require stricter access control.
- Ensure [AlwaysPullImages admission controller](/docs/admin/admission-controllers/#alwayspullimages) is active. Otherwise, all Pods potentially have access to all images.
- Ensure [AlwaysPullImages admission controller](/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages) is active. Otherwise, all Pods potentially have access to all images.
- Move sensitive data into a "Secret" resource, instead of packaging it in an image.
1. A multi-tenant cluster where each tenant needs its own private registry.
- Ensure [AlwaysPullImages admission controller](/docs/admin/admission-controllers/#alwayspullimages) is active. Otherwise, all Pods of all tenants potentially have access to all images.
- Ensure [AlwaysPullImages admission controller](/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages) is active. Otherwise, all Pods of all tenants potentially have access to all images.
- Run a private registry with authorization required.
- Generate registry credential for each tenant, put into secret, and populate secret to each tenant namespace.
- The tenant adds that secret to imagePullSecrets of each namespace.

View File

@ -141,12 +141,13 @@ ensure the continuous functioning of the device allocations during the upgrade.
For examples of device plugin implementations, see:
* The official [NVIDIA GPU device plugin](https://github.com/NVIDIA/k8s-device-plugin)
* it requires using [nvidia-docker 2.0](https://github.com/NVIDIA/nvidia-docker) which allows you to run GPU enabled docker containers
* The [NVIDIA GPU device plugin for COS base OS](https://github.com/GoogleCloudPlatform/container-engine-accelerators/tree/master/cmd/nvidia_gpu).
* Requires [nvidia-docker 2.0](https://github.com/NVIDIA/nvidia-docker) which allows you to run GPU enabled docker containers.
* The [NVIDIA GPU device plugin for COS base OS](https://github.com/GoogleCloudPlatform/container-engine-accelerators/tree/master/cmd/nvidia_gpu)
* The [RDMA device plugin](https://github.com/hustcat/k8s-rdma-device-plugin)
* The [Solarflare device plugin](https://github.com/vikaschoudhary16/sfc-device-plugin)
* The [AMD GPU device plugin](https://github.com/RadeonOpenCompute/k8s-device-plugin)
* The [SRIOV Network device plugin](https://github.com/intel/sriov-network-device-plugin)
* The [Intel device plugins](https://github.com/intel/intel-device-plugins-for-kubernetes) for GPU and FPGA devices
{{% /capture %}}

View File

@ -42,7 +42,7 @@ Customization approaches can be broadly divided into *configuration*, which only
Flags and configuration files may not always be changeable in a hosted Kubernetes service or a distribution with managed installation. When they are changeable, they are usually only changeable by the cluster administrator. Also, they are subject to change in future Kubernetes versions, and setting them may require restarting processes. For those reasons, they should be used only when there are no other options.
*Built-in Policy APIs*, such as [ResourceQuota](/docs/concepts/policy/resource-quotas/), [PodSecurityPolicies](/docs/concepts/policy/pod-security-policy/), [NetworkPolicy](/docs/concepts/services-networking/network-policies/) and Role-based Access Control ([RBAC](/docs/admin/authorization/rbac/)), are built-in Kubernetes APIs. APIs are typically used with hosted Kubernetes services and with managed Kubernetes installations. They are declarative and use the same conventions as other Kubernetes resources like pods, so new cluster configuration can be repeatable and be managed the same way as applications. And, where they are stable, they enjoy a [defined support policy](/docs/reference/deprecation-policy/) like other Kubernetes APIs. For these reasons, they are preferred over *configuration files* and *flags* where suitable.
*Built-in Policy APIs*, such as [ResourceQuota](/docs/concepts/policy/resource-quotas/), [PodSecurityPolicies](/docs/concepts/policy/pod-security-policy/), [NetworkPolicy](/docs/concepts/services-networking/network-policies/) and Role-based Access Control ([RBAC](/docs/reference/access-authn-authz/rbac/)), are built-in Kubernetes APIs. APIs are typically used with hosted Kubernetes services and with managed Kubernetes installations. They are declarative and use the same conventions as other Kubernetes resources like pods, so new cluster configuration can be repeatable and be managed the same way as applications. And, where they are stable, they enjoy a [defined support policy](/docs/reference/deprecation-policy/) like other Kubernetes APIs. For these reasons, they are preferred over *configuration files* and *flags* where suitable.
## Extensions
@ -132,31 +132,31 @@ Adding an API does not directly let you affect the behavior of existing APIs (e.
### API Access Extensions
When a request reaches the Kubernetes API Server, it is first Authenticated, then Authorized, then subject to various types of Admission Control. See [[Accessing the API](/docs/admin/accessing-the-api/)] for more on this flow.
When a request reaches the Kubernetes API Server, it is first Authenticated, then Authorized, then subject to various types of Admission Control. See [Controlling Access to the Kubernetes API](/docs/reference/access-authn-authz/controlling-access/)] for more on this flow.
Each of these steps offers extension points.
Kubernetes has several built-in authentication methods that it supports. It can also sit behind an authenticating proxy, and it can send a token from an Authorization header to a remote service for verification (a webhook). All of these methods are covered in the [Authentication documentation](/docs/admin/authentication/).
Kubernetes has several built-in authentication methods that it supports. It can also sit behind an authenticating proxy, and it can send a token from an Authorization header to a remote service for verification (a webhook). All of these methods are covered in the [Authentication documentation](/docs/reference/access-authn-authz/authentication/).
### Authentication
[Authentication](/docs/admin/authentication) maps headers or certificates in all requests to a username for the client making the request.
[Authentication](/docs/reference/access-authn-authz/authentication/) maps headers or certificates in all requests to a username for the client making the request.
Kubernetes provides several built-in authentication methods, and an [Authentication webhook](/docs/admin/authentication/#webhook-token-authentication) method if those don't meet your needs.
Kubernetes provides several built-in authentication methods, and an [Authentication webhook](/docs/reference/access-authn-authz/authentication/#webhook-token-authentication) method if those don't meet your needs.
### Authorization
[Authorization](/docs/admin/authorization/webhook/) determines whether specific users can read, write, and do other operations on API resources. It just works at the level of whole resources -- it doesn't discriminate based on arbitrary object fields. If the built-in authorization options don't meet your needs, and [Authorization webhook](/docs/admin/authorization/webhook/) allows calling out to user-provided code to make an authorization decision.
[Authorization](/docs/reference/access-authn-authz/webhook/) determines whether specific users can read, write, and do other operations on API resources. It just works at the level of whole resources -- it doesn't discriminate based on arbitrary object fields. If the built-in authorization options don't meet your needs, an [Authorization webhook](/docs/reference/access-authn-authz/webhook/) allows calling out to user-provided code to make an authorization decision.
### Dynamic Admission Control
After a request is authorized, if it is a write operation, it also goes through [Admission Control](/docs/admin/admission-controllers/) steps. In addition to the built-in steps, there are several extensions:
After a request is authorized, if it is a write operation, it also goes through [Admission Control](/docs/reference/access-authn-authz/admission-controllers/) steps. In addition to the built-in steps, there are several extensions:
* The [Image Policy webhook](/docs/admin/admission-controllers/#imagepolicywebhook) restricts what images can be run in containers.
* To make arbitrary admission control decisions, a general [Admission webhook](/docs/admin/extensible-admission-controllers/#admission-webhooks) can be used. Admission Webhooks can reject creations or updates.
* [Initializers](/docs/admin/extensible-admission-controllers/#initializers) are controllers that can modify objects before they are created. Initializers can modify initial object creations but cannot affect updates to objects. Initializers can also reject objects.
* The [Image Policy webhook](/docs/reference/access-authn-authz/admission-controllers/#imagepolicywebhook) restricts what images can be run in containers.
* To make arbitrary admission control decisions, a general [Admission webhook](/docs/reference/access-authn-authz/extensible-admission-controllers/#admission-webhooks) can be used. Admission Webhooks can reject creations or updates.
* [Initializers](/docs/reference/access-authn-authz/extensible-admission-controllers/#initializers) are controllers that can modify objects before they are created. Initializers can modify initial object creations but cannot affect updates to objects. Initializers can also reject objects.
## Infrastructure Extensions
@ -201,7 +201,7 @@ the nodes chosen for a pod.
{{% capture whatsnext %}}
* Learn more about [Custom Resources](/docs/concepts/api-extension/custom-resources/)
* Learn about [Dynamic admission control](/docs/admin/extensible-admission-controllers/)
* Learn about [Dynamic admission control](/docs/reference/access-authn-authz/extensible-admission-controllers/)
* Learn more about Infrastructure extensions
* [Network Plugins](/docs/concepts/cluster-administration/network-plugins/)
* [Device Plugins](/docs/concepts/cluster-administration/device-plugins/)

View File

@ -12,7 +12,7 @@ Overall API conventions are described in the [API conventions doc](https://git.k
API endpoints, resource types and samples are described in [API Reference](/docs/reference).
Remote access to the API is discussed in the [access doc](/docs/admin/accessing-the-api).
Remote access to the API is discussed in the [Controlling API Access doc](/docs/reference/access-authn-authz/controlling-access/).
The Kubernetes API also serves as the foundation for the declarative configuration schema for the system. The [kubectl](/docs/reference/kubectl/overview/) command-line tool can be used to create, update, delete, and get API objects.

View File

@ -65,18 +65,18 @@ configuration file that was used to create the object.
Here's an example of an object configuration file:
{{< code file="simple_deployment.yaml" >}}
{{< codenew file="application/simple_deployment.yaml" >}}
Create the object using `kubectl apply`:
```shell
kubectl apply -f https://k8s.io/docs/concepts/overview/object-management-kubectl/simple_deployment.yaml
kubectl apply -f https://k8s.io/examples/application/simple_deployment.yaml
```
Print the live configuration using `kubectl get`:
```shell
kubectl get -f https://k8s.io/docs/concepts/overview/object-management-kubectl/simple_deployment.yaml -o yaml
kubectl get -f https://k8s.io/examples/application/simple_deployment.yaml -o yaml
```
The output shows that the `kubectl.kubernetes.io/last-applied-configuration` annotation
@ -139,12 +139,12 @@ kubectl apply -f <directory>/
Here's an example configuration file:
{{< code file="simple_deployment.yaml" >}}
{{< codenew file="application/simple_deployment.yaml" >}}
Create the object using `kubectl apply`:
```shell
kubectl apply -f https://k8s.io/docs/concepts/overview/object-management-kubectl/simple_deployment.yaml
kubectl apply -f https://k8s.io/examples/application/simple_deployment.yaml
```
{{< note >}}
@ -155,7 +155,7 @@ configuration file instead of a directory.
Print the live configuration using `kubectl get`:
```shell
kubectl get -f https://k8s.io/docs/concepts/overview/object-management-kubectl/simple_deployment.yaml -o yaml
kubectl get -f https://k8s.io/examples/application/simple_deployment.yaml -o yaml
```
The output shows that the `kubectl.kubernetes.io/last-applied-configuration` annotation
@ -210,7 +210,7 @@ kubectl scale deployment/nginx-deployment --replicas=2
Print the live configuration using `kubectl get`:
```shell
kubectl get -f https://k8s.io/docs/concepts/overview/object-management-kubectl/simple_deployment.yaml -o yaml
kubectl get -f https://k8s.io/examples/application/simple_deployment.yaml -o yaml
```
The output shows that the `replicas` field has been set to 2, and the `last-applied-configuration`
@ -257,18 +257,18 @@ spec:
Update the `simple_deployment.yaml` configuration file to change the image from
`nginx:1.7.9` to `nginx:1.11.9`, and delete the `minReadySeconds` field:
{{< code file="update_deployment.yaml" >}}
{{< codenew file="application/update_deployment.yaml" >}}
Apply the changes made to the configuration file:
```shell
kubectl apply -f https://k8s.io/docs/concepts/overview/object-management-kubectl/update_deployment.yaml
kubectl apply -f https://k8s.io/examples/application/update_deployment.yaml
```
Print the live configuration using `kubectl get`:
```
kubectl get -f https://k8s.io/docs/concepts/overview/object-management-kubectl/simple_deployment.yaml -o yaml
kubectl get -f https://k8s.io/examples/application/simple_deployment.yaml -o yaml
```
The output shows the following changes to the live configuration:
@ -417,7 +417,7 @@ to calculate which fields should be deleted or set:
Here's an example. Suppose this is the configuration file for a Deployment object:
{{< code file="update_deployment.yaml" >}}
{{< codenew file="application/update_deployment.yaml" >}}
Also, suppose this is the live configuration for the same Deployment object:
@ -463,7 +463,10 @@ Here are the merge calculations that would be performed by `kubectl apply`:
1. Calculate the fields to delete by reading values from
`last-applied-configuration` and comparing them to values in the
configuration file. In this example, `minReadySeconds` appears in the
configuration file.
Clear fields explicitly set to null in the local object configuration file
regardless of whether they appear in the `last-applied-configuration`.
In this example, `minReadySeconds` appears in the
`last-applied-configuration` annotation, but does not appear in the configuration file.
**Action:** Clear `minReadySeconds` from the live configuration.
2. Calculate the fields to set by reading values from the configuration
@ -517,12 +520,6 @@ spec:
# ...
```
{{< comment >}}
TODO(1.6): For 1.6, add the following bullet point to 1.
- clear fields explicitly set to null in the local object configuration file regardless of whether they appear in the last-applied-configuration
{{< /comment >}}
### How different types of fields are merged
How a particular field in a configuration file is merged with
@ -716,18 +713,18 @@ not specified when the object is created.
Here's a configuration file for a Deployment. The file does not specify `strategy`:
{{< code file="simple_deployment.yaml" >}}
{{< codenew file="application/simple_deployment.yaml" >}}
Create the object using `kubectl apply`:
```shell
kubectl apply -f https://k8s.io/docs/concepts/overview/object-management-kubectl/simple_deployment.yaml
kubectl apply -f https://k8s.io/examples/application/simple_deployment.yaml
```
Print the live configuration using `kubectl get`:
```shell
kubectl get -f https://k8s.io/docs/concepts/overview/object-management-kubectl/simple_deployment.yaml -o yaml
kubectl get -f https://k8s.io/examples/application/simple_deployment.yaml -o yaml
```
The output shows that the API server set several fields to default values in the live
@ -871,31 +868,10 @@ Recommendation: These fields should be explicitly defined in the object configur
### How to clear server-defaulted fields or fields set by other writers
As of Kubernetes 1.5, fields that do not appear in the configuration file cannot be
cleared by a merge operation. Here are some workarounds:
Option 1: Remove the field by directly modifying the live object.
{{< note >}}
**Note:** As of Kubernetes 1.5, `kubectl edit` does not work with `kubectl apply`.
Using these together will cause unexpected behavior.
{{< /note >}}
Option 2: Remove the field through the configuration file.
1. Add the field to the configuration file to match the live object.
1. Apply the configuration file; this updates the annotation to include the field.
1. Delete the field from the configuration file.
1. Apply the configuration file; this deletes the field from the live object and annotation.
{{< comment >}}
TODO(1.6): Update this with the following for 1.6
Fields that do not appear in the configuration file can be cleared by
setting their values to `null` and then applying the configuration file.
For fields defaulted by the server, this triggers re-defaulting
the values.
{{< /comment >}}
## How to change ownership of a field between the configuration file and direct imperative writers
@ -994,13 +970,6 @@ template:
controller-selector: "extensions/v1beta1/deployment/nginx"
```
## Known Issues
* Prior to Kubernetes 1.6, `kubectl apply` did not support operating on objects stored in a
[custom resource](/docs/concepts/api-extension/custom-resources/).
For these cluster versions, you should instead use [imperative object configuration](/docs/concepts/overview/object-management-kubectl/imperative-config/).
{{% /capture %}}
{{% capture whatsnext %}}
- [Managing Kubernetes Objects Using Imperative Commands](/docs/concepts/overview/object-management-kubectl/imperative-command/)
- [Imperative Management of Kubernetes Objects Using Configuration Files](/docs/concepts/overview/object-management-kubectl/imperative-config/)

View File

@ -0,0 +1,173 @@
---
title: Recommended Labels
content_template: templates/concept
---
{{% capture overview %}}
You can visualize and manage Kubernetes objects with more tools than kubectl and
the dashboard. A common set of labels allows tools to work interoperably, describing
objects in a common manner that all tools can understand.
In addition to supporting tooling, the recommended labels describe applications
in a way that can be queried.
{{% /capture %}}
{{% capture body %}}
The metadata is organized around the concept of an _application_. Kubernetes is not
a platform as a service (PaaS) and doesn't have or enforce a formal notion of an application.
Instead, applications are informal and described with metadata. The definition of
what an application contains is loose.
{{< note >}}
**Note:** These are recommended labels. They make it easier to manage applications
but aren't required for any core tooling.
{{< /note >}}
Shared labels and annotations share a common prefix: `app.kubernetes.io`. Labels
without a prefix are private to users. The shared prefix ensures that shared labels
do not interfere with custom user labels.
## Labels
To take full advantage of these labels, they should be applied
to every resource object.
| Key | Description | Example | Type |
| ----------------------------------- | --------------------- | -------- | ---- |
| `app.kubernetes.io/name` | The name of the application | `mysql` | string |
| `app.kubernetes.io/instance` | A unique name identifying the instance of an application | `wordpress-abcxzy` | string |
| `app.kubernetes.io/version` | The current version of the application (e.g., a semantic version, revision hash, etc.) | `5.7.21` | string |
| `app.kubernetes.io/component` | The component within the architecture | `database` | string |
| `app.kubernetes.io/part-of` | The name of a higher level application this one is part of | `wordpress` | string |
| `app.kubernetes.io/managed-by` | The tool being used to manage the operation of an application | `helm` | string |
To illustrate these labels in action, consider the following StatefulSet object:
```yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
labels:
app.kubernetes.io/name: mysql
app.kubernetes.io/instance: wordpress-abcxzy
app.kubernetes.io/version: "5.7.21"
app.kubernetes.io/component: database
app.kubernetes.io/part-of: wordpress
app.kubernetes.io/managed-by: helm
```
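Because these are ordinary Kubernetes labels, they can be queried with standard
label selectors. A minimal sketch, assuming the labels above have been applied
to the relevant objects:
```shell
# Everything that is part of the wordpress application
kubectl get pods,statefulsets,services -l app.kubernetes.io/part-of=wordpress
# Only the wordpress-abcxzy instance
kubectl get pods -l app.kubernetes.io/instance=wordpress-abcxzy
```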
## Applications And Instances Of Applications
An application can be installed one or more times into a Kubernetes cluster and,
in some cases, the same namespace. For example, WordPress can be installed more
than once, where different websites are different installations of WordPress.
The name of an application and the instance name are recorded separately. For
example, WordPress has an `app.kubernetes.io/name` of `wordpress` while it has
an instance name, represented as `app.kubernetes.io/instance` with a value of
`wordpress-abcxzy`. This enables the application and instance of the application
to be identifiable. Every instance of an application must have a unique name.
## Examples
To illustrate different ways to use these labels, the following examples have varying complexity.
### A Simple Stateless Service
Consider the case for a simple stateless service deployed using `Deployment` and `Service` objects. The following two snippets represent how the labels could be used in their simplest form.
The `Deployment` is used to oversee the pods running the application itself.
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/name: myservice
app.kubernetes.io/instance: myservice-abcxzy
...
```
The `Service` is used to expose the application.
```yaml
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/name: myservice
app.kubernetes.io/instance: myservice-abcxzy
...
```
### Web Application With A Database
Consider a slightly more complicated application: a web application (WordPress)
using a database (MySQL), installed using Helm. The following snippets illustrate
the start of objects used to deploy this application.
The start to the following `Deployment` is used for WordPress:
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/name: wordpress
app.kubernetes.io/instance: wordpress-abcxzy
app.kubernetes.io/version: "4.9.4"
app.kubernetes.io/managed-by: helm
app.kubernetes.io/component: server
app.kubernetes.io/part-of: wordpress
...
```
The `Service` is used to expose WordPress:
```yaml
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/name: wordpress
app.kubernetes.io/instance: wordpress-abcxzy
app.kubernetes.io/version: "4.9.4"
app.kubernetes.io/managed-by: helm
app.kubernetes.io/component: server
app.kubernetes.io/part-of: wordpress
...
```
MySQL is exposed as a `StatefulSet` with metadata for both it and the larger application it belongs to:
```yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
labels:
app.kubernetes.io/name: mysql
app.kubernetes.io/instance: wordpress-abcxzy
app.kubernetes.io/managed-by: helm
app.kubernetes.io/component: database
app.kubernetes.io/part-of: wordpress
app.kubernetes.io/version: "5.7.21"
...
```
The `Service` is used to expose MySQL as part of WordPress:
```yaml
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/name: mysql
app.kubernetes.io/instance: wordpress-abcxzy
app.kubernetes.io/managed-by: helm
app.kubernetes.io/component: database
app.kubernetes.io/part-of: wordpress
app.kubernetes.io/version: "5.7.21"
...
```
With the MySQL `StatefulSet` and `Service`, you'll notice that information about both MySQL and WordPress, the broader application, is included.
{{% /capture %}}

View File

@ -0,0 +1,58 @@
---
title: Field Selectors
weight: 60
---
_Field selectors_ let you [select Kubernetes resources](/docs/concepts/overview/working-with-objects/kubernetes-objects) based on the value of one or more resource fields. Here are some example field selector queries:
* `metadata.name=my-service`
* `metadata.namespace!=default`
* `status.phase=Pending`
This `kubectl` command selects all Pods for which the value of the [`status.phase`](/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) field is `Running`:
```shell
$ kubectl get pods --field-selector status.phase=Running
```
{{< note >}}
Field selectors are essentially resource *filters*. By default, no selectors/filters are applied, meaning that all resources of the specified type are selected. This makes the following `kubectl` queries equivalent:
```shell
$ kubectl get pods
$ kubectl get pods --field-selector ""
```
{{< /note >}}
## Supported fields
Supported field selectors vary by Kubernetes resource type. All resource types support the `metadata.name` and `metadata.namespace` fields. Using unsupported field selectors produces an error. For example:
```shell
$ kubectl get ingress --field-selector foo.bar=baz
Error from server (BadRequest): Unable to find "ingresses" that match label selector "", field selector "foo.bar=baz": "foo.bar" is not a known field selector: only "metadata.name", "metadata.namespace"
```
## Supported operators
You can use the `=`, `==`, and `!=` operators with field selectors (`=` and `==` mean the same thing). This `kubectl` command, for example, selects all Kubernetes Services that aren't in the `default` namespace:
```shell
$ kubectl get services --field-selector metadata.namespace!=default
```
## Chained selectors
As with [label](/docs/concepts/overview/working-with-objects/labels) and other selectors, field selectors can be chained together as a comma-separated list. This `kubectl` command selects all Pods for which the `status.phase` does not equal `Running` and the `spec.restartPolicy` field equals `Always`:
```shell
$ kubectl get pods --field-selector=status.phase!=Running,spec.restartPolicy=Always
```
## Multiple resource types
You can use field selectors across multiple resource types. This `kubectl` command selects all Statefulsets and Services that are not in the `default` namespace:
```shell
$ kubectl get statefulsets,services --field-selector metadata.namespace!=default
```

View File

@ -36,12 +36,14 @@ When you create an object in Kubernetes, you must provide the object spec that d
Here's an example `.yaml` file that shows the required fields and object spec for a Kubernetes Deployment:
{{< code file="nginx-deployment.yaml" >}}
{{< codenew file="application/deployment.yaml" >}}
One way to create a Deployment using a `.yaml` file like the one above is to use the [`kubectl create`](/docs/reference/generated/kubectl/kubectl-commands#create) command in the `kubectl` command-line interface, passing the `.yaml` file as an argument. Here's an example:
One way to create a Deployment using a `.yaml` file like the one above is to use the
[`kubectl create`](/docs/reference/generated/kubectl/kubectl-commands#create) command
in the `kubectl` command-line interface, passing the `.yaml` file as an argument. Here's an example:
```shell
$ kubectl create -f https://k8s.io/docs/concepts/overview/working-with-objects/nginx-deployment.yaml --record
$ kubectl create -f https://k8s.io/examples/application/deployment.yaml --record
```
The output is similar to this:

View File

@ -58,7 +58,7 @@ Kubernetes starts with three initial namespaces:
* `default` The default namespace for objects with no other namespace
* `kube-system` The namespace for objects created by the Kubernetes system
* `kube-public` The namespace is created automatically and readable by all users (including those not authenticated). This namespace is mostly reserved for cluster usage, in case that some resources should be visible and readable publicly throughout the whole cluster. The public aspect of this namespace is only a convention, not a requirement.
* `kube-public` This namespace is created automatically and is readable by all users (including those not authenticated). This namespace is mostly reserved for cluster usage, in case that some resources should be visible and readable publicly throughout the whole cluster. The public aspect of this namespace is only a convention, not a requirement.
### Setting the namespace for a request
@ -98,4 +98,14 @@ in some namespaces. However namespace resources are not themselves in a namespa
And low-level resources, such as [nodes](/docs/admin/node) and
persistentVolumes, are not in any namespace.
{{% /capture %}}
To see which Kubernetes resources are and aren't in a namespace:
```shell
# In a namespace
$ kubectl api-resources --namespaced=true
# Not in a namespace
$ kubectl api-resources --namespaced=false
```
{{% /capture %}}

View File

@ -1,19 +0,0 @@
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
metadata:
name: nginx-deployment
spec:
replicas: 3
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.7.9
ports:
- containerPort: 80

View File

@ -31,7 +31,7 @@ administrator to control the following:
| Control Aspect | Field Names |
| ----------------------------------------------------| ------------------------------------------- |
| Running of privileged containers | [`privileged`](#privileged) |
| Usage of the root namespaces | [`hostPID`, `hostIPC`](#host-namespaces) |
| Usage of host namespaces | [`hostPID`, `hostIPC`](#host-namespaces) |
| Usage of host networking and ports | [`hostNetwork`, `hostPorts`](#host-namespaces) |
| Usage of volume types | [`volumes`](#volumes-and-file-systems) |
| Usage of the host filesystem | [`allowedHostPaths`](#volumes-and-file-systems) |
@ -51,9 +51,9 @@ administrator to control the following:
Pod security policy control is implemented as an optional (but recommended)
[admission
controller](/docs/admin/admission-controllers/#podsecuritypolicy). PodSecurityPolicies
controller](/docs/reference/access-authn-authz/admission-controllers/#podsecuritypolicy). PodSecurityPolicies
are enforced by [enabling the admission
controller](/docs/admin/admission-controllers/#how-do-i-turn-on-an-admission-control-plug-in),
controller](/docs/reference/access-authn-authz/admission-controllers/#how-do-i-turn-on-an-admission-control-plug-in),
but doing so without authorizing any policies **will prevent any pods from being
created** in the cluster.
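For example, a minimal sketch of enabling the admission controller on the API
server (the other plugins shown are illustrative; keep the set your cluster
already uses and add `PodSecurityPolicy` to it):
```shell
# Append PodSecurityPolicy to the admission plugins the API server already enables
kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,PodSecurityPolicy
```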
@ -80,8 +80,8 @@ pod's service account (see [example](#run-another-pod)).
### Via RBAC
[RBAC](/docs/admin/authorization/rbac/) is a standard Kubernetes authorization
mode, and can easily be used to authorize use of policies.
[RBAC](/docs/reference/access-authn-authz/rbac/) is a standard Kubernetes
authorization mode, and can easily be used to authorize use of policies.
First, a `Role` or `ClusterRole` needs to grant access to `use` the desired
policies. The rules to grant access look like this:
@ -136,20 +136,20 @@ paired with system groups to grant access to all pods run in the namespace:
```
For more examples of RBAC bindings, see [Role Binding
Examples](/docs/admin/authorization/rbac#role-binding-examples). For a complete
example of authorizing a PodSecurityPolicy, see
Examples](/docs/reference/access-authn-authz/rbac#role-binding-examples).
For a complete example of authorizing a PodSecurityPolicy, see
[below](#example).
### Troubleshooting
- The [Controller Manager](/docs/admin/kube-controller-manager/) must be run
against [the secured API port](/docs/admin/accessing-the-api/), and must not
have superuser permissions. Otherwise requests would bypass authentication and
authorization modules, all PodSecurityPolicy objects would be allowed, and users
would be able to create privileged containers. For more details on configuring
Controller Manager authorization, see [Controller
Roles](/docs/admin/authorization/rbac/#controller-roles).
against [the secured API port](/docs/reference/access-authn-authz/controlling-access/),
and must not have superuser permissions. Otherwise requests would bypass
authentication and authorization modules, all PodSecurityPolicy objects would be
allowed, and users would be able to create privileged containers. For more details
on configuring Controller Manager authorization, see [Controller
Roles](/docs/reference/access-authn-authz/rbac/#controller-roles).
## Policy Order
Set up a namespace and a service account to use for this example. We'll use
this service account to mock a non-admin user.
```shell
$ kubectl create namespace psp-example
$ kubectl create serviceaccount -n psp-example fake-user
$ kubectl create rolebinding -n psp-example fake-editor --clusterrole=edit --serviceaccount=psp-example:fake-user
kubectl create namespace psp-example
kubectl create serviceaccount -n psp-example fake-user
kubectl create rolebinding -n psp-example fake-editor --clusterrole=edit --serviceaccount=psp-example:fake-user
```
To make it clear which user we're acting as and save some typing, create 2
aliases:
```shell
$ alias kubectl-admin='kubectl -n psp-example'
$ alias kubectl-user='kubectl --as=system:serviceaccount:psp-example:fake-user -n psp-example'
alias kubectl-admin='kubectl -n psp-example'
alias kubectl-user='kubectl --as=system:serviceaccount:psp-example:fake-user -n psp-example'
```
### Create a policy and a pod
@ -199,13 +199,13 @@ simply prevents the creation of privileged pods.
And create it with kubectl:
```shell
$ kubectl-admin create -f example-psp.yaml
kubectl-admin create -f example-psp.yaml
```
Now, as the unprivileged user, try to create a simple pod:
```shell
$ kubectl-user create -f- <<EOF
kubectl-user create -f- <<EOF
apiVersion: v1
kind: Pod
metadata:
@ -222,34 +222,38 @@ Error from server (Forbidden): error when creating "STDIN": pods "pause" is forb
pod's service account nor `fake-user` have permission to use the new policy:
```shell
$ kubectl-user auth can-i use podsecuritypolicy/example
kubectl-user auth can-i use podsecuritypolicy/example
no
```
Create the rolebinding to grant `fake-user` the `use` verb on the example
policy:
_Note: This is not the recommended way! See the [next section](#run-another-pod)
{{< note >}}
**Note:** _This is not the recommended way! See the [next section](#run-another-pod)
for the preferred approach._
{{< /note >}}
```shell
$ kubectl-admin create role psp:unprivileged \
kubectl-admin create role psp:unprivileged \
--verb=use \
--resource=podsecuritypolicy \
--resource-name=example
role "psp:unprivileged" created
$ kubectl-admin create rolebinding fake-user:psp:unprivileged \
kubectl-admin create rolebinding fake-user:psp:unprivileged \
--role=psp:unprivileged \
--serviceaccount=psp-example:fake-user
rolebinding "fake-user:psp:unprivileged" created
$ kubectl-user auth can-i use podsecuritypolicy/example
kubectl-user auth can-i use podsecuritypolicy/example
yes
```
Now retry creating the pod:
```shell
$ kubectl-user create -f- <<EOF
kubectl-user create -f- <<EOF
apiVersion: v1
kind: Pod
metadata:
@ -266,7 +270,7 @@ It works as expected! But any attempts to create a privileged pod should still
be denied:
```shell
$ kubectl-user create -f- <<EOF
kubectl-user create -f- <<EOF
apiVersion: v1
kind: Pod
metadata:
@ -284,7 +288,7 @@ Error from server (Forbidden): error when creating "STDIN": pods "privileged" is
Delete the pod before moving on:
```shell
$ kubectl-user delete pod pause
kubectl-user delete pod pause
```
### Run another pod
@ -292,11 +296,13 @@ $ kubectl-user delete pod pause
Let's try that again, slightly differently:
```shell
$ kubectl-user run pause --image=k8s.gcr.io/pause
kubectl-user run pause --image=k8s.gcr.io/pause
deployment "pause" created
$ kubectl-user get pods
kubectl-user get pods
No resources found.
$ kubectl-user get events | head -n 2
kubectl-user get events | head -n 2
LASTSEEN FIRSTSEEN COUNT NAME KIND SUBOBJECT TYPE REASON SOURCE MESSAGE
1m 2m 15 pause-7774d79b5 ReplicaSet Warning FailedCreate replicaset-controller Error creating: pods "pause-7774d79b5-" is forbidden: no providers available to validate pod request
```
@ -314,7 +320,7 @@ account instead. In this case (since we didn't specify it) the service account
is `default`:
```shell
$ kubectl-admin create rolebinding default:psp:unprivileged \
kubectl-admin create rolebinding default:psp:unprivileged \
--role=psp:unprivileged \
--serviceaccount=psp-example:default
rolebinding "default:psp:unprivileged" created
@ -324,7 +330,7 @@ Now if you give it a minute to retry, the replicaset-controller should
eventually succeed in creating the pod:
```shell
$ kubectl-user get pods --watch
kubectl-user get pods --watch
NAME READY STATUS RESTARTS AGE
pause-7774d79b5-qrgcb 0/1 Pending 0 1s
pause-7774d79b5-qrgcb 0/1 Pending 0 1s
@ -338,7 +344,7 @@ pause-7774d79b5-qrgcb 1/1 Running 0 2s
Delete the namespace to clean up most of the example resources:
```shell
$ kubectl-admin delete ns psp-example
kubectl-admin delete ns psp-example
namespace "psp-example" deleted
```
@ -346,7 +352,7 @@ Note that `PodSecurityPolicy` resources are not namespaced, and must be cleaned
up separately:
```shell
$ kubectl-admin delete psp example
kubectl-admin delete psp example
podsecuritypolicy "example" deleted
```

View File

@ -154,7 +154,7 @@ The following types are supported:
| `persistentvolumeclaims` | The total number of [persistent volume claims](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) that can exist in the namespace. |
| `pods` | The total number of pods in a non-terminal state that can exist in the namespace. A pod is in a terminal state if `.status.phase in (Failed, Succeeded)` is true. |
| `replicationcontrollers` | The total number of replication controllers that can exist in the namespace. |
| `resourcequotas` | The total number of [resource quotas](/docs/admin/admission-controllers/#resourcequota) that can exist in the namespace. |
| `resourcequotas` | The total number of [resource quotas](/docs/reference/access-authn-authz/admission-controllers/#resourcequota) that can exist in the namespace. |
| `services` | The total number of services that can exist in the namespace. |
| `services.loadbalancers` | The total number of services of type load balancer that can exist in the namespace. |
| `services.nodeports` | The total number of services of type node port that can exist in the namespace. |
@ -194,7 +194,7 @@ The `Terminating`, `NotTerminating`, and `NotBestEffort` scopes restrict a quota
### Resource Quota Per PriorityClass
{{< feature-state for_k8s_version="1.11" state="beta" >}}
{{< feature-state for_k8s_version="1.11" state="alpha" >}}
Pods can be created at a specific [priority](/docs/concepts/configuration/pod-priority-preemption/#pod-priority).
You can control a pod's consumption of system resources based on a pod's priority, by using the `scopeSelector`
@ -266,7 +266,7 @@ works as follows:
2. Apply it using `kubectl create`.
```shell
kubectl create -f ./quota.yml`
kubectl create -f ./quota.yml
resourcequota/pods-high created
resourcequota/pods-medium created

View File

@ -44,13 +44,17 @@ fe00::2 ip6-allrouters
10.200.0.4 nginx
```
by default, the hosts file only includes ipv4 and ipv6 boilerplates like `localhost` and its own hostname.
By default, the `hosts` file only includes IPv4 and IPv6 boilerplates like
`localhost` and its own hostname.
## Adding Additional Entries with HostAliases
In addition to the default boilerplate, we can add additional entries to the hosts file to resolve `foo.local`, `bar.local` to `127.0.0.1` and `foo.remote`, `bar.remote` to `10.1.2.3`, we can by adding HostAliases to the Pod under `.spec.hostAliases`:
In addition to the default boilerplate, we can add additional entries to the
`hosts` file. For example, to resolve `foo.local` and `bar.local` to `127.0.0.1`,
and `foo.remote` and `bar.remote` to `10.1.2.3`, we can add HostAliases to the
Pod under `.spec.hostAliases`:
{{< code file="hostaliases-pod.yaml" >}}
{{< codenew file="service/networking/hostaliases-pod.yaml" >}}
This Pod can be started with the following commands:
@ -63,7 +67,7 @@ NAME READY STATUS RESTARTS AGE IP
hostaliases-pod 0/1 Completed 0 6s 10.244.135.10 node3
```
The hosts file content would look like this:
The `hosts` file content would look like this:
```shell
$ kubectl logs hostaliases-pod
@ -83,22 +87,17 @@ fe00::2 ip6-allrouters
With the additional entries specified at the bottom.
## Limitations
HostAlias is only supported in 1.7+.
HostAlias support in 1.7 is limited to non-hostNetwork Pods because kubelet only manages the hosts file for non-hostNetwork Pods.
In 1.8, HostAlias is supported for all Pods regardless of network configuration.
## Why Does Kubelet Manage the Hosts File?
Kubelet [manages](https://github.com/kubernetes/kubernetes/issues/14633) the hosts file for each container of the Pod to prevent Docker from [modifying](https://github.com/moby/moby/issues/17190) the file after the containers have already been started.
Kubelet [manages](https://github.com/kubernetes/kubernetes/issues/14633) the
`hosts` file for each container of the Pod to prevent Docker from
[modifying](https://github.com/moby/moby/issues/17190) the file after the
containers have already been started.
Because of the managed-nature of the file, any user-written content will be overwritten whenever the hosts file is remounted by Kubelet in the event of a container restart or a Pod reschedule. Thus, it is not suggested to modify the contents of the file.
Because of the managed-nature of the file, any user-written content will be
overwritten whenever the `hosts` file is remounted by Kubelet in the event of
a container restart or a Pod reschedule. Thus, it is not suggested to modify
the contents of the file.
{{% /capture %}}
{{% capture whatsnext %}}
{{% /capture %}}

View File

@ -28,11 +28,12 @@ This guide uses a simple nginx server to demonstrate proof of concept. The same
## Exposing pods to the cluster
We did this in a previous example, but let's do it once again and focus on the networking perspective. Create an nginx pod, and note that it has a container port specification:
We did this in a previous example, but let's do it once again and focus on the networking perspective.
Create an nginx Pod, and note that it has a container port specification:
{{< code file="run-my-nginx.yaml" >}}
{{< codenew file="service/networking/run-my-nginx.yaml" >}}
This makes it accessible from any node in your cluster. Check the nodes the pod is running on:
This makes it accessible from any node in your cluster. Check the nodes the Pod is running on:
```shell
$ kubectl create -f ./run-my-nginx.yaml
@ -69,9 +70,15 @@ service "my-nginx" exposed
This is equivalent to `kubectl create -f` the following yaml:
{{< code file="nginx-svc.yaml" >}}
{{< codenew file="service/networking/nginx-svc.yaml" >}}
This specification will create a Service which targets TCP port 80 on any Pod with the `run: my-nginx` label, and expose it on an abstracted Service port (`targetPort`: is the port the container accepts traffic on, `port`: is the abstracted Service port, which can be any port other pods use to access the Service). View [service API object](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#service-v1-core) to see the list of supported fields in service definition.
This specification will create a Service which targets TCP port 80 on any Pod
with the `run: my-nginx` label, and expose it on an abstracted Service port
(`targetPort`: is the port the container accepts traffic on, `port`: is the
abstracted Service port, which can be any port other pods use to access the
Service).
View [Service](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#service-v1-core)
API object to see the list of supported fields in service definition.
Check your Service:
```shell
@ -80,7 +87,13 @@ NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
my-nginx 10.0.162.149 <none> 80/TCP 21s
```
As mentioned previously, a Service is backed by a group of pods. These pods are exposed through `endpoints`. The Service's selector will be evaluated continuously and the results will be POSTed to an Endpoints object also named `my-nginx`. When a pod dies, it is automatically removed from the endpoints, and new pods matching the Service's selector will automatically get added to the endpoints. Check the endpoints, and note that the IPs are the same as the pods created in the first step:
As mentioned previously, a Service is backed by a group of Pods. These Pods are
exposed through `endpoints`. The Service's selector will be evaluated continuously
and the results will be POSTed to an Endpoints object also named `my-nginx`.
When a Pod dies, it is automatically removed from the endpoints, and new Pods
matching the Service's selector will automatically get added to the endpoints.
Check the endpoints, and note that the IPs are the same as the Pods created in
the first step:
```shell
$ kubectl describe svc my-nginx
@ -101,15 +114,22 @@ NAME ENDPOINTS AGE
my-nginx 10.244.2.5:80,10.244.3.4:80 1m
```
You should now be able to curl the nginx Service on `<CLUSTER-IP>:<PORT>` from any node in your cluster. Note that the Service IP is completely virtual, it never hits the wire, if you're curious about how this works you can read more about the [service proxy](/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies).
You should now be able to curl the nginx Service on `<CLUSTER-IP>:<PORT>` from
any node in your cluster. Note that the Service IP is completely virtual, it
never hits the wire. If you're curious about how this works you can read more
about the [service proxy](/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies).
## Accessing the Service
Kubernetes supports 2 primary modes of finding a Service - environment variables and DNS. The former works out of the box while the latter requires the [kube-dns cluster addon](http://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/dns/kube-dns/README.md).
Kubernetes supports 2 primary modes of finding a Service - environment variables
and DNS. The former works out of the box while the latter requires the
[kube-dns cluster addon](http://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/dns/kube-dns/README.md).
### Environment Variables
When a Pod runs on a Node, the kubelet adds a set of environment variables for each active Service. This introduces an ordering problem. To see why, inspect the environment of your running nginx pods (your pod name will be different):
When a Pod runs on a Node, the kubelet adds a set of environment variables for
each active Service. This introduces an ordering problem. To see why, inspect
the environment of your running nginx Pods (your Pod name will be different):
```shell
$ kubectl exec my-nginx-3800858182-jr4a2 -- printenv | grep SERVICE
@ -118,7 +138,14 @@ KUBERNETES_SERVICE_PORT=443
KUBERNETES_SERVICE_PORT_HTTPS=443
```
Note there's no mention of your Service. This is because you created the replicas before the Service. Another disadvantage of doing this is that the scheduler might put both pods on the same machine, which will take your entire Service down if it dies. We can do this the right way by killing the 2 pods and waiting for the Deployment to recreate them. This time around the Service exists *before* the replicas. This will give you scheduler-level Service spreading of your pods (provided all your nodes have equal capacity), as well as the right environment variables:
Note there's no mention of your Service. This is because you created the replicas
before the Service. Another disadvantage of doing this is that the scheduler might
put both Pods on the same machine, which will take your entire Service down if
it dies. We can do this the right way by killing the 2 Pods and waiting for the
Deployment to recreate them. This time around the Service exists *before* the
replicas. This will give you scheduler-level Service spreading of your Pods
(provided all your nodes have equal capacity), as well as the right environment
variables:
```shell
$ kubectl scale deployment my-nginx --replicas=0; kubectl scale deployment my-nginx --replicas=2;
@ -150,7 +177,11 @@ NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kube-dns 10.0.0.10 <none> 53/UDP,53/TCP 8m
```
If it isn't running, you can [enable it](http://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/dns/README.md#how-do-i-configure-it). The rest of this section will assume you have a Service with a long lived IP (my-nginx), and a dns server that has assigned a name to that IP (the kube-dns cluster addon), so you can talk to the Service from any pod in your cluster using standard methods (e.g. gethostbyname). Let's run another curl application to test this:
If it isn't running, you can [enable it](http://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/dns/README.md#how-do-i-configure-it).
The rest of this section will assume you have a Service with a long lived IP
(my-nginx), and a DNS server that has assigned a name to that IP (the kube-dns
cluster addon), so you can talk to the Service from any pod in your cluster using
standard methods (e.g. gethostbyname). Let's run another curl application to test this:
```shell
$ kubectl run curl --image=radial/busyboxplus:curl -i --tty
@ -221,13 +252,16 @@ nginxsecret Opaque 2 1m
Now modify your nginx replicas to start an https server using the certificate in the secret, and the Service, to expose both ports (80 and 443):
{{< code file="nginx-secure-app.yaml" >}}
{{< codenew file="service/networking/nginx-secure-app.yaml" >}}
Noteworthy points about the nginx-secure-app manifest:
- It contains both Deployment and Service specification in the same file.
- The [nginx server](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/https-nginx/default.conf) serves http traffic on port 80 and https traffic on 443, and nginx Service exposes both ports.
- Each container has access to the keys through a volume mounted at /etc/nginx/ssl. This is setup *before* the nginx server is started.
- The [nginx server](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/https-nginx/default.conf)
serves HTTP traffic on port 80 and HTTPS traffic on 443, and nginx Service
exposes both ports.
- Each container has access to the keys through a volume mounted at `/etc/nginx/ssl`.
  This is set up *before* the nginx server is started (see the sketch below).
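
A minimal sketch of that volume wiring, assuming the `nginxsecret` Secret created earlier; the embedded `nginx-secure-app.yaml` manifest above is the authoritative version, and the image name here is a placeholder:

```yaml
apiVersion: v1
kind: Pod                      # in the real manifest this is a Deployment's Pod template
metadata:
  name: nginx-ssl-sketch
spec:
  volumes:
  - name: secret-volume
    secret:
      secretName: nginxsecret  # the Secret's keys become files under the mount path
  containers:
  - name: nginxhttps
    image: nginx               # placeholder image
    ports:
    - containerPort: 443
    - containerPort: 80
    volumeMounts:
    - name: secret-volume
      mountPath: /etc/nginx/ssl
```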
```shell
$ kubectl delete deployments,svc my-nginx; kubectl create -f ./nginx-secure-app.yaml
@ -247,7 +281,7 @@ Note how we supplied the `-k` parameter to curl in the last step, this is becaus
so we have to tell curl to ignore the CName mismatch. By creating a Service we linked the CName used in the certificate with the actual DNS name used by pods during Service lookup.
Let's test this from a pod (the same secret is being reused for simplicity, the pod only needs nginx.crt to access the Service):
{{< code file="curlpod.yaml" >}}
{{< codenew file="service/networking/curlpod.yaml" >}}
```shell
$ kubectl create -f ./curlpod.yaml
@ -262,7 +296,11 @@ $ kubectl exec curl-deployment-1515033274-1410r -- curl https://my-nginx --cacer
## Exposing the Service
For some parts of your applications you may want to expose a Service onto an external IP address. Kubernetes supports two ways of doing this: NodePorts and LoadBalancers. The Service created in the last section already used `NodePort`, so your nginx https replica is ready to serve traffic on the internet if your node has a public IP.
For some parts of your applications you may want to expose a Service onto an
external IP address. Kubernetes supports two ways of doing this: NodePorts and
LoadBalancers. The Service created in the last section already used `NodePort`,
so your nginx HTTPS replica is ready to serve traffic on the internet if your
node has a public IP.
```shell
$ kubectl get svc my-nginx -o yaml | grep nodePort -C 5

@ -148,6 +148,10 @@ A record at that name, pointing to the Pod's IP. Both pods "`busybox1`" and
The Endpoints object can specify the `hostname` for any endpoint addresses,
along with its IP.
{{< note >}}
**Note:** Because A records are not created for Pod names, `hostname` is required for the Pod's A record to be created. A Pod with no `hostname` but with `subdomain` only will only create the A record for the headless service (`default-subdomain.my-namespace.svc.cluster.local`), pointing to the Pod's IP address.
{{< /note >}}
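
A condensed sketch of the behavior described in the note: a headless Service plus a Pod that sets both `hostname` and `subdomain` (names follow the busybox example referenced above):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: default-subdomain
spec:
  clusterIP: None              # headless
  selector:
    name: busybox
  ports:
  - name: foo
    port: 1234
---
apiVersion: v1
kind: Pod
metadata:
  name: busybox1
  labels:
    name: busybox
spec:
  hostname: busybox-1          # enables busybox-1.default-subdomain.my-namespace.svc.cluster.local
  subdomain: default-subdomain # must match the headless Service name
  containers:
  - name: busybox
    image: busybox
    command: ["sleep", "3600"]
```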
### Pod's DNS Policy
DNS policies can be set on a per-pod basis. Currently Kubernetes supports the
@ -233,7 +237,7 @@ Below are the properties a user can specify in the `dnsConfig` field:
The following is an example Pod with custom DNS settings:
{{< code file="custom-dns.yaml" >}}
{{< codenew file="service/networking/custom-dns.yaml" >}}
When the Pod above is created, the container `test` gets the following contents
in its `/etc/resolv.conf` file:

@ -94,6 +94,7 @@ In order for the Ingress resource to work, the cluster must have an Ingress cont
* [Kong](https://konghq.com/) offers [community](https://discuss.konghq.com/c/kubernetes) or [commercial](https://konghq.com/api-customer-success/) support and maintenance for the [Kong Ingress Controller for Kubernetes](https://konghq.com/blog/kubernetes-ingress-controller-for-kong/)
* [Traefik](https://github.com/containous/traefik) is a fully featured ingress controller
([Let's Encrypt](https://letsencrypt.org), secrets, http2, websocket...), and it also comes with commercial support by [Containous](https://containo.us/services)
* [NGINX, Inc.](https://www.nginx.com/) offers support and maintenance for the [NGINX Ingress Controller for Kubernetes](https://www.nginx.com/products/nginx/kubernetes-ingress-controller)
{{< note >}}
Review the documentation for your controller to find its specific support policy.
@ -111,9 +112,11 @@ Make sure you review your controller's specific docs so you understand the cavea
### Single Service Ingress
There are existing Kubernetes concepts that allow you to expose a single service (see [alternatives](#alternatives)), however you can do so through an Ingress as well, by specifying a *default backend* with no rules.
There are existing Kubernetes concepts that allow you to expose a single Service
(see [alternatives](#alternatives)); however, you can do so through an Ingress
as well, by specifying a *default backend* with no rules.
{{< code file="ingress.yaml" >}}
{{< codenew file="service/networking/ingress.yaml" >}}
If you create it using `kubectl create -f` you should see:
@ -123,11 +126,17 @@ NAME RULE BACKEND ADDRESS
test-ingress - testsvc:80 107.178.254.228
```
Where `107.178.254.228` is the IP allocated by the Ingress controller to satisfy this Ingress. The `RULE` column shows that all traffic sent to the IP is directed to the Kubernetes Service listed under `BACKEND`.
Where `107.178.254.228` is the IP allocated by the Ingress controller to satisfy
this Ingress. The `RULE` column shows that all traffic sent to the IP is
directed to the Kubernetes Service listed under `BACKEND`.
### Simple fanout
As described previously, pods within kubernetes have IPs only visible on the cluster network, so we need something at the edge accepting ingress traffic and proxying it to the right endpoints. This component is usually a highly available loadbalancer. An Ingress allows you to keep the number of loadbalancers down to a minimum, for example, a setup like:
As described previously, Pods within Kubernetes have IPs only visible on the
cluster network, so we need something at the edge accepting ingress traffic and
proxying it to the right endpoints. This component is usually a highly available
loadbalancer. An Ingress allows you to keep the number of loadbalancers down
to a minimum. For example, a setup like:
```shell
foo.bar.com -> 178.91.123.132 -> / foo s1:80
@ -168,7 +177,10 @@ test -
/foo s1:80
/bar s2:80
```
The Ingress controller will provision an implementation specific loadbalancer that satisfies the Ingress, as long as the services (s1, s2) exist. When it has done so, you will see the address of the loadbalancer under the last column of the Ingress.
The Ingress controller will provision an implementation specific loadbalancer
that satisfies the Ingress, as long as the services (`s1`, `s2`) exist.
When it has done so, you will see the address of the loadbalancer under the
last column of the Ingress.
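
As a sketch, an Ingress that could implement the fanout above might look roughly like the following, assuming Services `s1` and `s2` exist and listen on port 80 (this loosely mirrors the fanout manifest on the full page):

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: test
spec:
  rules:
  - host: foo.bar.com
    http:
      paths:
      - path: /foo
        backend:
          serviceName: s1
          servicePort: 80
      - path: /bar
        backend:
          serviceName: s2
          servicePort: 80
```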
### Name based virtual hosting
@ -180,7 +192,8 @@ foo.bar.com --| |-> foo.bar.com s1:80
bar.foo.com --| |-> bar.foo.com s2:80
```
The following Ingress tells the backing loadbalancer to route requests based on the [Host header](https://tools.ietf.org/html/rfc7230#section-5.4).
The following Ingress tells the backing loadbalancer to route requests based on
the [Host header](https://tools.ietf.org/html/rfc7230#section-5.4).
```yaml
apiVersion: extensions/v1beta1
@ -203,11 +216,23 @@ spec:
servicePort: 80
```
__Default Backends__: An Ingress with no rules, like the one shown in the previous section, sends all traffic to a single default backend. You can use the same technique to tell a loadbalancer where to find your website's 404 page, by specifying a set of rules *and* a default backend. Traffic is routed to your default backend if none of the Hosts in your Ingress match the Host in the request header, and/or none of the paths match the URL of the request.
__Default Backends__: An Ingress with no rules, like the one shown in the previous
section, sends all traffic to a single default backend. You can use the same
technique to tell a loadbalancer where to find your website's 404 page, by
specifying a set of rules *and* a default backend. Traffic is routed to your
default backend if none of the Hosts in your Ingress match the Host in the
request header, and/or none of the paths match the URL of the request.
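
A minimal sketch of combining rules with a default backend; the Service names here are hypothetical:

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: fanout-with-default
spec:
  backend:                            # catch-all, e.g. a Service serving your 404 page
    serviceName: default-http-backend # hypothetical Service name
    servicePort: 80
  rules:
  - host: foo.bar.com
    http:
      paths:
      - path: /foo
        backend:
          serviceName: s1
          servicePort: 80
```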
### TLS
You can secure an Ingress by specifying a [secret](/docs/user-guide/secrets) that contains a TLS private key and certificate. Currently the Ingress only supports a single TLS port, 443, and assumes TLS termination. If the TLS configuration section in an Ingress specifies different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension (provided the Ingress controller supports SNI). The TLS secret must contain keys named `tls.crt` and `tls.key` that contain the certificate and private key to use for TLS, e.g.:
You can secure an Ingress by specifying a [secret](/docs/concepts/configuration/secret)
that contains a TLS private key and certificate. Currently the Ingress only
supports a single TLS port, 443, and assumes TLS termination. If the TLS
configuration section in an Ingress specifies different hosts, they will be
multiplexed on the same port according to the hostname specified through the
SNI TLS extension (provided the Ingress controller supports SNI). The TLS secret
must contain keys named `tls.crt` and `tls.key` that contain the certificate
and private key to use for TLS, e.g.:
```yaml
apiVersion: v1
@ -221,7 +246,8 @@ metadata:
type: Opaque
```
Referencing this secret in an Ingress will tell the Ingress controller to secure the channel from the client to the loadbalancer using TLS:
Referencing this secret in an Ingress will tell the Ingress controller to
secure the channel from the client to the loadbalancer using TLS:
```yaml
apiVersion: extensions/v1beta1
@ -236,13 +262,30 @@ spec:
servicePort: 80
```
Note that there is a gap between TLS features supported by various Ingress controllers. Please refer to documentation on [nginx](https://git.k8s.io/ingress-nginx/README.md#https), [GCE](https://git.k8s.io/ingress-gce/README.md#frontend-https), or any other platform specific Ingress controller to understand how TLS works in your environment.
Note that there is a gap between TLS features supported by various Ingress
controllers. Please refer to documentation on
[nginx](https://git.k8s.io/ingress-nginx/README.md#https),
[GCE](https://git.k8s.io/ingress-gce/README.md#frontend-https), or any other
platform specific Ingress controller to understand how TLS works in your environment.
### Loadbalancing
An Ingress controller is bootstrapped with some load balancing policy settings that it applies to all Ingress, such as the load balancing algorithm, backend weight scheme, and others. More advanced load balancing concepts (e.g.: persistent sessions, dynamic weights) are not yet exposed through the Ingress. You can still get these features through the [service loadbalancer](https://github.com/kubernetes/ingress-nginx/blob/master/docs/ingress-controller-catalog.md). With time, we plan to distill load balancing patterns that are applicable cross platform into the Ingress resource.
An Ingress controller is bootstrapped with some load balancing policy settings
that it applies to all Ingress, such as the load balancing algorithm, backend
weight scheme, and others. More advanced load balancing concepts
(e.g. persistent sessions, dynamic weights) are not yet exposed through the
Ingress. You can still get these features through the
[service loadbalancer](https://github.com/kubernetes/ingress-nginx/blob/master/docs/ingress-controller-catalog.md).
With time, we plan to distill load balancing patterns that are applicable
cross platform into the Ingress resource.
It's also worth noting that even though health checks are not exposed directly through the Ingress, there exist parallel concepts in Kubernetes such as [readiness probes](/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/) which allow you to achieve the same end result. Please review the controller specific docs to see how they handle health checks ([nginx](https://git.k8s.io/ingress-nginx/README.md), [GCE](https://git.k8s.io/ingress-gce/README.md#health-checks)).
It's also worth noting that even though health checks are not exposed directly
through the Ingress, there exist parallel concepts in Kubernetes such as
[readiness probes](/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/)
which allow you to achieve the same end result. Please review the controller
specific docs to see how they handle health checks
([nginx](https://git.k8s.io/ingress-nginx/README.md),
[GCE](https://git.k8s.io/ingress-gce/README.md#health-checks)).
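
For example, a readiness probe on a backend Pod keeps it out of a loadbalancer's rotation until it reports healthy. This is only a sketch, and the `/healthz` path is a hypothetical health endpoint:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: probe-sketch
spec:
  containers:
  - name: web
    image: nginx
    ports:
    - containerPort: 80
    readinessProbe:
      httpGet:
        path: /healthz        # hypothetical health endpoint
        port: 80
      initialDelaySeconds: 5
      periodSeconds: 10
```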
## Updating an Ingress

@ -30,7 +30,7 @@ Pods become isolated by having a NetworkPolicy that selects them. Once there is
## The `NetworkPolicy` Resource
See the [NetworkPolicy](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#networkpolicy-v1-networking) for a full definition of the resource.
See the [NetworkPolicy](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#networkpolicy-v1-networking-k8s-io) for a full definition of the resource.
An example `NetworkPolicy` might look like this:

@ -135,32 +135,15 @@ The traffic will be routed to endpoints defined by the user (`1.2.3.4:9376` in
this example).
An ExternalName service is a special case of service that does not have
selectors. It does not define any ports or Endpoints. Rather, it serves as a
way to return an alias to an external service residing outside the cluster.
```yaml
kind: Service
apiVersion: v1
metadata:
name: my-service
namespace: prod
spec:
type: ExternalName
externalName: my.database.example.com
```
When looking up the host `my-service.prod.svc.CLUSTER`, the cluster DNS service
will return a `CNAME` record with the value `my.database.example.com`. Accessing
such a service works in the same way as others, with the only difference that
the redirection happens at the DNS level and no proxying or forwarding occurs.
Should you later decide to move your database into your cluster, you can start
its pods, add appropriate selectors or endpoints and change the service `type`.
selectors and uses DNS names instead. For more information, see the
[ExternalName](#externalname) section later in this document.
## Virtual IPs and service proxies
Every node in a Kubernetes cluster runs a `kube-proxy`. `kube-proxy` is
Every node in a Kubernetes cluster runs a `kube-proxy`. `kube-proxy` is
responsible for implementing a form of virtual IP for `Services` of type other
than `ExternalName`.
than [`ExternalName`](#externalname).
In Kubernetes v1.0, `Services` are a "layer 4" (TCP/UDP over IP) construct, the
proxy was purely in userspace. In Kubernetes v1.1, the `Ingress` API was added
(beta) to represent "layer 7"(HTTP) services, iptables proxy was added too,
@ -273,7 +256,7 @@ Note that the port names must only contain lowercase alphanumeric characters and
You can specify your own cluster IP address as part of a `Service` creation
request. To do this, set the `.spec.clusterIP` field. For example, if you
already have an existing DNS entry that you wish to replace, or legacy systems
already have an existing DNS entry that you wish to reuse, or legacy systems
that are configured for a specific IP address and difficult to re-configure.
The IP address that a user chooses must be a valid IP address and within the
`service-cluster-ip-range` CIDR range that is specified by flag to the API
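
As a hedged sketch of the `.spec.clusterIP` field described above (the address shown is only an example and must fall inside your cluster's service CIDR):

```yaml
kind: Service
apiVersion: v1
metadata:
  name: my-service
spec:
  clusterIP: 10.0.171.239   # example address; must be inside service-cluster-ip-range
  selector:
    app: MyApp
  ports:
  - protocol: TCP
    port: 80
    targetPort: 9376
```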
@ -348,7 +331,8 @@ can do a DNS SRV query for `"_http._tcp.my-service.my-ns"` to discover the port
number for `"http"`.
The Kubernetes DNS server is the only way to access services of type
`ExternalName`. More information is available in the [DNS Pods and Services](/docs/concepts/services-networking/dns-pod-service/).
`ExternalName`. More information is available in the [DNS Pods and
Services](/docs/concepts/services-networking/dns-pod-service/).
## Headless services
@ -378,7 +362,7 @@ For headless services that do not define selectors, the endpoints controller doe
not create `Endpoints` records. However, the DNS system looks for and configures
either:
* CNAME records for `ExternalName`-type services.
* CNAME records for [`ExternalName`](#externalname)-type services.
* A records for any `Endpoints` that share a name with the service, for all
other types.
@ -396,19 +380,20 @@ The default is `ClusterIP`.
* `ClusterIP`: Exposes the service on a cluster-internal IP. Choosing this value
makes the service only reachable from within the cluster. This is the
default `ServiceType`.
* `NodePort`: Exposes the service on each Node's IP at a static port (the `NodePort`).
A `ClusterIP` service, to which the `NodePort` service will route, is automatically
created. You'll be able to contact the `NodePort` service, from outside the cluster,
* [`NodePort`](#nodeport): Exposes the service on each Node's IP at a static port
(the `NodePort`). A `ClusterIP` service, to which the `NodePort` service will
route, is automatically created. You'll be able to contact the `NodePort` service,
from outside the cluster,
by requesting `<NodeIP>:<NodePort>`.
* `LoadBalancer`: Exposes the service externally using a cloud provider's load balancer.
`NodePort` and `ClusterIP` services, to which the external load balancer will route,
are automatically created.
* `ExternalName`: Maps the service to the contents of the `externalName` field
(e.g. `foo.bar.example.com`), by returning a `CNAME` record with its value.
No proxying of any kind is set up. This requires version 1.7 or higher of
`kube-dns`.
* [`LoadBalancer`](#loadbalancer): Exposes the service externally using a cloud
provider's load balancer. `NodePort` and `ClusterIP` services, to which the external
load balancer will route, are automatically created.
* [`ExternalName`](#externalname): Maps the service to the contents of the
`externalName` field (e.g. `foo.bar.example.com`), by returning a `CNAME` record
with its value. No proxying of any kind is set up. This requires version 1.7 or
higher of `kube-dns`.
### Type NodePort
### Type NodePort {#nodeport}
If you set the `type` field to `NodePort`, the Kubernetes master will
allocate a port from a range specified by `--service-node-port-range` flag (default: 30000-32767), and each
@ -429,7 +414,7 @@ even to just expose one or more nodes' IPs directly.
Note that this Service will be visible as both `<NodeIP>:spec.ports[*].nodePort`
and `.spec.clusterIP:spec.ports[*].port`. (If the `--nodeport-addresses` flag in kube-proxy is set, <NodeIP> would be filtered NodeIP(s).)
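
A minimal NodePort sketch; `nodePort` is optional, and if you set it yourself it must fall inside the configured range:

```yaml
kind: Service
apiVersion: v1
metadata:
  name: my-service
spec:
  type: NodePort
  selector:
    app: MyApp
  ports:
  - port: 80
    targetPort: 9376
    nodePort: 30061   # optional; omit to let the master pick one from the range
```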
### Type LoadBalancer
### Type LoadBalancer {#loadbalancer}
On cloud providers which support external load balancers, setting the `type`
field to `LoadBalancer` will provision a load balancer for your `Service`.
@ -467,8 +452,7 @@ cloud provider does not support the feature, the field will be ignored.
**Special notes for Azure**: To use user-specified public type `loadBalancerIP`, a static type
public IP address resource needs to be created first, and it should be in the same resource
group of the cluster. Specify the assigned IP address as loadBalancerIP. Verify you have
securityGroupName in the cloud provider configuration file.
group of the other automatically created resources of the cluster. For example, `MC_myResourceGroup_myAKSCluster_eastus`. Specify the assigned IP address as loadBalancerIP. Ensure that you have updated the securityGroupName in the cloud provider configuration file. For information about troubleshooting `CreatingLoadBalancerFailed` permission issues see, [Use a static IP address with the Azure Kubernetes Service (AKS) load balancer](https://docs.microsoft.com/en-us/azure/aks/static-ip) or [CreatingLoadBalancerFailed on AKS cluster with advanced networking](https://github.com/Azure/AKS/issues/357).
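
A hedged sketch of a `LoadBalancer` Service pinned to a pre-created static IP; the address here is hypothetical:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-service
spec:
  type: LoadBalancer
  loadBalancerIP: 40.121.183.52   # hypothetical static public IP created beforehand
  selector:
    app: MyApp
  ports:
  - port: 80
```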
#### Internal load balancer
In a mixed environment it is sometimes necessary to route traffic from services inside the same VPC.
@ -756,6 +740,40 @@ spec:
**Note:** NLB only works with certain instance classes, see the [AWS documentation](http://docs.aws.amazon.com/elasticloadbalancing/latest/network/target-group-register-targets.html#register-deregister-targets)
for supported instance types.
### Type ExternalName {#externalname}
{{< note >}}
**NOTE:** ExternalName Services are available only with `kube-dns` version 1.7 and later.
{{< /note >}}
Services of type ExternalName map a service to a DNS name (specified using
the `spec.externalName` parameter) rather than to a typical selector like
`my-service` or `cassandra`. This Service definition, for example, would map
the `my-service` Service in the `prod` namespace to `my.database.example.com`:
```yaml
kind: Service
apiVersion: v1
metadata:
name: my-service
namespace: prod
spec:
type: ExternalName
externalName: my.database.example.com
```
When looking up the host `my-service.prod.svc.CLUSTER`, the cluster DNS service
will return a `CNAME` record with the value `my.database.example.com`. Accessing
`my-service` works in the same way as other Services but with the crucial
difference that redirection happens at the DNS level rather than via proxying or
forwarding. Should you later decide to move your database into your cluster, you
can start its pods, add appropriate selectors or endpoints, and change the
service's `type`.
{{< note >}}
This section is indebted to the [Kubernetes Tips - Part
1](https://akomljen.com/kubernetes-tips-part-1/) blog post from [Alen Komljen](https://akomljen.com/).
{{< /note >}}
### External IPs

@ -110,7 +110,7 @@ dynamically provisioned if no storage class is specified. A cluster administrato
can enable this behavior by:
- Marking one `StorageClass` object as *default*;
- Making sure that the [`DefaultStorageClass` admission controller](/docs/admin/admission-controllers/#defaultstorageclass)
- Making sure that the [`DefaultStorageClass` admission controller](/docs/reference/access-authn-authz/admission-controllers/#defaultstorageclass)
is enabled on the API server.
An administrator can mark a specific `StorageClass` as default by adding the

@ -59,7 +59,7 @@ provisioning to occur. Claims that request the class `""` effectively disable
dynamic provisioning for themselves.
To enable dynamic storage provisioning based on storage class, the cluster administrator
needs to enable the `DefaultStorageClass` [admission controller](/docs/admin/admission-controllers/#defaultstorageclass)
needs to enable the `DefaultStorageClass` [admission controller](/docs/reference/access-authn-authz/admission-controllers/#defaultstorageclass)
on the API server. This can be done, for example, by ensuring that `DefaultStorageClass` is
among the comma-delimited, ordered list of values for the `--enable-admission-plugins` flag of
the API server component. For more information on API server command line flags,
@ -215,7 +215,7 @@ You can only resize volumes containing a file system if the file system is XFS,
When a volume contains a file system, the file system is only resized when a new Pod is started using
the `PersistentVolumeClaim` in ReadWrite mode. Therefore, if a pod or deployment is using a volume and
you want to expand it, you need to delete or recreate the pod after the volume has been exxpanded by the cloud provider in the controller-manager. You can check the status of resize operation by running the `kubectl describe pvc` command:
you want to expand it, you need to delete or recreate the pod after the volume has been expanded by the cloud provider in the controller-manager. You can check the status of resize operation by running the `kubectl describe pvc` command:
```
kubectl describe pvc <pvc_name>
@ -466,7 +466,7 @@ equal to `""` is always interpreted to be requesting a PV with no class, so it
can only be bound to PVs with no class (no annotation or one set equal to
`""`). A PVC with no `storageClassName` is not quite the same and is treated differently
by the cluster depending on whether the
[`DefaultStorageClass` admission plugin](/docs/admin/admission-controllers/#defaultstorageclass)
[`DefaultStorageClass` admission plugin](/docs/reference/access-authn-authz/admission-controllers/#defaultstorageclass)
is turned on.
* If the admission plugin is turned on, the administrator may specify a

@ -527,9 +527,9 @@ parameters:
cluster, and `skuName` and `location` are ignored.
During provision, a secret is created for mounting credentials. If the cluster
has enabled both [RBAC](/docs/admin/authorization/rbac/) and
[Controller Roles](/docs/admin/authorization/rbac/#controller-roles), add the
`create` permission of resource `secret` for clusterrole
has enabled both [RBAC](/docs/reference/access-authn-authz/rbac/) and
[Controller Roles](/docs/reference/access-authn-authz/rbac/#controller-roles),
add the `create` permission of resource `secret` for clusterrole
`system:controller:persistent-volume-binder`.
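
One possible way to grant that permission, sketched with RBAC v1 objects (the role and binding names are illustrative):

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:azure-cloud-provider   # illustrative name
rules:
- apiGroups: ['']
  resources: ['secrets']
  verbs: ['get', 'create']
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:azure-cloud-provider
roleRef:
  kind: ClusterRole
  apiGroup: rbac.authorization.k8s.io
  name: system:azure-cloud-provider
subjects:
- kind: ServiceAccount
  name: persistent-volume-binder      # the controller's service account
  namespace: kube-system
```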
### Portworx Volume

@ -67,36 +67,36 @@ mount each volume.
Kubernetes supports several types of Volumes:
* `awsElasticBlockStore`
* `azureDisk`
* `azureFile`
* `cephfs`
* `configMap`
* `csi`
* `downwardAPI`
* `emptyDir`
* `fc` (fibre channel)
* `flocker`
* `gcePersistentDisk`
* `gitRepo`
* `glusterfs`
* `hostPath`
* `iscsi`
* `local`
* `nfs`
* `persistentVolumeClaim`
* `projected`
* `portworxVolume`
* `quobyte`
* `rbd`
* `scaleIO`
* `secret`
* `storageos`
* `vsphereVolume`
* [awsElasticBlockStore](#awselasticblockstore)
* [azureDisk](#azuredisk)
* [azureFile](#azurefile)
* [cephfs](#cephfs)
* [configMap](#configmap)
* [csi](#csi)
* [downwardAPI](#downwardapi)
* [emptyDir](#emptydir)
* [fc (fibre channel)](#fc)
* [flocker](#flocker)
* [gcePersistentDisk](#gcepersistentdisk)
* [gitRepo (deprecated)](#gitrepo)
* [glusterfs](#glusterfs)
* [hostPath](#hostpath)
* [iscsi](#iscsi)
* [local](#local)
* [nfs](#nfs)
* [persistentVolumeClaim](#persistentvolumeclaim)
* [projected](#projected)
* [portworxVolume](#portworxvolume)
* [quobyte](#quobyte)
* [rbd](#rbd)
* [scaleIO](#scaleio)
* [secret](#secret)
* [storageos](#storageos)
* [vsphereVolume](#vspherevolume)
We welcome additional contributions.
### awsElasticBlockStore
### awsElasticBlockStore {#awselasticblockstore}
An `awsElasticBlockStore` volume mounts an Amazon Web Services (AWS) [EBS
Volume](http://aws.amazon.com/ebs/) into your Pod. Unlike
@ -148,20 +148,20 @@ spec:
fsType: ext4
```
### azureDisk
### azureDisk {#azuredisk}
A `azureDisk` is used to mount a Microsoft Azure [Data Disk](https://azure.microsoft.com/en-us/documentation/articles/virtual-machines-linux-about-disks-vhds/) into a Pod.
More details can be found [here](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/azure_disk/README.md).
### azureFile
### azureFile {#azurefile}
A `azureFile` is used to mount a Microsoft Azure File Volume (SMB 2.1 and 3.0)
into a Pod.
More details can be found [here](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/azure_file/README.md).
### cephfs
### cephfs {#cephfs}
A `cephfs` volume allows an existing CephFS volume to be
mounted into your Pod. Unlike `emptyDir`, which is erased when a Pod is
@ -176,7 +176,7 @@ writers simultaneously.
See the [CephFS example](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/cephfs/) for more details.
### configMap
### configMap {#configmap}
The [`configMap`](/docs/tasks/configure-pod-container/configure-pod-configmap/) resource
provides a way to inject configuration data into Pods.
@ -224,7 +224,7 @@ keyed with `log_level`.
receive ConfigMap updates.
{{< /note >}}
### downwardAPI
### downwardAPI {#downwardapi}
A `downwardAPI` volume is used to make downward API data available to applications.
It mounts a directory and writes the requested data in plain text files.
@ -236,7 +236,7 @@ receive Downward API updates.
See the [`downwardAPI` volume example](/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/) for more details.
### emptyDir
### emptyDir {#emptydir}
An `emptyDir` volume is first created when a Pod is assigned to a Node, and
exists as long as that Pod is running on that node. As the name says, it is
@ -283,7 +283,7 @@ spec:
emptyDir: {}
```
### fc (fibre channel)
### fc (fibre channel) {#fc}
An `fc` volume allows an existing fibre channel volume to be mounted in a Pod.
You can specify single or multiple target World Wide Names using the parameter
@ -296,7 +296,7 @@ targetWWNs expect that those WWNs are from multi-path connections.
See the [FC example](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/fibre_channel) for more details.
### flocker
### flocker {#flocker}
[Flocker](https://github.com/ClusterHQ/flocker) is an open-source clustered Container data volume manager. It provides management
and orchestration of data volumes backed by a variety of storage backends.
@ -313,7 +313,7 @@ can be "handed off" between Pods as required.
See the [Flocker example](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/flocker) for more details.
### gcePersistentDisk
### gcePersistentDisk {#gcepersistentdisk}
A `gcePersistentDisk` volume mounts a Google Compute Engine (GCE) [Persistent
Disk](http://cloud.google.com/compute/docs/disks) into your Pod. Unlike
@ -401,7 +401,11 @@ spec:
fsType: ext4
```
### gitRepo
### gitRepo (deprecated) {#gitrepo}
{{< warning >}}
**Warning:** The gitRepo volume type is deprecated. To provision a container with a git repo, mount an [EmptyDir](#emptydir) into an InitContainer that clones the repo using git, then mount the [EmptyDir](#emptydir) into the Pod's container.
{{< /warning >}}
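
A hedged sketch of that replacement pattern; the image and repository URL are placeholders:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: git-clone-sketch
spec:
  volumes:
  - name: repo
    emptyDir: {}
  initContainers:
  - name: clone
    image: alpine/git                 # placeholder image that provides git
    args: ["clone", "--", "https://example.com/my/repo.git", "/repo"]
    volumeMounts:
    - name: repo
      mountPath: /repo
  containers:
  - name: app
    image: busybox
    command: ["sleep", "3600"]
    volumeMounts:
    - name: repo
      mountPath: /repo               # cloned repo is visible to the main container
```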
A `gitRepo` volume is an example of what can be done as a volume plugin. It
mounts an empty directory and clones a git repository into it for your Pod to
@ -429,7 +433,7 @@ spec:
revision: "22f1d8406d464b0c0874075539c1f2e96c253775"
```
### glusterfs
### glusterfs {#glusterfs}
A `glusterfs` volume allows a [Glusterfs](http://www.gluster.org) (an open
source networked filesystem) volume to be mounted into your Pod. Unlike
@ -445,7 +449,7 @@ simultaneously.
See the [GlusterFS example](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/glusterfs) for more details.
### hostPath
### hostPath {#hostpath}
A `hostPath` volume mounts a file or directory from the host node's filesystem
into your Pod. This is not something that most Pods will need, but it offers a
@ -509,7 +513,7 @@ spec:
type: Directory
```
### iscsi
### iscsi {#iscsi}
An `iscsi` volume allows an existing iSCSI (SCSI over IP) volume to be mounted
into your Pod. Unlike `emptyDir`, which is erased when a Pod is removed, the
@ -529,7 +533,7 @@ simultaneous writers allowed.
See the [iSCSI example](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/iscsi) for more details.
### local
### local {#local}
{{< feature-state for_k8s_version="v1.10" state="beta" >}}
@ -612,7 +616,7 @@ user if the external static provisioner is not used to manage the volume
lifecycle.
{{< /note >}}
### nfs
### nfs {#nfs}
An `nfs` volume allows an existing NFS (Network File System) share to be
mounted into your Pod. Unlike `emptyDir`, which is erased when a Pod is
@ -627,7 +631,7 @@ writers simultaneously.
See the [NFS example](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/nfs) for more details.
### persistentVolumeClaim
### persistentVolumeClaim {#persistentvolumeclaim}
A `persistentVolumeClaim` volume is used to mount a
[PersistentVolume](/docs/concepts/storage/persistent-volumes/) into a Pod. PersistentVolumes are a
@ -637,7 +641,7 @@ iSCSI volume) without knowing the details of the particular cloud environment.
See the [PersistentVolumes example](/docs/concepts/storage/persistent-volumes/) for more
details.
### projected
### projected {#projected}
A `projected` volume maps several existing volume sources into the same directory.
@ -772,16 +776,17 @@ in the audience of the token, and otherwise should reject the token. This field
is optional and it defaults to the identifier of the API server.
The `expirationSeconds` is the expected duration of validity of the service account
token. It defaults to 1 hour and must be at least 10 minutes (600 seconds).
The `path` field specifies a relative path to the mount point of the projected
volume.
token. It defaults to 1 hour and must be at least 10 minutes (600 seconds). An administrator
can also limit its maximum value by specifying the `--service-account-max-token-expiration`
option for the API server. The `path` field specifies a relative path to the mount point
of the projected volume.
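
A condensed sketch of those fields in a projected volume source; the `vault` audience and the mount path are just examples:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: sa-token-sketch
spec:
  containers:
  - name: app
    image: busybox
    command: ["sleep", "3600"]
    volumeMounts:
    - name: token
      mountPath: /var/run/secrets/tokens
  volumes:
  - name: token
    projected:
      sources:
      - serviceAccountToken:
          audience: vault           # example audience
          expirationSeconds: 7200   # must be at least 600
          path: vault-token         # relative to the volume mount point
```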
{{< note >}}
**Note:** A Container using a projected volume source as a [subPath](#using-subpath) volume mount will not
receive updates for those volume sources.
{{< /note >}}
### portworxVolume
### portworxVolume {#portworxvolume}
A `portworxVolume` is an elastic block storage layer that runs hyperconverged with
Kubernetes. Portworx fingerprints storage in a server, tiers based on capabilities,
@ -819,7 +824,7 @@ before using it in the Pod.
More details and examples can be found [here](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/portworx/README.md).
### quobyte
### quobyte {#quobyte}
A `quobyte` volume allows an existing [Quobyte](http://www.quobyte.com) volume to
be mounted into your Pod.
@ -831,7 +836,7 @@ created before you can use it.
See the [Quobyte example](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/quobyte) for more details.
### rbd
### rbd {#rbd}
An `rbd` volume allows a [Rados Block
Device](http://ceph.com/docs/master/rbd/rbd/) volume to be mounted into your
@ -852,7 +857,7 @@ simultaneous writers allowed.
See the [RBD example](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/rbd) for more details.
### scaleIO
### scaleIO {#scaleio}
ScaleIO is a software-based storage platform that can use existing hardware to
create clusters of scalable shared block networked storage. The `scaleIO` volume
@ -894,7 +899,7 @@ spec:
For further detail, please the see the [ScaleIO examples](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/scaleio).
### secret
### secret {#secret}
A `secret` volume is used to pass sensitive information, such as passwords, to
Pods. You can store secrets in the Kubernetes API and mount them as files for
@ -913,7 +918,7 @@ receive Secret updates.
Secrets are described in more detail [here](/docs/user-guide/secrets).
### storageOS
### storageOS {#storageos}
A `storageos` volume allows an existing [StorageOS](https://www.storageos.com)
volume to be mounted into your Pod.
@ -966,7 +971,7 @@ spec:
For more information including Dynamic Provisioning and Persistent Volume Claims, please see the
[StorageOS examples](https://github.com/kubernetes/examples/blob/master/staging/volumes/storageos).
### vsphereVolume
### vsphereVolume {#vspherevolume}
{{< note >}}
**Prerequisite:** Kubernetes with vSphere Cloud Provider configured. For cloudprovider

@ -10,10 +10,7 @@ weight: 80
{{% capture overview %}}
A _Cron Job_ manages time based [Jobs](/docs/concepts/workloads/controllers/jobs-run-to-completion/), namely:
* Once at a specified point in time
* Repeatedly at a specified point in time
A _Cron Job_ creates [Jobs](/docs/concepts/workloads/controllers/jobs-run-to-completion/) on a time-based schedule.
One CronJob object is like one line of a _crontab_ (cron table) file. It runs a job periodically
on a given schedule, written in [Cron](https://en.wikipedia.org/wiki/Cron) format.
@ -36,10 +33,16 @@ If `startingDeadlineSeconds` is set to a large value or left unset (the default)
and if `concurrencyPolicy` is set to `Allow`, the jobs will always run
at least once.
Jobs may fail to run if the CronJob controller is not running or broken for a
span of time from before the start time of the CronJob to start time plus
`startingDeadlineSeconds`, or if the span covers multiple start times and
`concurrencyPolicy` does not allow concurrency.
For every CronJob, the CronJob controller checks how many schedules it missed in the duration from its last scheduled time until now. If there are more than 100 missed schedules, then it does not start the job and logs the error
````
Cannot determine if job needs to be started. Too many missed start time (> 100). Set or decrease .spec.startingDeadlineSeconds or check clock skew.
````
It is important to note that if the `startingDeadlineSeconds` field is set (not `nil`), the controller counts how many missed jobs occurred from the value of `startingDeadlineSeconds` until now rather than from the last scheduled time until now. For example, if `startingDeadlineSeconds` is `200`, the controller counts how many missed jobs occurred in the last 200 seconds.
A CronJob is counted as missed if it has failed to be created at its scheduled time. For example, if `concurrencyPolicy` is set to `Forbid` and a CronJob was attempted to be scheduled when there was a previous schedule still running, then it would count as missed.
For example, suppose a cron job is set to start at exactly `08:30:00` and its
`startingDeadlineSeconds` is set to 10, if the CronJob controller happens to
be down from `08:29:00` to `08:42:00`, the job will not start.
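
A sketch of a CronJob matching that example (08:30 daily, a 10 second starting deadline, and no concurrent runs); the job payload is a placeholder:

```yaml
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: hello
spec:
  schedule: "30 8 * * *"          # 08:30 every day
  startingDeadlineSeconds: 10
  concurrencyPolicy: Forbid
  jobTemplate:
    spec:
      template:
        spec:
          restartPolicy: OnFailure
          containers:
          - name: hello
            image: busybox
            command: ["/bin/date"]  # placeholder workload
```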

@ -153,7 +153,7 @@ nodeAffinity:
In addition, `node.kubernetes.io/unschedulable:NoSchedule` toleration is added
automatically to DaemonSet Pods. The DaemonSet controller ignores
`unschedulable` Nodes when scheduling DaemonSet Pods. You must enable
`TaintModesByCondition` to ensure that the default scheduler behaves the same
`TaintNodesByCondition` to ensure that the default scheduler behaves the same
way and schedules DaemonSet pods on `unschedulable` nodes.
When this feature and `TaintNodesByCondition` are enabled together, if DaemonSet

@ -356,9 +356,9 @@ nginx-deployment-2035384211) and new replicas (nginx-deployment-3066724191) are
```shell
$ kubectl get rs
NAME DESIRED CURRENT READY AGE
nginx-deployment-1564180365 2 2 0 25s
nginx-deployment-1564180365 2 2 2 25s
nginx-deployment-2035384211 0 0 0 36s
nginx-deployment-3066724191 2 2 2 6s
nginx-deployment-3066724191 2 2 0 6s
```
Looking at the Pods created, you will see that the 2 Pods created by new ReplicaSet are stuck in an image pull loop.

@ -9,9 +9,6 @@ weight: 60
The role of the Kubernetes garbage collector is to delete certain objects
that once had an owner, but no longer have an owner.
**Note**: Garbage collection is a beta feature and is enabled by default in
Kubernetes version 1.4 and later.
{{% /capture %}}
@ -89,7 +86,7 @@ the owner object.
Note that in the "foregroundDeletion", only dependents with
`ownerReference.blockOwnerDeletion` block the deletion of the owner object.
Kubernetes version 1.7 added an [admission controller](/docs/admin/admission-controllers/#ownerreferencespermissionenforcement) that controls user access to set
Kubernetes version 1.7 added an [admission controller](/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement) that controls user access to set
`blockOwnerDeletion` to true based on delete permissions on the owner object, so that
unauthorized dependents cannot delay deletion of an owner object.

@ -45,7 +45,6 @@ provides a set of stateless replicas. Controllers such as
## Limitations
* StatefulSet was a beta resource prior to 1.9 and not available in any Kubernetes release prior to 1.5.
* As with all alpha/beta resources, you can disable StatefulSet through the `--runtime-config` option passed to the apiserver.
* The storage for a given Pod must either be provisioned by a [PersistentVolume Provisioner](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/persistent-volume-provisioning/README.md) based on the requested `storage class`, or pre-provisioned by an admin.
* Deleting and/or scaling a StatefulSet down will *not* delete the volumes associated with the StatefulSet. This is done to ensure data safety, which is generally more valuable than an automatic purge of all related StatefulSet resources.
* StatefulSets currently require a [Headless Service](/docs/concepts/services-networking/service/#headless-services) to be responsible for the network identity of the Pods. You are responsible for creating this Service.

@ -55,23 +55,23 @@ array has six possible fields:
* The `message` field is a human-readable message indicating details
about the transition.
* The `reason` field is a unique, one-word, CamelCase reason for the condition's last transition.
A Pod has a PodStatus, which has an array of
[PodConditions](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podcondition-v1-core). Each element
of the PodCondition array has a `type` field and a `status` field. The `type`
field is a string with the following possible values:
* The `status` field is a string, with possible values "`True`", "`False`", and "`Unknown`".
* The `type` field is a string with the following possible values:
* `PodScheduled`: the Pod has been scheduled to a node;
* `Ready`: the Pod is able to serve requests and should be added to the load
balancing pools of all matching Services;
* `Initialized`: all [init containers](/docs/concepts/workloads/pods/init-containers)
have started successfully;
* `Unschedulable`: the scheduler cannot schedule the Pod right now, for example
due to lacking of resources or other constraints;
* `ContainersReady`: all containers in the Pod are ready.
* `PodScheduled`: the Pod has been scheduled to a node;
* `Ready`: the Pod is able to serve requests and should be added to the load
balancing pools of all matching Services;
* `Initialized`: all [init containers](/docs/concepts/workloads/pods/init-containers)
have started successfully;
* `Unschedulable`: the scheduler cannot schedule the Pod right now, for example
due to lacking of resources or other constraints;
* `ContainersReady`: all containers in the Pod are ready.
The `status` field is a string, with possible values "`True`", "`False`", and
"`Unknown`".
## Container probes

@ -0,0 +1,2 @@
Tools for Kubernetes docs contributors. View `README.md` files in
subdirectories for more info.

@ -0,0 +1,51 @@
# Snippets for Atom
Snippets are bits of text that get inserted into your editor, to save typing and
reduce syntax errors. The snippets provided in `atom-snippets.cson` are scoped to
only work on Markdown files within Atom.
## Installation
Copy the contents of the `atom-snippets.cson` file into your existing
`~/.atom/snippets.cson`. **Do not replace your existing file.**
You do not need to restart Atom.
## Usage
Have a look through `atom-snippets.cson` and note the titles and `prefix` values
of the snippets.
You can trigger a given snippet in one of two ways:
- By typing the snippet's `prefix` and pressing the `<TAB>` key
- By searching for the snippet's title in **Packages / Snippets / Available**
For example, open a Markdown file and type `anote` and press `<TAB>`. A blank
note is added, with the correct Hugo shortcodes.
A snippet can insert a single line or multiple lines of text. Some snippets
have placeholder values. To get to the next placeholder, press `<TAB>` again.
Some of the snippets only insert partially-formed Markdown or Hugo syntax.
For instance, `coverview` inserts the start of a concept overview tag, while
`cclose` inserts a close-capture tag. This is because every type of capture
needs a capture-close tab.
## Creating new topics using snippets
To create a new concept, task, or tutorial from a blank file, use one of the
following:
- `newconcept`
- `newtask`
- `newtutorial`
Placeholder text is included.
## Submitting new snippets
1. Develop the snippet locally and verify that it works as expected.
2. Copy the snippet's code into the `atom-snippets.cson` file on GitHub. Raise a
   pull request, and ask for review from another Atom user in #sig-docs on
   Kubernetes Slack.

@ -0,0 +1,226 @@
# Your snippets
#
# Atom snippets allow you to enter a simple prefix in the editor and hit tab to
# expand the prefix into a larger code block with templated values.
#
# You can create a new snippet in this file by typing "snip" and then hitting
# tab.
#
# An example CoffeeScript snippet to expand log to console.log:
#
# '.source.coffee':
# 'Console log':
# 'prefix': 'log'
# 'body': 'console.log $1'
#
# Each scope (e.g. '.source.coffee' above) can only be declared once.
#
# This file uses CoffeeScript Object Notation (CSON).
# If you are unfamiliar with CSON, you can read more about it in the
# Atom Flight Manual:
# http://flight-manual.atom.io/using-atom/sections/basic-customization/#_cson
'.source.gfm':
# Capture variables for concept template
# For full concept template see 'newconcept' below
'Insert concept template':
'prefix': 'ctemplate'
'body': 'content_template: templates/concept'
'Insert concept overview':
'prefix': 'coverview'
'body': '{{% capture overview %}}'
'Insert concept body':
'prefix': 'cbody'
'body': '{{% capture body %}}'
'Insert concept whatsnext':
'prefix': 'cnext'
'body': '{{% capture whatsnext %}}'
# Capture variables for task template
# For full task template see 'newtask' below
'Insert task template':
'prefix': 'ttemplate'
'body': 'content_template: templates/task'
'Insert task overview':
'prefix': 'toverview'
'body': '{{% capture overview %}}'
'Insert task prerequisites':
'prefix': 'tprereq'
'body': """
{{% capture prerequisites %}}
{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
{{% /capture %}}
"""
'Insert task steps':
'prefix': 'tsteps'
'body': '{{% capture steps %}}'
'Insert task discussion':
'prefix': 'tdiscuss'
'body': '{{% capture discussion %}}'
# Capture variables for tutorial template
# For full tutorial template see 'newtutorial' below
'Insert tutorial template':
'prefix': 'tutemplate'
'body': 'content_template: templates/tutorial'
'Insert tutorial overview':
'prefix': 'tuoverview'
'body': '{{% capture overview %}}'
'Insert tutorial prerequisites':
'prefix': 'tuprereq'
'body': '{{% capture prerequisites %}}'
'Insert tutorial objectives':
'prefix': 'tuobjectives'
'body': '{{% capture objectives %}}'
'Insert tutorial lesson content':
'prefix': 'tulesson'
'body': '{{% capture lessoncontent %}}'
'Insert tutorial whatsnext':
'prefix': 'tunext'
'body': '{{% capture whatsnext %}}'
'Close capture':
'prefix': 'ccapture'
'body': '{{% /capture %}}'
'Insert note':
'prefix': 'anote'
'body': """
{{< note >}}
**Note**: $1
{{< /note >}}
"""
# Admonitions
'Insert caution':
'prefix': 'acaution'
'body': """
{{< caution >}}
**Caution**: $1
{{< /caution >}}
"""
'Insert warning':
'prefix': 'awarning'
'body': """
{{< warning >}}
**Warning**: $1
{{< /warning >}}
"""
# Misc one-liners
'Insert TOC':
'prefix': 'toc'
'body': '{{< toc >}}'
'Insert code from file':
'prefix': 'codefile'
'body': '{{< code file="$1" >}}'
'Insert feature state':
'prefix': 'fstate'
'body': '{{< feature-state for_k8s_version="$1" state="$2" >}}'
'Insert figure':
'prefix': 'fig'
'body': '{{< figure src="$1" title="$2" alt="$3" caption="$4" >}}'
'Insert Youtube link':
'prefix': 'yt'
'body': '{{< youtube $1 >}}'
# Full concept template
'Create new concept':
'prefix': 'newconcept'
'body': """
---
reviewers:
- ${1:"github-id-or-group"}
title: ${2:"topic-title"}
content_template: templates/concept
---
{{% capture overview %}}
${3:"overview-content"}
{{% /capture %}}
{{< toc >}}
{{% capture body %}}
${4:"h2-heading-per-subtopic"}
{{% /capture %}}
{{% capture whatsnext %}}
${5:"next-steps-or-delete"}
{{% /capture %}}
"""
# Full task template
'Create new task':
'prefix': 'newtask'
'body': """
---
reviewers:
- ${1:"github-id-or-group"}
title: ${2:"topic-title"}
content_template: templates/task
---
{{% capture overview %}}
${3:"overview-content"}
{{% /capture %}}
{{< toc >}}
{{% capture prerequisites %}}
{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
${4:"additional-prereqs-or-delete"}
{{% /capture %}}
{{% capture steps %}}
${5:"h2-heading-per-step"}
{{% /capture %}}
{{% capture discussion %}}
${6:"task-discussion-or-delete"}
{{% /capture %}}
"""
# Full tutorial template
'Create new tutorial':
'prefix': 'newtutorial'
'body': """
---
reviewers:
- ${1:"github-id-or-group"}
title: ${2:"topic-title"}
content_template: templates/tutorial
---
{{% capture overview %}}
${3:"overview-content"}
{{% /capture %}}
{{< toc >}}
{{% capture prerequisites %}}
{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
${4:"additional-prereqs-or-delete"}
{{% /capture %}}
{{% capture objectives %}}
${5:"tutorial-objectives"}
{{% /capture %}}
{{% capture lessoncontent %}}
${6:"lesson-content"}
{{% /capture %}}
{{% capture whatsnext %}}
${7:"next-steps-or-delete"}
{{% /capture %}}
"""

@ -120,10 +120,7 @@ KUBELET_ADDRESS="--address=0.0.0.0"
KUBELET_HOSTNAME="--hostname-override=fed-node"
# location of the api-server
KUBELET_ARGS="--cgroup-driver=systemd --kubeconfig=/etc/kubernetes/master-kubeconfig.yaml --require-kubeconfig"
# Add your own!
KUBELET_ARGS=""
KUBELET_ARGS="--cgroup-driver=systemd --kubeconfig=/etc/kubernetes/master-kubeconfig.yaml"
```
