commit c5181466f1
				|  | @ -5,6 +5,9 @@ charset = utf-8 | |||
| max_line_length = 80 | ||||
| trim_trailing_whitespace = true | ||||
| 
 | ||||
| [*.md] | ||||
| trim_trailing_whitespace = false | ||||
| 
 | ||||
| [*.{css,html,js,json,sass,md,mmark,toml,yaml}] | ||||
| indent_style = space | ||||
| indent_size = 2 | ||||
|  |  | |||
|  | @ -0,0 +1,7 @@ | |||
| # See the OWNERS docs at https://go.k8s.io/owners | ||||
| 
 | ||||
| reviewers: | ||||
| - sig-docs-en-reviews # Defined in OWNERS_ALIASES | ||||
| 
 | ||||
| approvers: | ||||
| - sig-docs-en-owners # Defined in OWNERS_ALIASES | ||||
|  | @ -0,0 +1,11 @@ | |||
| # See the OWNERS docs at https://go.k8s.io/owners | ||||
| 
 | ||||
| # When modifying this file, consider the security implications of | ||||
| # allowing listed reviewers / approvals to modify or remove any | ||||
| # configured GitHub Actions. | ||||
| 
 | ||||
| reviewers: | ||||
| - sig-docs-leads | ||||
| 
 | ||||
| approvers: | ||||
| - sig-docs-leads | ||||
|  | @ -16,7 +16,7 @@ RUN apk add --no-cache \ | |||
|     build-base \ | ||||
|     libc6-compat \ | ||||
|     npm && \ | ||||
|     npm install -G autoprefixer postcss-cli | ||||
|     npm install -D autoprefixer postcss-cli | ||||
| 
 | ||||
| ARG HUGO_VERSION | ||||
| 
 | ||||
|  |  | |||
							
								
								
									
Makefile (2 lines changed)
								
								
								
								
							|  | @ -65,7 +65,7 @@ container-image: | |||
| 		--build-arg HUGO_VERSION=$(HUGO_VERSION) | ||||
| 
 | ||||
| container-build: module-check | ||||
| 	$(CONTAINER_RUN) --read-only $(CONTAINER_IMAGE) hugo --minify | ||||
| 	$(CONTAINER_RUN) --read-only --mount type=tmpfs,destination=/tmp,tmpfs-mode=01777 $(CONTAINER_IMAGE) sh -c "npm ci && hugo --minify" | ||||
| 
 | ||||
| container-serve: module-check | ||||
| 	$(CONTAINER_RUN) --read-only --mount type=tmpfs,destination=/tmp,tmpfs-mode=01777 -p 1313:1313 $(CONTAINER_IMAGE) hugo server --buildFuture --bind 0.0.0.0 --destination /tmp/hugo --cleanDestinationDir | ||||
|  |  | |||
|  | @ -120,10 +120,11 @@ aliases: | |||
|     - bells17 | ||||
|     # cstoku | ||||
|     - inductor | ||||
|     - kakts | ||||
|     - makocchi-git | ||||
|     # MasayaAoyama | ||||
|     - nasa9084 | ||||
|     - oke-py | ||||
|     # oke-py | ||||
|   sig-docs-ko-owners: # Admins for Korean content | ||||
|     - ClaudiaJKang | ||||
|     - gochist | ||||
|  |  | |||
							
								
								
									
README-ja.md (86 lines changed)
								
								
								
								
							|  | @ -4,34 +4,100 @@ | |||
| 
 | ||||
| このリポジトリには、[KubernetesのWebサイトとドキュメント](https://kubernetes.io/)をビルドするために必要な全アセットが格納されています。貢献に興味を持っていただきありがとうございます! | ||||
| 
 | ||||
| ## Hugoを使ってローカル環境でWebサイトを動かす | ||||
| # リポジトリの使い方 | ||||
| 
 | ||||
| Hugoのインストール方法については[Hugoの公式ドキュメント](https://gohugo.io/getting-started/installing/)をご覧ください。このとき、[`netlify.toml`](netlify.toml#L10)ファイルに記述されている`HUGO_VERSION`と同じバージョンをインストールするようにしてください。 | ||||
| Hugo(Extended version)を使用してWebサイトをローカルで実行することも、コンテナランタイムで実行することもできます。コンテナランタイムを使用することを強くお勧めします。これにより、本番Webサイトとのデプロイメントの一貫性が得られます。 | ||||
| 
 | ||||
| Hugoがインストールできたら、以下のコマンドを使ってWebサイトをローカル上で動かすことができます: | ||||
| ## 前提条件 | ||||
| 
 | ||||
| ```bash | ||||
| このリポジトリを使用するには、以下をローカルにインストールする必要があります。 | ||||
| 
 | ||||
| - [npm](https://www.npmjs.com/) | ||||
| - [Go](https://golang.org/) | ||||
| - [Hugo(Extended version)](https://gohugo.io/) | ||||
| - [Docker](https://www.docker.com/)などのコンテナランタイム | ||||
| 
 | ||||
| 開始する前に、依存関係をインストールしてください。リポジトリのクローンを作成し、ディレクトリに移動します。 | ||||
| 
 | ||||
| ``` | ||||
| git clone https://github.com/kubernetes/website.git | ||||
| cd website | ||||
| ``` | ||||
| 
 | ||||
| KubernetesのWebサイトではDocsyというHugoテーマを使用しています。コンテナでWebサイトを実行する場合でも、以下を実行して、サブモジュールおよびその他の開発依存関係をプルすることを強くお勧めします。 | ||||
| 
 | ||||
| ``` | ||||
| # pull in the Docsy submodule | ||||
| git submodule update --init --recursive --depth 1 | ||||
| ``` | ||||
| 
 | ||||
| **注意:** Kubernetesのウェブサイトでは[DocsyというHugoのテーマ](https://github.com/google/docsy#readme)を使用しています。リポジトリを更新していない場合、 `website/themes/docsy`ディレクトリは空です。 このサイトはテーマのローカルコピーなしでは構築できません。 | ||||
| ## コンテナを使ってウェブサイトを動かす | ||||
| 
 | ||||
| テーマをアップデートするには以下のコマンドを実行します: | ||||
| コンテナ内でサイトを構築するには、以下を実行してコンテナイメージを構築し、実行します。 | ||||
| 
 | ||||
| ```bash | ||||
| git submodule update --init --recursive --depth 1 | ||||
| ``` | ||||
| make container-image | ||||
| make container-serve | ||||
| ``` | ||||
| 
 | ||||
| サイトをローカルでビルドしてテストするには以下のコマンドを実行します: | ||||
| お使いのブラウザにて http://localhost:1313 にアクセスしてください。リポジトリ内のソースファイルに変更を加えると、HugoがWebサイトの内容を更新してブラウザに反映します。 | ||||
| 
 | ||||
| ## Hugoを使ってローカル環境でWebサイトを動かす | ||||
| 
 | ||||
| [`netlify.toml`](netlify.toml#L10)ファイルに記述されている`HUGO_VERSION`と同じExtended versionのHugoをインストールするようにしてください。 | ||||
| 
 | ||||
| ローカルでサイトを構築してテストするには、次のコマンドを実行します。 | ||||
| 
 | ||||
| ```bash | ||||
| hugo server --buildFuture | ||||
| # install dependencies | ||||
| npm ci | ||||
| make serve | ||||
| ``` | ||||
| 
 | ||||
| これで、Hugoのサーバーが1313番ポートを使って開始します。お使いのブラウザにて http://localhost:1313 にアクセスしてください。リポジトリ内のソースファイルに変更を加えると、HugoがWebサイトの内容を更新してブラウザに反映します。 | ||||
| 
 | ||||
| ## トラブルシューティング | ||||
| 
 | ||||
| ### error: failed to transform resource: TOCSS: failed to transform "scss/main.scss" (text/x-scss): this feature is not available in your current Hugo version | ||||
| 
 | ||||
| Hugoは、技術的な理由から2種類のバイナリがリリースされています。現在のウェブサイトは**Hugo Extended**バージョンのみに基づいて運営されています。[リリースページ](https://github.com/gohugoio/hugo/releases)で名前に「extended」が含まれるアーカイブを探します。確認するには、`hugo version`を実行し、「extended」という単語を探します。 | ||||
| 
 | ||||
| ### macOSにてtoo many open filesというエラーが表示される | ||||
| 
 | ||||
| macOS上で`make serve`を実行した際に以下のエラーが表示される場合 | ||||
| 
 | ||||
| ``` | ||||
| ERROR 2020/08/01 19:09:18 Error: listen tcp 127.0.0.1:1313: socket: too many open files | ||||
| make: *** [serve] Error 1 | ||||
| ``` | ||||
| 
 | ||||
| OS上で同時に開けるファイルの上限を確認してください。 | ||||
| 
 | ||||
| `launchctl limit maxfiles` | ||||
| 
 | ||||
| 続いて、以下のコマンドを実行します(https://gist.github.com/tombigel/d503800a282fcadbee14b537735d202c より引用)。 | ||||
| 
 | ||||
| ``` | ||||
| #!/bin/sh | ||||
| 
 | ||||
| # These are the original gist links, linking to my gists now. | ||||
| # curl -O https://gist.githubusercontent.com/a2ikm/761c2ab02b7b3935679e55af5d81786a/raw/ab644cb92f216c019a2f032bbf25e258b01d87f9/limit.maxfiles.plist | ||||
| # curl -O https://gist.githubusercontent.com/a2ikm/761c2ab02b7b3935679e55af5d81786a/raw/ab644cb92f216c019a2f032bbf25e258b01d87f9/limit.maxproc.plist | ||||
| 
 | ||||
| curl -O https://gist.githubusercontent.com/tombigel/d503800a282fcadbee14b537735d202c/raw/ed73cacf82906fdde59976a0c8248cce8b44f906/limit.maxfiles.plist | ||||
| curl -O https://gist.githubusercontent.com/tombigel/d503800a282fcadbee14b537735d202c/raw/ed73cacf82906fdde59976a0c8248cce8b44f906/limit.maxproc.plist | ||||
| 
 | ||||
| sudo mv limit.maxfiles.plist /Library/LaunchDaemons | ||||
| sudo mv limit.maxproc.plist /Library/LaunchDaemons | ||||
| 
 | ||||
| sudo chown root:wheel /Library/LaunchDaemons/limit.maxfiles.plist | ||||
| sudo chown root:wheel /Library/LaunchDaemons/limit.maxproc.plist | ||||
| 
 | ||||
| sudo launchctl load -w /Library/LaunchDaemons/limit.maxfiles.plist | ||||
| ``` | ||||
| 
 | ||||
| こちらはmacOSのCatalinaとMojaveで動作を確認しています。 | ||||
| 
 | ||||
| ## SIG Docsに参加する | ||||
| 
 | ||||
| [コミュニティのページ](https://github.com/kubernetes/community/tree/master/sig-docs#meetings)をご覧になることで、SIG Docs Kubernetesコミュニティとの関わり方を学ぶことができます。 | ||||
|  |  | |||
|  | @ -17,7 +17,7 @@ Więcej informacji na temat współpracy przy tworzeniu dokumentacji znajdziesz | |||
| 
 | ||||
| * [Jak rozpocząć współpracę](https://kubernetes.io/docs/contribute/start/) | ||||
| * [Podgląd wprowadzanych zmian w dokumentacji](http://kubernetes.io/docs/contribute/intermediate#view-your-changes-locally) | ||||
| * [Szablony stron](http://kubernetes.io/docs/contribute/style/page-templates/) | ||||
| * [Szablony stron](https://kubernetes.io/docs/contribute/style/page-content-types/) | ||||
| * [Styl pisania dokumentacji](http://kubernetes.io/docs/contribute/style/style-guide/) | ||||
| * [Lokalizacja dokumentacji Kubernetes](https://kubernetes.io/docs/contribute/localization/) | ||||
| 
 | ||||
|  |  | |||
|  | @ -15,7 +15,7 @@ Một khi Pull Request của bạn được tạo, reviewer sẽ chịu trách n | |||
| 
 | ||||
| * [Bắt đầu đóng góp](https://kubernetes.io/docs/contribute/start/) | ||||
| * [Các giai đoạn thay đổi tài liệu](http://kubernetes.io/docs/contribute/intermediate#view-your-changes-locally) | ||||
| * [Sử dụng các trang templates](http://kubernetes.io/docs/contribute/style/page-templates/) | ||||
| * [Sử dụng các trang templates](https://kubernetes.io/docs/contribute/style/page-content-types/) | ||||
| * [Hướng dẫn biểu mẫu tài liệu](http://kubernetes.io/docs/contribute/style/style-guide/) | ||||
| * [Địa phương hóa tài liệu Kubernetes](https://kubernetes.io/docs/contribute/localization/) | ||||
| 
 | ||||
|  |  | |||
							
								
								
									
README-zh.md (157 lines changed)
								
								
								
								
							|  | @ -13,52 +13,94 @@ This repository contains the assets required to build the [Kubernetes website an | |||
| 我们非常高兴您想要参与贡献! | ||||
| 
 | ||||
| <!-- | ||||
| ## Running the website locally using Hugo | ||||
| # Using this repository | ||||
| 
 | ||||
| See the [official Hugo documentation](https://gohugo.io/getting-started/installing/) for Hugo installation instructions. Make sure to install the Hugo extended version specified by the `HUGO_VERSION` environment variable in the [`netlify.toml`](netlify.toml#L10) file. | ||||
| You can run the website locally using Hugo (Extended version), or you can run it in a container runtime. We strongly recommend using the container runtime, as it gives deployment consistency with the live website. | ||||
| --> | ||||
| ## 在本地使用 Hugo 来运行网站 | ||||
| ## 使用这个仓库 | ||||
| 
 | ||||
| 请参考 [Hugo 的官方文档](https://gohugo.io/getting-started/installing/)了解 Hugo 的安装指令。 | ||||
| 请确保安装的是 [`netlify.toml`](netlify.toml#L10) 文件中环境变量 `HUGO_VERSION` 所指定的 | ||||
| Hugo 扩展版本。 | ||||
| 可以使用 Hugo(扩展版)在本地运行网站,也可以在容器中运行它。强烈建议使用容器,因为这样可以和在线网站的部署保持一致。 | ||||
| 
 | ||||
| <!-- | ||||
| Before building the site, clone the Kubernetes website repository: | ||||
| --> | ||||
| 在构造网站之前,先克隆 Kubernetes website 仓库: | ||||
| ## Prerequisites | ||||
| 
 | ||||
| ```bash | ||||
| To use this repository, you need the following installed locally: | ||||
| 
 | ||||
| - [npm](https://www.npmjs.com/) | ||||
| - [Go](https://golang.org/) | ||||
| - [Hugo (Extended version)](https://gohugo.io/) | ||||
| - A container runtime, like [Docker](https://www.docker.com/). | ||||
| 
 | ||||
| --> | ||||
| ## 前提条件 | ||||
| 
 | ||||
| 使用这个仓库,需要在本地安装以下软件: | ||||
| 
 | ||||
| - [npm](https://www.npmjs.com/) | ||||
| - [Go](https://golang.org/) | ||||
| - [Hugo (Extended version)](https://gohugo.io/) | ||||
| - 容器运行时,比如 [Docker](https://www.docker.com/). | ||||
| 
 | ||||
| <!-- | ||||
| Before you start, install the dependencies. Clone the repository and navigate to the directory: | ||||
| --> | ||||
| 开始前,先安装这些依赖。克隆本仓库并进入对应目录: | ||||
| 
 | ||||
| ``` | ||||
| git clone https://github.com/kubernetes/website.git | ||||
| cd website | ||||
| git submodule update --init --recursive | ||||
| ``` | ||||
| 
 | ||||
| <!-- | ||||
| **Note:**  The Kubernetes website deploys the [Docsy Hugo theme](https://github.com/google/docsy#readme). | ||||
| If you have not updated your website repository, the `website/themes/docsy` directory is empty. | ||||
| The site cannot build without a local copy of the theme. | ||||
| 
 | ||||
| Update the website theme: | ||||
| The Kubernetes website uses the [Docsy Hugo theme](https://github.com/google/docsy#readme). Even if you plan to run the website in a container, we strongly recommend pulling in the submodule and other development dependencies by running the following: | ||||
| --> | ||||
| **注意:** Kubernetes 网站要部署 [Docsy Hugo 主题](https://github.com/google/docsy#readme). | ||||
| 如果你还没有更新你本地的 website 仓库,目录 `website/themes/docsy` | ||||
| 会是空目录。 | ||||
| 在本地没有主题副本的情况下,网站无法正常构造。 | ||||
| 
 | ||||
| 使用下面的命令更新网站主题: | ||||
| Kubernetes 网站使用的是 [Docsy Hugo 主题](https://github.com/google/docsy#readme)。 即使你打算在容器中运行网站,我们也强烈建议你通过运行以下命令来引入子模块和其他开发依赖项: | ||||
| 
 | ||||
| ```bash | ||||
| ``` | ||||
| # pull in the Docsy submodule | ||||
| git submodule update --init --recursive --depth 1 | ||||
| ``` | ||||
| 
 | ||||
| <!-- | ||||
| ## Running the website using a container | ||||
| 
 | ||||
| To build the site in a container, run the following to build the container image and run it: | ||||
| 
 | ||||
| --> | ||||
| ## 在容器中运行网站 | ||||
| 
 | ||||
| 要在容器中构建网站,请通过以下命令来构建容器镜像并运行: | ||||
| 
 | ||||
| ``` | ||||
| make container-image | ||||
| make container-serve | ||||
| ``` | ||||
| 
 | ||||
| <!-- | ||||
| Open up your browser to http://localhost:1313 to view the website. As you make changes to the source files, Hugo updates the website and forces a browser refresh. | ||||
| --> | ||||
| 启动浏览器,打开 http://localhost:1313 来查看网站。 | ||||
| 当你对源文件作出修改时,Hugo 会更新网站并强制浏览器执行刷新操作。 | ||||
| 
 | ||||
| <!-- | ||||
| ## Running the website locally using Hugo | ||||
| 
 | ||||
| Make sure to install the Hugo extended version specified by the `HUGO_VERSION` environment variable in the [`netlify.toml`](netlify.toml#L10) file. | ||||
| 
 | ||||
| To build and test the site locally, run: | ||||
| --> | ||||
| ## 在本地使用 Hugo 来运行网站 | ||||
| 
 | ||||
| 请确保安装的是 [`netlify.toml`](netlify.toml#L10) 文件中环境变量 `HUGO_VERSION` 所指定的 | ||||
| Hugo 扩展版本。 | ||||
| 
 | ||||
| 若要在本地构造和测试网站,请运行: | ||||
| 
 | ||||
| ```bash | ||||
| hugo server --buildFuture | ||||
| # install dependencies | ||||
| npm ci | ||||
| make serve | ||||
| ``` | ||||
| 
 | ||||
| <!-- | ||||
|  | @ -68,6 +110,63 @@ This will start the local Hugo server on port 1313. Open up your browser to http | |||
| 启动浏览器,打开 http://localhost:1313 来查看网站。 | ||||
| 当你对源文件作出修改时,Hugo 会更新网站并强制浏览器执行刷新操作。 | ||||
| 
 | ||||
| <!-- | ||||
| ## Troubleshooting | ||||
| ### error: failed to transform resource: TOCSS: failed to transform "scss/main.scss" (text/x-scss): this feature is not available in your current Hugo version | ||||
| 
 | ||||
| Hugo is shipped in two set of binaries for technical reasons. The current website runs based on the **Hugo Extended** version only. In the [release page](https://github.com/gohugoio/hugo/releases) look for archives with `extended` in the name. To confirm, run `hugo version` and look for the word `extended`. | ||||
| 
 | ||||
| --> | ||||
| ## 故障排除 | ||||
| 
 | ||||
| ###  error: failed to transform resource: TOCSS: failed to transform "scss/main.scss" (text/x-scss): this feature is not available in your current Hugo version | ||||
| 
 | ||||
| 由于技术原因,Hugo 会发布两套二进制文件。 | ||||
| 当前网站仅基于 **Hugo Extended** 版本运行。 | ||||
在 [发布页面](https://github.com/gohugoio/hugo/releases) 中查找名称为 `extended` 的归档。可以运行 `hugo version` 查看是否有单词 `extended` 来确认。 | ||||
| 
 | ||||
| <!-- | ||||
| ### Troubleshooting macOS for too many open files | ||||
| 
 | ||||
| If you run `make serve` on macOS and receive the following error: | ||||
| 
 | ||||
| --> | ||||
### 对 macOS 上打开太多文件的故障排除 | ||||
| 
 | ||||
| 如果在 macOS 上运行 `make serve` 收到以下错误: | ||||
| 
 | ||||
| ``` | ||||
| ERROR 2020/08/01 19:09:18 Error: listen tcp 127.0.0.1:1313: socket: too many open files | ||||
| make: *** [serve] Error 1 | ||||
| ``` | ||||
| 
 | ||||
| 试着查看一下当前打开文件数的限制: | ||||
| 
 | ||||
| `launchctl limit maxfiles` | ||||
| 
 | ||||
| 然后运行以下命令(参考https://gist.github.com/tombigel/d503800a282fcadbee14b537735d202c): | ||||
| 
 | ||||
| ``` | ||||
| #!/bin/sh | ||||
| 
 | ||||
| # These are the original gist links, linking to my gists now. | ||||
| # curl -O https://gist.githubusercontent.com/a2ikm/761c2ab02b7b3935679e55af5d81786a/raw/ab644cb92f216c019a2f032bbf25e258b01d87f9/limit.maxfiles.plist | ||||
| # curl -O https://gist.githubusercontent.com/a2ikm/761c2ab02b7b3935679e55af5d81786a/raw/ab644cb92f216c019a2f032bbf25e258b01d87f9/limit.maxproc.plist | ||||
| 
 | ||||
| curl -O https://gist.githubusercontent.com/tombigel/d503800a282fcadbee14b537735d202c/raw/ed73cacf82906fdde59976a0c8248cce8b44f906/limit.maxfiles.plist | ||||
| curl -O https://gist.githubusercontent.com/tombigel/d503800a282fcadbee14b537735d202c/raw/ed73cacf82906fdde59976a0c8248cce8b44f906/limit.maxproc.plist | ||||
| 
 | ||||
| sudo mv limit.maxfiles.plist /Library/LaunchDaemons | ||||
| sudo mv limit.maxproc.plist /Library/LaunchDaemons | ||||
| 
 | ||||
| sudo chown root:wheel /Library/LaunchDaemons/limit.maxfiles.plist | ||||
| sudo chown root:wheel /Library/LaunchDaemons/limit.maxproc.plist | ||||
| 
 | ||||
| sudo launchctl load -w /Library/LaunchDaemons/limit.maxfiles.plist | ||||
| ``` | ||||
| 
 | ||||
这适用于 macOS Catalina 和 Mojave。 | ||||
| 
 | ||||
| <!-- | ||||
| ## Get involved with SIG Docs | ||||
| 
 | ||||
|  | @ -78,7 +177,7 @@ You can also reach the maintainers of this project at: | |||
| - [Slack](https://kubernetes.slack.com/messages/sig-docs) | ||||
| - [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-docs) | ||||
| --> | ||||
| ## 参与 SIG Docs 工作 | ||||
| # 参与 SIG Docs 工作 | ||||
| 
 | ||||
| 通过 [社区页面](https://github.com/kubernetes/community/tree/master/sig-docs#meetings) | ||||
| 进一步了解 SIG Docs Kubernetes 社区和会议信息。 | ||||
|  | @ -95,7 +194,7 @@ You can click the **Fork** button in the upper-right area of the screen to creat | |||
| 
 | ||||
| Once your pull request is created, a Kubernetes reviewer will take responsibility for providing clear, actionable feedback.  As the owner of the pull request, **it is your responsibility to modify your pull request to address the feedback that has been provided to you by the Kubernetes reviewer.** | ||||
| --> | ||||
| ## 为文档做贡献 | ||||
| # 为文档做贡献 | ||||
| 
 | ||||
| 你也可以点击屏幕右上方区域的 **Fork** 按钮,在你自己的 GitHub | ||||
| 账号下创建本仓库的拷贝。此拷贝被称作 *fork*。 | ||||
|  | @ -133,7 +232,7 @@ For more information about contributing to the Kubernetes documentation, see: | |||
| * [文档风格指南](http://kubernetes.io/docs/contribute/style/style-guide/) | ||||
| * [本地化 Kubernetes 文档](https://kubernetes.io/docs/contribute/localization/) | ||||
| 
 | ||||
| ## 中文本地化 | ||||
| # 中文本地化 | ||||
| 
 | ||||
| 可以通过以下方式联系中文本地化的维护人员: | ||||
| 
 | ||||
|  | @ -146,15 +245,15 @@ For more information about contributing to the Kubernetes documentation, see: | |||
| 
 | ||||
| Participation in the Kubernetes community is governed by the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). | ||||
| --> | ||||
| ### 行为准则 | ||||
| # 行为准则 | ||||
| 
 | ||||
| 参与 Kubernetes 社区受 [CNCF 行为准则](https://github.com/cncf/foundation/blob/master/code-of-conduct.md)约束。 | ||||
| 参与 Kubernetes 社区受 [CNCF 行为准则](https://github.com/cncf/foundation/blob/master/code-of-conduct.md) 约束。 | ||||
| 
 | ||||
| <!-- | ||||
| ## Thank you! | ||||
| 
 | ||||
| Kubernetes thrives on community participation, and we appreciate your contributions to our website and our documentation! | ||||
| --> | ||||
| ## 感谢! | ||||
| # 感谢! | ||||
| 
 | ||||
| Kubernetes 因为社区的参与而蓬勃发展,感谢您对我们网站和文档的贡献! | ||||
|  |  | |||
|  | @ -12,7 +12,6 @@ You can run the website locally using Hugo (Extended version), or you can run it | |||
| 
 | ||||
| To use this repository, you need the following installed locally: | ||||
| 
 | ||||
| - [yarn](https://yarnpkg.com/) | ||||
| - [npm](https://www.npmjs.com/) | ||||
| - [Go](https://golang.org/) | ||||
| - [Hugo (Extended version)](https://gohugo.io/) | ||||
|  | @ -28,9 +27,6 @@ cd website | |||
| The Kubernetes website uses the [Docsy Hugo theme](https://github.com/google/docsy#readme). Even if you plan to run the website in a container, we strongly recommend pulling in the submodule and other development dependencies by running the following: | ||||
| 
 | ||||
| ``` | ||||
| # install dependencies | ||||
| yarn | ||||
| 
 | ||||
| # pull in the Docsy submodule | ||||
| git submodule update --init --recursive --depth 1 | ||||
| ``` | ||||
|  | @ -53,6 +49,8 @@ Make sure to install the Hugo extended version specified by the `HUGO_VERSION` e | |||
| To build and test the site locally, run: | ||||
| 
 | ||||
| ```bash | ||||
| # install dependencies | ||||
| npm ci | ||||
| make serve | ||||
| ``` | ||||
| 
 | ||||
|  |  | |||
|  | @ -88,6 +88,20 @@ footer { | |||
|   } | ||||
| } | ||||
| 
 | ||||
| main { | ||||
|   .button { | ||||
|     display: inline-block; | ||||
|     border-radius: 6px; | ||||
|     padding: 6px 20px; | ||||
|     line-height: 1.3rem; | ||||
|     color: white; | ||||
|     background-color: $blue; | ||||
|     text-decoration: none; | ||||
|     font-size: 1rem; | ||||
|     border: 0px; | ||||
|   } | ||||
| } | ||||
| 
 | ||||
| // HEADER | ||||
| 
 | ||||
| #hamburger { | ||||
|  |  | |||
|  | @ -71,6 +71,22 @@ body.td-404 main .error-details { | |||
|   max-width: 80%; | ||||
|   border: 1px solid rgb(222, 226, 230); | ||||
|   border-radius: 5px; | ||||
|   margin-bottom: 1rem; | ||||
|   padding-top: 1rem; | ||||
|   padding-bottom: 1rem; | ||||
| 
 | ||||
|   // mermaid diagram - sequence diagram | ||||
|   .actor { | ||||
|     fill: #326ce5 !important; | ||||
|   } | ||||
|   text.actor { | ||||
|     font-size: 18px !important; | ||||
|     stroke: white !important; | ||||
|     fill: white !important; | ||||
|   } | ||||
|   .activation0 { | ||||
|     fill: #c9e9ec !important; | ||||
|   } | ||||
| } | ||||
| 
 | ||||
| /* HEADER */ | ||||
|  |  | |||
|  | @ -157,7 +157,7 @@ github_repo = "https://github.com/kubernetes/website" | |||
| # param for displaying an announcement block on every page. | ||||
| # See /i18n/en.toml for message text and title. | ||||
| announcement = true | ||||
| announcement_bg = "#3f0374" # choose a dark color – text is white | ||||
| announcement_bg = "#000000" #choose a dark color – text is white | ||||
| 
 | ||||
| #Searching | ||||
| k8s_search = true | ||||
|  |  | |||
|  | @ -6,7 +6,7 @@ weight: 10 | |||
| 
 | ||||
| <!-- overview --> | ||||
| 
 | ||||
| Ein Knoten (Node in Englisch) ist eine Arbeitsmaschine in Kubernetes, früher als `minion` bekannt. Ein Node | ||||
| Ein Knoten (Node in Englisch) ist eine Arbeitsmaschine in Kubernetes. Ein Node | ||||
| kann je nach Cluster eine VM oder eine physische Maschine sein. Jeder Node enthält | ||||
| die für den Betrieb von [Pods](/docs/concepts/workloads/pods/pod/) notwendigen Dienste | ||||
| und wird von den Master-Komponenten verwaltet. | ||||
|  |  | |||
|  | @ -8,7 +8,7 @@ sitemap: | |||
| 
 | ||||
| {{< blocks/section id="oceanNodes" >}} | ||||
| {{% blocks/feature image="flower" %}} | ||||
| [Kubernetes (K8s)]({{< relref "/docs/concepts/overview/what-is-kubernetes" >}}) is an open-source system for automating deployment, scaling, and management of containerized applications. | ||||
| [Kubernetes]({{< relref "/docs/concepts/overview/what-is-kubernetes" >}}), also known as K8s, is an open-source system for automating deployment, scaling, and management of containerized applications. | ||||
| 
 | ||||
| It groups containers that make up an application into logical units for easy management and discovery. Kubernetes builds upon [15 years of experience of running production workloads at Google](http://queue.acm.org/detail.cfm?id=2898444), combined with best-of-breed ideas and practices from the community. | ||||
| {{% /blocks/feature %}} | ||||
|  | @ -28,7 +28,7 @@ Whether testing locally or running a global enterprise, Kubernetes flexibility g | |||
| {{% /blocks/feature %}} | ||||
| 
 | ||||
| {{% blocks/feature image="suitcase" %}} | ||||
| #### Run Anywhere | ||||
| #### Run K8s Anywhere | ||||
| 
 | ||||
| Kubernetes is open source giving you the freedom to take advantage of on-premises, hybrid, or public cloud infrastructure, letting you effortlessly move workloads to where it matters to you. | ||||
| 
 | ||||
|  | @ -44,6 +44,11 @@ Kubernetes is open source giving you the freedom to take advantage of on-premise | |||
|         <br> | ||||
|         <br> | ||||
|         <a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-north-america/?utm_source=kubernetes.io&utm_medium=nav&utm_campaign=kccncna20" button id="desktopKCButton">Attend KubeCon NA virtually on November 17-20, 2020</a> | ||||
|         <br> | ||||
|         <br> | ||||
|         <br> | ||||
|         <br> | ||||
|         <a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-europe/?utm_source=kubernetes.io&utm_medium=nav&utm_campaign=kccnceu21" button id="desktopKCButton">Attend KubeCon EU virtually on May 4 – 7, 2021</a> | ||||
| </div> | ||||
| <div id="videoPlayer"> | ||||
|     <iframe data-url="https://www.youtube.com/embed/H06qrNmGqyE?autoplay=1" frameborder="0" allowfullscreen></iframe> | ||||
|  |  | |||
|  | @ -12,7 +12,7 @@ Kubernetes is well-known for running scalable workloads. It scales your workload | |||
| 
 | ||||
| ## Guaranteed scheduling with controlled cost | ||||
| 
 | ||||
| [Kubernetes Cluster Autoscaler](https://kubernetes.io/docs/tasks/administer-cluster/cluster-management/#cluster-autoscaling) is an excellent tool in the ecosystem which adds more nodes to your cluster when your applications need them. However, cluster autoscaler has some limitations and may not work for all users: | ||||
| [Kubernetes Cluster Autoscaler](https://github.com/kubernetes/autoscaler/) is an excellent tool in the ecosystem which adds more nodes to your cluster when your applications need them. However, cluster autoscaler has some limitations and may not work for all users: | ||||
| 
 | ||||
| - It does not work in physical clusters. | ||||
| - Adding more nodes to the cluster costs more. | ||||
|  |  | |||
|  | @ -153,7 +153,7 @@ And as sources are always important to mention, we will follow (partially) the h | |||
| 
 | ||||
| ```bash | ||||
| # Download the latest version of KinD | ||||
| curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/v0.7.0/kind-$(uname)-amd64 | ||||
| curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/v0.7.0/kind-linux-amd64 | ||||
| # Make the binary executable | ||||
| chmod +x ./kind | ||||
| # Move the binary to your executable path | ||||
|  |  | |||
|  | @ -0,0 +1,16 @@ | |||
| --- | ||||
| layout: blog | ||||
| title: "Remembering Dan Kohn" | ||||
| date: 2020-11-02 | ||||
| slug: remembering-dan-kohn | ||||
| --- | ||||
| 
 | ||||
| **Author**: The Kubernetes Steering Committee | ||||
| 
 | ||||
Dan Kohn was instrumental in getting the Kubernetes and CNCF community to where it is today. He shared our values, motivations, enthusiasm, and community spirit, and helped the Kubernetes community to become the best that it could be. Dan loved getting people together to solve problems big and small. He enabled people to grow their individual scope in the community, which often helped launch their careers in open source software. | ||||
| 
 | ||||
| Dan built a coalition around the nascent Kubernetes project and turned that into a cornerstone to build the larger cloud native space. He loved challenges, especially ones where the payoff was great like building worldwide communities, spreading the love of open source, and helping diverse, underprivileged communities and students to get a head start in technology. | ||||
| 
 | ||||
| Our heart goes out to his family. Thank you, Dan, for bringing your boys to events in India and elsewhere as we got to know how great you were as a father. Dan, your thoughts and ideas will help us make progress in our journey as a community. Thank you for your life's work! | ||||
| 
 | ||||
| If Dan has made an impact on you in some way, please consider adding a memory of him in his [CNCF memorial](https://github.com/cncf/memorials/blob/master/dan-kohn.md). | ||||
										
											
File diff suppressed because one or more lines are too long
(new image file, size after: 66 KiB)
|  | @ -0,0 +1,57 @@ | |||
| --- | ||||
| layout: blog | ||||
| title: "Cloud native security for your clusters" | ||||
| date: 2020-11-18 | ||||
| slug: cloud-native-security-for-your-clusters | ||||
| --- | ||||
| 
 | ||||
| **Author**: [Pushkar Joglekar](https://twitter.com/pudijoglekar) | ||||
| 
 | ||||
| Over the last few years a small, security focused community has been working diligently to deepen our understanding of security, given the evolving cloud native infrastructure and corresponding iterative deployment practices. To enable sharing of this knowledge with the rest of the community, members of [CNCF SIG Security](https://github.com/cncf/sig-security) (a group which reports into [CNCF TOC](https://github.com/cncf/toc#sigs) and who are friends with [Kubernetes SIG Security](https://github.com/kubernetes/community/tree/master/sig-security)) led by Emily Fox, collaborated on a whitepaper outlining holistic cloud native security concerns and best practices. After over 1200 comments, changes, and discussions from 35 members across the world, we are proud to share [cloud native security whitepaper v1.0](https://www.cncf.io/blog/2020/11/18/announcing-the-cloud-native-security-white-paper) that serves as essential reading for security leadership in enterprises, financial and healthcare industries, academia, government, and non-profit organizations. | ||||
| 
 | ||||
| The paper attempts to _not_ focus on any specific [cloud native project](https://www.cncf.io/projects/). Instead, the intent is to model and inject security into four logical phases of cloud native application lifecycle: _Develop, Distribute, Deploy, and Runtime_. | ||||
| 
 | ||||
| <img alt="Cloud native application lifecycle phases" | ||||
|      src="cloud-native-app-lifecycle-phases.svg" | ||||
|      style="width:60em;max-width:100%;"> | ||||
| 
 | ||||
| 
 | ||||
| ## Kubernetes native security controls | ||||
| When using Kubernetes as a workload orchestrator, some of the security controls this version of the whitepaper recommends are: | ||||
| * [Pod Security Policies](/docs/concepts/policy/pod-security-policy/): Implement a single source of truth for “least privilege” workloads across the entire cluster | ||||
| * [Resource requests and limits](/docs/concepts/configuration/manage-resources-containers/#requests-and-limits): Apply requests (soft constraint) and limits (hard constraint) for shared resources such as memory and CPU | ||||
| * [Audit log analysis](/docs/tasks/debug-application-cluster/audit/): Enable Kubernetes API auditing and filtering for security relevant events | ||||
| * [Control plane authentication and certificate root of trust](/docs/concepts/architecture/control-plane-node-communication/): Enable mutual TLS authentication with a trusted CA for communication within the cluster | ||||
| * [Secrets management](/docs/concepts/configuration/secret/): Integrate with a built-in or external secrets store | ||||
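As a hedged illustration of the resource requests and limits control above, a minimal Pod sketch (the names, image, and values are placeholders, not taken from the whitepaper) could look like this:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: resource-limited-app        # placeholder name
spec:
  containers:
  - name: app
    image: registry.example/app:1.0 # placeholder image
    resources:
      requests:                     # soft constraint, used by the scheduler
        cpu: "250m"
        memory: "128Mi"
      limits:                       # hard constraint, enforced at runtime
        cpu: "500m"
        memory: "256Mi"
```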
| 
 | ||||
| ## Cloud native complementary security controls | ||||
| 
 | ||||
Kubernetes has direct involvement in the _deploy_ phase and, to a lesser extent, in the _runtime_ phase. Ensuring the artifacts are securely _developed_ and _distributed_ is necessary for enabling workloads in Kubernetes to run “secure by default”. Throughout all phases of the cloud native application life cycle, several complementary security controls exist for Kubernetes orchestrated workloads, which include but are not limited to: | ||||
| * Develop: | ||||
|   - Image signing and verification | ||||
|   - Image vulnerability scanners | ||||
| * Distribute: | ||||
|   - Pre-deployment checks for detecting excessive privileges | ||||
|   - Enabling observability and logging | ||||
| * Deploy: | ||||
|   - Using a service mesh for workload authentication and authorization | ||||
|   - Enforcing “default deny” network policies for inter-workload communication via [network plugins](/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) | ||||
| * Runtime: | ||||
|   - Deploying security monitoring agents for workloads | ||||
|   - Isolating applications that run on the same node using SELinux, AppArmor, etc. | ||||
|   - Scanning configuration against recognized secure baselines for node, workload and orchestrator | ||||
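As a sketch of the “default deny” network policy mentioned under Deploy (the policy and namespace names are illustrative), a NetworkPolicy that blocks all ingress and egress for Pods in a namespace might be written as:

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all      # illustrative name
  namespace: my-namespace     # illustrative namespace
spec:
  podSelector: {}             # selects every Pod in the namespace
  policyTypes:
  - Ingress
  - Egress
  # no ingress or egress rules are listed, so all traffic is denied by default
```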
| 
 | ||||
| ## Understand first, secure next | ||||
| The cloud native way, including containers, provides great security benefits for its users: immutability, modularity, faster upgrades and consistent state across the environment. Realizing this fundamental change in “the way things are done”, motivates us to look at security with a cloud native lens. One of the things that was evident for all the authors of the paper was the fact that it’s tough to make smarter decisions on how and what to secure in a cloud native ecosystem if you do not understand the tools, patterns, and frameworks at hand (in addition to knowing your own critical assets). Hence, for all the security practitioners out there who want to be partners rather than a gatekeeper for your friends in Operations, Product Development, and Compliance, let’s make an attempt to _learn more so we can secure better_. | ||||
| 
 | ||||
| We recommend following this **7 step R.U.N.T.I.M.E. path** to get started on cloud native security: | ||||
| 1. <b>R</b>ead the paper and any linked material in it | ||||
| 2. <b>U</b>nderstand challenges and constraints for your environment | ||||
| 3. <b>N</b>ote the content and controls that apply to your environment | ||||
| 4. <b>T</b>alk about your observations with your peers | ||||
| 5. <b>I</b>nvolve your leadership and ask for help | ||||
| 6. <b>M</b>ake a risk profile based on existing and missing security controls | ||||
| 7. <b>E</b>xpend time, money, and resources that improve security posture and reduce risk where appropriate. | ||||
| 
 | ||||
| ## Acknowledgements | ||||
| Huge shout out to _Emily Fox, Tim Bannister (The Scale Factory), Chase Pettet (Mirantis), and Wayne Haber (GitLab)_ for contributing with their wonderful suggestions for this blog post. | ||||
|  | @ -26,10 +26,9 @@ case_study_details: | |||
| 
 | ||||
| <p>Speed of delivery increased. Some of the legacy VM-based deployments took 45 minutes; with Kubernetes, that time was "just a few seconds to a couple of minutes," says Engineering Manager Brian Balser. Adds Li: "Teams that used to deploy on weekly schedules or had to coordinate schedules with the infrastructure team now deploy their updates independently, and can do it daily when necessary." Adopting Cloud Native Computing Foundation technologies allows for a more unified approach to deployment across the engineering staff, and portability for the company.</p> | ||||
| 
 | ||||
| {{< case-studies/quote author="Deep Kapadia, Executive Director, Engineering at The New York Times">}} | ||||
| <iframe style="padding:1%:" width="380" height="215" src="https://www.youtube.com/embed/DqS_IPw-c6o" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe> | ||||
| <iframe style="padding:1%:" width="380" height="215" src="https://www.youtube.com/embed/Tm4VfJtOHt8" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe> | ||||
| <br> | ||||
| {{< case-studies/quote author="Deep Kapadia, Executive Director, Engineering at The New York Times" >}} | ||||
| {{< youtube DqS_IPw-c6o youtube-quote-sm >}} | ||||
| {{< youtube Tm4VfJtOHt8 youtube-quote-sm >}} | ||||
| "I think once you get over the initial hump, things get a lot easier and actually a lot faster." | ||||
| {{< /case-studies/quote >}} | ||||
| 
 | ||||
|  |  | |||
|  | @ -92,9 +92,8 @@ Controllers that interact with external state find their desired state from | |||
| the API server, then communicate directly with an external system to bring | ||||
| the current state closer in line. | ||||
| 
 | ||||
| (There actually is a controller that horizontally scales the | ||||
| nodes in your cluster. See | ||||
| [Cluster autoscaling](/docs/tasks/administer-cluster/cluster-management/#cluster-autoscaling)). | ||||
| (There actually is a [controller](https://github.com/kubernetes/autoscaler/) | ||||
| that horizontally scales the nodes in your cluster.) | ||||
| 
 | ||||
| The important point here is that the controller makes some change to bring about | ||||
| your desired state, and then reports current state back to your cluster's API server. | ||||
|  |  | |||
|  | @ -338,5 +338,4 @@ for more information. | |||
| * Read the [Node](https://git.k8s.io/community/contributors/design-proposals/architecture/architecture.md#the-kubernetes-node) | ||||
|   section of the architecture design document. | ||||
| * Read about [taints and tolerations](/docs/concepts/scheduling-eviction/taint-and-toleration/). | ||||
| * Read about [cluster autoscaling](/docs/tasks/administer-cluster/cluster-management/#cluster-autoscaling). | ||||
| 
 | ||||
|  |  | |||
|  | @ -39,8 +39,6 @@ Before choosing a guide, here are some considerations: | |||
| 
 | ||||
| ## Managing a cluster | ||||
| 
 | ||||
| * [Managing a cluster](/docs/tasks/administer-cluster/cluster-management/) describes several topics related to the lifecycle of a cluster: creating a new cluster, upgrading your cluster's master and worker nodes, performing node maintenance (e.g. kernel upgrades), and upgrading the Kubernetes API version of a running cluster. | ||||
| 
 | ||||
| * Learn how to [manage nodes](/docs/concepts/architecture/nodes/). | ||||
| 
 | ||||
| * Learn how to set up and manage the [resource quota](/docs/concepts/policy/resource-quotas/) for shared clusters. | ||||
|  |  | |||
|  | @ -25,6 +25,7 @@ This page lists some of the available add-ons and links to their respective inst | |||
| * [Flannel](https://github.com/coreos/flannel/blob/master/Documentation/kubernetes.md) is an overlay network provider that can be used with Kubernetes. | ||||
| * [Knitter](https://github.com/ZTE/Knitter/) is a plugin to support multiple network interfaces in a Kubernetes pod. | ||||
| * [Multus](https://github.com/Intel-Corp/multus-cni) is a Multi plugin for multiple network support in Kubernetes to support all CNI plugins (e.g. Calico, Cilium, Contiv, Flannel), in addition to SRIOV, DPDK, OVS-DPDK and VPP based workloads in Kubernetes. | ||||
| * [OVN-Kubernetes](https://github.com/ovn-org/ovn-kubernetes/) is a networking provider for Kubernetes based on [OVN (Open Virtual Network)](https://github.com/ovn-org/ovn/), a virtual networking implementation that came out of the Open vSwitch (OVS) project. OVN-Kubernetes provides an overlay based networking implementation for Kubernetes, including an OVS based implementation of load balancing and network policy. | ||||
* [OVN4NFV-K8S-Plugin](https://github.com/opnfv/ovn4nfv-k8s-plugin) is an OVN-based CNI controller plugin that provides cloud native Service Function Chaining (SFC), multiple OVN overlay networks, dynamic subnet creation, dynamic creation of virtual networks, VLAN provider networks, and direct provider networks. It is pluggable with other multi-network plugins and is ideal for edge-based cloud native workloads in multi-cluster networking. | ||||
| * [NSX-T](https://docs.vmware.com/en/VMware-NSX-T/2.0/nsxt_20_ncp_kubernetes.pdf) Container Plug-in (NCP) provides integration between VMware NSX-T and container orchestrators such as Kubernetes, as well as integration between NSX-T and container-based CaaS/PaaS platforms such as Pivotal Container Service (PKS) and OpenShift. | ||||
| * [Nuage](https://github.com/nuagenetworks/nuage-kubernetes/blob/v5.1.1-1/docs/kubernetes-1-installation.rst) is an SDN platform that provides policy-based networking between Kubernetes Pods and non-Kubernetes environments with visibility and security monitoring. | ||||
|  |  | |||
|  | @ -426,7 +426,7 @@ poorly-behaved workloads that may be harming system health. | |||
|     {{< /note >}} | ||||
| 
 | ||||
| * `apiserver_flowcontrol_request_concurrency_limit` is a gauge vector | ||||
|   hoding the computed concurrency limit (based on the API server's | ||||
|   holding the computed concurrency limit (based on the API server's | ||||
|   total concurrency limit and PriorityLevelConfigurations' concurrency | ||||
|   shares), broken down by the label `priorityLevel`. | ||||
| 
 | ||||
|  |  | |||
|  | @ -124,6 +124,10 @@ With the help of the Big Cloud Fabric's virtual pod multi-tenant architecture, c | |||
| 
 | ||||
| BCF was recognized by Gartner as a visionary in the latest [Magic Quadrant](https://go.bigswitch.com/17GatedDocuments-MagicQuadrantforDataCenterNetworking_Reg.html). One of the BCF Kubernetes on-premises deployments (which includes Kubernetes, DC/OS & VMware running on multiple DCs across different geographic regions) is also referenced [here](https://portworx.com/architects-corner-kubernetes-satya-komala-nio/). | ||||
| 
 | ||||
| ### Calico | ||||
| 
 | ||||
| [Calico](https://docs.projectcalico.org/) is an open source networking and network security solution for containers, virtual machines, and native host-based workloads. Calico supports multiple data planes including: a pure Linux eBPF dataplane, a standard Linux networking dataplane, and a Windows HNS dataplane. Calico provides a full networking stack but can also be used in conjunction with [cloud provider CNIs](https://docs.projectcalico.org/networking/determine-best-networking#calico-compatible-cni-plugins-and-cloud-provider-integrations) to provide network policy enforcement. | ||||
| 
 | ||||
| ### Cilium | ||||
| 
 | ||||
| [Cilium](https://github.com/cilium/cilium) is open source software for | ||||
|  | @ -154,6 +158,11 @@ tables to provide per-instance subnets to each host (which is limited to 50-100 | |||
| entries per VPC). In short, cni-ipvlan-vpc-k8s significantly reduces the | ||||
| network complexity required to deploy Kubernetes at scale within AWS. | ||||
| 
 | ||||
| ### Coil | ||||
| 
 | ||||
| [Coil](https://github.com/cybozu-go/coil) is a CNI plugin designed for ease of integration, providing flexible egress networking. | ||||
| Coil operates with a low overhead compared to bare metal, and allows you to define arbitrary egress NAT gateways for external networks. | ||||
| 
 | ||||
| ### Contiv | ||||
| 
 | ||||
| [Contiv](https://github.com/contiv/netplugin) provides configurable networking (native l3 using BGP, overlay using vxlan,  classic l2, or Cisco-SDN/ACI) for various use cases. [Contiv](https://contiv.io) is all open sourced. | ||||
|  | @ -291,14 +300,6 @@ stateful ACLs, load-balancers etc to build different virtual networking | |||
| topologies.  The project has a specific Kubernetes plugin and documentation | ||||
| at [ovn-kubernetes](https://github.com/openvswitch/ovn-kubernetes). | ||||
| 
 | ||||
| ### Project Calico | ||||
| 
 | ||||
| [Project Calico](https://docs.projectcalico.org/) is an open source container networking provider and network policy engine. | ||||
| 
 | ||||
| Calico provides a highly scalable networking and network policy solution for connecting Kubernetes pods based on the same IP networking principles as the internet, for both Linux (open source) and Windows (proprietary - available from [Tigera](https://www.tigera.io/essentials/)).  Calico can be deployed without encapsulation or overlays to provide high-performance, high-scale data center networking.  Calico also provides fine-grained, intent based network security policy for Kubernetes pods via its distributed firewall. | ||||
| 
 | ||||
| Calico can also be run in policy enforcement mode in conjunction with other networking solutions such as Flannel, aka [canal](https://github.com/tigera/canal), or native GCE, AWS or Azure networking. | ||||
| 
 | ||||
| ### Romana | ||||
| 
 | ||||
| [Romana](https://romana.io) is an open source network and security automation solution that lets you deploy Kubernetes without an overlay network. Romana supports Kubernetes [Network Policy](/docs/concepts/services-networking/network-policies/) to provide isolation across network namespaces. | ||||
|  | @ -315,4 +316,4 @@ to run, and in both cases, the network provides one IP address per pod - as is s | |||
| 
 | ||||
| The early design of the networking model and its rationale, and some future | ||||
| plans are described in more detail in the | ||||
| [networking design document](https://git.k8s.io/community/contributors/design-proposals/network/networking.md). | ||||
| [networking design document](https://git.k8s.io/community/contributors/design-proposals/network/networking.md). | ||||
|  |  | |||
|  | @ -31,7 +31,7 @@ This lets you fetch a container image running in the cloud and | |||
| debug the exact same code locally if needed. | ||||
| 
 | ||||
| A ConfigMap is not designed to hold large chunks of data. The data stored in a | ||||
| ConfigMap cannot exeed 1 MiB. If you need to store settings that are | ||||
| ConfigMap cannot exceed 1 MiB. If you need to store settings that are | ||||
| larger than this limit, you may want to consider mounting a volume or use a | ||||
| separate database or file service. | ||||
| 
 | ||||
|  | @ -88,7 +88,7 @@ data: | |||
| There are four different ways that you can use a ConfigMap to configure | ||||
| a container inside a Pod: | ||||
| 
 | ||||
| 1. Command line arguments to the entrypoint of a container | ||||
| 1. Inside a container command and args | ||||
| 1. Environment variables for a container | ||||
| 1. Add a file in read-only volume, for the application to read | ||||
| 1. Write code to run inside the Pod that uses the Kubernetes API to read a ConfigMap | ||||
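For example, a minimal sketch of the second option, consuming a ConfigMap key as an environment variable (the ConfigMap name and key below are hypothetical):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: configmap-env-demo            # hypothetical Pod name
spec:
  restartPolicy: Never
  containers:
  - name: demo
    image: alpine:3
    command: ["sh", "-c", "echo $PLAYER_INITIAL_LIVES"]
    env:
    - name: PLAYER_INITIAL_LIVES      # environment variable visible to the container
      valueFrom:
        configMapKeyRef:
          name: game-demo             # hypothetical ConfigMap
          key: player_initial_lives   # key inside that ConfigMap
```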
|  |  | |||
|  | @ -271,7 +271,7 @@ preempted. Here's an example: | |||
| *   Pod P is being considered for Node N. | ||||
| *   Pod Q is running on another Node in the same Zone as Node N. | ||||
| *   Pod P has Zone-wide anti-affinity with Pod Q (`topologyKey: | ||||
|     failure-domain.beta.kubernetes.io/zone`). | ||||
|     topology.kubernetes.io/zone`). | ||||
| *   There are no other cases of anti-affinity between Pod P and other Pods in | ||||
|     the Zone. | ||||
| *   In order to schedule Pod P on Node N, Pod Q can be preempted, but scheduler | ||||
|  | @ -321,9 +321,7 @@ Pod may be created that fits on the same Node. In this case, the scheduler will | |||
| schedule the higher priority Pod instead of the preemptor. | ||||
| 
 | ||||
| This is expected behavior: the Pod with the higher priority should take the place | ||||
| of a Pod with a lower priority. Other controller actions, such as | ||||
| [cluster autoscaling](/docs/tasks/administer-cluster/cluster-management/#cluster-autoscaling), | ||||
| may eventually provide capacity to schedule the pending Pods. | ||||
| of a Pod with a lower priority. | ||||
| 
 | ||||
| ### Higher priority Pods are preempted before lower priority pods | ||||
| 
 | ||||
|  |  | |||
|  | @ -15,50 +15,379 @@ weight: 30 | |||
| Kubernetes Secrets let you store and manage sensitive information, such | ||||
| as passwords, OAuth tokens, and ssh keys. Storing confidential information in a Secret | ||||
| is safer and more flexible than putting it verbatim in a | ||||
| {{< glossary_tooltip term_id="pod" >}} definition or in a {{< glossary_tooltip text="container image" term_id="image" >}}. See [Secrets design document](https://git.k8s.io/community/contributors/design-proposals/auth/secrets.md) for more information. | ||||
| 
 | ||||
| {{< glossary_tooltip term_id="pod" >}} definition or in a | ||||
| {{< glossary_tooltip text="container image" term_id="image" >}}. | ||||
| See [Secrets design document](https://git.k8s.io/community/contributors/design-proposals/auth/secrets.md) for more information. | ||||
| 
 | ||||
| A Secret is an object that contains a small amount of sensitive data such as | ||||
| a password, a token, or a key. Such information might otherwise be put in a | ||||
| Pod specification or in an image. Users can create Secrets and the system | ||||
| also creates some Secrets. | ||||
| 
 | ||||
| <!-- body --> | ||||
| 
 | ||||
| ## Overview of Secrets | ||||
| 
 | ||||
| A Secret is an object that contains a small amount of sensitive data such as | ||||
| a password, a token, or a key. Such information might otherwise be put in a | ||||
| Pod specification or in an image. Users can create secrets and the system | ||||
| also creates some secrets. | ||||
| 
 | ||||
| To use a secret, a Pod needs to reference the secret. | ||||
| A secret can be used with a Pod in three ways: | ||||
| To use a Secret, a Pod needs to reference the Secret. | ||||
| A Secret can be used with a Pod in three ways: | ||||
| 
 | ||||
| - As [files](#using-secrets-as-files-from-a-pod) in a | ||||
| {{< glossary_tooltip text="volume" term_id="volume" >}} mounted on one or more of | ||||
| its containers. | ||||
|   {{< glossary_tooltip text="volume" term_id="volume" >}} mounted on one or more of | ||||
|   its containers. | ||||
| - As [container environment variable](#using-secrets-as-environment-variables). | ||||
| - By the [kubelet when pulling images](#using-imagepullsecrets) for the Pod. | ||||
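For example, a minimal sketch of the first approach, mounting a Secret as a read-only volume (the Pod and Secret names are illustrative):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: secret-volume-demo        # illustrative Pod name
spec:
  containers:
  - name: app
    image: alpine:3
    command: ["sh", "-c", "cat /etc/secret-data/username && sleep 3600"]
    volumeMounts:
    - name: secret-volume
      mountPath: /etc/secret-data
      readOnly: true
  volumes:
  - name: secret-volume
    secret:
      secretName: mysecret          # illustrative Secret name
```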
| 
 | ||||
| The name of a Secret object must be a valid | ||||
| [DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). | ||||
| You can specify the `data` and/or the `stringData` field when creating a | ||||
| configuration file for a Secret.  The `data` and the `stringData` fields are optional. | ||||
| The values for all keys in the `data` field have to be base64-encoded strings. | ||||
If conversion to a base64 string is not desirable, you can choose to specify | ||||
| the `stringData` field instead, which accepts arbitrary strings as values. | ||||
| 
 | ||||
| The keys of `data` and `stringData` must consist of alphanumeric characters, | ||||
| `-`, `_` or `.`. | ||||
| `-`, `_` or `.`. All key-value pairs in the `stringData` field are internally | ||||
| merged into the `data` field. If a key appears in both the `data` and the | ||||
| `stringData` field, the value specified in the `stringData` field takes | ||||
| precedence. | ||||
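For example, in the following sketch both fields define a `username` key; after creation the stored value is the base64 encoding of `administrator`, because the `stringData` entry takes precedence:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: precedence-demo        # illustrative name
type: Opaque
data:
  username: YWRtaW4=           # base64 for "admin"
stringData:
  username: administrator      # plain text; overrides data.username
```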
| 
 | ||||
| ### Built-in Secrets | ||||
| ## Types of Secret {#secret-types} | ||||
| 
 | ||||
| #### Service accounts automatically create and attach Secrets with API credentials | ||||
| When creating a Secret, you can specify its type using the `type` field of | ||||
| the [`Secret`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#secret-v1-core) | ||||
| resource, or certain equivalent `kubectl` command line flags (if available). | ||||
| The Secret type is used to facilitate programmatic handling of the Secret data. | ||||
| 
 | ||||
| Kubernetes automatically creates secrets which contain credentials for | ||||
| accessing the API and automatically modifies your Pods to use this type of | ||||
| secret. | ||||
| Kubernetes provides several builtin types for some common usage scenarios. | ||||
| These types vary in terms of the validations performed and the constraints | ||||
| Kubernetes imposes on them. | ||||
| 
 | ||||
| The automatic creation and use of API credentials can be disabled or overridden | ||||
| if desired.  However, if all you need to do is securely access the API server, | ||||
| this is the recommended workflow. | ||||
| | Builtin Type | Usage | | ||||
| |--------------|-------| | ||||
| | `Opaque`     |  arbitrary user-defined data | | ||||
| | `kubernetes.io/service-account-token` | service account token | | ||||
| | `kubernetes.io/dockercfg` | serialized `~/.dockercfg` file | | ||||
| | `kubernetes.io/dockerconfigjson` | serialized `~/.docker/config.json` file | | ||||
| | `kubernetes.io/basic-auth` | credentials for basic authentication | | ||||
| | `kubernetes.io/ssh-auth` | credentials for SSH authentication | | ||||
| | `kubernetes.io/tls` | data for a TLS client or server | | ||||
| | `bootstrap.kubernetes.io/token` | bootstrap token data | | ||||
| 
 | ||||
| You can define and use your own Secret type by assigning a non-empty string as the | ||||
| `type` value for a Secret object. An empty string is treated as an `Opaque` type. | ||||
| Kubernetes doesn't impose any constraints on the type name. However, if you | ||||
| are using one of the builtin types, you must meet all the requirements defined | ||||
| for that type. | ||||
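For instance, a Secret with a user-defined type might look like the following sketch (the type string and file content are arbitrary and only meaningful to your own tooling):

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: my-custom-secret             # illustrative name
type: example.com/my-secret-type     # non-empty custom type, not validated by Kubernetes
stringData:
  config.yaml: |
    apiUrl: https://api.example.com  # placeholder values
    token: not-a-real-token
```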
| 
 | ||||
| ### Opaque secrets | ||||
| 
 | ||||
| `Opaque` is the default Secret type if omitted from a Secret configuration file. | ||||
| When you create a Secret using `kubectl`, you will use the `generic` | ||||
| subcommand to indicate an `Opaque` Secret type. For example, the following | ||||
| command creates an empty Secret of type `Opaque`. | ||||
| 
 | ||||
| ```shell | ||||
| kubectl create secret generic empty-secret | ||||
| kubectl get secret empty-secret | ||||
| ``` | ||||
| 
 | ||||
| The output looks like: | ||||
| 
 | ||||
| ``` | ||||
| NAME           TYPE     DATA   AGE | ||||
| empty-secret   Opaque   0      2m6s | ||||
| ``` | ||||
| 
 | ||||
| The `DATA` column shows the number of data items stored in the Secret. | ||||
| In this case, `0` means we have just created an empty Secret. | ||||
| 
 | ||||
| ###  Service account token Secrets | ||||
| 
 | ||||
| A `kubernetes.io/service-account-token` type of Secret is used to store a | ||||
| token that identifies a service account. When using this Secret type, you need | ||||
| to ensure that the `kubernetes.io/service-account.name` annotation is set to an | ||||
existing service account name. A Kubernetes controller fills in some other | ||||
| fields such as the `kubernetes.io/service-account.uid` annotation and the | ||||
| `token` key in the `data` field set to actual token content. | ||||
| 
 | ||||
| The following example configuration declares a service account token Secret: | ||||
| 
 | ||||
| ```yaml | ||||
| apiVersion: v1 | ||||
| kind: Secret | ||||
| metadata: | ||||
|   name: secret-sa-sample | ||||
|   annotations: | ||||
|     kubernetes.io/service-account.name: "sa-name" | ||||
| type: kubernetes.io/service-account-token | ||||
| data: | ||||
|   # You can include additional key value pairs as you do with Opaque Secrets | ||||
|   extra: YmFyCg== | ||||
| ``` | ||||
| 
 | ||||
| When creating a `Pod`, Kubernetes automatically creates a service account Secret | ||||
| and automatically modifies your Pod to use this Secret. The service account token | ||||
| Secret contains credentials for accessing the API. | ||||
| 
 | ||||
| The automatic creation and use of API credentials can be disabled or | ||||
| overridden if desired. However, if all you need to do is securely access the | ||||
| API server, this is the recommended workflow. | ||||
| 
 | ||||
| See the [ServiceAccount](/docs/tasks/configure-pod-container/configure-service-account/) | ||||
| documentation for more information on how service accounts work. | ||||
| You can also check the `automountServiceAccountToken` field and the | ||||
| `serviceAccountName` field of the | ||||
[`Pod`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#pod-v1-core) | ||||
for information on referencing a service account from Pods. | ||||
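As a brief sketch of those two Pod fields (reusing the `sa-name` service account from the example above), a Pod can select a service account and opt out of automatic token mounting:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: sa-demo                          # illustrative Pod name
spec:
  serviceAccountName: sa-name            # service account to run as
  automountServiceAccountToken: false    # do not mount the token automatically
  containers:
  - name: app
    image: alpine:3
    command: ["sleep", "3600"]
```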
| 
 | ||||
| ### Creating a Secret | ||||
| ### Docker config Secrets | ||||
| 
 | ||||
| You can use one of the following `type` values to create a Secret to | ||||
| store the credentials for accessing a Docker registry for images. | ||||
| 
 | ||||
| - `kubernetes.io/dockercfg` | ||||
| - `kubernetes.io/dockerconfigjson` | ||||
| 
 | ||||
| The `kubernetes.io/dockercfg` type is reserved to store a serialized | ||||
| `~/.dockercfg` which is the legacy format for configuring Docker command line. | ||||
| When using this Secret type, you have to ensure the Secret `data` field | ||||
| contains a `.dockercfg` key whose value is content of a `~/.dockercfg` file | ||||
| encoded in the base64 format. | ||||
| 
 | ||||
The `kubernetes.io/dockerconfigjson` type is designed for storing a serialized | ||||
| JSON that follows the same format rules as the `~/.docker/config.json` file | ||||
| which is a new format for `~/.dockercfg`. | ||||
| When using this Secret type, the `data` field of the Secret object must | ||||
| contain a `.dockerconfigjson` key, in which the content for the | ||||
| `~/.docker/config.json` file is provided as a base64 encoded string. | ||||
| 
 | ||||
| Below is an example for a `kubernetes.io/dockercfg` type of Secret: | ||||
| 
 | ||||
| ```yaml | ||||
| apiVersion: v1 | ||||
| kind: Secret | ||||
| metadata: | ||||
|   name: secret-dockercfg | ||||
| type: kubernetes.io/dockercfg | ||||
| data: | ||||
|   .dockercfg: | | ||||
|     "<base64 encoded ~/.dockercfg file>" | ||||
| ``` | ||||
| 
 | ||||
| {{< note >}} | ||||
| If you do not want to perform the base64 encoding, you can choose to use the | ||||
| `stringData` field instead. | ||||
| {{< /note >}} | ||||
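| 
 | ||||
| For comparison, a minimal sketch of a `kubernetes.io/dockerconfigjson` Secret | ||||
| written with `stringData` might look like the following (the Secret name, | ||||
| registry URL, and credentials are placeholders): | ||||
| 
 | ||||
| ```yaml | ||||
| apiVersion: v1 | ||||
| kind: Secret | ||||
| metadata: | ||||
|   name: secret-dockerconfigjson | ||||
| type: kubernetes.io/dockerconfigjson | ||||
| stringData: | ||||
|   # The value must be valid JSON in the ~/.docker/config.json format | ||||
|   .dockerconfigjson: | | ||||
|     { | ||||
|       "auths": { | ||||
|         "https://registry.example.com": { | ||||
|           "username": "user", | ||||
|           "password": "pass", | ||||
|           "auth": "dXNlcjpwYXNz" | ||||
|         } | ||||
|       } | ||||
|     } | ||||
| ``` | ||||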
| 
 | ||||
| When you create these types of Secrets using a manifest, the API | ||||
| server checks whether the expected key exists in the `data` field, and | ||||
| it verifies that the value provided can be parsed as valid JSON. The API | ||||
| server doesn't validate whether the JSON actually is a Docker config file. | ||||
| 
 | ||||
| When you do not have a Docker config file, or you want to use `kubectl` | ||||
| to create a Docker registry Secret, you can do: | ||||
| 
 | ||||
| ```shell | ||||
| kubectl create secret docker-registry secret-tiger-docker \ | ||||
|   --docker-username=tiger \ | ||||
|   --docker-password=pass113 \ | ||||
|   --docker-email=tiger@acme.com | ||||
| ``` | ||||
| 
 | ||||
| This command creates a Secret of type `kubernetes.io/dockerconfigjson`. | ||||
| If you dump the `.dockerconfigjson` content from the `data` field, you will | ||||
| get the following JSON content which is a valid Docker configuration created | ||||
| on the fly: | ||||
| 
 | ||||
| ```json | ||||
| { | ||||
|   "auths": { | ||||
|     "https://index.docker.io/v1/": { | ||||
|       "username": "tiger", | ||||
|       "password": "pass113", | ||||
|       "email": "tiger@acme.com", | ||||
|       "auth": "dGlnZXI6cGFzczExMw==" | ||||
|     } | ||||
|   } | ||||
| } | ||||
| ``` | ||||
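| 
 | ||||
| One way to produce that dump, assuming the Secret created above exists in your | ||||
| current namespace, is: | ||||
| 
 | ||||
| ```shell | ||||
| kubectl get secret secret-tiger-docker -o jsonpath='{.data.*}' | base64 --decode | ||||
| ``` | ||||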
| 
 | ||||
| ### Basic authentication Secret | ||||
| 
 | ||||
| The `kubernetes.io/basic-auth` type is provided for storing credentials needed | ||||
| for basic authentication. When using this Secret type, the `data` field of the | ||||
| Secret must contain the following two keys: | ||||
| 
 | ||||
| - `username`: the user name for authentication; | ||||
| - `password`: the password or token for authentication. | ||||
| 
 | ||||
| Both values for the above two keys are base64 encoded strings. You can, of | ||||
| course, provide the cleartext content using the `stringData` field when creating | ||||
| the Secret. | ||||
| 
 | ||||
| The following YAML is an example config for a basic authentication Secret: | ||||
| 
 | ||||
| ```yaml | ||||
| apiVersion: v1 | ||||
| kind: Secret | ||||
| metadata: | ||||
|   name: secret-basic-auth | ||||
| type: kubernetes.io/basic-auth | ||||
| stringData: | ||||
|   username: admin | ||||
|   password: t0p-Secret | ||||
| ``` | ||||
| 
 | ||||
| The basic authentication Secret type is provided only for convenience. | ||||
| You can instead create an `Opaque` Secret for credentials used for basic authentication. | ||||
| However, using the builtin Secret type helps unify the formats of your credentials, | ||||
| and the API server does verify that the required keys are provided in a Secret | ||||
| configuration. | ||||
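| 
 | ||||
| As a sketch, you could also create an equivalent Secret with `kubectl`, so that | ||||
| the API server checks the required keys for you (the literal values here are | ||||
| examples only): | ||||
| 
 | ||||
| ```shell | ||||
| kubectl create secret generic secret-basic-auth \ | ||||
|   --type=kubernetes.io/basic-auth \ | ||||
|   --from-literal=username=admin \ | ||||
|   --from-literal=password='t0p-Secret' | ||||
| ``` | ||||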
| 
 | ||||
| ### SSH authentication secrets | ||||
| 
 | ||||
| The builtin type `kubernetes.io/ssh-auth` is provided for storing data used in | ||||
| SSH authentication. When using this Secret type, you will have to specify an | ||||
| `ssh-privatekey` key-value pair in the `data` (or `stringData`) field | ||||
| as the SSH credential to use. | ||||
| 
 | ||||
| The following YAML is an example config for an SSH authentication Secret: | ||||
| 
 | ||||
| ```yaml | ||||
| apiVersion: v1 | ||||
| kind: Secret | ||||
| metadata: | ||||
|   name: secret-ssh-auth | ||||
| type: kubernetes.io/ssh-auth | ||||
| data: | ||||
|   # the data is abbreviated in this example | ||||
|   ssh-privatekey: | | ||||
|      MIIEpQIBAAKCAQEAulqb/Y ... | ||||
| ``` | ||||
| 
 | ||||
| The SSH authentication Secret type is provided only for convenience. | ||||
| You can instead create an `Opaque` Secret for credentials used for SSH authentication. | ||||
| However, using the builtin Secret type helps unify the formats of your credentials, | ||||
| and the API server does verify that the required keys are provided in a Secret | ||||
| configuration. | ||||
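| 
 | ||||
| For example, one possible way to create such a Secret from an existing private | ||||
| key file with `kubectl` (the file path is a placeholder) is: | ||||
| 
 | ||||
| ```shell | ||||
| kubectl create secret generic secret-ssh-auth \ | ||||
|   --type=kubernetes.io/ssh-auth \ | ||||
|   --from-file=ssh-privatekey=/path/to/.ssh/id_rsa | ||||
| ``` | ||||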
| 
 | ||||
| ### TLS secrets | ||||
| 
 | ||||
| Kubernetes provides a builtin Secret type `kubernetes.io/tls` for storing | ||||
| a certificate and its associated key that are typically used for TLS. This | ||||
| data is primarily used with TLS termination of the Ingress resource, but may | ||||
| be used with other resources or directly by a workload. | ||||
| When using this type of Secret, the `tls.key` and the `tls.crt` keys must be provided | ||||
| in the `data` (or `stringData`) field of the Secret configuration, although the API | ||||
| server doesn't actually validate the values for each key. | ||||
| 
 | ||||
| The following YAML contains an example config for a TLS Secret: | ||||
| 
 | ||||
| ```yaml | ||||
| apiVersion: v1 | ||||
| kind: Secret | ||||
| metadata: | ||||
|   name: secret-tls | ||||
| type: kubernetes.io/tls | ||||
| data: | ||||
|   # the data is abbreviated in this example | ||||
|   tls.crt: | | ||||
|     MIIC2DCCAcCgAwIBAgIBATANBgkqh ... | ||||
|   tls.key: | | ||||
|     MIIEpgIBAAKCAQEA7yn3bRHQ5FHMQ ... | ||||
| ``` | ||||
| 
 | ||||
| The TLS Secret type is provided for convenience. You can instead create an `Opaque` | ||||
| Secret for credentials used for the TLS server and/or client. However, using the builtin | ||||
| Secret type helps ensure the consistency of the Secret format in your project; the API server | ||||
| does verify that the required keys are provided in a Secret configuration. | ||||
| 
 | ||||
| When creating a TLS Secret using `kubectl`, you can use the `tls` subcommand | ||||
| as shown in the following example: | ||||
| 
 | ||||
| ```shell | ||||
| kubectl create secret tls my-tls-secret \ | ||||
|   --cert=path/to/cert/file \ | ||||
|   --key=path/to/key/file | ||||
| ``` | ||||
| 
 | ||||
| The public/private key pair must exist beforehand. The public key certificate | ||||
| for `--cert` must be PEM encoded (Base64-encoded DER format) and must match the | ||||
| given private key for `--key`. | ||||
| The private key must be in what is commonly called PEM private key format, | ||||
| unencrypted. In both cases, the initial and the last lines from PEM (for | ||||
| example, `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----` for | ||||
| a certificate) are *not* included. | ||||
| 
 | ||||
| ### Bootstrap token Secrets | ||||
| 
 | ||||
| A bootstrap token Secret can be created by explicitly setting the Secret | ||||
| `type` to `bootstrap.kubernetes.io/token`. This type of Secret is designed for | ||||
| tokens used during the node bootstrap process. It stores tokens used to sign | ||||
| well-known ConfigMaps. | ||||
| 
 | ||||
| A bootstrap token Secret is usually created in the `kube-system` namespace and | ||||
| named in the form `bootstrap-token-<token-id>` where `<token-id>` is a 6 character | ||||
| string of the token ID. | ||||
| 
 | ||||
| As a Kubernetes manifest, a bootstrap token Secret might look like the | ||||
| following: | ||||
| 
 | ||||
| ```yaml | ||||
| apiVersion: v1 | ||||
| kind: Secret | ||||
| metadata: | ||||
|   name: bootstrap-token-5emitj | ||||
|   namespace: kube-system | ||||
| type: bootstrap.kubernetes.io/token | ||||
| data: | ||||
|   auth-extra-groups: c3lzdGVtOmJvb3RzdHJhcHBlcnM6a3ViZWFkbTpkZWZhdWx0LW5vZGUtdG9rZW4= | ||||
|   expiration: MjAyMC0wOS0xM1QwNDozOToxMFo= | ||||
|   token-id: NWVtaXRq | ||||
|   token-secret: a3E0Z2lodnN6emduMXAwcg== | ||||
|   usage-bootstrap-authentication: dHJ1ZQ== | ||||
|   usage-bootstrap-signing: dHJ1ZQ== | ||||
| ``` | ||||
| 
 | ||||
| A bootstrap token Secret has the following keys specified under `data`: | ||||
| 
 | ||||
| - `token-id`: A random 6 character string as the token identifier. Required. | ||||
| - `token-secret`: A random 16 character string as the actual token secret. Required. | ||||
| - `description`: A human-readable string that describes what the token is | ||||
|   used for. Optional. | ||||
| - `expiration`: An absolute UTC time using RFC3339 specifying when the token | ||||
|   should expire. Optional. | ||||
| - `usage-bootstrap-<usage>`: A boolean flag indicating additional usage for | ||||
|   the bootstrap token. | ||||
| - `auth-extra-groups`: A comma-separated list of group names that will be | ||||
|   authenticated as in addition to the `system:bootstrappers` group. | ||||
| 
 | ||||
| The above YAML may look confusing because the values are all base64 encoded | ||||
| strings. In fact, you can use the following, more readable YAML, which | ||||
| results in an identical Secret object: | ||||
| 
 | ||||
| ```yaml | ||||
| apiVersion: v1 | ||||
| kind: Secret | ||||
| metadata: | ||||
|   # Note how the Secret is named | ||||
|   name: bootstrap-token-5emitj | ||||
|   # A bootstrap token Secret usually resides in the kube-system namespace | ||||
|   namespace: kube-system | ||||
| type: bootstrap.kubernetes.io/token | ||||
| stringData: | ||||
|   auth-extra-groups: "system:bootstrappers:kubeadm:default-node-token" | ||||
|   expiration: "2020-09-13T04:39:10Z" | ||||
|   # This token ID is used in the name | ||||
|   token-id: "5emitj" | ||||
|   token-secret: "kq4gihvszzgn1p0r" | ||||
|   # This token can be used for authentication | ||||
|   usage-bootstrap-authentication: "true" | ||||
|   # and it can be used for signing | ||||
|   usage-bootstrap-signing: "true" | ||||
| ``` | ||||
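| 
 | ||||
| To convince yourself that both manifests describe the same object, you can decode | ||||
| one of the stored values; for example, the following should print `5emitj`: | ||||
| 
 | ||||
| ```shell | ||||
| kubectl get secret bootstrap-token-5emitj --namespace kube-system \ | ||||
|   -o jsonpath="{.data['token-id']}" | base64 --decode | ||||
| ``` | ||||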
| 
 | ||||
| ## Creating a Secret | ||||
| 
 | ||||
| There are several options to create a Secret: | ||||
| 
 | ||||
|  | @ -66,7 +395,7 @@ There are several options to create a Secret: | |||
| - [create Secret from config file](/docs/tasks/configmap-secret/managing-secret-using-config-file/) | ||||
| - [create Secret using kustomize](/docs/tasks/configmap-secret/managing-secret-using-kustomize/) | ||||
| 
 | ||||
| ### Editing a Secret | ||||
| ## Editing a Secret | ||||
| 
 | ||||
| An existing Secret may be edited with the following command: | ||||
| 
 | ||||
|  |  | |||
|  | @ -47,7 +47,7 @@ to roll back to a working version. | |||
| Instead, specify a meaningful tag such as `v1.42.0`. | ||||
| {{< /caution >}} | ||||
| 
 | ||||
| ## Updating Images | ||||
| ## Updating images | ||||
| 
 | ||||
| The default pull policy is `IfNotPresent` which causes the | ||||
| {{< glossary_tooltip text="kubelet" term_id="kubelet" >}} to skip | ||||
|  | @ -61,13 +61,13 @@ you can do one of the following: | |||
| 
 | ||||
| When `imagePullPolicy` is defined without a specific value, it is also set to `Always`. | ||||
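| 
 | ||||
| As a minimal sketch, the pull policy is set per container in the Pod spec (the | ||||
| Pod name and image are illustrative): | ||||
| 
 | ||||
| ```yaml | ||||
| apiVersion: v1 | ||||
| kind: Pod | ||||
| metadata: | ||||
|   name: example-pod | ||||
| spec: | ||||
|   containers: | ||||
|     - name: app | ||||
|       image: registry.example.com/app:v1.42.0 | ||||
|       # Always check the registry for the image before starting the container | ||||
|       imagePullPolicy: Always | ||||
| ``` | ||||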
| 
 | ||||
| ## Multi-architecture Images with Manifests | ||||
| ## Multi-architecture images with image indexes | ||||
| 
 | ||||
| As well as providing binary images, a container registry can also serve a [container image manifest](https://github.com/opencontainers/image-spec/blob/master/manifest.md). A manifest can reference image manifests for architecture-specific versions of a container. The idea is that you can have a name for an image (for example: `pause`, `example/mycontainer`, `kube-apiserver`) and allow different systems to fetch the right binary image for the machine architecture they are using. | ||||
| As well as providing binary images, a container registry can also serve a [container image index](https://github.com/opencontainers/image-spec/blob/master/image-index.md). An image index can point to multiple [image manifests](https://github.com/opencontainers/image-spec/blob/master/manifest.md) for architecture-specific versions of a container. The idea is that you can have a name for an image (for example: `pause`, `example/mycontainer`, `kube-apiserver`) and allow different systems to fetch the right binary image for the machine architecture they are using. | ||||
| 
 | ||||
| Kubernetes itself typically names container images with a suffix `-$(ARCH)`. For backward compatibility, please generate the older images with suffixes. The idea is to generate say `pause` image which has the manifest for all the arch(es) and say `pause-amd64` which is backwards compatible for older configurations or YAML files which may have hard coded the images with suffixes. | ||||
| 
 | ||||
| ## Using a Private Registry | ||||
| ## Using a private registry | ||||
| 
 | ||||
| Private registries may require keys to read images from them.   | ||||
| Credentials can be provided in several ways: | ||||
|  | @ -86,7 +86,7 @@ Credentials can be provided in several ways: | |||
| 
 | ||||
| These options are explained in more detail below. | ||||
| 
 | ||||
| ### Configuring Nodes to authenticate to a Private Registry | ||||
| ### Configuring nodes to authenticate to a private registry | ||||
| 
 | ||||
| If you run Docker on your nodes, you can configure the Docker container | ||||
| runtime to authenticate to a private container registry. | ||||
|  | @ -178,7 +178,7 @@ template needs to include the `.docker/config.json` or mount a drive that contai | |||
| All pods will have read access to images in any private registry once private | ||||
| registry keys are added to the `.docker/config.json`. | ||||
| 
 | ||||
| ### Pre-pulled Images | ||||
| ### Pre-pulled images | ||||
| 
 | ||||
| {{< note >}} | ||||
| This approach is suitable if you can control node configuration.  It | ||||
|  | @ -197,7 +197,7 @@ This can be used to preload certain images for speed or as an alternative to aut | |||
| 
 | ||||
| All pods will have read access to any pre-pulled images. | ||||
| 
 | ||||
| ### Specifying ImagePullSecrets on a Pod | ||||
| ### Specifying imagePullSecrets on a Pod | ||||
| 
 | ||||
| {{< note >}} | ||||
| This is the recommended approach to run containers based on images | ||||
|  | @ -206,7 +206,7 @@ in private registries. | |||
| 
 | ||||
| Kubernetes supports specifying container image registry keys on a Pod. | ||||
| 
 | ||||
| #### Creating a Secret with a Docker Config | ||||
| #### Creating a Secret with a Docker config | ||||
| 
 | ||||
| Run the following command, substituting the appropriate uppercase values: | ||||
| 
 | ||||
|  | @ -266,7 +266,7 @@ Check [Add ImagePullSecrets to a Service Account](/docs/tasks/configure-pod-cont | |||
| You can use this in conjunction with a per-node `.docker/config.json`.  The credentials | ||||
| will be merged. | ||||
| 
 | ||||
| ## Use Cases | ||||
| ## Use cases | ||||
| 
 | ||||
| There are a number of solutions for configuring private registries.  Here are some | ||||
| common use cases and suggested solutions. | ||||
|  |  | |||
|  | @ -76,8 +76,7 @@ The following resource types are supported: | |||
| | `limits.memory` | Across all pods in a non-terminal state, the sum of memory limits cannot exceed this value. | | ||||
| | `requests.cpu` | Across all pods in a non-terminal state, the sum of CPU requests cannot exceed this value. | | ||||
| | `requests.memory` | Across all pods in a non-terminal state, the sum of memory requests cannot exceed this value. | | ||||
| | `hugepages-<size>` | Across all pods in a non-terminal state, the number of | ||||
| huge page requests of the specified size cannot exceed this value. | | ||||
| | `hugepages-<size>` | Across all pods in a non-terminal state, the number of huge page requests of the specified size cannot exceed this value. | | ||||
| | `cpu` | Same as `requests.cpu` | | ||||
| | `memory` | Same as `requests.memory` | | ||||
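| 
 | ||||
| For instance, a minimal sketch of a ResourceQuota that combines compute resources | ||||
| with a huge page size might look like this (the name and amounts are illustrative): | ||||
| 
 | ||||
| ```yaml | ||||
| apiVersion: v1 | ||||
| kind: ResourceQuota | ||||
| metadata: | ||||
|   name: compute-resources | ||||
| spec: | ||||
|   hard: | ||||
|     requests.cpu: "4" | ||||
|     requests.memory: 8Gi | ||||
|     limits.cpu: "8" | ||||
|     limits.memory: 16Gi | ||||
|     hugepages-2Mi: 4Gi | ||||
| ``` | ||||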
| 
 | ||||
|  | @ -237,7 +236,7 @@ one value. For example: | |||
|           - middle | ||||
| ``` | ||||
| 
 | ||||
| If the `operator` is `Exists` or `DoesNotExist`, the `values field must *NOT* be | ||||
| If the `operator` is `Exists` or `DoesNotExist`, the `values` field must *NOT* be | ||||
| specified. | ||||
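| 
 | ||||
| For example, a quota that applies to any pod that has *some* priority class can be | ||||
| sketched with the `Exists` operator and no `values`: | ||||
| 
 | ||||
| ```yaml | ||||
| apiVersion: v1 | ||||
| kind: ResourceQuota | ||||
| metadata: | ||||
|   name: pods-with-priority | ||||
| spec: | ||||
|   hard: | ||||
|     pods: "10" | ||||
|   scopeSelector: | ||||
|     matchExpressions: | ||||
|       - scopeName: PriorityClass | ||||
|         operator: Exists | ||||
| ``` | ||||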
| 
 | ||||
| ### Resource Quota Per PriorityClass | ||||
|  |  | |||
|  | @ -200,8 +200,8 @@ The affinity on this pod defines one pod affinity rule and one pod anti-affinity | |||
| while the `podAntiAffinity` is `preferredDuringSchedulingIgnoredDuringExecution`. The | ||||
| pod affinity rule says that the pod can be scheduled onto a node only if that node is in the same zone | ||||
| as at least one already-running pod that has a label with key "security" and value "S1". (More precisely, the pod is eligible to run | ||||
| on node N if node N has a label with key `failure-domain.beta.kubernetes.io/zone` and some value V | ||||
| such that there is at least one node in the cluster with key `failure-domain.beta.kubernetes.io/zone` and | ||||
| on node N if node N has a label with key `topology.kubernetes.io/zone` and some value V | ||||
| such that there is at least one node in the cluster with key `topology.kubernetes.io/zone` and | ||||
| value V that is running a pod that has a label with key "security" and value "S1".) The pod anti-affinity | ||||
| rule says that the pod cannot be scheduled onto a node if that node is in the same zone as a pod with | ||||
| label having key "security" and value "S2". See the | ||||
|  |  | |||
|  | @ -12,125 +12,147 @@ weight: 50 | |||
| 
 | ||||
| {{< feature-state for_k8s_version="v1.16" state="alpha" >}} | ||||
| 
 | ||||
| The kube-scheduler can be configured to enable bin packing of resources along with extended resources using `RequestedToCapacityRatioResourceAllocation` priority function. Priority functions can be used to fine-tune the kube-scheduler as per custom needs.  | ||||
| 
 | ||||
| 
 | ||||
| The kube-scheduler can be configured to enable bin packing of resources along | ||||
| with extended resources using `RequestedToCapacityRatioResourceAllocation` | ||||
| priority function. Priority functions can be used to fine-tune the | ||||
| kube-scheduler as per custom needs.  | ||||
| 
 | ||||
| <!-- body --> | ||||
| 
 | ||||
| ## Enabling Bin Packing using RequestedToCapacityRatioResourceAllocation | ||||
| 
 | ||||
| Before Kubernetes 1.15, Kube-scheduler used to allow scoring nodes based on the request to capacity ratio of primary resources like CPU and Memory. Kubernetes 1.16 added a new parameter to the priority function that allows the users to specify the resources along with weights for each resource to score nodes based on the request to capacity ratio. This allows users to bin pack extended resources by using appropriate parameters and improves the utilization of scarce resources in large clusters. The behavior of the `RequestedToCapacityRatioResourceAllocation` priority function can be controlled by a configuration option called `requestedToCapacityRatioArguments`. This argument consists of two parameters `shape` and `resources`. Shape allows the user to tune the function as least requested or most requested based on `utilization` and `score` values. Resources | ||||
| consists of `name` which specifies the resource to be considered during scoring and `weight` specify the weight of each resource. | ||||
| Kubernetes allows the users to specify the resources along with weights for | ||||
| each resource to score nodes based on the request to capacity ratio. This | ||||
| allows users to bin pack extended resources by using appropriate parameters | ||||
| and improves the utilization of scarce resources in large clusters. The | ||||
| behavior of the `RequestedToCapacityRatioResourceAllocation` priority function | ||||
| can be controlled by a configuration option called | ||||
| `requestedToCapacityRatioArguments`. This argument consists of two parameters | ||||
| `shape` and `resources`. The `shape` parameter allows the user to tune the | ||||
| function as least requested or most requested based on `utilization` and | ||||
| `score` values. The `resources` parameter consists of the `name` of the resource | ||||
| to be considered during scoring and `weight`, which specifies the weight of each | ||||
| resource. | ||||
| 
 | ||||
| Below is an example configuration that sets `requestedToCapacityRatioArguments` to bin packing behavior for extended resources `intel.com/foo` and `intel.com/bar` | ||||
| Below is an example configuration that sets | ||||
| `requestedToCapacityRatioArguments` to bin packing behavior for extended | ||||
| resources `intel.com/foo` and `intel.com/bar`. | ||||
| 
 | ||||
| ```json | ||||
| { | ||||
|     "kind" : "Policy", | ||||
|     "apiVersion" : "v1", | ||||
| 
 | ||||
|     ... | ||||
| 
 | ||||
|     "priorities" : [ | ||||
| 
 | ||||
|        ... | ||||
| 
 | ||||
|       { | ||||
|         "name": "RequestedToCapacityRatioPriority", | ||||
|         "weight": 2, | ||||
|         "argument": { | ||||
|           "requestedToCapacityRatioArguments": { | ||||
|             "shape": [ | ||||
|               {"utilization": 0, "score": 0}, | ||||
|               {"utilization": 100, "score": 10} | ||||
|             ], | ||||
|             "resources": [ | ||||
|               {"name": "intel.com/foo", "weight": 3}, | ||||
|               {"name": "intel.com/bar", "weight": 5} | ||||
|             ] | ||||
|           } | ||||
|         } | ||||
|       } | ||||
|     ], | ||||
|   } | ||||
| ```yaml | ||||
| apiVersion: v1 | ||||
| kind: Policy | ||||
| # ... | ||||
| priorities: | ||||
|   # ... | ||||
|   - name: RequestedToCapacityRatioPriority | ||||
|     weight: 2 | ||||
|     argument: | ||||
|       requestedToCapacityRatioArguments: | ||||
|         shape: | ||||
|           - utilization: 0 | ||||
|             score: 0 | ||||
|           - utilization: 100 | ||||
|             score: 10 | ||||
|         resources: | ||||
|           - name: intel.com/foo | ||||
|             weight: 3 | ||||
|           - name: intel.com/bar | ||||
|             weight: 5 | ||||
| ``` | ||||
| 
 | ||||
| **This feature is disabled by default** | ||||
| 
 | ||||
| ### Tuning RequestedToCapacityRatioResourceAllocation Priority Function | ||||
| ### Tuning the Priority Function | ||||
| 
 | ||||
| `shape` is used to specify the behavior of the `RequestedToCapacityRatioPriority` function. | ||||
| `shape` is used to specify the behavior of the | ||||
| `RequestedToCapacityRatioPriority` function. | ||||
| 
 | ||||
| ```yaml | ||||
|  {"utilization": 0, "score": 0}, | ||||
|  {"utilization": 100, "score": 10} | ||||
| shape: | ||||
|  - utilization: 0 | ||||
|    score: 0 | ||||
|  - utilization: 100 | ||||
|    score: 10 | ||||
| ``` | ||||
| 
 | ||||
| The above arguments give the node a score of 0 if utilization is 0% and 10 for utilization 100%, thus enabling bin packing behavior. To enable least requested the score value must be reversed as follows. | ||||
| The above arguments give the node a `score` of 0 if `utilization` is 0% and 10 for | ||||
| `utilization` 100%, thus enabling bin packing behavior. To enable least | ||||
| requested behavior, the score value must be reversed as follows. | ||||
| 
 | ||||
| ```yaml | ||||
|  {"utilization": 0, "score": 100}, | ||||
|  {"utilization": 100, "score": 0} | ||||
| shape: | ||||
|   - utilization: 0 | ||||
|     score: 100 | ||||
|   - utilization: 100 | ||||
|     score: 0 | ||||
| ``` | ||||
| 
 | ||||
| `resources` is an optional parameter which by defaults is set to: | ||||
| `resources` is an optional parameter which defaults to: | ||||
| 
 | ||||
| ``` yaml | ||||
| "resources": [ | ||||
|               {"name": "CPU", "weight": 1}, | ||||
|               {"name": "Memory", "weight": 1} | ||||
|             ] | ||||
| resources: | ||||
|   - name: CPU | ||||
|     weight: 1 | ||||
|   - name: Memory | ||||
|     weight: 1 | ||||
| ``` | ||||
| 
 | ||||
| It can be used to add extended resources as follows:  | ||||
| 
 | ||||
| ```yaml | ||||
| "resources": [ | ||||
|               {"name": "intel.com/foo", "weight": 5}, | ||||
|               {"name": "CPU", "weight": 3}, | ||||
|               {"name": "Memory", "weight": 1} | ||||
|             ] | ||||
| resources: | ||||
|   - name: intel.com/foo | ||||
|     weight: 5 | ||||
|   - name: CPU | ||||
|     weight: 3 | ||||
|   - name: Memory | ||||
|     weight: 1 | ||||
| ``` | ||||
| 
 | ||||
| The weight parameter is optional and is set to 1 if not specified. Also, the weight cannot be set to a negative value. | ||||
| The `weight` parameter is optional and is set to 1 if not specified. Also, the | ||||
| `weight` cannot be set to a negative value. | ||||
| 
 | ||||
| ### How the RequestedToCapacityRatioResourceAllocation Priority Function Scores Nodes | ||||
| ### Node scoring for capacity allocation | ||||
| 
 | ||||
| This section is intended for those who want to understand the internal details | ||||
| of this feature. | ||||
| Below is an example of how the node score is calculated for a given set of values. | ||||
| 
 | ||||
| ``` | ||||
| Requested Resources | ||||
| Requested resources: | ||||
| 
 | ||||
| ``` | ||||
| intel.com/foo : 2 | ||||
| Memory: 256MB | ||||
| CPU: 2 | ||||
| ``` | ||||
| 
 | ||||
| Resource Weights | ||||
| Resource weights: | ||||
| 
 | ||||
| ``` | ||||
| intel.com/foo : 5 | ||||
| Memory: 1 | ||||
| CPU: 3 | ||||
| ``` | ||||
| 
 | ||||
| FunctionShapePoint {{0, 0}, {100, 10}} | ||||
| 
 | ||||
| Node 1 Spec | ||||
| Node 1 spec: | ||||
| 
 | ||||
| ``` | ||||
| Available: | ||||
| intel.com/foo : 4 | ||||
| Memory : 1 GB | ||||
| CPU: 8 | ||||
|   intel.com/foo: 4 | ||||
|   Memory: 1 GB | ||||
|   CPU: 8 | ||||
| 
 | ||||
| Used: | ||||
| intel.com/foo: 1 | ||||
| Memory: 256MB | ||||
| CPU: 1 | ||||
|   intel.com/foo: 1 | ||||
|   Memory: 256MB | ||||
|   CPU: 1 | ||||
| ``` | ||||
| 
 | ||||
| Node score: | ||||
| 
 | ||||
| Node Score: | ||||
| 
 | ||||
| ``` | ||||
| intel.com/foo  = resourceScoringFunction((2+1),4) | ||||
|                = (100 - ((4-3)*100/4) | ||||
|                = (100 - 25) | ||||
|  | @ -152,24 +174,24 @@ CPU            = resourceScoringFunction((2+1),8) | |||
| 
 | ||||
| NodeScore   =  (7 * 5) + (5 * 1) + (3 * 3) / (5 + 1 + 3) | ||||
|             =  5 | ||||
| ``` | ||||
| 
 | ||||
| Node 2 spec: | ||||
| 
 | ||||
| Node 2 Spec | ||||
| 
 | ||||
| ``` | ||||
| Available: | ||||
| intel.com/foo: 8 | ||||
| Memory: 1GB | ||||
| CPU: 8 | ||||
| 
 | ||||
|   intel.com/foo: 8 | ||||
|   Memory: 1GB | ||||
|   CPU: 8 | ||||
| Used: | ||||
|   intel.com/foo: 2 | ||||
|   Memory: 512MB | ||||
|   CPU: 6 | ||||
| ``` | ||||
| 
 | ||||
| intel.com/foo: 2 | ||||
| Memory: 512MB | ||||
| CPU: 6 | ||||
| 
 | ||||
| 
 | ||||
| Node Score: | ||||
| Node score: | ||||
| 
 | ||||
| ``` | ||||
| intel.com/foo  = resourceScoringFunction((2+2),8) | ||||
|                =  (100 - ((8-4)*100/8) | ||||
|                =  (100 - 50) | ||||
|  | @ -194,4 +216,8 @@ NodeScore   =  (5 * 5) + (7 * 1) + (10 * 3) / (5 + 1 + 3) | |||
| 
 | ||||
| ``` | ||||
| 
 | ||||
| ## {{% heading "whatsnext" %}} | ||||
| 
 | ||||
| - Read more about the [scheduling framework](/docs/concepts/scheduling-eviction/scheduling-framework/) | ||||
| - Read more about [scheduler configuration](/docs/reference/scheduling/config/) | ||||
| 
 | ||||
|  |  | |||
|  | @ -32,15 +32,15 @@ You add a taint to a node using [kubectl taint](/docs/reference/generated/kubect | |||
| For example, | ||||
| 
 | ||||
| ```shell | ||||
| kubectl taint nodes node1 key=value:NoSchedule | ||||
| kubectl taint nodes node1 key1=value1:NoSchedule | ||||
| ``` | ||||
| 
 | ||||
| places a taint on node `node1`. The taint has key `key`, value `value`, and taint effect `NoSchedule`. | ||||
| places a taint on node `node1`. The taint has key `key1`, value `value1`, and taint effect `NoSchedule`. | ||||
| This means that no pod will be able to schedule onto `node1` unless it has a matching toleration. | ||||
| 
 | ||||
| To remove the taint added by the command above, you can run: | ||||
| ```shell | ||||
| kubectl taint nodes node1 key:NoSchedule- | ||||
| kubectl taint nodes node1 key1=value1:NoSchedule- | ||||
| ``` | ||||
| 
 | ||||
| You specify a toleration for a pod in the PodSpec. Both of the following tolerations "match" the | ||||
|  | @ -49,15 +49,15 @@ to schedule onto `node1`: | |||
| 
 | ||||
| ```yaml | ||||
| tolerations: | ||||
| - key: "key" | ||||
| - key: "key1" | ||||
|   operator: "Equal" | ||||
|   value: "value" | ||||
|   value: "value1" | ||||
|   effect: "NoSchedule" | ||||
| ``` | ||||
| 
 | ||||
| ```yaml | ||||
| tolerations: | ||||
| - key: "key" | ||||
| - key: "key1" | ||||
|   operator: "Exists" | ||||
|   effect: "NoSchedule" | ||||
| ``` | ||||
|  | @ -80,7 +80,7 @@ There are two special cases: | |||
| An empty `key` with operator `Exists` matches all keys, values and effects which means this | ||||
| will tolerate everything. | ||||
| 
 | ||||
| An empty `effect` matches all effects with key `key`. | ||||
| An empty `effect` matches all effects with key `key1`. | ||||
| 
 | ||||
| {{< /note >}} | ||||
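| 
 | ||||
| For example, the following toleration, with an empty `key` and the `Exists` | ||||
| operator, tolerates every taint: | ||||
| 
 | ||||
| ```yaml | ||||
| tolerations: | ||||
| - operator: "Exists" | ||||
| ``` | ||||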
| 
 | ||||
|  |  | |||
|  | @ -13,40 +13,41 @@ Unlike other types of controllers which run as part of the `kube-controller-mana | |||
| are not started automatically with a cluster. Use this page to choose the ingress controller implementation  | ||||
| that best fits your cluster. | ||||
| 
 | ||||
| Kubernetes as a project currently supports and maintains [GCE](https://git.k8s.io/ingress-gce/README.md) and | ||||
|   [nginx](https://git.k8s.io/ingress-nginx/README.md) controllers. | ||||
|    | ||||
| Kubernetes as a project supports and maintains [AWS](https://github.com/kubernetes-sigs/aws-load-balancer-controller#readme), [GCE](https://git.k8s.io/ingress-gce/README.md#readme), and | ||||
|   [nginx](https://git.k8s.io/ingress-nginx/README.md#readme) ingress controllers. | ||||
| 
 | ||||
| 
 | ||||
| <!-- body --> | ||||
| 
 | ||||
| ## Additional controllers | ||||
| 
 | ||||
| * [AKS Application Gateway Ingress Controller](https://github.com/Azure/application-gateway-kubernetes-ingress) is an ingress controller that enables ingress to [AKS clusters](https://docs.microsoft.com/azure/aks/kubernetes-walkthrough-portal) using the [Azure Application Gateway](https://docs.microsoft.com/azure/application-gateway/overview). | ||||
| * [Ambassador](https://www.getambassador.io/) API Gateway is an [Envoy](https://www.envoyproxy.io) based ingress  | ||||
|   controller with [community](https://www.getambassador.io/docs) or  | ||||
|   [commercial](https://www.getambassador.io/pro/) support from [Datawire](https://www.datawire.io/). | ||||
| * [AppsCode Inc.](https://appscode.com) offers support and maintenance for the most widely used [HAProxy](https://www.haproxy.org/) based ingress controller [Voyager](https://appscode.com/products/voyager).  | ||||
| * [AWS ALB Ingress Controller](https://github.com/kubernetes-sigs/aws-alb-ingress-controller) enables ingress using the [AWS Application Load Balancer](https://aws.amazon.com/elasticloadbalancing/). | ||||
| * [Contour](https://projectcontour.io/) is an [Envoy](https://www.envoyproxy.io/) based ingress controller | ||||
|   provided and supported by VMware. | ||||
| * Citrix provides an [Ingress Controller](https://github.com/citrix/citrix-k8s-ingress-controller) for its hardware (MPX), virtualized (VPX) and [free containerized (CPX) ADC](https://www.citrix.com/products/citrix-adc/cpx-express.html) for [baremetal](https://github.com/citrix/citrix-k8s-ingress-controller/tree/master/deployment/baremetal) and [cloud](https://github.com/citrix/citrix-k8s-ingress-controller/tree/master/deployment) deployments. | ||||
| * F5 Networks provides [support and maintenance](https://support.f5.com/csp/article/K86859508) | ||||
|   for the [F5 BIG-IP Container Ingress Services for Kubernetes](https://clouddocs.f5.com/containers/latest/userguide/kubernetes/). | ||||
| * [Gloo](https://gloo.solo.io) is an open-source ingress controller based on [Envoy](https://www.envoyproxy.io) which offers API Gateway functionality with enterprise support from [solo.io](https://www.solo.io).   | ||||
| * [HAProxy Ingress](https://haproxy-ingress.github.io) is a highly customizable community-driven ingress controller for HAProxy. | ||||
| * [HAProxy Technologies](https://www.haproxy.com/) offers support and maintenance for the [HAProxy Ingress Controller for Kubernetes](https://github.com/haproxytech/kubernetes-ingress). See the [official documentation](https://www.haproxy.com/documentation/hapee/1-9r1/traffic-management/kubernetes-ingress-controller/). | ||||
| * [Istio](https://istio.io/) based ingress controller | ||||
|   [Control Ingress Traffic](https://istio.io/docs/tasks/traffic-management/ingress/). | ||||
| * [Kong](https://konghq.com/) offers [community](https://discuss.konghq.com/c/kubernetes) or | ||||
|   [commercial](https://konghq.com/kong-enterprise/) support and maintenance for the | ||||
|   [Kong Ingress Controller for Kubernetes](https://github.com/Kong/kubernetes-ingress-controller). | ||||
| * [NGINX, Inc.](https://www.nginx.com/) offers support and maintenance for the | ||||
|   [NGINX Ingress Controller for Kubernetes](https://www.nginx.com/products/nginx/kubernetes-ingress-controller). | ||||
| * [Skipper](https://opensource.zalando.com/skipper/kubernetes/ingress-controller/) HTTP router and reverse proxy for service composition, including use cases like Kubernetes Ingress, designed as a library to build your custom proxy | ||||
| * [Traefik](https://github.com/traefik/traefik) is a fully featured ingress controller | ||||
|   ([Let's Encrypt](https://letsencrypt.org), secrets, http2, websocket), and it also comes with commercial | ||||
|   support by [Traefik Labs](https://traefik.io). | ||||
| {{% thirdparty-content %}} | ||||
| 
 | ||||
| * [AKS Application Gateway Ingress Controller](https://azure.github.io/application-gateway-kubernetes-ingress/) is an ingress controller that configures the [Azure Application Gateway](https://docs.microsoft.com/azure/application-gateway/overview). | ||||
| * [Ambassador](https://www.getambassador.io/) API Gateway is an [Envoy](https://www.envoyproxy.io)-based ingress | ||||
|   controller. | ||||
| * The [Citrix ingress controller](https://github.com/citrix/citrix-k8s-ingress-controller#readme) works with | ||||
|   Citrix Application Delivery Controller. | ||||
| * [Contour](https://projectcontour.io/) is an [Envoy](https://www.envoyproxy.io/) based ingress controller. | ||||
| * F5 BIG-IP [Container Ingress Services for Kubernetes](https://clouddocs.f5.com/containers/latest/userguide/kubernetes/) | ||||
|   lets you use an Ingress to configure F5 BIG-IP virtual servers. | ||||
| * [Gloo](https://gloo.solo.io) is an open-source ingress controller based on [Envoy](https://www.envoyproxy.io), | ||||
|   which offers API gateway functionality. | ||||
| * [HAProxy Ingress](https://haproxy-ingress.github.io/) is an ingress controller for | ||||
|   [HAProxy](http://www.haproxy.org/#desc). | ||||
| * The [HAProxy Ingress Controller for Kubernetes](https://github.com/haproxytech/kubernetes-ingress#readme) | ||||
|   is also an ingress controller for [HAProxy](http://www.haproxy.org/#desc). | ||||
| * [Istio Ingress](https://istio.io/latest/docs/tasks/traffic-management/ingress/kubernetes-ingress/) | ||||
|   is an [Istio](https://istio.io/) based ingress controller. | ||||
| * The [Kong Ingress Controller for Kubernetes](https://github.com/Kong/kubernetes-ingress-controller#readme) | ||||
|   is an ingress controller driving [Kong Gateway](https://konghq.com/kong/). | ||||
| * The [NGINX Ingress Controller for Kubernetes](https://www.nginx.com/products/nginx/kubernetes-ingress-controller) | ||||
|   works with the [NGINX](https://www.nginx.com/resources/glossary/nginx/) webserver (as a proxy). | ||||
| * [Skipper](https://opensource.zalando.com/skipper/kubernetes/ingress-controller/) HTTP router and reverse proxy for service composition, including use cases like Kubernetes Ingress, designed as a library to build your custom proxy. | ||||
| * The [Traefik Kubernetes Ingress provider](https://doc.traefik.io/traefik/providers/kubernetes-ingress/) is an | ||||
|   ingress controller for the [Traefik](https://traefik.io/traefik/) proxy. | ||||
| * [Voyager](https://appscode.com/products/voyager) is an ingress controller for | ||||
|   [HAProxy](http://www.haproxy.org/#desc). | ||||
| 
 | ||||
| ## Using multiple Ingress controllers | ||||
| 
 | ||||
|  |  | |||
|  | @ -10,12 +10,12 @@ weight: 50 | |||
| 
 | ||||
| <!-- overview --> | ||||
| 
 | ||||
| If you want to control traffic flow at the IP address or port level (OSI layer 3 or 4), then you might consider using Kubernetes NetworkPolicies for particular applications in your cluster.  NetworkPolicies are an application-centric construct which allow you to specify how a {{< glossary_tooltip text="pod" term_id="pod">}} is allowed to communicate with various network "entities" (we use the word "entity" here to avoid overloading the more common terms such as "endpoints" and "services", which have specific Kubernetes connotations) over the network.  | ||||
| If you want to control traffic flow at the IP address or port level (OSI layer 3 or 4), then you might consider using Kubernetes NetworkPolicies for particular applications in your cluster.  NetworkPolicies are an application-centric construct which allow you to specify how a {{< glossary_tooltip text="pod" term_id="pod">}} is allowed to communicate with various network "entities" (we use the word "entity" here to avoid overloading the more common terms such as "endpoints" and "services", which have specific Kubernetes connotations) over the network. | ||||
| 
 | ||||
| The entities that a Pod can communicate with are identified through a combination of the following 3 identifiers: | ||||
| 
 | ||||
| 1. Other pods that are allowed (exception: a pod cannot block access to itself) | ||||
| 2. Namespaces that are allowed  | ||||
| 2. Namespaces that are allowed | ||||
| 3. IP blocks (exception: traffic to and from the node where a Pod is running is always allowed, regardless of the IP address of the Pod or the node) | ||||
| 
 | ||||
| When defining a pod- or namespace-based NetworkPolicy, you use a {{< glossary_tooltip text="selector" term_id="selector">}} to specify what traffic is allowed to and from the Pod(s) that match the selector. | ||||
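| 
 | ||||
| As a minimal sketch (the policy name and labels are illustrative), a NetworkPolicy | ||||
| that only allows ingress to `app: web` pods from other pods in the same namespace | ||||
| might look like this: | ||||
| 
 | ||||
| ```yaml | ||||
| apiVersion: networking.k8s.io/v1 | ||||
| kind: NetworkPolicy | ||||
| metadata: | ||||
|   name: allow-from-same-namespace | ||||
| spec: | ||||
|   podSelector: | ||||
|     matchLabels: | ||||
|       app: web | ||||
|   policyTypes: | ||||
|     - Ingress | ||||
|   ingress: | ||||
|     - from: | ||||
|         # An empty podSelector selects all pods in this namespace | ||||
|         - podSelector: {} | ||||
| ``` | ||||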
|  | @ -219,14 +219,14 @@ When the feature gate is enabled, you can set the `protocol` field of a NetworkP | |||
| You must be using a {{< glossary_tooltip text="CNI" term_id="cni" >}} plugin that supports SCTP protocol NetworkPolicies. | ||||
| {{< /note >}} | ||||
| 
 | ||||
| # What you CAN'T do with network policies (at least, not yet) | ||||
| ## What you can't do with network policies (at least, not yet) | ||||
| 
 | ||||
| As of Kubernetes 1.20, the following functionality does not exist in the NetworkPolicy API, but you might be able to implement workarounds using Operating System components (such as SELinux, OpenVSwitch, IPTables, and so on) or Layer 7 technologies (Ingress controllers, Service Mesh implementations) or admission controllers.  In case you are new to network security in Kubernetes, it's worth noting that the following user stories cannot (yet) be implemented using the NetworkPolicy API.  Some (but not all) of these user stories are actively being discussed for future releases of the NetworkPolicy API. | ||||
| 
 | ||||
| - Forcing internal cluster traffic to go through a common gateway (this might be best served with a service mesh or other proxy). | ||||
| - Anything TLS related (use a service mesh or ingress controller for this). | ||||
| - Node specific policies (you can use CIDR notation for these, but you cannot target nodes by their Kubernetes identities specifically). | ||||
| - Targeting of namespaces or services by name (you can, however, target pods or namespaces by their{{< glossary_tooltip text="labels" term_id="label" >}}, which is often a viable workaround). | ||||
| - Targeting of namespaces or services by name (you can, however, target pods or namespaces by their {{< glossary_tooltip text="labels" term_id="label" >}}, which is often a viable workaround). | ||||
| - Creation or management of "Policy requests" that are fulfilled by a third party. | ||||
| - Default policies which are applied to all namespaces or pods (there are some third party Kubernetes distributions and projects which can do this). | ||||
| - Advanced policy querying and reachability tooling. | ||||
|  |  | |||
|  | @ -46,7 +46,7 @@ different purposes: | |||
|   [downwardAPI](/docs/concepts/storage/volumes/#downwardapi), | ||||
|   [secret](/docs/concepts/storage/volumes/#secret): inject different | ||||
|   kinds of Kubernetes data into a Pod | ||||
| - [CSI ephemeral volumes](#csi-ephemeral-volume): | ||||
| - [CSI ephemeral volumes](#csi-ephemeral-volumes): | ||||
|   similar to the previous volume kinds, but provided by special | ||||
|   [CSI drivers](https://github.com/container-storage-interface/spec/blob/master/spec.md) | ||||
|   which specifically [support this feature](https://kubernetes-csi.github.io/docs/drivers.html) | ||||
|  |  | |||
|  | @ -94,7 +94,7 @@ run, what volume plugin it uses (including Flex), etc. The repository | |||
| [kubernetes-sigs/sig-storage-lib-external-provisioner](https://github.com/kubernetes-sigs/sig-storage-lib-external-provisioner) | ||||
| houses a library for writing external provisioners that implements the bulk of | ||||
| the specification. Some external provisioners are listed under the repository | ||||
| [kubernetes-sigs/external-storage](https://github.com/kubernetes-sigs/external-dns). | ||||
| [kubernetes-sigs/sig-storage-lib-external-provisioner](https://github.com/kubernetes-sigs/sig-storage-lib-external-provisioner). | ||||
| 
 | ||||
| For example, NFS doesn't provide an internal provisioner, but an external | ||||
| provisioner can be used. There are also cases when 3rd party storage | ||||
|  |  | |||
|  | @ -31,6 +31,8 @@ A `VolumeSnapshot` is a request for snapshot of a volume by a user. It is simila | |||
| 
 | ||||
| `VolumeSnapshotClass` allows you to specify different attributes belonging to a `VolumeSnapshot`. These attributes may differ among snapshots taken from the same volume on the storage system and therefore cannot be expressed by using the same `StorageClass` of a `PersistentVolumeClaim`. | ||||
| 
 | ||||
| Volume snapshots provide Kubernetes users with a standardized way to copy a volume's contents at a particular point in time without creating an entirely new volume. This functionality enables, for example, database administrators to back up databases before performing edit or delete modifications. | ||||
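| 
 | ||||
| For instance, assuming the snapshot CRDs and a suitable CSI driver are installed, | ||||
| requesting a snapshot of an existing PersistentVolumeClaim can be sketched like | ||||
| this (the names, and the exact `apiVersion` of the snapshot API available in your | ||||
| cluster, are placeholders): | ||||
| 
 | ||||
| ```yaml | ||||
| apiVersion: snapshot.storage.k8s.io/v1 | ||||
| kind: VolumeSnapshot | ||||
| metadata: | ||||
|   name: snapshot-demo | ||||
| spec: | ||||
|   volumeSnapshotClassName: csi-hostpath-snapclass | ||||
|   source: | ||||
|     persistentVolumeClaimName: pvc-demo | ||||
| ``` | ||||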
| 
 | ||||
| Users need to be aware of the following when using this feature: | ||||
| 
 | ||||
| * API Objects `VolumeSnapshot`, `VolumeSnapshotContent`, and `VolumeSnapshotClass` are {{< glossary_tooltip term_id="CustomResourceDefinition" text="CRDs" >}}, not part of the core API. | ||||
|  | @ -152,6 +154,4 @@ You can provision a new volume, pre-populated with data from a snapshot, by usin | |||
| the *dataSource* field in the `PersistentVolumeClaim` object. | ||||
| 
 | ||||
| For more details, see | ||||
| [Volume Snapshot and Restore Volume from Snapshot](/docs/concepts/storage/persistent-volumes/#volume-snapshot-and-restore-volume-from-snapshot-support). | ||||
| 
 | ||||
| 
 | ||||
| [Volume Snapshot and Restore Volume from Snapshot](/docs/concepts/storage/persistent-volumes/#volume-snapshot-and-restore-volume-from-snapshot-support). | ||||
|  | @ -1367,7 +1367,7 @@ For more information on how to develop a CSI driver, refer to the | |||
| 
 | ||||
| #### Migrating to CSI drivers from in-tree plugins | ||||
| 
 | ||||
| {{< feature-state for_k8s_version="v1.17" state="alpha" >}} | ||||
| {{< feature-state for_k8s_version="v1.17" state="beta" >}} | ||||
| 
 | ||||
| The `CSIMigration` feature, when enabled, directs operations against existing in-tree | ||||
| plugins to corresponding CSI plugins (which are expected to be installed and configured). | ||||
|  |  | |||
|  | @ -150,7 +150,7 @@ remembered and reused, even after the Pod is running, for at least a few seconds | |||
| If you need to discover Pods promptly after they are created, you have a few options: | ||||
| 
 | ||||
| - Query the Kubernetes API directly (for example, using a watch) rather than relying on DNS lookups. | ||||
| - Decrease the time of caching in your Kubernetes DNS provider (tpyically this means editing the config map for CoreDNS, which currently caches for 30 seconds). | ||||
| - Decrease the time of caching in your Kubernetes DNS provider (typically this means editing the config map for CoreDNS, which currently caches for 30 seconds). | ||||
| 
 | ||||
| 
 | ||||
| As mentioned in the [limitations](#limitations) section, you are responsible for | ||||
|  |  | |||
|  | @ -172,14 +172,18 @@ spec: | |||
|     # The pod template ends here | ||||
| ``` | ||||
| 
 | ||||
| Modifying the pod template or switching to a new pod template has no effect on the | ||||
| Pods that already exist. Pods do not receive template updates directly. Instead, | ||||
| a new Pod is created to match the revised pod template. | ||||
| Modifying the pod template or switching to a new pod template has no direct effect | ||||
| on the Pods that already exist. If you change the pod template for a workload | ||||
| resource, that resource needs to create replacement Pods that use the updated template. | ||||
| 
 | ||||
| For example, the deployment controller ensures that the running Pods match the current | ||||
| pod template for each Deployment object. If the template is updated, the Deployment has | ||||
| to remove the existing Pods and create new Pods based on the updated template. Each workload | ||||
| resource implements its own rules for handling changes to the Pod template. | ||||
| For example, the StatefulSet controller ensures that the running Pods match the current | ||||
| pod template for each StatefulSet object. If you edit the StatefulSet to change its pod | ||||
| template, the StatefulSet starts to create new Pods based on the updated template. | ||||
| Eventually, all of the old Pods are replaced with new Pods, and the update is complete. | ||||
| 
 | ||||
| Each workload resource implements its own rules for handling changes to the Pod template. | ||||
| If you want to read more about StatefulSet specifically, read | ||||
| [Update strategy](/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets) in the StatefulSet Basics tutorial. | ||||
| 
 | ||||
| On Nodes, the {{< glossary_tooltip term_id="kubelet" text="kubelet" >}} does not | ||||
| directly observe or manage any of the details around pod templates and updates; those | ||||
|  |  | |||
|  | @ -142,7 +142,7 @@ The `restartPolicy` applies to all containers in the Pod. `restartPolicy` only | |||
| refers to restarts of the containers by the kubelet on the same node. After containers | ||||
| in a Pod exit, the kubelet restarts them with an exponential back-off delay (10s, 20s, | ||||
| 40s, …), that is capped at five minutes. Once a container has executed for 10 minutes | ||||
| without any problems, the kubelet resets the restart backoff timer forthat container. | ||||
| without any problems, the kubelet resets the restart backoff timer for that container. | ||||
| 
 | ||||
| ## Pod conditions | ||||
| 
 | ||||
|  |  | |||
|  | @ -4,11 +4,21 @@ content_type: concept | |||
| weight: 40 | ||||
| --- | ||||
| 
 | ||||
| {{< feature-state for_k8s_version="v1.19" state="stable" >}} | ||||
| <!-- leave this shortcode in place until the note about EvenPodsSpread is | ||||
| obsolete --> | ||||
| 
 | ||||
| <!-- overview --> | ||||
| 
 | ||||
| You can use _topology spread constraints_ to control how {{< glossary_tooltip text="Pods" term_id="Pod" >}} are spread across your cluster among failure-domains such as regions, zones, nodes, and other user-defined topology domains. This can help to achieve high availability as well as efficient resource utilization. | ||||
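| 
 | ||||
| For example, a Pod spec that spreads matching Pods evenly across zones could be | ||||
| sketched like this (the `app: web` label is illustrative): | ||||
| 
 | ||||
| ```yaml | ||||
| spec: | ||||
|   topologySpreadConstraints: | ||||
|     - maxSkew: 1 | ||||
|       topologyKey: topology.kubernetes.io/zone | ||||
|       whenUnsatisfiable: DoNotSchedule | ||||
|       labelSelector: | ||||
|         matchLabels: | ||||
|           app: web | ||||
| ``` | ||||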
| 
 | ||||
| 
 | ||||
| {{< note >}} | ||||
| In versions of Kubernetes before v1.19, you must enable the `EvenPodsSpread` | ||||
| [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) on | ||||
| the [API server](/docs/concepts/overview/components/#kube-apiserver) and the | ||||
| [scheduler](/docs/reference/generated/kube-scheduler/) in order to use Pod | ||||
| topology spread constraints. | ||||
| {{< /note >}} | ||||
| 
 | ||||
| <!-- body --> | ||||
| 
 | ||||
|  | @ -274,8 +284,6 @@ There are some implicit conventions worth noting here: | |||
| 
 | ||||
| ### Cluster-level default constraints | ||||
| 
 | ||||
| {{< feature-state for_k8s_version="v1.19" state="beta" >}} | ||||
| 
 | ||||
| It is possible to set default topology spread constraints for a cluster. Default | ||||
| topology spread constraints are applied to a Pod if, and only if: | ||||
| 
 | ||||
|  |  | |||
|  | @ -1,6 +1,6 @@ | |||
| --- | ||||
| content_type: concept | ||||
| title: Contribute to Kubernetes docs | ||||
| title: Contribute to K8s docs | ||||
| linktitle: Contribute | ||||
| main_menu: true | ||||
| no_list: true | ||||
|  | @ -8,7 +8,7 @@ weight: 80 | |||
| card: | ||||
|   name: contribute | ||||
|   weight: 10 | ||||
|   title: Start contributing | ||||
|   title: Start contributing to K8s | ||||
| --- | ||||
| 
 | ||||
| <!-- overview --> | ||||
|  |  | |||
|  | @ -43,7 +43,7 @@ When opening a pull request, you need to know in advance which branch to base yo | |||
| Scenario | Branch | ||||
| :---------|:------------ | ||||
| Existing or new English language content for the current release | `master` | ||||
| Content for a feature change release | The branch which corresponds to the major and minor version the feature change is in, using the pattern `dev-release-<version>`. For example, if a feature changes in the `{{< latest-version >}}` release, then add documentation changes to the ``dev-{{< release-branch >}}`` branch. | ||||
| Content for a feature change release | The branch which corresponds to the major and minor version the feature change is in, using the pattern `dev-<version>`. For example, if a feature changes in the `v{{< skew nextMinorVersion >}}` release, then add documentation changes to the ``dev-{{< skew nextMinorVersion >}}`` branch. | ||||
| Content in other languages (localizations) | Use the localization's convention. See the [Localization branching strategy](/docs/contribute/localization/#branching-strategy) for more information. | ||||
| 
 | ||||
| 
 | ||||
|  |  | |||
|  | @ -32,7 +32,7 @@ cards: | |||
|   button: "View Tutorials" | ||||
|   button_path: "/docs/tutorials" | ||||
| - name: setup | ||||
|   title: "Set up a cluster" | ||||
|   title: "Set up a K8s cluster" | ||||
|   description: "Get Kubernetes running based on your resources and needs." | ||||
|   button: "Set up Kubernetes" | ||||
|   button_path: "/docs/setup" | ||||
|  | @ -57,7 +57,7 @@ cards: | |||
|   button: Contribute to the docs | ||||
|   button_path: /docs/contribute | ||||
| - name: release-notes | ||||
|   title: Release Notes | ||||
|   title: K8s Release Notes | ||||
|   description: If you are installing Kubernetes or upgrading to the newest version, refer to the current release notes. | ||||
|   button: "Download Kubernetes" | ||||
|   button_path: "/docs/setup/release/notes" | ||||
|  |  | |||
|  | @ -91,7 +91,7 @@ To see which admission plugins are enabled: | |||
| kube-apiserver -h | grep enable-admission-plugins | ||||
| ``` | ||||
| 
 | ||||
| In 1.18, they are: | ||||
| In the current version, the default ones are: | ||||
| 
 | ||||
| ```shell | ||||
| NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, Priority, DefaultTolerationSeconds, DefaultStorageClass, StorageObjectInUseProtection, PersistentVolumeClaimResize, RuntimeClass, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, ResourceQuota | ||||
|  | @ -534,8 +534,8 @@ and kubelets will not be allowed to modify labels with that prefix. | |||
|   * `kubernetes.io/os` | ||||
|   * `beta.kubernetes.io/instance-type` | ||||
|   * `node.kubernetes.io/instance-type` | ||||
|   * `failure-domain.beta.kubernetes.io/region` | ||||
|   * `failure-domain.beta.kubernetes.io/zone` | ||||
|   * `failure-domain.beta.kubernetes.io/region` (deprecated) | ||||
|   * `failure-domain.beta.kubernetes.io/zone` (deprecated) | ||||
|   * `topology.kubernetes.io/region` | ||||
|   * `topology.kubernetes.io/zone` | ||||
|   * `kubelet.kubernetes.io/`-prefixed labels | ||||
|  |  | |||
|  | @ -282,7 +282,33 @@ from the OAuth2 [token response](https://openid.net/specs/openid-connect-core-1_ | |||
| as a bearer token.  See [above](#putting-a-bearer-token-in-a-request) for how the token | ||||
| is included in a request. | ||||
| 
 | ||||
|  | ||||
| {{< mermaid >}} | ||||
| sequenceDiagram | ||||
|     participant user as User | ||||
|     participant idp as Identity Provider | ||||
|     participant kube as Kubectl | ||||
|     participant api as API Server | ||||
| 
 | ||||
|     user ->> idp: 1. Login to IdP | ||||
|     activate idp | ||||
|     idp -->> user: 2. Provide access_token,<br>id_token, and refresh_token | ||||
|     deactivate idp | ||||
|     activate user | ||||
|     user ->> kube: 3. Call Kubectl<br>with --token being the id_token<br>OR add tokens to .kube/config | ||||
|     deactivate user | ||||
|     activate kube | ||||
|     kube ->> api: 4. Authorization: Bearer... | ||||
|     deactivate kube | ||||
|     activate api | ||||
|     api ->> api: 5. Is JWT signature valid? | ||||
|     api ->> api: 6. Has the JWT expired? (iat+exp) | ||||
|     api ->> api: 7. User authorized? | ||||
|     api -->> kube: 8. Authorized: Perform<br>action and return result | ||||
|     deactivate api | ||||
|     activate kube | ||||
|     kube --x user: 9. Return result | ||||
|     deactivate kube | ||||
| {{< /mermaid >}} | ||||
| 
 | ||||
| 1.  Login to your identity provider | ||||
| 2.  Your identity provider will provide you with an `access_token`, `id_token` and a `refresh_token` | ||||
|  | @ -328,7 +354,7 @@ tokens on behalf of another. | |||
| Kubernetes does not provide an OpenID Connect Identity Provider. | ||||
| You can use an existing public OpenID Connect Identity Provider (such as Google, or | ||||
| [others](https://connect2id.com/products/nimbus-oauth-openid-connect-sdk/openid-connect-providers)). | ||||
| Or, you can run your own Identity Provider, such as CoreOS [dex](https://github.com/coreos/dex), | ||||
| Or, you can run your own Identity Provider, such as [dex](https://dexidp.io/), | ||||
| [Keycloak](https://github.com/keycloak/keycloak), | ||||
| CloudFoundry [UAA](https://github.com/cloudfoundry/uaa), or | ||||
| Tremolo Security's [OpenUnison](https://github.com/tremolosecurity/openunison). | ||||
|  | @ -339,13 +365,13 @@ For an identity provider to work with Kubernetes it must: | |||
| 2.  Run in TLS with non-obsolete ciphers | ||||
| 3.  Have a CA signed certificate (even if the CA is not a commercial CA or is self signed) | ||||
| 
 | ||||
| A note about requirement #3 above, requiring a CA signed certificate.  If you deploy your own identity provider (as opposed to one of the cloud providers like Google or Microsoft) you MUST have your identity provider's web server certificate signed by a certificate with the `CA` flag set to `TRUE`, even if it is self signed.  This is due to GoLang's TLS client implementation being very strict to the standards around certificate validation.  If you don't have a CA handy, you can use [this script](https://github.com/coreos/dex/blob/1ee5920c54f5926d6468d2607c728b71cfe98092/examples/k8s/gencert.sh) from the CoreOS team to create a simple CA and a signed certificate and key pair. | ||||
| A note about requirement #3 above, requiring a CA signed certificate.  If you deploy your own identity provider (as opposed to one of the cloud providers like Google or Microsoft) you MUST have your identity provider's web server certificate signed by a certificate with the `CA` flag set to `TRUE`, even if it is self signed.  This is due to GoLang's TLS client implementation being very strict to the standards around certificate validation.  If you don't have a CA handy, you can use [this script](https://github.com/dexidp/dex/blob/master/examples/k8s/gencert.sh) from the Dex team to create a simple CA and a signed certificate and key pair. | ||||
| Or you can use [this similar script](https://raw.githubusercontent.com/TremoloSecurity/openunison-qs-kubernetes/master/src/main/bash/makessl.sh) that generates SHA256 certs with a longer life and larger key size. | ||||
| 
 | ||||
| Setup instructions for specific systems: | ||||
| 
 | ||||
| - [UAA](https://docs.cloudfoundry.org/concepts/architecture/uaa.html) | ||||
| - [Dex](https://github.com/dexidp/dex/blob/master/Documentation/kubernetes.md) | ||||
| - [Dex](https://dexidp.io/docs/kubernetes/) | ||||
| - [OpenUnison](https://www.tremolosecurity.com/orchestra-k8s/) | ||||
| 
 | ||||
| #### Using kubectl | ||||
|  |  | |||
|  | @ -23,96 +23,108 @@ incomplete features are referred to in order to better describe service accounts | |||
| Kubernetes distinguishes between the concept of a user account and a service account | ||||
| for a number of reasons: | ||||
| 
 | ||||
|   - User accounts are for humans. Service accounts are for processes, which | ||||
|     run in pods. | ||||
|   - User accounts are intended to be global. Names must be unique across all | ||||
|     namespaces of a cluster, future user resource will not be namespaced. | ||||
|     Service accounts are namespaced. | ||||
|   - Typically, a cluster's User accounts might be synced from a corporate | ||||
|     database, where new user account creation requires special privileges and | ||||
|     is tied to complex business processes. Service account creation is intended | ||||
|     to be more lightweight, allowing cluster users to create service accounts for | ||||
|     specific tasks (i.e. principle of least privilege). | ||||
|   - Auditing considerations for humans and service accounts may differ. | ||||
|   - A config bundle for a complex system may include definition of various service | ||||
|     accounts for components of that system.  Because service accounts can be created | ||||
|     ad-hoc and have namespaced names, such config is portable. | ||||
| - User accounts are for humans. Service accounts are for processes, which run | ||||
|   in pods. | ||||
| - User accounts are intended to be global. Names must be unique across all | ||||
|   namespaces of a cluster. Service accounts are namespaced. | ||||
| - Typically, a cluster's user accounts might be synced from a corporate | ||||
|   database, where new user account creation requires special privileges and is | ||||
|   tied to complex business processes. Service account creation is intended to be | ||||
|   more lightweight, allowing cluster users to create service accounts for | ||||
|   specific tasks by following the principle of least privilege. | ||||
| - Auditing considerations for humans and service accounts may differ. | ||||
| - A config bundle for a complex system may include definition of various service | ||||
|   accounts for components of that system. Because service accounts can be created | ||||
|   without many constraints and have namespaced names, such config is portable. | ||||
| 
 | ||||
| ## Service account automation | ||||
| 
 | ||||
| Three separate components cooperate to implement the automation around service accounts: | ||||
| 
 | ||||
|   - A Service account admission controller | ||||
|   - A Token controller | ||||
|   - A Service account controller | ||||
| - A `ServiceAccount` admission controller | ||||
| - A Token controller | ||||
| - A `ServiceAccount` controller | ||||
| 
 | ||||
| ### Service Account Admission Controller | ||||
| ### ServiceAccount Admission Controller | ||||
| 
 | ||||
| The modification of pods is implemented via a plugin | ||||
| called an [Admission Controller](/docs/reference/access-authn-authz/admission-controllers/). It is part of the apiserver. | ||||
| called an [Admission Controller](/docs/reference/access-authn-authz/admission-controllers/). | ||||
| It is part of the API server. | ||||
| It acts synchronously to modify pods as they are created or updated. When this plugin is active | ||||
| (and it is by default on most distributions), then it does the following when a pod is created or modified: | ||||
| 
 | ||||
|   1. If the pod does not have a `ServiceAccount` set, it sets the `ServiceAccount` to `default`. | ||||
|   1. It ensures that the `ServiceAccount` referenced by the pod exists, and otherwise rejects it. | ||||
|   1. If the pod does not contain any `ImagePullSecrets`, then `ImagePullSecrets` of the `ServiceAccount` are added to the pod. | ||||
|   1. It adds a `volume` to the pod which contains a token for API access. | ||||
|   1. It adds a `volumeSource` to each container of the pod mounted at `/var/run/secrets/kubernetes.io/serviceaccount`. | ||||
| 1. If the pod does not have a `serviceAccountName` set, it sets the | ||||
|    `serviceAccountName` to `default`. | ||||
| 1. It ensures that the `serviceAccountName` referenced by the pod exists, and | ||||
|    otherwise rejects it. | ||||
| 1. If the pod does not contain any `imagePullSecrets`, then `imagePullSecrets` | ||||
|    of the ServiceAccount referenced by `serviceAccountName` are added to the pod. | ||||
| 1. It adds a `volume` to the pod which contains a token for API access | ||||
|    if neither the ServiceAccount `automountServiceAccountToken` nor the Pod's | ||||
|    `automountServiceAccountToken` is set to `false`. | ||||
| 1. It adds a `volumeSource` to each container of the pod mounted at | ||||
|    `/var/run/secrets/kubernetes.io/serviceaccount`, if the previous step has | ||||
|    created a volume for ServiceAccount token. | ||||
| 
 | ||||
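| As a sketch of how a workload can opt out of the token mount described in steps 4 and 5 above (the names here are hypothetical), you can set `automountServiceAccountToken: false` on the Pod, or on the ServiceAccount itself: | ||||
| 
 | ||||
| ```yaml | ||||
| apiVersion: v1 | ||||
| kind: Pod | ||||
| metadata: | ||||
|   name: no-token-demo                  # hypothetical name | ||||
| spec: | ||||
|   serviceAccountName: build-robot      # hypothetical ServiceAccount | ||||
|   automountServiceAccountToken: false  # skip the automatic token volume | ||||
|   containers: | ||||
|   - name: app | ||||
|     image: busybox | ||||
|     command: ["sleep", "3600"] | ||||
| ``` | ||||
| 
 | ||||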
| Starting from v1.13, you can migrate a service account volume to a projected volume when | ||||
| You can migrate a service account volume to a projected volume when | ||||
| the `BoundServiceAccountTokenVolume` feature gate is enabled. | ||||
| The service account token will expire after 1 hour or the pod is deleted. See more details about [projected volume](/docs/tasks/configure-pod-container/configure-projected-volume-storage/). | ||||
| The service account token expires after 1 hour or when the pod is deleted. See | ||||
| more details about | ||||
| [projected volume](/docs/tasks/configure-pod-container/configure-projected-volume-storage/). | ||||
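| 
 | ||||
| As a rough sketch of what such a projected token volume looks like in a Pod manifest (the names and the expiration value are illustrative): | ||||
| 
 | ||||
| ```yaml | ||||
| apiVersion: v1 | ||||
| kind: Pod | ||||
| metadata: | ||||
|   name: projected-token-demo        # hypothetical name | ||||
| spec: | ||||
|   serviceAccountName: build-robot   # hypothetical ServiceAccount | ||||
|   containers: | ||||
|   - name: app | ||||
|     image: busybox | ||||
|     command: ["sleep", "3600"] | ||||
|     volumeMounts: | ||||
|     - name: kube-api-access | ||||
|       mountPath: /var/run/secrets/kubernetes.io/serviceaccount | ||||
|       readOnly: true | ||||
|   volumes: | ||||
|   - name: kube-api-access | ||||
|     projected: | ||||
|       sources: | ||||
|       - serviceAccountToken: | ||||
|           path: token | ||||
|           expirationSeconds: 3600   # the kubelet rotates the token before it expires | ||||
| ``` | ||||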
| 
 | ||||
| ### Token Controller | ||||
| 
 | ||||
| TokenController runs as part of controller-manager. It acts asynchronously. It: | ||||
| TokenController runs as part of `kube-controller-manager`. It acts asynchronously. It: | ||||
| 
 | ||||
| - observes serviceAccount creation and creates a corresponding Secret to allow API access. | ||||
| - observes serviceAccount deletion and deletes all corresponding ServiceAccountToken Secrets. | ||||
| - observes secret addition, and ensures the referenced ServiceAccount exists, and adds a token to the secret if needed. | ||||
| - observes secret deletion and removes a reference from the corresponding ServiceAccount if needed. | ||||
| - watches ServiceAccount creation and creates a corresponding | ||||
|   ServiceAccount token Secret to allow API access. | ||||
| - watches ServiceAccount deletion and deletes all corresponding ServiceAccount | ||||
|   token Secrets. | ||||
| - watches ServiceAccount token Secret addition, and ensures the referenced | ||||
|   ServiceAccount exists, and adds a token to the Secret if needed. | ||||
| - watches Secret deletion and removes a reference from the corresponding | ||||
|   ServiceAccount if needed. | ||||
| 
 | ||||
| You must pass a service account private key file to the token controller in the controller-manager by using | ||||
| the `--service-account-private-key-file` option. The private key will be used to sign generated service account tokens. | ||||
| Similarly, you must pass the corresponding public key to the kube-apiserver using the `--service-account-key-file` | ||||
| option. The public key will be used to verify the tokens during authentication. | ||||
| You must pass a service account private key file to the token controller in | ||||
| the `kube-controller-manager` using the `--service-account-private-key-file` | ||||
| flag. The private key is used to sign generated service account tokens. | ||||
| Similarly, you must pass the corresponding public key to the `kube-apiserver` | ||||
| using the `--service-account-key-file` flag. The public key will be used to | ||||
| verify the tokens during authentication. | ||||
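| 
 | ||||
| As a minimal sketch (the key file paths are placeholders), the two flags might be wired up like this: | ||||
| 
 | ||||
| ```shell | ||||
| # On the controller manager: sign newly generated service account tokens (other flags omitted) | ||||
| kube-controller-manager --service-account-private-key-file=/etc/kubernetes/pki/sa.key | ||||
| 
 | ||||
| # On the API server: verify bearer tokens during authentication (other flags omitted) | ||||
| kube-apiserver --service-account-key-file=/etc/kubernetes/pki/sa.pub | ||||
| ``` | ||||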
| 
 | ||||
| #### To create additional API tokens | ||||
| 
 | ||||
| A controller loop ensures a secret with an API token exists for each service | ||||
| account. To create additional API tokens for a service account, create a secret | ||||
| of type `ServiceAccountToken` with an annotation referencing the service | ||||
| account, and the controller will update it with a generated token: | ||||
| A controller loop ensures a Secret with an API token exists for each | ||||
| ServiceAccount. To create additional API tokens for a ServiceAccount, create a | ||||
| Secret of type `kubernetes.io/service-account-token` with an annotation | ||||
| referencing the ServiceAccount, and the controller will update it with a | ||||
| generated token: | ||||
| 
 | ||||
| secret.json: | ||||
| Below is a sample configuration for such a Secret: | ||||
| 
 | ||||
| ```json | ||||
| { | ||||
|     "kind": "Secret", | ||||
|     "apiVersion": "v1", | ||||
|     "metadata": { | ||||
|         "name": "mysecretname", | ||||
|         "annotations": { | ||||
|             "kubernetes.io/service-account.name": "myserviceaccount" | ||||
|         } | ||||
|     }, | ||||
|     "type": "kubernetes.io/service-account-token" | ||||
| } | ||||
| ```yaml | ||||
| apiVersion: v1 | ||||
| kind: Secret | ||||
| metadata: | ||||
|   name: mysecretname | ||||
|   annotations: | ||||
|     kubernetes.io/service-account.name: myserviceaccount | ||||
| type: kubernetes.io/service-account-token | ||||
| ``` | ||||
| 
 | ||||
| ```shell | ||||
| kubectl create -f ./secret.json | ||||
| kubectl create -f ./secret.yaml | ||||
| kubectl describe secret mysecretname | ||||
| ``` | ||||
| 
 | ||||
| #### To delete/invalidate a service account token | ||||
| #### To delete/invalidate a ServiceAccount token Secret | ||||
| 
 | ||||
| ```shell | ||||
| kubectl delete secret mysecretname | ||||
| ``` | ||||
| 
 | ||||
| ### Service Account Controller | ||||
| ### ServiceAccount controller | ||||
| 
 | ||||
| Service Account Controller manages ServiceAccount inside namespaces, and ensures | ||||
| a ServiceAccount named "default" exists in every active namespace. | ||||
| A ServiceAccount controller manages the ServiceAccounts inside namespaces, and | ||||
| ensures a ServiceAccount named "default" exists in every active namespace. | ||||
| 
 | ||||
|  |  | |||
|  | @ -83,8 +83,6 @@ different Kubernetes components. | |||
| | `CSIVolumeFSGroupPolicy` | `false` | Alpha | 1.19 | | | ||||
| | `ConfigurableFSGroupPolicy` | `false` | Alpha | 1.18 | | | ||||
| | `CustomCPUCFSQuotaPeriod` | `false` | Alpha | 1.12 | | | ||||
| | `CustomResourceDefaulting` | `false` | Alpha| 1.15 | 1.15 | | ||||
| | `CustomResourceDefaulting` | `true` | Beta | 1.16 | | | ||||
| | `DefaultPodTopologySpread` | `false` | Alpha | 1.19 | | | ||||
| | `DevicePlugins` | `false` | Alpha | 1.8 | 1.9 | | ||||
| | `DevicePlugins` | `true` | Beta | 1.10 | | | ||||
|  | @ -138,12 +136,11 @@ different Kubernetes components. | |||
| | `RuntimeClass` | `true` | Beta | 1.14 | | | ||||
| | `SCTPSupport` | `false` | Alpha | 1.12 | 1.18 | | ||||
| | `SCTPSupport` | `true` | Beta | 1.19 | | | ||||
| | `ServiceAppProtocol` | `false` | Alpha | 1.18 | 1.18 | | ||||
| | `ServiceAppProtocol` | `true` | Beta | 1.19 | | | ||||
| | `ServerSideApply` | `false` | Alpha | 1.14 | 1.15 | | ||||
| | `ServerSideApply` | `true` | Beta | 1.16 | | | ||||
| | `ServiceAccountIssuerDiscovery` | `false` | Alpha | 1.18 | | | ||||
| | `ServiceAppProtocol` | `false` | Alpha | 1.18 | | | ||||
| | `ServiceAppProtocol` | `false` | Alpha | 1.18 | 1.18 | | ||||
| | `ServiceAppProtocol` | `true` | Beta | 1.19 | | | ||||
| | `ServiceNodeExclusion` | `false` | Alpha | 1.8 | 1.18 | | ||||
| | `ServiceNodeExclusion` | `true` | Beta | 1.19 | | | ||||
| | `ServiceTopology` | `false` | Alpha | 1.17 | | | ||||
|  | @ -210,6 +207,9 @@ different Kubernetes components. | |||
| | `CustomPodDNS` | `false` | Alpha | 1.9 | 1.9 | | ||||
| | `CustomPodDNS` | `true` | Beta| 1.10 | 1.13 | | ||||
| | `CustomPodDNS` | `true` | GA | 1.14 | - | | ||||
| | `CustomResourceDefaulting` | `false` | Alpha| 1.15 | 1.15 | | ||||
| | `CustomResourceDefaulting` | `true` | Beta | 1.16 | 1.16 | | ||||
| | `CustomResourceDefaulting` | `true` | GA | 1.17 | - | | ||||
| | `CustomResourcePublishOpenAPI` | `false` | Alpha| 1.14 | 1.14 | | ||||
| | `CustomResourcePublishOpenAPI` | `true` | Beta| 1.15 | 1.15 | | ||||
| | `CustomResourcePublishOpenAPI` | `true` | GA | 1.16 | - | | ||||
|  |  | |||
|  | @ -243,7 +243,7 @@ for example: | |||
| The validity duration of signed certificates can be configured with flag: | ||||
| 
 | ||||
| ``` | ||||
| --experimental-cluster-signing-duration | ||||
| --cluster-signing-duration | ||||
| ``` | ||||
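| 
 | ||||
| For example (an illustrative value, not a recommendation), on the kube-controller-manager this might look like: | ||||
| 
 | ||||
| ```shell | ||||
| # Issue signed certificates that are valid for roughly one year (other flags omitted) | ||||
| kube-controller-manager --cluster-signing-duration=8760h | ||||
| ``` | ||||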
| 
 | ||||
| ### Approval | ||||
|  |  | |||
|  | @ -967,7 +967,7 @@ WindowsEndpointSliceProxying=true|false (ALPHA - default=false)<br/> | |||
| <td colspan="2">--node-labels mapStringString</td> | ||||
| </tr> | ||||
| <tr> | ||||
| <td></td><td style="line-height: 130%; word-wrap: break-word;"><Warning: Alpha feature> Labels to add when registering the node in the cluster. Labels must be `key=value` pairs separated by `,`. Labels in the `kubernetes.io` namespace must begin with an allowed prefix (`kubelet.kubernetes.io`, `node.kubernetes.io`) or be in the specifically allowed set (`beta.kubernetes.io/arch`, `beta.kubernetes.io/instance-type`, `beta.kubernetes.io/os`, `failure-domain.beta.kubernetes.io/region`, `failure-domain.beta.kubernetes.io/zone`, `failure-domain.kubernetes.io/region`, `failure-domain.kubernetes.io/zone`, `kubernetes.io/arch`, `kubernetes.io/hostname`, `kubernetes.io/instance-type`, `kubernetes.io/os`)</td> | ||||
| <td></td><td style="line-height: 130%; word-wrap: break-word;"><Warning: Alpha feature>Labels to add when registering the node in the cluster. Labels must be `key=value pairs` separated by `,`. Labels in the `kubernetes.io` namespace must begin with an allowed prefix (`kubelet.kubernetes.io`, `node.kubernetes.io`) or be in the specifically allowed set (`beta.kubernetes.io/arch`, `beta.kubernetes.io/instance-type`, `beta.kubernetes.io/os`, `failure-domain.beta.kubernetes.io/region`, `failure-domain.beta.kubernetes.io/zone`, `kubernetes.io/arch`, `kubernetes.io/hostname`, `kubernetes.io/os`, `node.kubernetes.io/instance-type`, `topology.kubernetes.io/region`, `topology.kubernetes.io/zone`)</td> | ||||
| </tr> | ||||
| 
 | ||||
| <tr> | ||||
|  |  | |||
|  | @ -0,0 +1,19 @@ | |||
| --- | ||||
| title: Object | ||||
| id: object | ||||
| date: 2020-10-12 | ||||
| full_link: https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/#kubernetes-objects | ||||
| short_description: > | ||||
|    An entity in the Kubernetes system, representing part of the state of your cluster. | ||||
| aka:  | ||||
| tags: | ||||
| - fundamental | ||||
| --- | ||||
| An entity in the Kubernetes system. The Kubernetes API uses these entities to represent the state | ||||
| of your cluster. | ||||
| <!--more--> | ||||
| A Kubernetes object is typically a “record of intent”—once you create the object, the Kubernetes | ||||
| {{< glossary_tooltip text="control plane" term_id="control-plane" >}} works constantly to ensure | ||||
| that the item it represents actually exists. | ||||
| By creating an object, you're effectively telling the Kubernetes system what you want that part of | ||||
| your cluster's workload to look like; this is your cluster's desired state. | ||||
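| 
 | ||||
| As a small, hypothetical illustration, creating the object below records the intent that two replicas of an nginx Pod should be running; the control plane then works to keep the cluster in that state: | ||||
| 
 | ||||
| ```yaml | ||||
| apiVersion: apps/v1 | ||||
| kind: Deployment | ||||
| metadata: | ||||
|   name: example            # hypothetical name | ||||
| spec: | ||||
|   replicas: 2              # the desired state the control plane maintains | ||||
|   selector: | ||||
|     matchLabels: | ||||
|       app: example | ||||
|   template: | ||||
|     metadata: | ||||
|       labels: | ||||
|         app: example | ||||
|     spec: | ||||
|       containers: | ||||
|       - name: nginx | ||||
|         image: nginx:1.19 | ||||
| ``` | ||||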
|  | @ -90,6 +90,13 @@ kubectl apply -f ./my1.yaml -f ./my2.yaml      # create from multiple files | |||
| kubectl apply -f ./dir                         # create resource(s) in all manifest files in dir | ||||
| kubectl apply -f https://git.io/vPieo          # create resource(s) from url | ||||
| kubectl create deployment nginx --image=nginx  # start a single instance of nginx | ||||
| 
 | ||||
| # create a Job which prints "Hello World" | ||||
| kubectl create job hello --image=busybox -- echo "Hello World"  | ||||
| 
 | ||||
| # create a CronJob that prints "Hello World" every minute | ||||
| kubectl create cronjob hello --image=busybox --schedule="*/1 * * * *" -- echo "Hello World" | ||||
| 
 | ||||
| kubectl explain pods                           # get the documentation for pod manifests | ||||
| 
 | ||||
| # Create multiple YAML objects from stdin | ||||
|  |  | |||
|  | @ -61,7 +61,7 @@ for example `create`, `get`, `describe`, `delete`. | |||
|    * To specify resources with one or more files:  `-f file1 -f file2 -f file<#>` | ||||
| 
 | ||||
|       * [Use YAML rather than JSON](/docs/concepts/configuration/overview/#general-configuration-tips) since YAML tends to be more user-friendly, especially for configuration files.<br/> | ||||
|      Example: `kubectl get pod -f ./pod.yaml` | ||||
|      Example: `kubectl get -f ./pod.yaml` | ||||
| 
 | ||||
| * `flags`: Specifies optional flags. For example, you can use the `-s` or `--server` flags to specify the address and port of the Kubernetes API server.<br/> | ||||
| 
 | ||||
|  |  | |||
|  | @ -38,7 +38,7 @@ This label has been deprecated. Please use `kubernetes.io/arch` instead. | |||
| 
 | ||||
| This label has been deprecated. Please use `kubernetes.io/os` instead. | ||||
| 
 | ||||
| ## kubernetes.io/hostname | ||||
| ## kubernetes.io/hostname {#kubernetesiohostname} | ||||
| 
 | ||||
| Example: `kubernetes.io/hostname=ip-172-20-114-199.ec2.internal` | ||||
| 
 | ||||
|  | @ -46,6 +46,8 @@ Used on: Node | |||
| 
 | ||||
| The Kubelet populates this label with the hostname. Note that the hostname can be changed from the "actual" hostname by passing the `--hostname-override` flag to the `kubelet`. | ||||
| 
 | ||||
| This label is also used as part of the topology hierarchy.  See [topology.kubernetes.io/zone](#topologykubernetesiozone) for more information. | ||||
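| 
 | ||||
| For illustration (reusing the example hostname above; the Pod name is hypothetical), this label can be used in a `nodeSelector` to pin a Pod to a specific node: | ||||
| 
 | ||||
| ```yaml | ||||
| apiVersion: v1 | ||||
| kind: Pod | ||||
| metadata: | ||||
|   name: pinned-pod        # hypothetical name | ||||
| spec: | ||||
|   nodeSelector: | ||||
|     kubernetes.io/hostname: ip-172-20-114-199.ec2.internal | ||||
|   containers: | ||||
|   - name: app | ||||
|     image: busybox | ||||
|     command: ["sleep", "3600"] | ||||
| ``` | ||||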
| 
 | ||||
| ## beta.kubernetes.io/instance-type (deprecated) | ||||
| 
 | ||||
| {{< note >}} Starting in v1.17, this label is deprecated in favor of [node.kubernetes.io/instance-type](#nodekubernetesioinstance-type). {{< /note >}} | ||||
|  | @ -63,71 +65,52 @@ to rely on the Kubernetes scheduler to perform resource-based scheduling. You sh | |||
| 
 | ||||
| ## failure-domain.beta.kubernetes.io/region (deprecated) {#failure-domainbetakubernetesioregion} | ||||
| 
 | ||||
| See [failure-domain.beta.kubernetes.io/zone](#failure-domainbetakubernetesiozone). | ||||
| See [topology.kubernetes.io/region](#topologykubernetesioregion). | ||||
| 
 | ||||
| {{< note >}} Starting in v1.17, this label is deprecated in favor of [topology.kubernetes.io/region](#topologykubernetesioregion). {{< /note >}} | ||||
| 
 | ||||
| ## failure-domain.beta.kubernetes.io/zone (deprecated) {#failure-domainbetakubernetesiozone} | ||||
| 
 | ||||
| Example: | ||||
| 
 | ||||
| `failure-domain.beta.kubernetes.io/region=us-east-1` | ||||
| 
 | ||||
| `failure-domain.beta.kubernetes.io/zone=us-east-1c` | ||||
| 
 | ||||
| Used on: Node, PersistentVolume | ||||
| 
 | ||||
| On the Node: The `kubelet` populates this with the zone information as defined by the `cloudprovider`. | ||||
| This will be set only if you are using a `cloudprovider`. However, you should consider setting this | ||||
| on the nodes if it makes sense in your topology. | ||||
| 
 | ||||
| On the PersistentVolume: The `PersistentVolumeLabel` admission controller will automatically add zone labels to PersistentVolumes, on GCE and AWS. | ||||
| 
 | ||||
| Kubernetes will automatically spread the Pods in a replication controller or service across nodes in a single-zone cluster (to reduce the impact of failures). With multiple-zone clusters, this spreading behaviour is extended across zones (to reduce the impact of zone failures). This is achieved via _SelectorSpreadPriority_. | ||||
| 
 | ||||
| _SelectorSpreadPriority_ is a best effort placement. If the zones in your cluster are heterogeneous (for example: different numbers of nodes, different types of nodes, or different pod resource requirements), this placement might prevent equal spreading of your Pods across zones. If desired, you can use homogenous zones (same number and types of nodes) to reduce the probability of unequal spreading. | ||||
| 
 | ||||
| The scheduler (through the _VolumeZonePredicate_ predicate) also will ensure that Pods, that claim a given volume, are only placed into the same zone as that volume. Volumes cannot be attached across zones. | ||||
| 
 | ||||
| The actual values of zone and region don't matter. Nor is the node hierarchy rigidly defined. | ||||
| The expectation is that failures of nodes in different zones should be uncorrelated unless the entire region has failed. For example, zones should typically avoid sharing a single network switch. The exact mapping depends on your particular infrastructure - a three rack installation will choose a very different setup to a multi-datacenter configuration. | ||||
| 
 | ||||
| If `PersistentVolumeLabel` does not support automatic labeling of your PersistentVolumes, you should consider | ||||
| adding the labels manually (or adding support for `PersistentVolumeLabel`). With `PersistentVolumeLabel`, the scheduler prevents Pods from mounting volumes in a different zone. If your infrastructure doesn't have this constraint, you don't need to add the zone labels to the volumes at all. | ||||
| See [topology.kubernetes.io/zone](#topologykubernetesiozone). | ||||
| 
 | ||||
| {{< note >}} Starting in v1.17, this label is deprecated in favor of [topology.kubernetes.io/zone](#topologykubernetesiozone). {{< /note >}} | ||||
| 
 | ||||
| ## topology.kubernetes.io/region {#topologykubernetesioregion} | ||||
| 
 | ||||
| Example: | ||||
| 
 | ||||
| `topology.kubernetes.io/region=us-east-1` | ||||
| 
 | ||||
| See [topology.kubernetes.io/zone](#topologykubernetesiozone). | ||||
| 
 | ||||
| ## topology.kubernetes.io/zone {#topologykubernetesiozone} | ||||
| 
 | ||||
| Example: | ||||
| 
 | ||||
| `topology.kubernetes.io/region=us-east-1` | ||||
| 
 | ||||
| `topology.kubernetes.io/zone=us-east-1c` | ||||
| 
 | ||||
| Used on: Node, PersistentVolume | ||||
| 
 | ||||
| On the Node: The `kubelet` populates this with the zone information as defined by the `cloudprovider`. | ||||
| This will be set only if you are using a `cloudprovider`. However, you should consider setting this | ||||
| on the nodes if it makes sense in your topology. | ||||
| On Node: The `kubelet` or the external `cloud-controller-manager` populates this with the information as provided by the `cloudprovider`.  This will be set only if you are using a `cloudprovider`. However, you should consider setting this on nodes if it makes sense in your topology. | ||||
| 
 | ||||
| On the PersistentVolume: The `PersistentVolumeLabel` admission controller will automatically add zone labels to PersistentVolumes, on GCE and AWS. | ||||
| On PersistentVolume: topology-aware volume provisioners will automatically set node affinity constraints on `PersistentVolumes`. | ||||
| 
 | ||||
| Kubernetes will automatically spread the Pods in a replication controller or service across nodes in a single-zone cluster (to reduce the impact of failures). With multiple-zone clusters, this spreading behaviour is extended across zones (to reduce the impact of zone failures). This is achieved via _SelectorSpreadPriority_. | ||||
| A zone represents a logical failure domain.  It is common for Kubernetes clusters to span multiple zones for increased availability.  While the exact definition of a zone is left to infrastructure implementations, common properties of a zone include very low network latency within a zone, no-cost network traffic within a zone, and failure independence from other zones.  For example, nodes within a zone might share a network switch, but nodes in different zones should not. | ||||
| 
 | ||||
| A region represents a larger domain, made up of one or more zones.  It is uncommon for Kubernetes clusters to span multiple regions.  While the exact definition of a zone or region is left to infrastructure implementations, common properties of a region include higher network latency between them than within them, non-zero cost for network traffic between them, and failure independence from other zones or regions.  For example, nodes within a region might share power infrastructure (e.g. a UPS or generator), but nodes in different regions typically would not. | ||||
| 
 | ||||
| Kubernetes makes a few assumptions about the structure of zones and regions: | ||||
| 
 | ||||
| 1. Regions and zones are hierarchical: zones are strict subsets of regions and no zone can be in two regions. | ||||
| 2. Zone names are unique across regions; for example region "africa-east-1" might be comprised of zones "africa-east-1a" and "africa-east-1b". | ||||
| 
 | ||||
| It should be safe to assume that topology labels do not change.  Even though labels are strictly mutable, consumers of them can assume that a given node is not going to be moved between zones without being destroyed and recreated. | ||||
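| 
 | ||||
| As a quick check (the output depends on your provider and how your nodes are labeled), you can list these labels on your nodes: | ||||
| 
 | ||||
| ```shell | ||||
| kubectl get nodes --label-columns topology.kubernetes.io/zone,topology.kubernetes.io/region | ||||
| ``` | ||||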
| 
 | ||||
| Kubernetes can use this information in various ways.  For example, the scheduler automatically tries to spread the Pods in a ReplicaSet across nodes in a single-zone cluster (to reduce the impact of node failures, see [kubernetes.io/hostname](#kubernetesiohostname)). With multiple-zone clusters, this spreading behavior also applies to zones (to reduce the impact of zone failures). This is achieved via _SelectorSpreadPriority_. | ||||
| 
 | ||||
| _SelectorSpreadPriority_ is a best effort placement. If the zones in your cluster are heterogeneous (for example: different numbers of nodes, different types of nodes, or different pod resource requirements), this placement might prevent equal spreading of your Pods across zones. If desired, you can use homogenous zones (same number and types of nodes) to reduce the probability of unequal spreading. | ||||
| 
 | ||||
| The scheduler (through the _VolumeZonePredicate_ predicate) also ensures that Pods that claim a given volume are only placed into the same zone as that volume. Volumes cannot be attached across zones. | ||||
| 
 | ||||
| The actual values of zone and region don't matter. Nor is the node hierarchy rigidly defined. | ||||
| The expectation is that failures of nodes in different zones should be uncorrelated unless the entire region has failed. For example, zones should typically avoid sharing a single network switch. The exact mapping depends on your particular infrastructure - a three rack installation will choose a very different setup to a multi-datacenter configuration. | ||||
| 
 | ||||
| If `PersistentVolumeLabel` does not support automatic labeling of your PersistentVolumes, you should consider | ||||
| adding the labels manually (or adding support for `PersistentVolumeLabel`). With `PersistentVolumeLabel`, the scheduler prevents Pods from mounting volumes in a different zone. If your infrastructure doesn't have this constraint, you don't need to add the zone labels to the volumes at all. | ||||
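| 
 | ||||
| Beyond the automatic behavior described above, you can also reference the zone label explicitly, for example in a Pod's `topologySpreadConstraints` (a sketch using hypothetical labels): | ||||
| 
 | ||||
| ```yaml | ||||
| apiVersion: v1 | ||||
| kind: Pod | ||||
| metadata: | ||||
|   name: spread-demo       # hypothetical name | ||||
|   labels: | ||||
|     app: demo | ||||
| spec: | ||||
|   topologySpreadConstraints: | ||||
|   - maxSkew: 1 | ||||
|     topologyKey: topology.kubernetes.io/zone | ||||
|     whenUnsatisfiable: DoNotSchedule | ||||
|     labelSelector: | ||||
|       matchLabels: | ||||
|         app: demo | ||||
|   containers: | ||||
|   - name: app | ||||
|     image: busybox | ||||
|     command: ["sleep", "3600"] | ||||
| ``` | ||||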
| 
 | ||||
| 
 | ||||
| 
 | ||||
|  |  | |||
|  | @ -31,13 +31,6 @@ For general background information, read | |||
| describes how clients can authenticate to the Kubernetes API server, and how their | ||||
| requests are authorized. | ||||
| 
 | ||||
| <!-- body --> | ||||
| 
 | ||||
| The REST API is the fundamental fabric of Kubernetes. All operations and | ||||
| communications between components, and external user commands are REST API | ||||
| calls that the API Server handles. Consequently, everything in the Kubernetes | ||||
| platform is treated as an API object and has a corresponding entry in the | ||||
| API. | ||||
| 
 | ||||
| ## API versioning | ||||
| 
 | ||||
|  |  | |||
|  | @ -27,15 +27,15 @@ The following examples will show how you can interact with the health API endpoi | |||
| For all endpoints you can use the `verbose` parameter to print out the checks and their status. | ||||
| This can be useful for a human operator to debug the current status of the API server; it is not intended to be consumed by a machine: | ||||
| 
 | ||||
|     ```shell | ||||
|     curl -k https://localhost:6443/livez?verbose | ||||
|     ``` | ||||
| ```shell | ||||
| curl -k https://localhost:6443/livez?verbose | ||||
| ``` | ||||
| 
 | ||||
| or from a remote host with authentication: | ||||
| 
 | ||||
|     ```shell | ||||
|     kubectl get --raw='/readyz?verbose' | ||||
|     ``` | ||||
| ```shell | ||||
| kubectl get --raw='/readyz?verbose' | ||||
| ``` | ||||
| 
 | ||||
| The output will look like this: | ||||
| 
 | ||||
|  | @ -62,9 +62,9 @@ The output will look like this: | |||
| The Kubernetes API server also supports excluding specific checks. | ||||
| The query parameters can be combined, as in this example: | ||||
| 
 | ||||
|     ```shell | ||||
|     curl -k 'https://localhost:6443/readyz?verbose&exclude=etcd' | ||||
|     ``` | ||||
| ```shell | ||||
| curl -k 'https://localhost:6443/readyz?verbose&exclude=etcd' | ||||
| ``` | ||||
| 
 | ||||
| The output shows that the `etcd` check is excluded: | ||||
| 
 | ||||
|  | @ -98,6 +98,6 @@ The schema for the individual health checks is `/livez/<healthcheck-name>` where | |||
| The `<healthcheck-name>` part of the path can be discovered using the `verbose` flag from above; it corresponds to the text shown between `[+]` and `ok`. | ||||
| These individual health checks should not be consumed by machines but can be helpful for a human operator to debug a system: | ||||
| 
 | ||||
|     ```shell | ||||
|     curl -k https://localhost:6443/livez/etcd | ||||
|     ``` | ||||
| ```shell | ||||
| curl -k https://localhost:6443/livez/etcd | ||||
| ``` | ||||
|  |  | |||
|  | @ -230,7 +230,7 @@ in Go files or in the OpenAPI schema definition of the | |||
| 
 | ||||
| | Golang marker | OpenAPI extension | Accepted values | Description | Introduced in | | ||||
| |---|---|---|---|---| | ||||
| | `//+listType` | `x-kubernetes-list-type` | `atomic`/`set`/`map` | Applicable to lists. `atomic` and `set` apply to lists with scalar elements only. `map` applies to lists of nested types only. If configured as `atomic`, the entire list is replaced during merge; a single manager manages the list as a whole at any one time. If `granular`, different managers can manage entries separately. | 1.16          | | ||||
| | `//+listType` | `x-kubernetes-list-type` | `atomic`/`set`/`map` | Applicable to lists. `atomic` and `set` apply to lists with scalar elements only. `map` applies to lists of nested types only. If configured as `atomic`, the entire list is replaced during merge; a single manager manages the list as a whole at any one time. If `set` or `map`, different managers can manage entries separately. | 1.16          | | ||||
| | `//+listMapKey` | `x-kubernetes-list-map-keys` | Slice of map keys that uniquely identify entries for example `["port", "protocol"]` | Only applicable when `+listType=map`. A slice of strings whose values in combination must uniquely identify list entries. While there can be multiple keys, `listMapKey` is singular because keys need to be specified individually in the Go type. | 1.16 | | ||||
| | `//+mapType` | `x-kubernetes-map-type` | `atomic`/`granular` | Applicable to maps. `atomic` means that the map can only be entirely replaced by a single manager. `granular` means that the map supports separate managers updating individual fields. | 1.17 | | ||||
| | `//+structType` | `x-kubernetes-map-type` | `atomic`/`granular` | Applicable to structs; otherwise same usage and OpenAPI annotation as `//+mapType`.| 1.17 | | ||||
|  |  | |||
|  | @ -2,126 +2,122 @@ | |||
| reviewers: | ||||
| - davidopp | ||||
| - lavalamp | ||||
| title: Building large clusters | ||||
| title: Considerations for large clusters | ||||
| weight: 20 | ||||
| --- | ||||
| 
 | ||||
| ## Support | ||||
| 
 | ||||
| At {{< param "version" >}}, Kubernetes supports clusters with up to 5000 nodes. More specifically, we support configurations that meet *all* of the following criteria: | ||||
| A cluster is a set of {{< glossary_tooltip text="nodes" term_id="node" >}} (physical | ||||
| or virtual machines) running Kubernetes agents, managed by the | ||||
| {{< glossary_tooltip text="control plane" term_id="control-plane" >}}. | ||||
| Kubernetes {{< param "version" >}} supports clusters with up to 5000 nodes. More specifically, | ||||
| Kubernetes is designed to accommodate configurations that meet *all* of the following criteria: | ||||
| 
 | ||||
| * No more than 100 pods per node | ||||
| * No more than 5000 nodes | ||||
| * No more than 150000 total pods | ||||
| * No more than 300000 total containers | ||||
| * No more than 100 pods per node | ||||
| 
 | ||||
| You can scale your cluster by adding or removing nodes. The way you do this depends | ||||
| on how your cluster is deployed. | ||||
| 
 | ||||
| ## Setup | ||||
| ## Cloud provider resource quotas {#quota-issues} | ||||
| 
 | ||||
| A cluster is a set of nodes (physical or virtual machines) running Kubernetes agents, managed by a "master" (the cluster-level control plane). | ||||
| 
 | ||||
| Normally the number of nodes in a cluster is controlled by the value `NUM_NODES` in the platform-specific `config-default.sh` file (for example, see [GCE's `config-default.sh`](https://releases.k8s.io/{{< param "githubbranch" >}}/cluster/gce/config-default.sh)). | ||||
| 
 | ||||
| Simply changing that value to something very large, however, may cause the setup script to fail for many cloud providers. A GCE deployment, for example, will run in to quota issues and fail to bring the cluster up. | ||||
| 
 | ||||
| When setting up a large Kubernetes cluster, the following issues must be considered. | ||||
| 
 | ||||
| ### Quota Issues | ||||
| 
 | ||||
| To avoid running into cloud provider quota issues, when creating a cluster with many nodes, consider: | ||||
| 
 | ||||
| * Increase the quota for things like CPU, IPs, etc. | ||||
|   * In [GCE, for example,](https://cloud.google.com/compute/docs/resource-quotas) you'll want to increase the quota for: | ||||
| To avoid running into cloud provider quota issues, when creating a cluster with many nodes, | ||||
| consider: | ||||
| 
 | ||||
| * Request a quota increase for cloud resources such as: | ||||
|     * Compute instances | ||||
|     * CPUs | ||||
|     * VM instances | ||||
|     * Total persistent disk reserved | ||||
|     * Storage volumes | ||||
|     * In-use IP addresses | ||||
|     * Firewall Rules | ||||
|     * Forwarding rules | ||||
|     * Routes | ||||
|     * Target pools | ||||
| * Gating the setup script so that it brings up new node VMs in smaller batches with waits in between, because some cloud providers rate limit the creation of VMs. | ||||
|     * Packet filtering rule sets | ||||
|     * Number of load balancers | ||||
|     * Network subnets | ||||
|     * Log streams | ||||
| * Gate the cluster scaling actions to bring up new nodes in batches, with a pause | ||||
|   between batches, because some cloud providers rate limit the creation of new instances. | ||||
| 
 | ||||
| ### Etcd storage | ||||
| ## Control plane components | ||||
| 
 | ||||
| To improve performance of large clusters, we store events in a separate dedicated etcd instance. | ||||
| For a large cluster, you need a control plane with sufficient compute and other | ||||
| resources. | ||||
| 
 | ||||
| When creating a cluster, existing salt scripts: | ||||
| Typically you would run one or two control plane instances per failure zone, | ||||
| scaling those instances vertically first and then scaling horizontally after reaching | ||||
| the point of diminishing returns from (vertical) scaling. | ||||
| 
 | ||||
| You should run at least one instance per failure zone to provide fault-tolerance. Kubernetes | ||||
| nodes do not automatically steer traffic towards control-plane endpoints that are in the | ||||
| same failure zone; however, your cloud provider might have its own mechanisms to do this. | ||||
| 
 | ||||
| For example, using a managed load balancer, you could configure the load balancer to send traffic | ||||
| that originates from the kubelets and Pods in failure zone _A_ only to the control plane hosts | ||||
| that are also in zone _A_. If a single control-plane host, or the control plane endpoint for | ||||
| failure zone _A_, goes offline, all the control-plane traffic for nodes in zone _A_ is then | ||||
| sent between zones. Running multiple control plane hosts in each zone makes that outcome | ||||
| less likely. | ||||
| 
 | ||||
| ### etcd storage | ||||
| 
 | ||||
| To improve performance of large clusters, you can store Event objects in a separate | ||||
| dedicated etcd instance. | ||||
| 
 | ||||
| When creating a cluster, you can (using custom tooling): | ||||
| 
 | ||||
| * start and configure an additional etcd instance | ||||
| * configure api-server to use it for storing events | ||||
| * configure the {{< glossary_tooltip term_id="kube-apiserver" text="API server" >}} to use it for storing events | ||||
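| 
 | ||||
| One way to do this is the kube-apiserver `--etcd-servers-overrides` flag, shown here as a sketch with placeholder endpoints; the override format is `group/resource#servers`, with multiple servers separated by semicolons: | ||||
| 
 | ||||
| ```shell | ||||
| kube-apiserver \ | ||||
|   --etcd-servers=https://etcd-main:2379 \ | ||||
|   --etcd-servers-overrides='/events#https://etcd-events:2379' | ||||
|   # ...other kube-apiserver flags omitted | ||||
| ``` | ||||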
| 
 | ||||
| ### Size of master and master components | ||||
| ## Addon resources | ||||
| 
 | ||||
| On GCE/Google Kubernetes Engine, and AWS, `kube-up` automatically configures the proper VM size for your master depending on the number of nodes | ||||
| in your cluster. On other providers, you will need to configure it manually. For reference, the sizes we use on GCE are | ||||
| Kubernetes [resource limits](/docs/concepts/configuration/manage-resources-containers/) | ||||
| help to minimize the impact of memory leaks and other ways that pods and containers can | ||||
| impact other components. These resource limits can and should apply to | ||||
| {{< glossary_tooltip text="addons" term_id="addons" >}} just as they apply to application | ||||
| workloads. | ||||
| 
 | ||||
| * 1-5 nodes: n1-standard-1 | ||||
| * 6-10 nodes: n1-standard-2 | ||||
| * 11-100 nodes: n1-standard-4 | ||||
| * 101-250 nodes: n1-standard-8 | ||||
| * 251-500 nodes: n1-standard-16 | ||||
| * more than 500 nodes: n1-standard-32 | ||||
| 
 | ||||
| And the sizes we use on AWS are | ||||
| 
 | ||||
| * 1-5 nodes: m3.medium | ||||
| * 6-10 nodes: m3.large | ||||
| * 11-100 nodes: m3.xlarge | ||||
| * 101-250 nodes: m3.2xlarge | ||||
| * 251-500 nodes: c4.4xlarge | ||||
| * more than 500 nodes: c4.8xlarge | ||||
| 
 | ||||
| {{< note >}} | ||||
| On Google Kubernetes Engine, the size of the master node adjusts automatically based on the size of your cluster. For more information, see [this blog post](https://cloudplatform.googleblog.com/2017/11/Cutting-Cluster-Management-Fees-on-Google-Kubernetes-Engine.html). | ||||
| 
 | ||||
| On AWS, master node sizes are currently set at cluster startup time and do not change, even if you later scale your cluster up or down by manually removing or adding nodes or using a cluster autoscaler. | ||||
| {{< /note >}} | ||||
| 
 | ||||
| ### Addon Resources | ||||
| 
 | ||||
| To prevent memory leaks or other resource issues in [cluster addons](https://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons) from consuming all the resources available on a node, Kubernetes sets resource limits on addon containers to limit the CPU and Memory resources they can consume (See PR [#10653](https://pr.k8s.io/10653/files) and [#10778](https://pr.k8s.io/10778/files)). | ||||
| 
 | ||||
| For example: | ||||
| For example, you can set CPU and memory limits for a logging component: | ||||
| 
 | ||||
| ```yaml | ||||
|   ... | ||||
|   containers: | ||||
|   - name: fluentd-cloud-logging | ||||
|     image: k8s.gcr.io/fluentd-gcp:1.16 | ||||
|     image: fluent/fluentd-kubernetes-daemonset:v1 | ||||
|     resources: | ||||
|       limits: | ||||
|         cpu: 100m | ||||
|         memory: 200Mi | ||||
| ``` | ||||
| 
 | ||||
| Except for Heapster, these limits are static and are based on data we collected from addons running on 4-node clusters (see [#10335](https://issue.k8s.io/10335#issuecomment-117861225)). The addons consume a lot more resources when running on large deployment clusters (see [#5880](http://issue.k8s.io/5880#issuecomment-113984085)). So, if a large cluster is deployed without adjusting these values, the addons may continuously get killed because they keep hitting the limits. | ||||
| Addons' default limits are typically based on data collected from experience running | ||||
| each addon on small or medium Kubernetes clusters. When running on large | ||||
| clusters, addons often consume more of some resources than their default limits. | ||||
| If a large cluster is deployed without adjusting these values, the addon(s) | ||||
| may continuously get killed because they keep hitting the memory limit. | ||||
| Alternatively, the addon may run but with poor performance due to CPU time | ||||
| slice restrictions. | ||||
| 
 | ||||
| To avoid running into cluster addon resource issues, when creating a cluster with many nodes, consider the following: | ||||
| To avoid running into cluster addon resource issues, when creating a cluster with | ||||
| many nodes, consider the following: | ||||
| 
 | ||||
| * Scale memory and CPU limits for each of the following addons, if used, as you scale up the size of cluster (there is one replica of each handling the entire cluster so memory and CPU usage tends to grow proportionally with size/load on cluster): | ||||
|   * [InfluxDB and Grafana](https://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml) | ||||
|   * [kubedns, dnsmasq, and sidecar](https://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/dns/kube-dns/kube-dns.yaml.in) | ||||
|   * [Kibana](https://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/fluentd-elasticsearch/kibana-deployment.yaml) | ||||
| * Scale number of replicas for the following addons, if used, along with the size of cluster (there are multiple replicas of each so increasing replicas should help handle increased load, but, since load per replica also increases slightly, also consider increasing CPU/memory limits): | ||||
|   * [elasticsearch](https://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml) | ||||
| * Increase memory and CPU limits slightly for each of the following addons, if used, along with the size of cluster (there is one replica per node but CPU/memory usage increases slightly along with cluster load/size as well): | ||||
|   * [FluentD with ElasticSearch Plugin](https://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml) | ||||
|   * [FluentD with GCP Plugin](https://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml) | ||||
| * Some addons scale vertically - there is one replica of the addon for the cluster | ||||
|   or serving a whole failure zone. For these addons, increase requests and limits | ||||
|   as you scale out your cluster. | ||||
| * Many addons scale horizontally - you add capacity by running more pods - but with | ||||
|   a very large cluster you may also need to raise CPU or memory limits slightly. | ||||
|   The VerticalPodAutoscaler can run in _recommender_ mode to provide suggested | ||||
|   figures for requests and limits. | ||||
| * Some addons run as one copy per node, controlled by a {{< glossary_tooltip text="DaemonSet" | ||||
|   term_id="daemonset" >}}: for example, a node-level log aggregator. Similar to | ||||
|   the case with horizontally-scaled addons, you may also need to raise CPU or memory | ||||
|   limits slightly. | ||||
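| 
 | ||||
| For the _recommender_ mode mentioned above, a VerticalPodAutoscaler object might look roughly like this (this assumes the VPA components are installed in your cluster; the target names are hypothetical): | ||||
| 
 | ||||
| ```yaml | ||||
| apiVersion: autoscaling.k8s.io/v1 | ||||
| kind: VerticalPodAutoscaler | ||||
| metadata: | ||||
|   name: fluentd-vpa              # hypothetical name | ||||
|   namespace: kube-system | ||||
| spec: | ||||
|   targetRef: | ||||
|     apiVersion: apps/v1 | ||||
|     kind: DaemonSet | ||||
|     name: fluentd-cloud-logging  # hypothetical addon workload | ||||
|   updatePolicy: | ||||
|     updateMode: "Off"            # recommend only; do not evict or update Pods | ||||
| ``` | ||||
| 
 | ||||
| The recommendations then appear in the object's `status` field once the recommender has observed the workload. | ||||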
| 
 | ||||
| Heapster's resource limits are set dynamically based on the initial size of your cluster (see [#16185](http://issue.k8s.io/16185) | ||||
| and [#22940](http://issue.k8s.io/22940)). If you find that Heapster is running | ||||
| out of resources, you should adjust the formulas that compute heapster memory request (see those PRs for details). | ||||
| ## {{% heading "whatsnext" %}} | ||||
| 
 | ||||
| For directions on how to detect if addon containers are hitting resource limits, see the | ||||
| [Troubleshooting section of Compute Resources](/docs/concepts/configuration/manage-resources-containers/#troubleshooting). | ||||
| 
 | ||||
| ### Allowing minor node failure at startup | ||||
| 
 | ||||
| For various reasons (see [#18969](https://github.com/kubernetes/kubernetes/issues/18969) for more details) running | ||||
| `kube-up.sh` with a very large `NUM_NODES` may fail due to a very small number of nodes not coming up properly. | ||||
| Currently you have two choices: restart the cluster (`kube-down.sh` and then `kube-up.sh` again), or before | ||||
| running `kube-up.sh` set the environment variable `ALLOWED_NOTREADY_NODES` to whatever value you feel comfortable | ||||
| with. This will allow `kube-up.sh` to succeed with fewer than `NUM_NODES` coming up. Depending on the | ||||
| reason for the failure, those additional nodes may join later or the cluster may remain at a size of | ||||
| `NUM_NODES - ALLOWED_NOTREADY_NODES`. | ||||
| `VerticalPodAutoscaler` is a custom resource that you can deploy into your cluster | ||||
| to help you manage resource requests and limits for pods.   | ||||
| Visit [Vertical Pod Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler#readme) | ||||
| to learn more about `VerticalPodAutoscaler` and how you can use it to scale cluster | ||||
| components, including cluster-critical addons. | ||||
| 
 | ||||
| The [cluster autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler#readme) | ||||
| integrates with a number of cloud providers to help you run the right number of | ||||
| nodes for the level of resource demand in your cluster. | ||||
|  |  | |||
|  | @ -111,7 +111,7 @@ see [Allowed topologies](/docs/concepts/storage/storage-classes/#allowed-topolog | |||
| ## Networking | ||||
| 
 | ||||
| By itself, Kubernetes does not include zone-aware networking. You can use a | ||||
| [network plugin](docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) | ||||
| [network plugin](/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) | ||||
| to configure cluster networking, and that network solution might have zone-specific | ||||
| elements. For example, if your cloud provider supports Services with | ||||
| `type=LoadBalancer`, the load balancer might only send traffic to Pods running in the | ||||
|  |  | |||
|  | @ -25,10 +25,11 @@ daemons installed: | |||
| ## Running Node Conformance Test | ||||
| 
 | ||||
| To run the node conformance test, perform the following steps: | ||||
| 
 | ||||
| 1. Point your Kubelet to localhost `--api-servers="http://localhost:8080"`, | ||||
| because the test framework starts a local master to test Kubelet. There are some | ||||
| other Kubelet flags you may care: | ||||
| 1. Work out the value of the `--kubeconfig` option for the kubelet; for example: | ||||
|    `--kubeconfig=/var/lib/kubelet/config.yaml`. | ||||
|     Because the test framework starts a local control plane to test the kubelet, | ||||
|     use `http://localhost:8080` as the URL of the API server. | ||||
|     There are some other kubelet command line parameters you may want to use: | ||||
|   * `--pod-cidr`: If you are using `kubenet`, you should specify an arbitrary CIDR | ||||
|     to Kubelet, for example `--pod-cidr=10.180.0.0/24`. | ||||
|   * `--cloud-provider`: If you are using `--cloud-provider=gce`, you should | ||||
|  |  | |||
|  | @ -4,7 +4,7 @@ reviewers: | |||
| - bart0sh | ||||
| title: Container runtimes | ||||
| content_type: concept | ||||
| weight: 10 | ||||
| weight: 20 | ||||
| --- | ||||
| <!-- overview --> | ||||
| 
 | ||||
|  | @ -85,6 +85,7 @@ net.ipv4.ip_forward                 = 1 | |||
| net.bridge.bridge-nf-call-ip6tables = 1 | ||||
| EOF | ||||
| 
 | ||||
| # Apply sysctl params without reboot | ||||
| sudo sysctl --system | ||||
| ``` | ||||
| 
 | ||||
|  | @ -102,7 +103,7 @@ sudo apt-get update && sudo apt-get install -y apt-transport-https ca-certificat | |||
| 
 | ||||
| ```shell | ||||
| ## Add Docker's official GPG key | ||||
| curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add --keyring /etc/apt/trusted.gpg.d/docker.gpg - | ||||
| curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key --keyring /etc/apt/trusted.gpg.d/docker.gpg add - | ||||
| ``` | ||||
| 
 | ||||
| ```shell | ||||
|  | @ -257,8 +258,8 @@ cat <<EOF | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cr | |||
| deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/$VERSION/$OS/ / | ||||
| EOF | ||||
| 
 | ||||
| curl -L https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:$VERSION/$OS/Release.key | sudo apt-key add --keyring /etc/apt/trusted.gpg.d/libcontainers.gpg - | ||||
| curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/Release.key | sudo apt-key add --keyring /etc/apt/trusted.gpg.d/libcontainers.gpg - | ||||
| curl -L https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:$VERSION/$OS/Release.key | sudo apt-key --keyring /etc/apt/trusted.gpg.d/libcontainers.gpg add - | ||||
| curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/Release.key | sudo apt-key --keyring /etc/apt/trusted.gpg.d/libcontainers.gpg add - | ||||
| 
 | ||||
| sudo apt-get update | ||||
| sudo apt-get install cri-o cri-o-runc | ||||
|  | @ -293,8 +294,8 @@ cat <<EOF | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cr | |||
| deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/$VERSION/$OS/ / | ||||
| EOF | ||||
| 
 | ||||
| curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/Release.key | sudo apt-key add --keyring /etc/apt/trusted.gpg.d/libcontainers.gpg | ||||
| curl -L https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:$VERSION/$OS/Release.key | sudo apt-key add --keyring /etc/apt/trusted.gpg.d/libcontainers-cri-o.gpg - | ||||
| curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/Release.key | sudo apt-key --keyring /etc/apt/trusted.gpg.d/libcontainers.gpg add - | ||||
| curl -L https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:$VERSION/$OS/Release.key | sudo apt-key --keyring /etc/apt/trusted.gpg.d/libcontainers-cri-o.gpg add - | ||||
| 
 | ||||
| sudo apt-get update | ||||
| sudo apt-get install cri-o cri-o-runc | ||||
|  | @ -387,7 +388,7 @@ sudo apt-get update && sudo apt-get install -y \ | |||
| 
 | ||||
| ```shell | ||||
| # Add Docker's official GPG key: | ||||
| curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add --keyring /etc/apt/trusted.gpg.d/docker.gpg - | ||||
| curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key --keyring /etc/apt/trusted.gpg.d/docker.gpg add - | ||||
| ``` | ||||
| 
 | ||||
| ```shell | ||||
|  | @ -421,6 +422,7 @@ EOF | |||
| ``` | ||||
| 
 | ||||
| ```shell | ||||
| # Create /etc/systemd/system/docker.service.d | ||||
| sudo mkdir -p /etc/systemd/system/docker.service.d | ||||
| ``` | ||||
| 
 | ||||
|  | @ -476,6 +478,7 @@ EOF | |||
| ``` | ||||
| 
 | ||||
| ```shell | ||||
| # Create /etc/systemd/system/docker.service.d | ||||
| sudo mkdir -p /etc/systemd/system/docker.service.d | ||||
| ``` | ||||
| 
 | ||||
|  |  | |||
|  | @ -50,7 +50,7 @@ this example. | |||
| 
 | ||||
| 
 | ||||
| 1. Configure the kubelet to be a service manager for etcd. | ||||
|    | ||||
| 
 | ||||
|    {{< note >}}You must do this on every host where etcd should be running.{{< /note >}} | ||||
|     Since etcd was created first, you must override the service priority by creating a new unit file | ||||
|     that has higher precedence than the kubeadm-provided kubelet unit file. | ||||
|  | @ -68,6 +68,12 @@ this example. | |||
|     systemctl restart kubelet | ||||
|     ``` | ||||
| 
 | ||||
|     Check the kubelet status to ensure it is running. | ||||
| 
 | ||||
|     ```sh | ||||
|     systemctl status kubelet | ||||
|     ``` | ||||
| 
 | ||||
| 1. Create configuration files for kubeadm. | ||||
| 
 | ||||
|     Generate one kubeadm configuration file for each host that will have an etcd | ||||
|  |  | |||
|  | @ -0,0 +1,14 @@ | |||
| --- | ||||
| title: Turnkey Cloud Solutions | ||||
| content_type: concept | ||||
| weight: 30 | ||||
| --- | ||||
| <!-- overview --> | ||||
| 
 | ||||
| This page provides a list of Kubernetes certified solution providers. From each | ||||
| provider page, you can learn how to install and set up production-ready | ||||
| clusters. | ||||
| 
 | ||||
| <!-- body --> | ||||
| 
 | ||||
| {{< cncf-landscape helpers=true category="certified-kubernetes-hosted" >}} | ||||
|  | @ -1,4 +0,0 @@ | |||
| --- | ||||
| title: Turnkey Cloud Solutions | ||||
| weight: 30 | ||||
| --- | ||||
|  | @ -1,20 +0,0 @@ | |||
| --- | ||||
| reviewers: | ||||
| - colemickens | ||||
| - brendandburns | ||||
| title: Running Kubernetes on Alibaba Cloud | ||||
| --- | ||||
| 
 | ||||
| ## Alibaba Cloud Container Service | ||||
| 
 | ||||
| The [Alibaba Cloud Container Service](https://www.alibabacloud.com/product/container-service) lets you run and manage Docker applications on a cluster of either Alibaba Cloud ECS instances or in a Serverless fashion. It supports the popular open source container orchestrators: Docker Swarm and Kubernetes. | ||||
| 
 | ||||
| To simplify cluster deployment and management, use [Kubernetes Support for Alibaba Cloud Container Service](https://www.alibabacloud.com/product/kubernetes). You can get started quickly by following the [Kubernetes walk-through](https://www.alibabacloud.com/help/doc-detail/86737.htm), and there are some [tutorials for Kubernetes Support on Alibaba Cloud](https://yq.aliyun.com/teams/11/type_blog-cid_200-page_1) in Chinese. | ||||
| 
 | ||||
| To use custom binaries or open source Kubernetes, follow the instructions below. | ||||
| 
 | ||||
| ## Custom Deployments | ||||
| 
 | ||||
| The source code for [Kubernetes with Alibaba Cloud provider implementation](https://github.com/AliyunContainerService/kubernetes) is open source and available on GitHub. | ||||
| 
 | ||||
| For more information, see "[Quick deployment of Kubernetes - VPC environment on Alibaba Cloud](https://www.alibabacloud.com/forum/read-830)" in English. | ||||
|  | @ -1,88 +0,0 @@ | |||
| --- | ||||
| reviewers: | ||||
| - justinsb | ||||
| - clove | ||||
| title: Running Kubernetes on AWS EC2 | ||||
| content_type: task | ||||
| --- | ||||
| 
 | ||||
| <!-- overview --> | ||||
| 
 | ||||
| This page describes how to install a Kubernetes cluster on AWS. | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| ## {{% heading "prerequisites" %}} | ||||
| 
 | ||||
| 
 | ||||
| To create a Kubernetes cluster on AWS, you will need an Access Key ID and a Secret Access Key from AWS. | ||||
| 
 | ||||
| ### Supported Production Grade Tools | ||||
| 
 | ||||
| * [conjure-up](https://docs.conjure-up.io/stable/en/cni/k8s-and-aws) is an open-source installer for Kubernetes that creates Kubernetes clusters with native AWS integrations on Ubuntu. | ||||
| 
 | ||||
| * [Kubernetes Operations](https://github.com/kubernetes/kops) - Production Grade K8s Installation, Upgrades, and Management. Supports running Debian, Ubuntu, CentOS, and RHEL in AWS. | ||||
| 
 | ||||
| * [kube-aws](https://github.com/kubernetes-retired/kube-aws), creates and manages Kubernetes clusters with [Flatcar Linux](https://www.flatcar-linux.org/) nodes, using AWS tools: EC2, CloudFormation and Autoscaling. | ||||
| 
 | ||||
| * [KubeOne](https://github.com/kubermatic/kubeone) is an open source cluster lifecycle management tool that creates, upgrades and manages Kubernetes Highly-Available clusters. | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| <!-- steps --> | ||||
| 
 | ||||
| ## Getting started with your cluster | ||||
| 
 | ||||
| ### Command line administration tool: kubectl | ||||
| 
 | ||||
| The cluster startup script will leave you with a `kubernetes` directory on your workstation. | ||||
| Alternately, you can download the latest Kubernetes release from [this page](https://github.com/kubernetes/kubernetes/releases). | ||||
| 
 | ||||
| Next, add the appropriate binary folder to your `PATH` to access kubectl: | ||||
| 
 | ||||
| ```shell | ||||
| # macOS | ||||
| export PATH=<path/to/kubernetes-directory>/platforms/darwin/amd64:$PATH | ||||
| 
 | ||||
| # Linux | ||||
| export PATH=<path/to/kubernetes-directory>/platforms/linux/amd64:$PATH | ||||
| ``` | ||||
| 
 | ||||
| An up-to-date documentation page for this tool is available here: [kubectl manual](/docs/reference/kubectl/kubectl/) | ||||
| 
 | ||||
| By default, `kubectl` will use the `kubeconfig` file generated during the cluster startup for authenticating against the API. | ||||
| For more information, please read [kubeconfig files](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/). | ||||
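| 
 | ||||
| To quickly confirm that `kubectl` can reach your new cluster, you can run a basic check (the exact output depends on your deployment): | ||||
| 
 | ||||
| ```shell | ||||
| # Show the control plane endpoint and list the registered nodes | ||||
| kubectl cluster-info | ||||
| kubectl get nodes | ||||
| ``` | ||||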
| 
 | ||||
| ### Examples | ||||
| 
 | ||||
| See [a simple nginx example](/docs/tasks/run-application/run-stateless-application-deployment/) to try out your new cluster. | ||||
| 
 | ||||
| The "Guestbook" application is another popular example to get started with Kubernetes: [guestbook example](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/guestbook/) | ||||
| 
 | ||||
| For more complete applications, please look in the [examples directory](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/) | ||||
| 
 | ||||
| ## Scaling the cluster | ||||
| 
 | ||||
| Adding and removing nodes through `kubectl` is not supported. You can still scale the number of nodes manually by adjusting the 'Desired' and 'Max' properties of the | ||||
| [Auto Scaling Group](https://docs.aws.amazon.com/autoscaling/latest/userguide/as-manual-scaling.html), which was created during the installation. | ||||
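| 
 | ||||
| For example, assuming the AWS CLI is configured for the account that owns the cluster, an adjustment might look like the following sketch; the Auto Scaling Group name is a placeholder that depends on how your cluster was provisioned: | ||||
| 
 | ||||
| ```shell | ||||
| # Sketch: set the desired and maximum node count on the cluster's Auto Scaling Group | ||||
| aws autoscaling update-auto-scaling-group \ | ||||
|   --auto-scaling-group-name <your-cluster-asg-name> \ | ||||
|   --desired-capacity 5 \ | ||||
|   --max-size 10 | ||||
| ``` | ||||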
| 
 | ||||
| ## Tearing down the cluster | ||||
| 
 | ||||
| Make sure the environment variables you used to provision your cluster are still exported, then call the following script inside the | ||||
| `kubernetes` directory: | ||||
| 
 | ||||
| ```shell | ||||
| cluster/kube-down.sh | ||||
| ``` | ||||
| 
 | ||||
| ## Support Level | ||||
| 
 | ||||
| 
 | ||||
| IaaS Provider        | Config. Mgmt | OS            | Networking  | Docs                                          | Conforms | Support Level | ||||
| -------------------- | ------------ | ------------- | ----------  | --------------------------------------------- | ---------| ---------------------------- | ||||
| AWS                  | kops         | Debian        | k8s (VPC)   | [docs](https://github.com/kubernetes/kops)    |          | Community ([@justinsb](https://github.com/justinsb)) | ||||
| AWS                  | CoreOS       | CoreOS        | flannel     | -  |          | Community | ||||
| AWS                  | Juju         | Ubuntu        | flannel, calico, canal     | - | 100%     | Commercial, Community | ||||
| AWS                  | KubeOne         | Ubuntu, CoreOS, CentOS   | canal, weavenet     | [docs](https://github.com/kubermatic/kubeone)      | 100%    | Commercial, Community | ||||
| 
 | ||||
| 
 | ||||
|  | @ -1,36 +0,0 @@ | |||
| --- | ||||
| reviewers: | ||||
| - colemickens | ||||
| - brendandburns | ||||
| title: Running Kubernetes on Azure | ||||
| --- | ||||
| 
 | ||||
| ## Azure Kubernetes Service (AKS) | ||||
| 
 | ||||
| The [Azure Kubernetes Service](https://azure.microsoft.com/en-us/services/kubernetes-service/) offers simple | ||||
| deployments for Kubernetes clusters. | ||||
| 
 | ||||
| For an example of deploying a Kubernetes cluster onto Azure via the Azure Kubernetes Service: | ||||
| 
 | ||||
| **[Microsoft Azure Kubernetes Service](https://docs.microsoft.com/en-us/azure/aks/intro-kubernetes)** | ||||
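| 
 | ||||
| As a brief, non-authoritative illustration (the linked documentation is the source of truth), a basic AKS cluster can typically be created with the Azure CLI; the resource group and cluster names below are placeholders: | ||||
| 
 | ||||
| ```shell | ||||
| # Sketch: create a resource group, a small AKS cluster, and fetch credentials for kubectl | ||||
| az group create --name myResourceGroup --location eastus | ||||
| az aks create --resource-group myResourceGroup --name myAKSCluster --node-count 3 --generate-ssh-keys | ||||
| az aks get-credentials --resource-group myResourceGroup --name myAKSCluster | ||||
| ``` | ||||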
| 
 | ||||
| ## Custom Deployments: AKS-Engine | ||||
| 
 | ||||
| The core of the Azure Kubernetes Service is **open source** and available on GitHub for the community | ||||
| to use and contribute to: **[AKS-Engine](https://github.com/Azure/aks-engine)**. The legacy [ACS-Engine](https://github.com/Azure/acs-engine) codebase has been deprecated in favor of AKS-Engine. | ||||
| 
 | ||||
| AKS-Engine is a good choice if you need to make customizations to the deployment beyond what the Azure Kubernetes | ||||
| Service officially supports. These customizations include deploying into existing virtual networks, utilizing multiple | ||||
| agent pools, and more. Some community contributions to AKS-Engine may even become features of the Azure Kubernetes Service. | ||||
| 
 | ||||
| The input to AKS-Engine is an apimodel JSON file describing the Kubernetes cluster. It is similar to the Azure Resource Manager (ARM) template syntax used to deploy a cluster directly with the Azure Kubernetes Service. The resulting output is an ARM template that can be checked into source control and used to deploy Kubernetes clusters to Azure. | ||||
| 
 | ||||
| You can get started by following the **[AKS-Engine Kubernetes Tutorial](https://github.com/Azure/aks-engine/blob/master/docs/tutorials/README.md)**. | ||||
| 
 | ||||
| ## CoreOS Tectonic for Azure | ||||
| 
 | ||||
| The CoreOS Tectonic Installer for Azure is **open source** and available on GitHub for the community to use and contribute to: **[Tectonic Installer](https://github.com/coreos/tectonic-installer)**. | ||||
| 
 | ||||
| Tectonic Installer is a good choice when you need to make cluster customizations as it is built on [Hashicorp's Terraform](https://www.terraform.io/docs/providers/azurerm/) Azure Resource Manager (ARM) provider. This enables users to customize or integrate using familiar Terraform tooling. | ||||
| 
 | ||||
| You can get started using the [Tectonic Installer for Azure Guide](https://coreos.com/tectonic/docs/latest/install/azure/azure-terraform.html). | ||||
|  | @ -1,223 +0,0 @@ | |||
| --- | ||||
| reviewers: | ||||
| - brendandburns | ||||
| - jbeda | ||||
| - mikedanese | ||||
| - thockin | ||||
| title: Running Kubernetes on Google Compute Engine | ||||
| content_type: task | ||||
| --- | ||||
| 
 | ||||
| <!-- overview --> | ||||
| 
 | ||||
| The example below creates a Kubernetes cluster with 3 worker node Virtual Machines and a master Virtual Machine (i.e. 4 VMs in your cluster). This cluster is set up and controlled from your workstation (or wherever you find convenient). | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| ## {{% heading "prerequisites" %}} | ||||
| 
 | ||||
| 
 | ||||
| If you want a simplified getting started experience and GUI for managing clusters, please consider trying [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/) for hosted cluster installation and management. | ||||
| 
 | ||||
| For an easy way to experiment with the Kubernetes development environment, click the button below | ||||
| to open a Google Cloud Shell with an auto-cloned copy of the Kubernetes source repo. | ||||
| 
 | ||||
| [](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/kubernetes/kubernetes&page=editor&open_in_editor=README.md) | ||||
| 
 | ||||
| If you want to use custom binaries or pure open source Kubernetes, please continue with the instructions below. | ||||
| 
 | ||||
| ### Prerequisites | ||||
| 
 | ||||
| 1. You need a Google Cloud Platform account with billing enabled. Visit the [Google Developers Console](https://console.cloud.google.com) for more details. | ||||
| 1. Install `gcloud` as necessary. `gcloud` can be installed as a part of the [Google Cloud SDK](https://cloud.google.com/sdk/). | ||||
| 1. Enable the [Compute Engine Instance Group Manager API](https://console.developers.google.com/apis/api/replicapool.googleapis.com/overview) in the [Google Cloud developers console](https://console.developers.google.com/apis/library). | ||||
| 1. Make sure that gcloud is set to use the Google Cloud Platform project you want. You can check the current project using `gcloud config list project` and change it via `gcloud config set project <project-id>`. | ||||
| 1. Make sure you have credentials for GCloud by running `gcloud auth login`. | ||||
| 1. (Optional)  In order to make API calls against GCE, you must also run `gcloud auth application-default login`. | ||||
| 1. Make sure you can start up a GCE VM from the command line.  At least make sure you can do the [Create an instance](https://cloud.google.com/compute/docs/instances/#startinstancegcloud) part of the GCE Quickstart. | ||||
| 1. Make sure you can SSH into the VM without interactive prompts.  See the [Log in to the instance](https://cloud.google.com/compute/docs/instances/#sshing) part of the GCE Quickstart. | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| <!-- steps --> | ||||
| 
 | ||||
| ## Starting a cluster | ||||
| 
 | ||||
| You can install a client and start a cluster with either one of these commands (we list both in case only one is installed on your machine): | ||||
| 
 | ||||
| 
 | ||||
| ```shell | ||||
| curl -sS https://get.k8s.io | bash | ||||
| ``` | ||||
| 
 | ||||
| or | ||||
| 
 | ||||
| ```shell | ||||
| wget -q -O - https://get.k8s.io | bash | ||||
| ``` | ||||
| 
 | ||||
| Once this command completes, you will have a master VM and three worker VMs, running as a Kubernetes cluster. | ||||
| 
 | ||||
| By default, some containers will already be running on your cluster. Containers like `fluentd` provide [logging](/docs/concepts/cluster-administration/logging/), while `heapster` provides [monitoring](https://releases.k8s.io/master/cluster/addons/cluster-monitoring/README.md) services. | ||||
| 
 | ||||
| The script run by the commands above creates a cluster with the name/prefix "kubernetes". It defines one specific cluster config, so you can't run it more than once. | ||||
| 
 | ||||
| Alternatively, you can download and install the latest Kubernetes release from [this page](https://github.com/kubernetes/kubernetes/releases), then run the `<kubernetes>/cluster/kube-up.sh` script to start the cluster: | ||||
| 
 | ||||
| ```shell | ||||
| cd kubernetes | ||||
| cluster/kube-up.sh | ||||
| ``` | ||||
| 
 | ||||
| If you want more than one cluster running in your project, want to use a different name, or want a different number of worker nodes, see the `<kubernetes>/cluster/gce/config-default.sh` file for more fine-grained configuration before you start up your cluster. | ||||
| 
 | ||||
| If you run into trouble, please see the section on [troubleshooting](/docs/setup/production-environment/turnkey/gce/#troubleshooting), post to the | ||||
| [Kubernetes Forum](https://discuss.kubernetes.io), or ask questions on the `#gke` Slack channel. | ||||
| 
 | ||||
| The next few steps will show you: | ||||
| 
 | ||||
| 1. How to set up the command line client on your workstation to manage the cluster | ||||
| 1. Examples of how to use the cluster | ||||
| 1. How to delete the cluster | ||||
| 1. How to start clusters with non-default options (like larger clusters) | ||||
| 
 | ||||
| ## Installing the Kubernetes command line tools on your workstation | ||||
| 
 | ||||
| The cluster startup script will leave you with a running cluster and a `kubernetes` directory on your workstation. | ||||
| 
 | ||||
| The [kubectl](/docs/reference/kubectl/kubectl/) tool controls the Kubernetes cluster | ||||
| manager.  It lets you inspect your cluster resources, create, delete, and update | ||||
| components, and much more. You will use it to look at your new cluster and bring | ||||
| up example apps. | ||||
| 
 | ||||
| You can use `gcloud` to install the `kubectl` command-line tool on your workstation: | ||||
| 
 | ||||
| ```shell | ||||
| gcloud components install kubectl | ||||
| ``` | ||||
| 
 | ||||
| {{< note >}} | ||||
| The kubectl version bundled with `gcloud` may be older than the one | ||||
| downloaded by the get.k8s.io install script. See [Installing kubectl](/docs/tasks/tools/install-kubectl/) | ||||
| for how to set up the latest `kubectl` on your workstation. | ||||
| {{< /note >}} | ||||
| 
 | ||||
| ## Getting started with your cluster | ||||
| 
 | ||||
| ### Inspect your cluster | ||||
| 
 | ||||
| Once `kubectl` is in your path, you can use it to look at your cluster. E.g., running: | ||||
| 
 | ||||
| ```shell | ||||
| kubectl get --all-namespaces services | ||||
| ``` | ||||
| 
 | ||||
| should show a set of [services](/docs/concepts/services-networking/service/) that look something like this: | ||||
| 
 | ||||
| ```shell | ||||
| NAMESPACE     NAME          TYPE             CLUSTER_IP       EXTERNAL_IP       PORT(S)        AGE | ||||
| default       kubernetes    ClusterIP        10.0.0.1         <none>            443/TCP        1d | ||||
| kube-system   kube-dns      ClusterIP        10.0.0.2         <none>            53/TCP,53/UDP  1d | ||||
| kube-system   kube-ui       ClusterIP        10.0.0.3         <none>            80/TCP         1d | ||||
| ... | ||||
| ``` | ||||
| 
 | ||||
| Similarly, you can take a look at the set of [pods](/docs/concepts/workloads/pods/) that were created during cluster startup. | ||||
| You can do this via the | ||||
| 
 | ||||
| ```shell | ||||
| kubectl get --all-namespaces pods | ||||
| ``` | ||||
| 
 | ||||
| command. | ||||
| 
 | ||||
| You'll see a list of pods that looks something like this (the name specifics will be different): | ||||
| 
 | ||||
| ```shell | ||||
| NAMESPACE     NAME                                           READY     STATUS    RESTARTS   AGE | ||||
| kube-system   coredns-5f4fbb68df-mc8z8                       1/1       Running   0          15m | ||||
| kube-system   fluentd-cloud-logging-kubernetes-minion-63uo   1/1       Running   0          14m | ||||
| kube-system   fluentd-cloud-logging-kubernetes-minion-c1n9   1/1       Running   0          14m | ||||
| kube-system   fluentd-cloud-logging-kubernetes-minion-c4og   1/1       Running   0          14m | ||||
| kube-system   fluentd-cloud-logging-kubernetes-minion-ngua   1/1       Running   0          14m | ||||
| kube-system   kube-ui-v1-curt1                               1/1       Running   0          15m | ||||
| kube-system   monitoring-heapster-v5-ex4u3                   1/1       Running   1          15m | ||||
| kube-system   monitoring-influx-grafana-v1-piled             2/2       Running   0          15m | ||||
| ``` | ||||
| 
 | ||||
| Some of the pods may take a few seconds to start up (during this time they'll show `Pending`), but check that they all show as `Running` after a short period. | ||||
| 
 | ||||
| ### Run some examples | ||||
| 
 | ||||
| Then, see [a simple nginx example](/docs/tasks/run-application/run-stateless-application-deployment/) to try out your new cluster. | ||||
| 
 | ||||
| For more complete applications, please look in the [examples directory](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/).  The [guestbook example](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/guestbook/) is a good "getting started" walkthrough. | ||||
| 
 | ||||
| ## Tearing down the cluster | ||||
| 
 | ||||
| To remove/delete/teardown the cluster, use the `kube-down.sh` script. | ||||
| 
 | ||||
| ```shell | ||||
| cd kubernetes | ||||
| cluster/kube-down.sh | ||||
| ``` | ||||
| 
 | ||||
| Likewise, the `kube-up.sh` in the same directory will bring it back up. You do not need to rerun the `curl` or `wget` command: everything needed to set up the Kubernetes cluster is now on your workstation. | ||||
| 
 | ||||
| ## Customizing | ||||
| 
 | ||||
| The script above relies on Google Storage to stage the Kubernetes release. It | ||||
| will then start (by default) a single master VM along with 3 worker VMs. You | ||||
| can tweak some of these parameters by editing `kubernetes/cluster/gce/config-default.sh`, | ||||
| or by overriding selected values on the command line, as shown in the sketch below. | ||||
| You can view a transcript of a successful cluster creation | ||||
| [here](https://gist.github.com/satnam6502/fc689d1b46db9772adea). | ||||
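| 
 | ||||
| For example, the following sketch overrides a couple of defaults for a single run instead of editing the file; the exact variable names are defined in `config-default.sh` and may differ between releases: | ||||
| 
 | ||||
| ```shell | ||||
| # Sketch: override selected defaults for one invocation of kube-up.sh | ||||
| NUM_NODES=5 KUBE_GCE_ZONE=us-central1-f cluster/kube-up.sh | ||||
| ``` | ||||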
| 
 | ||||
| ## Troubleshooting | ||||
| 
 | ||||
| ### Project settings | ||||
| 
 | ||||
| You need to have the Google Cloud Storage API and the Google Cloud Storage | ||||
| JSON API enabled. They are activated by default for new projects. Otherwise, you | ||||
| can enable them in the Google Cloud Console. See the [Google Cloud Storage JSON | ||||
| API Overview](https://cloud.google.com/storage/docs/json_api/) for more | ||||
| details. | ||||
| 
 | ||||
| Also ensure that, as listed in the [Prerequisites section](#prerequisites), you've enabled the `Compute Engine Instance Group Manager API`, and that you can start up a GCE VM from the command line as in the [GCE Quickstart](https://cloud.google.com/compute/docs/quickstart) instructions. | ||||
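| 
 | ||||
| If you prefer the command line, the required APIs can usually be enabled with `gcloud` as well; the service names below are assumptions based on the APIs referenced above and may differ for your project: | ||||
| 
 | ||||
| ```shell | ||||
| # Sketch: enable the APIs referenced in this guide | ||||
| gcloud services enable compute.googleapis.com storage-api.googleapis.com replicapool.googleapis.com | ||||
| ``` | ||||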
| 
 | ||||
| ### Cluster initialization hang | ||||
| 
 | ||||
| If the Kubernetes startup script hangs waiting for the API to be reachable, you can troubleshoot by SSHing into the master and node VMs and looking at logs such as `/var/log/startupscript.log`. | ||||
| 
 | ||||
| **Once you fix the issue, you should run `kube-down.sh` to cleanup** after the partial cluster creation, before running `kube-up.sh` to try again. | ||||
| 
 | ||||
| ### SSH | ||||
| 
 | ||||
| If you're having trouble SSHing into your instances, ensure the GCE firewall | ||||
| isn't blocking port 22 to your VMs.  By default, this should work but if you | ||||
| have edited firewall rules or created a new non-default network, you'll need to | ||||
| expose it: `gcloud compute firewall-rules create default-ssh --network=<network-name> | ||||
| --description "SSH allowed from anywhere" --allow tcp:22` | ||||
| 
 | ||||
| Additionally, your GCE SSH key must either have no passphrase or you need to be | ||||
| using `ssh-agent`. | ||||
| 
 | ||||
| ### Networking | ||||
| 
 | ||||
| The instances must be able to connect to each other using their private IP. The | ||||
| script uses the "default" network which should have a firewall rule called | ||||
| "default-allow-internal" which allows traffic on any port on the private IPs. | ||||
| If this rule is missing from the default network, or if you change the network | ||||
| being used in `cluster/config-default.sh`, create a new rule with the following | ||||
| field values (an example `gcloud` command follows the list): | ||||
| 
 | ||||
| * Source Ranges: `10.0.0.0/8` | ||||
| * Allowed Protocols and Port: `tcp:1-65535;udp:1-65535;icmp` | ||||
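| 
 | ||||
| For example, a rule with those values can be created with `gcloud`; the network name is a placeholder for the network your cluster uses: | ||||
| 
 | ||||
| ```shell | ||||
| # Sketch: recreate the internal-traffic rule on a custom network | ||||
| gcloud compute firewall-rules create default-allow-internal \ | ||||
|   --network=<network-name> \ | ||||
|   --source-ranges=10.0.0.0/8 \ | ||||
|   --allow=tcp:1-65535,udp:1-65535,icmp | ||||
| ``` | ||||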
| 
 | ||||
| ## Support Level | ||||
| 
 | ||||
| 
 | ||||
| IaaS Provider        | Config. Mgmt | OS     | Networking  | Docs                                              | Conforms | Support Level | ||||
| -------------------- | ------------ | ------ | ----------  | ---------------------------------------------     | ---------| ---------------------------- | ||||
| GCE                  | Saltstack    | Debian | GCE         | [docs](/docs/setup/production-environment/turnkey/gce/)                                    |   | Project | ||||
| 
 | ||||
| 
 | ||||
|  | @ -1,65 +0,0 @@ | |||
| --- | ||||
| reviewers: | ||||
| - bradtopol | ||||
| title: Running Kubernetes on Multiple Clouds with IBM Cloud Private | ||||
| --- | ||||
| 
 | ||||
| IBM® Cloud Private is an on-premises turnkey cloud solution. It delivers pure upstream Kubernetes with the typical management components that are required to run real enterprise workloads. These workloads include health management, log management, audit trails, and metering for tracking usage of workloads on the platform. | ||||
| 
 | ||||
| IBM Cloud Private is available in a community edition and a fully supported enterprise edition. The community edition is available at no charge from [Docker Hub](https://hub.docker.com/r/ibmcom/icp-inception/). The enterprise edition supports high availability topologies and includes commercial support from IBM for Kubernetes and the IBM Cloud Private management platform. If you want to try IBM Cloud Private, you can use either the hosted trial, the tutorial, or the self-guided demo. You can also try the free community edition. For details, see [Get started with IBM Cloud Private](https://www.ibm.com/cloud/private/get-started). | ||||
| 
 | ||||
| For more information, explore the following resources: | ||||
| 
 | ||||
| * [IBM Cloud Private](https://www.ibm.com/cloud/private) | ||||
| * [Reference architecture for IBM Cloud Private](https://github.com/ibm-cloud-architecture/refarch-privatecloud) | ||||
| * [IBM Cloud Private documentation](https://www.ibm.com/support/knowledgecenter/SSBS6K/product_welcome_cloud_private.html) | ||||
| 
 | ||||
| ## IBM Cloud Private and Terraform | ||||
| 
 | ||||
| The following modules are available where you can deploy IBM Cloud Private by using Terraform: | ||||
| 
 | ||||
| * AWS: [Deploy IBM Cloud Private to AWS](https://github.com/ibm-cloud-architecture/terraform-icp-aws) | ||||
| * Azure: [Deploy IBM Cloud Private to Azure](https://github.com/ibm-cloud-architecture/terraform-icp-azure) | ||||
| * IBM Cloud: [Deploy IBM Cloud Private cluster to IBM Cloud](https://github.com/ibm-cloud-architecture/terraform-icp-ibmcloud) | ||||
| * OpenStack: [Deploy IBM Cloud Private to OpenStack](https://github.com/ibm-cloud-architecture/terraform-icp-openstack) | ||||
| * Terraform module: [Deploy IBM Cloud Private on any supported infrastructure vendor](https://github.com/ibm-cloud-architecture/terraform-module-icp-deploy) | ||||
| * VMware: [Deploy IBM Cloud Private to VMware](https://github.com/ibm-cloud-architecture/terraform-icp-vmware) | ||||
| 
 | ||||
| ## IBM Cloud Private on AWS | ||||
| 
 | ||||
| You can deploy an IBM Cloud Private cluster on Amazon Web Services (AWS) by using Terraform. | ||||
| 
 | ||||
| To deploy IBM Cloud Private in an AWS EC2 environment, see [Installing IBM Cloud Private on AWS](https://github.com/ibm-cloud-architecture/terraform-icp-aws). | ||||
| 
 | ||||
| ## IBM Cloud Private on Azure | ||||
| 
 | ||||
| You can enable Microsoft Azure as a cloud provider for IBM Cloud Private deployment and take advantage of all the IBM Cloud Private features on the Azure public cloud. For more information, see [IBM Cloud Private on Azure](https://www.ibm.com/support/knowledgecenter/SSBS6K_3.2.0/supported_environments/azure_overview.html). | ||||
| 
 | ||||
| ## IBM Cloud Private with Red Hat OpenShift | ||||
| 
 | ||||
| You can deploy IBM certified software containers that are running on IBM Cloud Private onto Red Hat OpenShift. | ||||
| 
 | ||||
| Integration capabilities: | ||||
| 
 | ||||
| * Supports Linux® 64-bit platform in offline-only installation mode | ||||
| * Single-master configuration | ||||
| * Integrated IBM Cloud Private cluster management console and catalog | ||||
| * Integrated core platform services, such as monitoring, metering, and logging | ||||
| * IBM Cloud Private uses the OpenShift image registry | ||||
| 
 | ||||
| For more information, see [IBM Cloud Private on OpenShift](https://www.ibm.com/support/knowledgecenter/SSBS6K_3.2.0/supported_environments/openshift/overview.html). | ||||
| 
 | ||||
| ## IBM Cloud Private on VirtualBox | ||||
| 
 | ||||
| To install IBM Cloud Private to a VirtualBox environment, see [Installing IBM Cloud Private on VirtualBox](https://github.com/ibm-cloud-architecture/refarch-privatecloud-virtualbox). | ||||
| 
 | ||||
| ## IBM Cloud Private on VMware | ||||
| 
 | ||||
| You can install IBM Cloud Private on VMware with either Ubuntu or RHEL images. For details, see the following projects: | ||||
| 
 | ||||
| * [Installing IBM Cloud Private with Ubuntu](https://github.com/ibm-cloud-architecture/refarch-privatecloud/blob/master/Installing_ICp_on_prem_ubuntu.md) | ||||
| * [Installing IBM Cloud Private with Red Hat Enterprise](https://github.com/ibm-cloud-architecture/refarch-privatecloud/tree/master/icp-on-rhel) | ||||
| 
 | ||||
| The IBM Cloud Private Hosted service automatically deploys IBM Cloud Private Hosted on your VMware vCenter Server instances. This service brings the power of microservices and containers to your VMware environment on IBM Cloud. With this service, you can extend the same familiar VMware and IBM Cloud Private operational model and tools from on-premises into the IBM Cloud. | ||||
| 
 | ||||
| For more information, see [IBM Cloud Private Hosted service](https://cloud.ibm.com/docs/vmwaresolutions?topic=vmwaresolutions-icp_overview). | ||||
|  | @ -1,19 +0,0 @@ | |||
| --- | ||||
| title: Running Kubernetes on Tencent Kubernetes Engine | ||||
| --- | ||||
| 
 | ||||
| ## Tencent Kubernetes Engine | ||||
| 
 | ||||
|  [Tencent Cloud Tencent Kubernetes Engine (TKE)](https://intl.cloud.tencent.com/product/tke) provides native Kubernetes container management services. You can deploy and manage a Kubernetes cluster with TKE in just a few steps. For detailed directions, see [Deploy Tencent Kubernetes Engine](https://intl.cloud.tencent.com/document/product/457/11741). | ||||
| 
 | ||||
|  TKE is a [Certified Kubernetes product](https://www.cncf.io/certification/software-conformance/). It is fully compatible with the native Kubernetes API. | ||||
| 
 | ||||
| ## Custom Deployment | ||||
| 
 | ||||
|  The core of Tencent Kubernetes Engine is open source and available [on GitHub](https://github.com/TencentCloud/tencentcloud-cloud-controller-manager/). | ||||
| 
 | ||||
|  When using TKE to create a Kubernetes cluster, you can choose managed mode or independent deployment mode. In addition, you can customize the deployment as needed; for example, you can choose an existing Cloud Virtual Machine instance for cluster creation or enable Kube-proxy in IPVS mode. | ||||
| 
 | ||||
| ## What's Next | ||||
| 
 | ||||
|  To learn more, see the [TKE documentation](https://intl.cloud.tencent.com/document/product/457). | ||||
|  | @ -2441,7 +2441,7 @@ filename | sha512 hash | |||
| - AppProtocol is a new field on Service and Endpoints resources, enabled with the ServiceAppProtocol feature gate. ([#88503](https://github.com/kubernetes/kubernetes/pull/88503), [@robscott](https://github.com/robscott)) [SIG Apps and Network] | ||||
| - BlockVolume and CSIBlockVolume features are now GA. ([#88673](https://github.com/kubernetes/kubernetes/pull/88673), [@jsafrane](https://github.com/jsafrane)) [SIG Apps, Node and Storage] | ||||
| - Consumers of the 'certificatesigningrequests/approval' API must now grant permission to 'approve' CSRs for the 'signerName' specified on the CSR. More information on the new signerName field can be found at https://github.com/kubernetes/enhancements/blob/master/keps/sig-auth/20190607-certificates-api.md#signers ([#88246](https://github.com/kubernetes/kubernetes/pull/88246), [@munnerz](https://github.com/munnerz)) [SIG API Machinery, Apps, Auth, CLI, Node and Testing] | ||||
| - CustomResourceDefinition schemas that use `x-kubernetes-list-map-keys` to specify properties that uniquely identify list items must make those properties required or have a default value, to ensure those properties are present for all list items. See https://kubernetes.io/docs/reference/using-api/api-concepts/#merge-strategy for details. ([#88076](https://github.com/kubernetes/kubernetes/pull/88076), [@eloyekunle](https://github.com/eloyekunle)) [SIG API Machinery and Testing] | ||||
| - CustomResourceDefinition schemas that use `x-kubernetes-list-map-keys` to specify properties that uniquely identify list items must make those properties required or have a default value, to ensure those properties are present for all list items. See https://kubernetes.io/docs/reference/using-api/api-concepts/#merge-strategy for details. ([#88076](https://github.com/kubernetes/kubernetes/pull/88076), [@eloyekunle](https://github.com/eloyekunle)) [SIG API Machinery and Testing] | ||||
| - Fixed missing validation of uniqueness of list items in lists with `x-kubernetes-list-type: map` or `x-kubernetes-list-type: set` in CustomResources. ([#84920](https://github.com/kubernetes/kubernetes/pull/84920), [@sttts](https://github.com/sttts)) [SIG API Machinery] | ||||
| - Fixes a regression with clients prior to 1.15 not being able to update podIP in pod status, or podCIDR in node spec, against >= 1.16 API servers ([#88505](https://github.com/kubernetes/kubernetes/pull/88505), [@liggitt](https://github.com/liggitt)) [SIG Apps and Network] | ||||
| - Ingress: Add Exact and Prefix matching to Ingress PathTypes ([#88587](https://github.com/kubernetes/kubernetes/pull/88587), [@cmluciano](https://github.com/cmluciano)) [SIG Apps, Cluster Lifecycle and Network] | ||||
|  |  | |||
|  | @ -1,223 +0,0 @@ | |||
| --- | ||||
| reviewers: | ||||
| - lavalamp | ||||
| - thockin | ||||
| title: Cluster Management | ||||
| content_type: concept | ||||
| --- | ||||
| 
 | ||||
| <!-- overview --> | ||||
| 
 | ||||
| This document describes several topics related to the lifecycle of a cluster: creating a new cluster, | ||||
| upgrading your cluster's | ||||
| master and worker nodes, performing node maintenance (e.g. kernel upgrades), and upgrading the Kubernetes API version of a | ||||
| running cluster. | ||||
| 
 | ||||
| <!-- body --> | ||||
| 
 | ||||
| ## Creating and configuring a Cluster | ||||
| 
 | ||||
| To install Kubernetes on a set of machines, consult one of the existing [Getting Started guides](/docs/setup/) depending on your environment. | ||||
| 
 | ||||
| ## Upgrading a cluster | ||||
| 
 | ||||
| The current state of cluster upgrades is provider dependent, and some releases may require special care when upgrading. It is recommended that administrators consult both the [release notes](https://git.k8s.io/kubernetes/CHANGELOG/README.md), as well as the version specific upgrade notes prior to upgrading their clusters. | ||||
| 
 | ||||
| ### Upgrading an Azure Kubernetes Service (AKS) cluster | ||||
| 
 | ||||
| Azure Kubernetes Service enables easy self-service upgrades of the control plane and nodes in your cluster. The process is | ||||
| currently user-initiated and is described in the [Azure AKS documentation](https://docs.microsoft.com/en-us/azure/aks/upgrade-cluster). | ||||
| 
 | ||||
| ### Upgrading Google Compute Engine clusters | ||||
| 
 | ||||
| Google Compute Engine Open Source (GCE-OSS) supports master upgrades by deleting and | ||||
| recreating the master, while maintaining the same Persistent Disk (PD) to ensure that data is retained across the | ||||
| upgrade. | ||||
| 
 | ||||
| Node upgrades for GCE use a [Managed Instance Group](https://cloud.google.com/compute/docs/instance-groups/); each node | ||||
| is sequentially destroyed and then recreated with new software. Any Pods that are running on that node need to be | ||||
| controlled by a Replication Controller, or manually re-created after the rollout. | ||||
| 
 | ||||
| Upgrades on open source Google Compute Engine (GCE) clusters are controlled by the `cluster/gce/upgrade.sh` script. | ||||
| 
 | ||||
| Get its usage by running `cluster/gce/upgrade.sh -h`. | ||||
| 
 | ||||
| For example, to upgrade just your master to a specific version (v1.0.2): | ||||
| 
 | ||||
| ```shell | ||||
| cluster/gce/upgrade.sh -M v1.0.2 | ||||
| ``` | ||||
| 
 | ||||
| Alternatively, to upgrade your entire cluster to the latest stable release: | ||||
| 
 | ||||
| ```shell | ||||
| cluster/gce/upgrade.sh release/stable | ||||
| ``` | ||||
| 
 | ||||
| ### Upgrading Google Kubernetes Engine clusters | ||||
| 
 | ||||
| Google Kubernetes Engine automatically updates master components (e.g. `kube-apiserver`, `kube-scheduler`) to the latest version. It also handles upgrading the operating system and other components that the master runs on. | ||||
| 
 | ||||
| The node upgrade process is user-initiated and is described in the [Google Kubernetes Engine documentation](https://cloud.google.com/kubernetes-engine/docs/clusters/upgrade). | ||||
| 
 | ||||
| ### Upgrading an Amazon EKS Cluster | ||||
| 
 | ||||
| Amazon EKS cluster's master components can be upgraded by using eksctl, AWS Management Console, or AWS CLI. The process is user-initiated and is described in the [Amazon EKS documentation](https://docs.aws.amazon.com/eks/latest/userguide/update-cluster.html). | ||||
| 
 | ||||
| ### Upgrading an Oracle Cloud Infrastructure Container Engine for Kubernetes (OKE) cluster | ||||
| 
 | ||||
| Oracle creates and manages a set of master nodes in the Oracle control plane on your behalf (and associated Kubernetes infrastructure such as etcd nodes) to ensure you have a highly available managed Kubernetes control plane. You can also seamlessly upgrade these master nodes to new versions of Kubernetes with zero downtime. These actions are described in the [OKE documentation](https://docs.cloud.oracle.com/iaas/Content/ContEng/Tasks/contengupgradingk8smasternode.htm).  | ||||
| 
 | ||||
| ### Upgrading clusters on other platforms | ||||
| 
 | ||||
| Different providers and tools manage upgrades differently. It is recommended that you consult their main documentation regarding upgrades. | ||||
| 
 | ||||
| * [kops](https://github.com/kubernetes/kops) | ||||
| * [kubespray](https://github.com/kubernetes-sigs/kubespray) | ||||
| * [CoreOS Tectonic](https://coreos.com/tectonic/docs/latest/admin/upgrade.html) | ||||
| * [Digital Rebar](https://provision.readthedocs.io/en/tip/doc/content-packages/krib.html) | ||||
| * ... | ||||
| 
 | ||||
| To upgrade a cluster on a platform not mentioned in the above list, check the order of component upgrade on the | ||||
| [Skewed versions](/docs/setup/release/version-skew-policy/#supported-component-upgrade-order) page. | ||||
| 
 | ||||
| ## Resizing a cluster | ||||
| 
 | ||||
| If your cluster runs short on resources, you can easily add more machines to it, provided your cluster | ||||
| is running in [Node self-registration mode](/docs/concepts/architecture/nodes/#self-registration-of-nodes). | ||||
| If you're using GCE or Google Kubernetes Engine, you do this by resizing the Instance Group managing your Nodes. | ||||
| You can modify the number of instances on the | ||||
| `Compute > Compute Engine > Instance groups > your group > Edit group` | ||||
| [Google Cloud Console page](https://console.developers.google.com), or by using the gcloud CLI: | ||||
| 
 | ||||
| ```shell | ||||
| gcloud compute instance-groups managed resize kubernetes-node-pool --size=42 --zone=$ZONE | ||||
| ``` | ||||
| 
 | ||||
| The Instance Group will take care of putting the appropriate image on the new machines and starting them, | ||||
| while the Kubelet will register its Node with the API server to make it available for scheduling. | ||||
| If you scale the instance group down, the system will randomly choose Nodes to kill. | ||||
| 
 | ||||
| In other environments you may need to configure the machine yourself and tell the Kubelet on which machine the API server is running. | ||||
| 
 | ||||
| ### Resizing an Azure Kubernetes Service (AKS) cluster | ||||
| 
 | ||||
| Azure Kubernetes Service enables user-initiated resizing of the cluster from either the CLI or | ||||
| the Azure Portal and is described in the | ||||
| [Azure AKS documentation](https://docs.microsoft.com/en-us/azure/aks/scale-cluster). | ||||
| 
 | ||||
| 
 | ||||
| ### Cluster autoscaling | ||||
| 
 | ||||
| If you are using GCE or Google Kubernetes Engine, you can configure your cluster so that it is automatically rescaled based on | ||||
| pod needs. | ||||
| 
 | ||||
| As described in [Compute Resource](/docs/concepts/configuration/manage-resources-containers/), | ||||
| users can reserve how much CPU and memory is allocated to pods. | ||||
| This information is used by the Kubernetes scheduler to find a place to run the pod. If there is | ||||
| no node that has enough free capacity (or doesn't match other pod requirements) then the pod has | ||||
| to wait until some pods are terminated or a new node is added. | ||||
| 
 | ||||
| Cluster autoscaler looks for the pods that cannot be scheduled and checks if adding a new node, similar | ||||
| to the others in the cluster, would help. If so, it resizes the cluster to accommodate the waiting pods. | ||||
| 
 | ||||
| Cluster autoscaler also scales down the cluster if it notices that one or more nodes are not needed anymore for | ||||
| an extended period of time (10 minutes by default, but this may change in the future). | ||||
| 
 | ||||
| Cluster autoscaler is configured per instance group (GCE) or node pool (Google Kubernetes Engine). | ||||
| 
 | ||||
| If you are using GCE, you can enable cluster autoscaler while creating a cluster with the kube-up.sh script. | ||||
| To configure cluster autoscaler, you have to set three environment variables: | ||||
| 
 | ||||
| * `KUBE_ENABLE_CLUSTER_AUTOSCALER` - enables cluster autoscaler if set to true. | ||||
| * `KUBE_AUTOSCALER_MIN_NODES` - minimum number of nodes in the cluster. | ||||
| * `KUBE_AUTOSCALER_MAX_NODES` - maximum number of nodes in the cluster. | ||||
| 
 | ||||
| Example: | ||||
| 
 | ||||
| ```shell | ||||
| KUBE_ENABLE_CLUSTER_AUTOSCALER=true KUBE_AUTOSCALER_MIN_NODES=3 KUBE_AUTOSCALER_MAX_NODES=10 NUM_NODES=5 ./cluster/kube-up.sh | ||||
| ``` | ||||
| 
 | ||||
| On Google Kubernetes Engine, you configure cluster autoscaler either on cluster creation or update, or when creating a particular node pool | ||||
| (which you want to be autoscaled), by passing the flags `--enable-autoscaling`, `--min-nodes`, and `--max-nodes` | ||||
| to the corresponding `gcloud` commands. | ||||
| 
 | ||||
| Examples: | ||||
| 
 | ||||
| ```shell | ||||
| gcloud container clusters create mytestcluster --zone=us-central1-b --enable-autoscaling --min-nodes=3 --max-nodes=10 --num-nodes=5 | ||||
| ``` | ||||
| 
 | ||||
| ```shell | ||||
| gcloud container clusters update mytestcluster --enable-autoscaling --min-nodes=1 --max-nodes=15 | ||||
| ``` | ||||
| 
 | ||||
| **Cluster autoscaler expects that nodes have not been manually modified (e.g. by adding labels via kubectl) as those properties would not be propagated to the new nodes within the same instance group.** | ||||
| 
 | ||||
| For more details about how the cluster autoscaler decides whether, when and how | ||||
| to scale a cluster, please refer to the [FAQ](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md) | ||||
| documentation from the autoscaler project. | ||||
| 
 | ||||
| ## Maintenance on a Node | ||||
| 
 | ||||
| If you need to reboot a node (such as for a kernel upgrade, libc upgrade, hardware repair, etc.), and the downtime is | ||||
| brief, then when the Kubelet restarts, it will attempt to restart the pods scheduled to it.  If the reboot takes longer | ||||
| (the default time is 5 minutes, controlled by `--pod-eviction-timeout` on the controller-manager), | ||||
| then the node controller will terminate the pods that are bound to the unavailable node.  If there is a corresponding | ||||
| replica set (or replication controller), then a new copy of the pod will be started on a different node.  So, in the case where all | ||||
| pods are replicated, upgrades can be done without special coordination, assuming that not all nodes will go down at the same time. | ||||
| 
 | ||||
| If you want more control over the upgrading process, you may use the following workflow: | ||||
| 
 | ||||
| Use `kubectl drain` to gracefully terminate all pods on the node while marking the node as unschedulable: | ||||
| 
 | ||||
| ```shell | ||||
| kubectl drain $NODENAME | ||||
| ``` | ||||
| 
 | ||||
| This keeps new pods from landing on the node while you are trying to get them off. | ||||
| 
 | ||||
| For pods with a replica set, the pod will be replaced by a new pod which will be scheduled to a new node. Additionally, if the pod is part of a service, then clients will automatically be redirected to the new pod. | ||||
| 
 | ||||
| For pods with no replica set, you need to bring up a new copy of the pod, and assuming it is not part of a service, redirect clients to it. | ||||
| 
 | ||||
| Perform maintenance work on the node. | ||||
| 
 | ||||
| Make the node schedulable again: | ||||
| 
 | ||||
| ```shell | ||||
| kubectl uncordon $NODENAME | ||||
| ``` | ||||
| 
 | ||||
| If you deleted the node's VM instance and created a new one, then a new schedulable node resource will | ||||
| be created automatically (if you're using a cloud provider that supports | ||||
| node discovery; currently this is only Google Compute Engine, not including CoreOS on Google Compute Engine using kube-register). | ||||
| See [Node](/docs/concepts/architecture/nodes/) for more details. | ||||
| 
 | ||||
| ## Advanced Topics | ||||
| 
 | ||||
| ### Turn on or off an API version for your cluster | ||||
| 
 | ||||
| Specific API versions can be turned on or off by passing the `--runtime-config=api/<version>` flag while bringing up the API server. For example, to turn off the v1 API, pass `--runtime-config=api/v1=false`. | ||||
| `runtime-config` also supports 2 special keys: `api/all` and `api/legacy`, to control all and legacy APIs respectively. | ||||
| For example, to turn off all API versions except v1, pass `--runtime-config=api/all=false,api/v1=true`. | ||||
| For the purposes of these flags, _legacy_ APIs are those APIs which have been explicitly deprecated (e.g. `v1beta3`). | ||||
| 
 | ||||
| ### Switching your cluster's storage API version | ||||
| 
 | ||||
| The objects that are stored to disk for a cluster's internal representation of the Kubernetes resources active in the cluster are written using a particular version of the API. | ||||
| When the supported API changes, these objects may need to be rewritten in the newer API.  Failure to do this will eventually result in resources that are no longer decodable or usable | ||||
| by the Kubernetes API server. | ||||
| 
 | ||||
| ### Switching your config files to a new API version | ||||
| 
 | ||||
| You can use the `kubectl convert` command to convert config files between different API versions. | ||||
| 
 | ||||
| ```shell | ||||
| kubectl convert -f pod.yaml --output-version v1 | ||||
| ``` | ||||
| 
 | ||||
| For more options, please refer to the usage of [kubectl convert](/docs/reference/generated/kubectl/kubectl-commands#convert) command. | ||||
| 
 | ||||
| 
 | ||||
|  | @ -0,0 +1,93 @@ | |||
| --- | ||||
| title: Upgrade A Cluster | ||||
| content_type: task | ||||
| --- | ||||
| 
 | ||||
| <!-- overview --> | ||||
| This page provides an overview of the steps you should follow to upgrade a | ||||
| Kubernetes cluster. | ||||
| 
 | ||||
| The way that you upgrade a cluster depends on how you initially deployed it | ||||
| and on any subsequent changes. | ||||
| 
 | ||||
| At a high level, the steps you perform are: | ||||
| 
 | ||||
| - Upgrade the {{< glossary_tooltip text="control plane" term_id="control-plane" >}} | ||||
| - Upgrade the nodes in your cluster | ||||
| - Upgrade clients such as {{< glossary_tooltip text="kubectl" term_id="kubectl" >}} | ||||
| - Adjust manifests and other resources based on the API changes that accompany the | ||||
|   new Kubernetes version | ||||
| 
 | ||||
| ## {{% heading "prerequisites" %}} | ||||
| 
 | ||||
| You must have an existing cluster. This page is about upgrading from Kubernetes | ||||
| {{< skew prevMinorVersion >}} to Kubernetes {{< skew latestVersion >}}. If your cluster | ||||
| is not currently running Kubernetes {{< skew prevMinorVersion >}} then please check | ||||
| the documentation for the version of Kubernetes that you plan to upgrade to. | ||||
| 
 | ||||
| ## Upgrade approaches | ||||
| 
 | ||||
| ### kubeadm {#upgrade-kubeadm} | ||||
| 
 | ||||
| If your cluster was deployed using the `kubeadm` tool, refer to  | ||||
| [Upgrading kubeadm clusters](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/) | ||||
| for detailed information on how to upgrade the cluster. | ||||
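| 
 | ||||
| As a very high-level sketch of what that guide covers in detail, upgrading the first control plane node with kubeadm typically involves: | ||||
| 
 | ||||
| ```shell | ||||
| # Sketch only; follow the linked guide for the full, version-specific procedure | ||||
| kubeadm upgrade plan            # lists the versions you can upgrade to | ||||
| kubeadm upgrade apply v1.x.y    # substitute the exact target version | ||||
| ``` | ||||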
| 
 | ||||
| Once you have upgraded the cluster, remember to | ||||
| [install the latest version of `kubectl`](/docs/tasks/tools/install-kubectl/). | ||||
| 
 | ||||
| ### Manual deployments | ||||
| 
 | ||||
| {{< caution >}} | ||||
| These steps do not account for third-party extensions such as network and storage | ||||
| plugins. | ||||
| {{< /caution >}} | ||||
| 
 | ||||
| You should manually update the control plane following this sequence: | ||||
| 
 | ||||
| - etcd (all instances) | ||||
| - kube-apiserver (all control plane hosts) | ||||
| - kube-controller-manager | ||||
| - kube-scheduler | ||||
| - cloud controller manager, if you use one | ||||
| 
 | ||||
| At this point you should | ||||
| [install the latest version of `kubectl`](/docs/tasks/tools/install-kubectl/). | ||||
| 
 | ||||
| For each node in your cluster, [drain](/docs/tasks/administer-cluster/safely-drain-node/) | ||||
| that node and then either replace it with a new node that uses the {{< skew latestVersion >}} | ||||
| kubelet, or upgrade the kubelet on that node and bring the node back into service. | ||||
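| 
 | ||||
| For example, the per-node flow might look like the following sketch; the node name is a placeholder, and the kubelet upgrade step depends on how the kubelet was installed: | ||||
| 
 | ||||
| ```shell | ||||
| # Sketch: take one node out of service, upgrade its kubelet, then bring it back | ||||
| kubectl drain <node-name> --ignore-daemonsets | ||||
| # ...upgrade the kubelet on <node-name> using your OS package manager or node image... | ||||
| kubectl uncordon <node-name> | ||||
| ``` | ||||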
| 
 | ||||
| ### Other deployments {#upgrade-other} | ||||
| 
 | ||||
| Refer to the documentation for your cluster deployment tool to learn the recommended | ||||
| steps for maintenance. | ||||
| 
 | ||||
| ## Post-upgrade tasks | ||||
| 
 | ||||
| ### Switch your cluster's storage API version | ||||
| 
 | ||||
| The objects that are serialized into etcd for a cluster's internal | ||||
| representation of the Kubernetes resources active in the cluster are | ||||
| written using a particular version of the API. | ||||
| 
 | ||||
| When the supported API changes, these objects may need to be rewritten | ||||
| in the newer API. Failure to do this will eventually result in resources | ||||
| that are no longer decodable or usable by the Kubernetes API server. | ||||
| 
 | ||||
| For each affected object, fetch it using the latest supported API and then | ||||
| write it back also using the latest supported API. | ||||
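| 
 | ||||
| One common way to do this is to read each object back and re-save it with `kubectl`; the resource kind and object name below are placeholders: | ||||
| 
 | ||||
| ```shell | ||||
| # Sketch: re-persist an object so it is stored using the newest supported API version | ||||
| kubectl get <resource> <name> -o yaml | kubectl replace -f - | ||||
| ``` | ||||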
| 
 | ||||
| ### Update manifests | ||||
| 
 | ||||
| Upgrading to a new Kubernetes version can provide new APIs. | ||||
| 
 | ||||
| You can use the `kubectl convert` command to convert manifests between different API versions. | ||||
| For example: | ||||
| 
 | ||||
| ```shell | ||||
| kubectl convert -f pod.yaml --output-version v1 | ||||
| ``` | ||||
| 
 | ||||
| The `kubectl` tool writes out a converted manifest that still sets `kind` to | ||||
| Pod (unchanged), but with a revised `apiVersion`. | ||||
|  | @ -0,0 +1,29 @@ | |||
| --- | ||||
| title: Enable Or Disable A Kubernetes API | ||||
| content_type: task | ||||
| --- | ||||
| 
 | ||||
| <!-- overview --> | ||||
| This page shows how to enable or disable an API version from your cluster's | ||||
| {{< glossary_tooltip text="control plane" term_id="control-plane" >}}. | ||||
| 
 | ||||
| <!-- steps --> | ||||
| 
 | ||||
| 
 | ||||
| Specific API versions can be turned on or off by passing `--runtime-config=api/<version>` as a | ||||
| command line argument to the API server. The values for this argument are a comma-separated | ||||
| list of API versions. Later values override earlier values. | ||||
| 
 | ||||
| The `runtime-config` command line argument also supports 2 special keys: | ||||
| 
 | ||||
| - `api/all`, representing all known APIs | ||||
| - `api/legacy`, representing only legacy APIs. Legacy APIs are any APIs that have been | ||||
|    explicitly [deprecated](/docs/reference/using-api/deprecation-policy/). | ||||
| 
 | ||||
| For example, to turn off all API versions except v1, pass `--runtime-config=api/all=false,api/v1=true` | ||||
| to the `kube-apiserver`. | ||||
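| 
 | ||||
| After changing this setting and restarting the `kube-apiserver`, you can check which API versions your cluster is actually serving: | ||||
| 
 | ||||
| ```shell | ||||
| kubectl api-versions | ||||
| ``` | ||||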
| 
 | ||||
| ## {{% heading "whatsnext" %}} | ||||
| 
 | ||||
| Read the [full documentation](/docs/reference/command-line-tools-reference/kube-apiserver/) | ||||
| for the `kube-apiserver` component. | ||||
|  | @ -140,7 +140,7 @@ curl -L https://github.com/kubernetes-sigs/sig-windows-tools/releases/latest/dow | |||
| ### Joining a Windows worker node | ||||
| {{< note >}} | ||||
| You must install the `Containers` feature and install Docker. Instructions | ||||
| to do so are available at [Install Docker Engine - Enterprise on Windows Servers](https://docs.mirantis.com/docker-enterprise/v3.1/dockeree-products/docker-engine-enterprise/dee-windows.html). | ||||
| to do so are available at [Install Docker Engine - Enterprise on Windows Servers](https://hub.docker.com/editions/enterprise/docker-ee-server-windows). | ||||
| {{< /note >}} | ||||
| 
 | ||||
| {{< note >}} | ||||
|  |  | |||
|  | @ -4,14 +4,14 @@ reviewers: | |||
| - mml | ||||
| - foxish | ||||
| - kow3ns | ||||
| title: Safely Drain a Node while Respecting the PodDisruptionBudget | ||||
| title: Safely Drain a Node | ||||
| content_type: task | ||||
| min-kubernetes-server-version: 1.5 | ||||
| --- | ||||
| 
 | ||||
| <!-- overview --> | ||||
| This page shows how to safely drain a {{< glossary_tooltip text="node" term_id="node" >}}, | ||||
| respecting the PodDisruptionBudget you have defined. | ||||
| optionally respecting the PodDisruptionBudget you have defined. | ||||
| 
 | ||||
| ## {{% heading "prerequisites" %}} | ||||
| 
 | ||||
|  | @ -27,6 +27,15 @@ This task also assumes that you have met the following prerequisites: | |||
| 
 | ||||
| <!-- steps --> | ||||
| 
 | ||||
| ## (Optional) Configure a disruption budget {#configure-poddisruptionbudget} | ||||
| 
 | ||||
| To ensure that your workloads remain available during maintenance, you can | ||||
| configure a [PodDisruptionBudget](/docs/concepts/workloads/pods/disruptions/). | ||||
| 
 | ||||
| If availability is important for any applications that run or could run on the node(s) | ||||
| that you are draining, [configure PodDisruptionBudgets](/docs/tasks/run-application/configure-pdb/) | ||||
| first and then continue following this guide. | ||||
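| 
 | ||||
| As an illustration only (the linked task page is the authoritative guide), a simple budget can be created imperatively; the name, label selector, and threshold below are placeholders: | ||||
| 
 | ||||
| ```shell | ||||
| # Sketch: require at least 2 matching Pods to stay available during voluntary disruptions | ||||
| kubectl create poddisruptionbudget my-pdb --selector=app=my-app --min-available=2 | ||||
| ``` | ||||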
| 
 | ||||
| ## Use `kubectl drain` to remove a node from service | ||||
| 
 | ||||
| You can use `kubectl drain` to safely evict all of your pods from a | ||||
|  | @ -158,7 +167,4 @@ application owners and cluster owners to establish an agreement on behavior in t | |||
| 
 | ||||
| 
 | ||||
| * Follow steps to protect your application by [configuring a Pod Disruption Budget](/docs/tasks/run-application/configure-pdb/). | ||||
| * Learn more about [maintenance on a node](/docs/tasks/administer-cluster/cluster-management/#maintenance-on-a-node). | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
|  |  | |||
|  | @ -330,8 +330,8 @@ seconds. Minimum value is 1. | |||
| * `timeoutSeconds`: Number of seconds after which the probe times out. Defaults | ||||
| to 1 second. Minimum value is 1. | ||||
| * `successThreshold`: Minimum consecutive successes for the probe to be | ||||
| considered successful after having failed. Defaults to 1. Must be 1 for | ||||
| liveness. Minimum value is 1. | ||||
| considered successful after having failed. Defaults to 1. Must be 1 for liveness | ||||
| and startup Probes. Minimum value is 1. | ||||
| * `failureThreshold`: When a probe fails, Kubernetes will | ||||
| try `failureThreshold` times before giving up. Giving up in case of liveness probe means restarting the container. In case of readiness probe the Pod will be marked Unready. | ||||
| Defaults to 3. Minimum value is 1. | ||||
|  |  | |||
|  | @ -532,7 +532,7 @@ This functionality is available in Kubernetes v1.6 and later. | |||
| 
 | ||||
| ## Use ConfigMap-defined environment variables in Pod commands | ||||
| 
 | ||||
| You can use ConfigMap-defined environment variables in the `command` section of the Pod specification using the `$(VAR_NAME)` Kubernetes substitution syntax. | ||||
| You can use ConfigMap-defined environment variables in the `command` and `args` of a container using the `$(VAR_NAME)` Kubernetes substitution syntax. | ||||
| 
 | ||||
| For example, the following Pod specification | ||||
| 
 | ||||
|  |  | |||
|  | @ -45,8 +45,8 @@ kubectl create namespace qos-example | |||
| 
 | ||||
| For a Pod to be given a QoS class of Guaranteed: | ||||
| 
 | ||||
| * Every Container in the Pod must have a memory limit and a memory request, and they must be the same. | ||||
| * Every Container in the Pod must have a CPU limit and a CPU request, and they must be the same. | ||||
| * Every Container, including init containers, in the Pod must have a memory limit and a memory request, and they must be the same. | ||||
| * Every Container, including init containers, in the Pod must have a CPU limit and a CPU request, and they must be the same. | ||||
| 
 | ||||
| Here is the configuration file for a Pod that has one Container. The Container has a memory limit and a | ||||
| memory request, both equal to 200 MiB. The Container has a CPU limit and a CPU request, both equal to 700 milliCPU: | ||||
|  |  | |||
|  | @ -9,10 +9,11 @@ title: Auditing | |||
| 
 | ||||
| <!-- overview --> | ||||
| 
 | ||||
| Kubernetes auditing provides a security-relevant chronological set of records documenting | ||||
| the sequence of activities that have affected system by individual users, administrators | ||||
| or other components of the system. It allows cluster administrator to | ||||
| answer the following questions: | ||||
| Kubernetes _auditing_ provides a security-relevant, chronological set of records documenting | ||||
| the sequence of actions in a cluster. The cluster audits the activities generated by users, | ||||
| by applications that use the Kubernetes API, and by the control plane itself. | ||||
| 
 | ||||
| Auditing allows cluster administrators to answer the following questions: | ||||
| 
 | ||||
|  - what happened? | ||||
|  - when did it happen? | ||||
|  | @ -32,7 +33,7 @@ a certain policy and written to a backend. The policy determines what's recorded | |||
| and the backends persist the records. The current backend implementations | ||||
| include logs files and webhooks. | ||||
| 
 | ||||
| Each request can be recorded with an associated "stage". The known stages are: | ||||
| Each request can be recorded with an associated _stage_. The defined stages are: | ||||
| 
 | ||||
| - `RequestReceived` - The stage for events generated as soon as the audit | ||||
|   handler receives the request, and before it is delegated down the handler | ||||
|  | @ -45,19 +46,23 @@ Each request can be recorded with an associated "stage". The known stages are: | |||
| - `Panic` - Events generated when a panic occurred. | ||||
| 
 | ||||
| {{< note >}} | ||||
| The audit logging feature increases the memory consumption of the API server | ||||
| because some context required for auditing is stored for each request. | ||||
| Additionally, memory consumption depends on the audit logging configuration. | ||||
| Audit events are different from the | ||||
| [Event](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#event-v1-core) | ||||
| API object. | ||||
| {{< /note >}} | ||||
| 
 | ||||
| ## Audit Policy | ||||
| The audit logging feature increases the memory consumption of the API server | ||||
| because some context required for auditing is stored for each request. | ||||
| Memory consumption depends on the audit logging configuration. | ||||
| 
 | ||||
| ## Audit policy | ||||
| 
 | ||||
| Audit policy defines rules about what events should be recorded and what data | ||||
| they should include. The audit policy object structure is defined in the | ||||
| [`audit.k8s.io` API group](https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/staging/src/k8s.io/apiserver/pkg/apis/audit/v1/types.go). | ||||
| When an event is processed, it's | ||||
| compared against the list of rules in order. The first matching rule sets the | ||||
| "audit level" of the event. The known audit levels are: | ||||
| _audit level_ of the event. The defined audit levels are: | ||||
| 
 | ||||
| - `None` - don't log events that match this rule. | ||||
| - `Metadata` - log request metadata (requesting user, timestamp, resource, | ||||
|  | @ -86,26 +91,27 @@ rules: | |||
| - level: Metadata | ||||
| ``` | ||||
| 
 | ||||
| The audit profile used by GCE should be used as reference by admins constructing their own audit profiles. You can check the | ||||
| If you're crafting your own audit profile, you can use the audit profile for Google Container-Optimized OS as a starting point. You can check the | ||||
| [configure-helper.sh](https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/cluster/gce/gci/configure-helper.sh) | ||||
| script, which generates the audit policy file. You can see most of the audit policy file by looking directly at the script. | ||||
| script, which generates an audit policy file. You can see most of the audit policy file by looking directly at the script. | ||||
| 
 | ||||
| ## Audit backends | ||||
| 
 | ||||
| Audit backends persist audit events to an external storage. | ||||
| Out of the box, the kube-apiserver provides two backends: | ||||
| 
 | ||||
| - Log backend, which writes events to a disk | ||||
| - Webhook backend, which sends events to an external API | ||||
| - Log backend, which writes events into the filesystem | ||||
| - Webhook backend, which sends events to an external HTTP API | ||||
| 
 | ||||
| In all cases, audit events structure is defined by the API in the | ||||
| `audit.k8s.io` API group. The current version of the API is | ||||
| In all cases, audit events follow a structure defined by the Kubernetes API in the | ||||
| `audit.k8s.io` API group. For Kubernetes {{< param "fullversion" >}}, that | ||||
| API is at version | ||||
| [`v1`](https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/staging/src/k8s.io/apiserver/pkg/apis/audit/v1/types.go). | ||||
| 
 | ||||
| {{< note >}} | ||||
| In the case of patches, the request body is a JSON array with patch operations, not a JSON object | ||||
| with an appropriate Kubernetes API object. For example, the following request body is a valid patch | ||||
| request to `/apis/batch/v1/namespaces/some-namespace/jobs/some-job-name`. | ||||
| request to `/apis/batch/v1/namespaces/some-namespace/jobs/some-job-name`: | ||||
| 
 | ||||
| ```json | ||||
| [ | ||||
|  | @ -125,8 +131,8 @@ request to `/apis/batch/v1/namespaces/some-namespace/jobs/some-job-name`. | |||
| 
 | ||||
| ### Log backend | ||||
| 
 | ||||
| Log backend writes audit events to a file in JSON format. You can configure | ||||
| log audit backend using the following `kube-apiserver` flags: | ||||
| The log backend writes audit events to a file in [JSONlines](https://jsonlines.org/) format. | ||||
| You can configure the log audit backend using the following `kube-apiserver` flags: | ||||
| 
 | ||||
| - `--audit-log-path` specifies the log file path that the log backend uses to write | ||||
|   audit events. Not specifying this flag disables the log backend. `-` means standard out | ||||
|  | @ -134,15 +140,16 @@ log audit backend using the following `kube-apiserver` flags: | |||
| - `--audit-log-maxbackup` defines the maximum number of audit log files to retain | ||||
| - `--audit-log-maxsize` defines the maximum size in megabytes of the audit log file before it gets rotated | ||||
| 
 | ||||
| In case kube-apiserver is configured as a Pod,remember to mount the hostPath to the location of the policy file and log file. For example,  | ||||
| ` | ||||
| --audit-policy-file=/etc/kubernetes/audit-policy.yaml | ||||
| --audit-log-path=/var/log/audit.log | ||||
| ` | ||||
| If your cluster's control plane runs the kube-apiserver as a Pod, remember to mount the `hostPath` | ||||
| to the location of the policy file and log file, so that audit records are persisted. For example: | ||||
| ```shell | ||||
|     --audit-policy-file=/etc/kubernetes/audit-policy.yaml \ | ||||
|     --audit-log-path=/var/log/audit.log | ||||
| ``` | ||||
| then mount the volumes: | ||||
| 
 | ||||
| 
 | ||||
| ``` | ||||
| ```yaml | ||||
| ... | ||||
| volumeMounts: | ||||
|   - mountPath: /etc/kubernetes/audit-policy.yaml | ||||
|     name: audit | ||||
|  | @ -151,9 +158,10 @@ volumeMounts: | |||
|     name: audit-log | ||||
|     readOnly: false | ||||
| ``` | ||||
| finally the hostPath: | ||||
| and finally configure the `hostPath`: | ||||
| 
 | ||||
| ``` | ||||
| ```yaml | ||||
| ... | ||||
| - name: audit | ||||
|   hostPath: | ||||
|     path: /etc/kubernetes/audit-policy.yaml | ||||
|  | @ -163,19 +171,19 @@ finally the hostPath: | |||
|   hostPath: | ||||
|     path: /var/log/audit.log | ||||
|     type: FileOrCreate | ||||
|      | ||||
| 
 | ||||
| ``` | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| ### Webhook backend | ||||
| 
 | ||||
| Webhook backend sends audit events to a remote API, which is assumed to be the | ||||
| same API as `kube-apiserver` exposes. You can configure webhook | ||||
| audit backend using the following kube-apiserver flags: | ||||
| The webhook audit backend sends audit events to a remote web API, which is assumed to | ||||
| be a form of the Kubernetes API, including means of authentication. You can configure | ||||
| a webhook audit backend using the following kube-apiserver flags: | ||||
| 
 | ||||
| - `--audit-webhook-config-file` specifies the path to a file with a webhook | ||||
|   configuration. Webhook configuration is effectively a | ||||
|   configuration. The webhook configuration is effectively a specialized | ||||
|   [kubeconfig](/docs/tasks/access-application-cluster/configure-access-multiple-clusters). | ||||
| - `--audit-webhook-initial-backoff` specifies the amount of time to wait after the first failed | ||||
|   request before retrying. Subsequent requests are retried with exponential backoff. | ||||
|  | @ -183,7 +191,7 @@ audit backend using the following kube-apiserver flags: | |||
| The webhook config file uses the kubeconfig format to specify the remote address of | ||||
| the service and credentials used to connect to it. | ||||
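
| As a hedged sketch (the server address and certificate paths are placeholders for your own audit collector, not values from any shipped configuration), such a file might look like: | ||||

| ```yaml | ||||
| apiVersion: v1 | ||||
| kind: Config | ||||
| clusters: | ||||
|   - name: audit-webhook | ||||
|     cluster: | ||||
|       # Hypothetical CA bundle and endpoint for the remote audit collector. | ||||
|       certificate-authority: /etc/kubernetes/pki/audit-webhook-ca.crt | ||||
|       server: https://audit.example.com/events | ||||
| users: | ||||
|   - name: audit-webhook | ||||
|     user: | ||||
|       # Hypothetical client credentials used to authenticate to the collector. | ||||
|       client-certificate: /etc/kubernetes/pki/audit-webhook-client.crt | ||||
|       client-key: /etc/kubernetes/pki/audit-webhook-client.key | ||||
| contexts: | ||||
|   - name: audit-webhook | ||||
|     context: | ||||
|       cluster: audit-webhook | ||||
|       user: audit-webhook | ||||
| current-context: audit-webhook | ||||
| ``` | ||||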
| 
 | ||||
| ### Batching | ||||
| ## Event batching {#batching} | ||||
| 
 | ||||
| Both log and webhook backends support batching. Using webhook as an example, here's the list of | ||||
| available flags. To get the same flag for log backend, replace `webhook` with `log` in the flag | ||||
|  | @ -193,9 +201,10 @@ throttling is enabled in `webhook` and disabled in `log`. | |||
| - `--audit-webhook-mode` defines the buffering strategy. One of the following: | ||||
|   - `batch` - buffer events and asynchronously process them in batches. This is the default. | ||||
|   - `blocking` - block API server responses on processing each individual event. | ||||
|   - `blocking-strict` - Same as blocking, but when there is a failure during audit logging at RequestReceived stage, the whole request to apiserver will fail. | ||||
|   - `blocking-strict` - Same as blocking, but when there is a failure during audit logging at the | ||||
|      RequestReceived stage, the whole request to the kube-apiserver fails. | ||||
| 
 | ||||
| The following flags are used only in the `batch` mode. | ||||
| The following flags are used only in the `batch` mode: | ||||
| 
 | ||||
| - `--audit-webhook-batch-buffer-size` defines the number of events to buffer before batching. | ||||
|   If the rate of incoming events overflows the buffer, events are dropped. | ||||
|  | @ -207,16 +216,16 @@ The following flags are used only in the `batch` mode. | |||
| - `--audit-webhook-batch-throttle-burst` defines the maximum number of batches generated at the same | ||||
|   moment if the allowed QPS was underutilized previously. | ||||
| 
 | ||||
| #### Parameter tuning | ||||
| ## Parameter tuning | ||||
| 
 | ||||
| Parameters should be set to accommodate the load on the apiserver. | ||||
| Parameters should be set to accommodate the load on the API server. | ||||
| 
 | ||||
| For example, if kube-apiserver receives 100 requests each second, and each request is audited only | ||||
| on `ResponseStarted` and `ResponseComplete` stages, you should account for ~200 audit | ||||
| on `ResponseStarted` and `ResponseComplete` stages, you should account for ≅200 audit | ||||
| events being generated each second. Assuming that there are up to 100 events in a batch, | ||||
| you should set throttling level at least 2 QPS. Assuming that the backend can take up to | ||||
| 5 seconds to write events, you should set the buffer size to hold up to 5 seconds of events, i.e. | ||||
| 10 batches, i.e. 1000 events. | ||||
| you should set the throttling level to at least 2 queries per second. Assuming that the backend can take up to | ||||
| 5 seconds to write events, you should set the buffer size to hold up to 5 seconds of events; | ||||
| that is: 10 batches, or 1000 events. | ||||
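
| Expressed as flags, that sizing exercise might look like the hedged sketch below; the numbers are illustrative, not recommendations: | ||||

| ```shell | ||||
| # ~200 events/s, batches of up to 100 events, a backend that may lag by ~5 seconds. | ||||
| # (other kube-apiserver flags omitted) | ||||
| kube-apiserver \ | ||||
|   --audit-webhook-mode=batch \ | ||||
|   --audit-webhook-batch-max-size=100 \ | ||||
|   --audit-webhook-batch-buffer-size=1000 \ | ||||
|   --audit-webhook-batch-throttle-qps=2 \ | ||||
|   --audit-webhook-batch-throttle-burst=4 | ||||
| ``` | ||||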
| 
 | ||||
| In most cases however, the default parameters should be sufficient and you don't have to worry about | ||||
| setting them manually. You can look at the following Prometheus metrics exposed by kube-apiserver | ||||
|  | @ -226,192 +235,18 @@ and in the logs to monitor the state of the auditing subsystem. | |||
| - `apiserver_audit_error_total` metric contains the total number of events dropped due to an error | ||||
|   during exporting. | ||||
| 
 | ||||
| ### Truncate | ||||
| ### Log entry truncation {#truncate} | ||||
| 
 | ||||
| Both log and webhook backends support truncating. As an example, the following is the list of flags | ||||
| available for the log backend: | ||||
| Both log and webhook backends support limiting the size of events that are logged. | ||||
| As an example, the following is the list of flags available for the log backend: | ||||
| 
 | ||||
|  - `audit-log-truncate-enabled` whether event and batch truncating is enabled. | ||||
|  - `audit-log-truncate-max-batch-size` maximum size in bytes of the batch sent to the underlying backend. | ||||
|  - `audit-log-truncate-max-event-size` maximum size in bytes of the audit event sent to the underlying backend. | ||||
| 
 | ||||
| By default truncate is disabled in both `webhook` and `log`, a cluster administrator should set `audit-log-truncate-enabled` or `audit-webhook-truncate-enabled` to enable the feature. | ||||
| 
 | ||||
| ## Setup for multiple API servers | ||||
| 
 | ||||
| If you're extending the Kubernetes API with the [aggregation | ||||
| layer](/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/), | ||||
| you can also set up audit logging for the aggregated apiserver. To do this, | ||||
| pass the configuration options in the same format as described above to the | ||||
| aggregated apiserver and set up the log ingesting pipeline to pick up audit | ||||
| logs. Different apiservers can have different audit configurations and | ||||
| different audit policies. | ||||
| 
 | ||||
| ## Log Collector Examples | ||||
| 
 | ||||
| ### Use fluentd to collect and distribute audit events from log file | ||||
| 
 | ||||
| [Fluentd](https://www.fluentd.org/) is an open source data collector for unified logging layer. | ||||
| In this example, we will use fluentd to split audit events by different namespaces. | ||||
| 
 | ||||
| {{< note >}} | ||||
| The `fluent-plugin-forest` and `fluent-plugin-rewrite-tag-filter` are plugins for fluentd. | ||||
| You can get details about plugin installation from | ||||
| [fluentd plugin-management](https://docs.fluentd.org/v1.0/articles/plugin-management). | ||||
| {{< /note >}} | ||||
| 
 | ||||
| 1. Install [`fluentd`](https://docs.fluentd.org/v1.0/articles/quickstart#step-1:-installing-fluentd), | ||||
|    `fluent-plugin-forest` and `fluent-plugin-rewrite-tag-filter` in the kube-apiserver node | ||||
| 
 | ||||
| 1. Create a config file for fluentd | ||||
| 
 | ||||
|     ``` | ||||
|     cat <<'EOF' > /etc/fluentd/config | ||||
|     # fluentd conf runs in the same host with kube-apiserver | ||||
|     <source> | ||||
|         @type tail | ||||
|         # audit log path of kube-apiserver | ||||
|         path /var/log/kube-audit | ||||
|         pos_file /var/log/audit.pos | ||||
|         format json | ||||
|         time_key time | ||||
|         time_format %Y-%m-%dT%H:%M:%S.%N%z | ||||
|         tag audit | ||||
|     </source> | ||||
| 
 | ||||
|     <filter audit> | ||||
|         #https://github.com/fluent/fluent-plugin-rewrite-tag-filter/issues/13 | ||||
|         @type record_transformer | ||||
|         enable_ruby | ||||
|         <record> | ||||
|          namespace ${record["objectRef"].nil? ? "none":(record["objectRef"]["namespace"].nil? ? "none":record["objectRef"]["namespace"])} | ||||
|         </record> | ||||
|     </filter> | ||||
| 
 | ||||
|     <match audit> | ||||
|         # route audit according to namespace element in context | ||||
|         @type rewrite_tag_filter | ||||
|         <rule> | ||||
|             key namespace | ||||
|             pattern /^(.+)/ | ||||
|             tag ${tag}.$1 | ||||
|         </rule> | ||||
|     </match> | ||||
| 
 | ||||
|     <filter audit.**> | ||||
|        @type record_transformer | ||||
|        remove_keys namespace | ||||
|     </filter> | ||||
| 
 | ||||
|     <match audit.**> | ||||
|         @type forest | ||||
|         subtype file | ||||
|         remove_prefix audit | ||||
|         <template> | ||||
|             time_slice_format %Y%m%d%H | ||||
|             compress gz | ||||
|             path /var/log/audit-${tag}.*.log | ||||
|             format json | ||||
|             include_time_key true | ||||
|         </template> | ||||
|     </match> | ||||
|     EOF | ||||
|     ``` | ||||
| 
 | ||||
| 1. Start fluentd | ||||
| 
 | ||||
|     ```shell | ||||
|     fluentd -c /etc/fluentd/config  -vv | ||||
|     ``` | ||||
| 
 | ||||
| 1. Start kube-apiserver with the following options: | ||||
| 
 | ||||
|     ```shell | ||||
|     --audit-policy-file=/etc/kubernetes/audit-policy.yaml --audit-log-path=/var/log/kube-audit --audit-log-format=json | ||||
|     ``` | ||||
| 
 | ||||
| 1. Check audits for different namespaces in `/var/log/audit-*.log` | ||||
| 
 | ||||
| ### Use logstash to collect and distribute audit events from webhook backend | ||||
| 
 | ||||
| [Logstash](https://www.elastic.co/products/logstash) | ||||
| is an open source, server-side data processing tool. In this example, | ||||
| we will use logstash to collect audit events from webhook backend, and save events of | ||||
| different users into different files. | ||||
| 
 | ||||
| 1. install [logstash](https://www.elastic.co/guide/en/logstash/current/installing-logstash.html) | ||||
| 
 | ||||
| 1. create config file for logstash | ||||
| 
 | ||||
|     ``` | ||||
|     cat <<EOF > /etc/logstash/config | ||||
|     input{ | ||||
|         http{ | ||||
|             #TODO, figure out a way to use kubeconfig file to authenticate to logstash | ||||
|             #https://www.elastic.co/guide/en/logstash/current/plugins-inputs-http.html#plugins-inputs-http-ssl | ||||
|             port=>8888 | ||||
|         } | ||||
|     } | ||||
|     filter{ | ||||
|         split{ | ||||
|             # Webhook audit backend sends several events together with EventList | ||||
|             # split each event here. | ||||
|             field=>[items] | ||||
|             # We only need event subelement, remove others. | ||||
|             remove_field=>[headers, metadata, apiVersion, "@timestamp", kind, "@version", host] | ||||
|         } | ||||
|         mutate{ | ||||
|             rename => {items=>event} | ||||
|         } | ||||
|     } | ||||
|     output{ | ||||
|         file{ | ||||
|             # Audit events from different users will be saved into different files. | ||||
|             path=>"/var/log/kube-audit-%{[event][user][username]}/audit" | ||||
|         } | ||||
|     } | ||||
|     EOF | ||||
|     ``` | ||||
| 
 | ||||
| 1. start logstash | ||||
| 
 | ||||
|     ```shell | ||||
|     bin/logstash -f /etc/logstash/config --path.settings /etc/logstash/ | ||||
|     ``` | ||||
| 
 | ||||
| 1. create a [kubeconfig file](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) for kube-apiserver webhook audit backend | ||||
| 
 | ||||
|         cat <<EOF > /etc/kubernetes/audit-webhook-kubeconfig | ||||
|         apiVersion: v1 | ||||
|         kind: Config | ||||
|         clusters: | ||||
|         - cluster: | ||||
|             server: http://<ip_of_logstash>:8888 | ||||
|           name: logstash | ||||
|         contexts: | ||||
|         - context: | ||||
|             cluster: logstash | ||||
|             user: "" | ||||
|           name: default-context | ||||
|         current-context: default-context | ||||
|         preferences: {} | ||||
|         users: [] | ||||
|         EOF | ||||
| 
 | ||||
| 1. start kube-apiserver with the following options: | ||||
| 
 | ||||
|     ```shell | ||||
|     --audit-policy-file=/etc/kubernetes/audit-policy.yaml --audit-webhook-config-file=/etc/kubernetes/audit-webhook-kubeconfig | ||||
|     ``` | ||||
| 
 | ||||
| 1. check audits in logstash node's directories `/var/log/kube-audit-*/audit` | ||||
| 
 | ||||
| Note that in addition to file output plugin, logstash has a variety of outputs that | ||||
| let users route data where they want. For example, users can emit audit events to elasticsearch | ||||
| plugin which supports full-text search and analytics. | ||||
| - `--audit-log-truncate-enabled` enables event and batch truncation. | ||||
| - `--audit-log-truncate-max-batch-size` sets the maximum size in bytes of the batch sent to the underlying backend. | ||||
| - `--audit-log-truncate-max-event-size` sets the maximum size in bytes of the audit event sent to the underlying backend. | ||||
| 
 | ||||
| By default, truncation is disabled for both the `webhook` and `log` backends; a cluster administrator should set | ||||
| `--audit-log-truncate-enabled` or `--audit-webhook-truncate-enabled` to enable the feature. | ||||
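
| For example, a hedged invocation that enables truncation for the log backend could add flags like these (the sizes are illustrative): | ||||

| ```shell | ||||
| # (other kube-apiserver flags omitted) | ||||
| kube-apiserver \ | ||||
|   --audit-log-truncate-enabled \ | ||||
|   --audit-log-truncate-max-event-size=102400 \ | ||||
|   --audit-log-truncate-max-batch-size=10485760 | ||||
| ``` | ||||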
| 
 | ||||
| ## {{% heading "whatsnext" %}} | ||||
| 
 | ||||
| Learn about [Mutating webhook auditing annotations](/docs/reference/access-authn-authz/extensible-admission-controllers/#mutating-webhook-auditing-annotations). | ||||
| 
 | ||||
| * Learn about [Mutating webhook auditing annotations](/docs/reference/access-authn-authz/extensible-admission-controllers/#mutating-webhook-auditing-annotations). | ||||
|  |  | |||
|  | @ -47,7 +47,7 @@ can not schedule your pod. Reasons include: | |||
| You may have exhausted the supply of CPU or Memory in your cluster. In this | ||||
| case you can try several things: | ||||
| 
 | ||||
| * [Add more nodes](/docs/tasks/administer-cluster/cluster-management/#resizing-a-cluster) to the cluster. | ||||
| * Add more nodes to the cluster. | ||||
| 
 | ||||
| * [Terminate unneeded pods](/docs/concepts/workloads/pods/#pod-termination) | ||||
|   to make room for pending pods. | ||||
|  |  | |||
|  | @ -32,7 +32,7 @@ using [Krew](https://krew.dev/). Krew is a plugin manager maintained by | |||
| the Kubernetes SIG CLI community. | ||||
| 
 | ||||
| {{< caution >}} | ||||
| Kubectl plugins available via the Krew [plugin index](https://index.krew.dev/) | ||||
| Kubectl plugins available via the Krew [plugin index](https://krew.sigs.k8s.io/plugins/) | ||||
| are not audited for security. You should install and run third-party plugins at your | ||||
| own risk, since they are arbitrary programs running on your machine. | ||||
| {{< /caution >}} | ||||
|  | @ -46,7 +46,7 @@ A warning will also be included for any valid plugin files that overlap each oth | |||
| 
 | ||||
| You can use [Krew](https://krew.dev/) to discover and install `kubectl` | ||||
| plugins from a community-curated | ||||
| [plugin index](https://index.krew.dev/). | ||||
| [plugin index](https://krew.sigs.k8s.io/plugins/). | ||||
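
| For example, assuming Krew itself is already installed, discovering and installing a plugin typically looks like this (the plugin name below is only an example): | ||||

| ```shell | ||||
| kubectl krew update                 # refresh the local copy of the plugin index | ||||
| kubectl krew search                 # list plugins available in the index | ||||
| kubectl krew install access-matrix  # install an example plugin | ||||
| kubectl access-matrix --help        # installed plugins run as kubectl subcommands | ||||
| ``` | ||||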
| 
 | ||||
| #### Limitations | ||||
| 
 | ||||
|  | @ -354,7 +354,7 @@ package it, distribute it and deliver updates to your users. | |||
| distribute your plugins. This way, you use a single packaging format for all | ||||
| target platforms (Linux, Windows, macOS etc) and deliver updates to your users. | ||||
| Krew also maintains a [plugin | ||||
| index](https://index.krew.dev/) so that other people can | ||||
| index](https://krew.sigs.k8s.io/plugins/) so that other people can | ||||
| discover your plugin and install it. | ||||
| 
 | ||||
| 
 | ||||
|  |  | |||
|  | @ -518,27 +518,25 @@ apiVersion: "stable.example.com/v1" | |||
| kind: CronTab | ||||
| metadata: | ||||
|   finalizers: | ||||
|   - finalizer.stable.example.com | ||||
|   - stable.example.com/finalizer | ||||
| ``` | ||||
| 
 | ||||
| Finalizers are arbitrary string values that, when present, ensure that a hard delete | ||||
| of a resource is not possible while they exist. | ||||
| Identifiers of custom finalizers consist of a domain name, a forward slash and the name of | ||||
| the finalizer. Any controller can add a finalizer to any object's list of finalizers. | ||||
| 
 | ||||
| The first delete request on an object with finalizers sets a value for the | ||||
| `metadata.deletionTimestamp` field but does not delete it. Once this value is set, | ||||
| entries in the `finalizers` list can only be removed. | ||||
| entries in the `finalizers` list can only be removed. While any finalizers remain, it is also | ||||
| impossible to force the deletion of an object. | ||||
| 
 | ||||
| When the `metadata.deletionTimestamp` field is set, controllers watching the object | ||||
| execute any finalizers they handle, by polling update requests for that | ||||
| object. When all finalizers have been executed, the resource is deleted. | ||||
| When the `metadata.deletionTimestamp` field is set, controllers watching the object execute any | ||||
| finalizers they handle and remove the finalizer from the list after they are done. It is the | ||||
| responsibility of each controller to remove its finalizer from the list. | ||||
| 
 | ||||
| The value of `metadata.deletionGracePeriodSeconds` controls the interval between | ||||
| polling updates. | ||||
| The value of `metadata.deletionGracePeriodSeconds` controls the interval between polling updates. | ||||
| 
 | ||||
| It is the responsibility of each controller to remove its finalizer from the list. | ||||
| 
 | ||||
| Kubernetes only finally deletes the object if the list of finalizers is empty, | ||||
| meaning all finalizers have been executed. | ||||
| Once the list of finalizers is empty, meaning all finalizers have been executed, the resource is | ||||
| deleted by Kubernetes. | ||||
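
| As a hedged illustration of that lifecycle (assuming the CronTab custom resource shown above is installed and an object named `my-new-cron-object` exists), you can watch a finalizer hold up deletion and then clear it by hand; normally the controller that added the finalizer removes it for you: | ||||

| ```shell | ||||
| # Request deletion; the object remains, with metadata.deletionTimestamp set. | ||||
| kubectl delete crontab my-new-cron-object --wait=false | ||||
| # The finalizer is still listed, so the delete has not completed. | ||||
| kubectl get crontab my-new-cron-object -o jsonpath='{.metadata.finalizers}' | ||||
| # Clearing the list (what the controller would do once its cleanup is done) | ||||
| # lets Kubernetes finish the deletion. | ||||
| kubectl patch crontab my-new-cron-object --type=merge -p '{"metadata":{"finalizers":null}}' | ||||
| ``` | ||||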
| 
 | ||||
| ### Validation | ||||
| 
 | ||||
|  |  | |||
|  | @ -24,10 +24,35 @@ The following steps require an egress configuration, for example: | |||
| You need to configure the API Server to use the Konnectivity service | ||||
| and direct the network traffic to the cluster nodes: | ||||
| 
 | ||||
| 1. Make sure that | ||||
| the `ServiceAccountTokenVolumeProjection` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) | ||||
| is enabled. You can enable | ||||
| [service account token volume projection](/docs/tasks/configure-pod-container/configure-service-account/#service-account-token-volume-projection) | ||||
| by providing the following flags to the kube-apiserver: | ||||
|    ``` | ||||
|    --service-account-issuer=api | ||||
|    --service-account-signing-key-file=/etc/kubernetes/pki/sa.key | ||||
|    --api-audiences=system:konnectivity-server | ||||
|    ``` | ||||
| 1. Create an egress configuration file such as `admin/konnectivity/egress-selector-configuration.yaml` (a minimal sketch follows this list). | ||||
| 1. Set the `--egress-selector-config-file` flag of the API Server to the path of | ||||
| your API Server egress configuration file. | ||||
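
| The egress configuration below is a hedged, minimal sketch of what such a file can contain when the Konnectivity server listens on a Unix domain socket; the full reference file shipped with these docs is the authoritative version: | ||||

| ```yaml | ||||
| apiVersion: apiserver.k8s.io/v1beta1 | ||||
| kind: EgressSelectorConfiguration | ||||
| egressSelections: | ||||
|   # Route traffic destined for the cluster through the Konnectivity server. | ||||
|   - name: cluster | ||||
|     connection: | ||||
|       proxyProtocol: GRPC | ||||
|       transport: | ||||
|         uds: | ||||
|           udsName: /etc/kubernetes/konnectivity-server/konnectivity-server.socket | ||||
| ``` | ||||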
| 
 | ||||
| Generate or obtain a certificate and kubeconfig for konnectivity-server. | ||||
| For example, you can use the OpenSSL command line tool to issue an X.509 certificate, | ||||
| using the cluster CA certificate `/etc/kubernetes/pki/ca.crt` from a control-plane host. | ||||
| 
 | ||||
| ```bash | ||||
| openssl req -subj "/CN=system:konnectivity-server" -new -newkey rsa:2048 -nodes -keyout konnectivity.key -out konnectivity.csr | ||||
| openssl x509 -req -in konnectivity.csr -CA /etc/kubernetes/pki/ca.crt -CAkey /etc/kubernetes/pki/ca.key -CAcreateserial -out konnectivity.crt -days 375 -sha256 | ||||
| SERVER=$(kubectl config view -o jsonpath='{.clusters..server}') | ||||
| kubectl --kubeconfig /etc/kubernetes/konnectivity-server.conf config set-credentials system:konnectivity-server --client-certificate konnectivity.crt --client-key konnectivity.key --embed-certs=true | ||||
| kubectl --kubeconfig /etc/kubernetes/konnectivity-server.conf config set-cluster kubernetes --server "$SERVER" --certificate-authority /etc/kubernetes/pki/ca.crt --embed-certs=true | ||||
| kubectl --kubeconfig /etc/kubernetes/konnectivity-server.conf config set-context system:konnectivity-server@kubernetes --cluster kubernetes --user system:konnectivity-server | ||||
| kubectl --kubeconfig /etc/kubernetes/konnectivity-server.conf config use-context system:konnectivity-server@kubernetes | ||||
| rm -f konnectivity.crt konnectivity.key konnectivity.csr | ||||
| ``` | ||||
| 
 | ||||
| Next, you need to deploy the Konnectivity server and agents. | ||||
| [kubernetes-sigs/apiserver-network-proxy](https://github.com/kubernetes-sigs/apiserver-network-proxy) | ||||
| is a reference implementation. | ||||
|  |  | |||
|  | @ -16,34 +16,32 @@ in a replication controller, deployment, replica set or stateful set based on ob | |||
| (or, with beta support, on some other, application-provided metrics). | ||||
| 
 | ||||
| This document walks you through an example of enabling Horizontal Pod Autoscaler for the php-apache server. | ||||
| For more information on how Horizontal Pod Autoscaler behaves, see the  | ||||
| For more information on how Horizontal Pod Autoscaler behaves, see the | ||||
| [Horizontal Pod Autoscaler user guide](/docs/tasks/run-application/horizontal-pod-autoscale/). | ||||
| 
 | ||||
| ## {{% heading "prerequisites" %}} | ||||
| 
 | ||||
| 
 | ||||
| This example requires a running Kubernetes cluster and kubectl, version 1.2 or later. | ||||
| [metrics-server](https://github.com/kubernetes-sigs/metrics-server) monitoring needs to be deployed in the cluster | ||||
| to provide metrics via the resource metrics API, as Horizontal Pod Autoscaler uses this API to collect metrics. The instructions for deploying this are on the GitHub repository of [metrics-server](https://github.com/kubernetes-sigs/metrics-server), if you followed [getting started on GCE guide](/docs/setup/production-environment/turnkey/gce/), | ||||
| metrics-server monitoring will be turned-on by default. | ||||
| [Metrics server](https://github.com/kubernetes-sigs/metrics-server) monitoring needs to be deployed | ||||
| in the cluster to provide metrics through the [Metrics API](https://github.com/kubernetes/metrics). | ||||
| Horizontal Pod Autoscaler uses this API to collect metrics. To learn how to deploy the metrics-server, | ||||
| see the [metrics-server documentation](https://github.com/kubernetes-sigs/metrics-server#deployment). | ||||
| 
 | ||||
| To specify multiple resource metrics for a Horizontal Pod Autoscaler, you must have a Kubernetes cluster | ||||
| and kubectl at version 1.6 or later.  Furthermore, in order to make use of custom metrics, your cluster | ||||
| must be able to communicate with the API server providing the custom metrics API. Finally, to use metrics | ||||
| not related to any Kubernetes object you must have a Kubernetes cluster at version 1.10 or later, and | ||||
| you must be able to communicate with the API server that provides the external metrics API. | ||||
| To specify multiple resource metrics for a Horizontal Pod Autoscaler, you must have a | ||||
| Kubernetes cluster and kubectl at version 1.6 or later. To make use of custom metrics, your cluster | ||||
| must be able to communicate with the API server providing the custom Metrics API. | ||||
| Finally, to use metrics not related to any Kubernetes object you must have a | ||||
| Kubernetes cluster at version 1.10 or later, and you must be able to communicate | ||||
| with the API server that provides the external Metrics API. | ||||
| See the [Horizontal Pod Autoscaler user guide](/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics) for more details. | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| <!-- steps --> | ||||
| 
 | ||||
| ## Run & expose php-apache server | ||||
| ## Run and expose php-apache server | ||||
| 
 | ||||
| To demonstrate Horizontal Pod Autoscaler we will use a custom docker image based on the php-apache image. | ||||
| The Dockerfile has the following content: | ||||
| To demonstrate the Horizontal Pod Autoscaler, we will use a custom Docker image based on the php-apache image. The Dockerfile has the following content: | ||||
| 
 | ||||
| ``` | ||||
| ```dockerfile | ||||
| FROM php:5-apache | ||||
| COPY index.php /var/www/html/index.php | ||||
| RUN chmod a+rx index.php | ||||
|  | @ -51,7 +49,7 @@ RUN chmod a+rx index.php | |||
| 
 | ||||
| It defines an index.php page which performs some CPU intensive computations: | ||||
| 
 | ||||
| ``` | ||||
| ```php | ||||
| <?php | ||||
|   $x = 0.0001; | ||||
|   for ($i = 0; $i <= 1000000; $i++) { | ||||
|  | @ -66,11 +64,12 @@ using the following configuration: | |||
| 
 | ||||
| {{< codenew file="application/php-apache.yaml" >}} | ||||
| 
 | ||||
| 
 | ||||
| Run the following command: | ||||
| 
 | ||||
| ```shell | ||||
| kubectl apply -f https://k8s.io/examples/application/php-apache.yaml | ||||
| ``` | ||||
| 
 | ||||
| ``` | ||||
| deployment.apps/php-apache created | ||||
| service/php-apache created | ||||
|  | @ -90,6 +89,7 @@ See [here](/docs/tasks/run-application/horizontal-pod-autoscale/#algorithm-detai | |||
| ```shell | ||||
| kubectl autoscale deployment php-apache --cpu-percent=50 --min=1 --max=10 | ||||
| ``` | ||||
| 
 | ||||
| ``` | ||||
| horizontalpodautoscaler.autoscaling/php-apache autoscaled | ||||
| ``` | ||||
|  | @ -99,10 +99,10 @@ We may check the current status of autoscaler by running: | |||
| ```shell | ||||
| kubectl get hpa | ||||
| ``` | ||||
| 
 | ||||
| ``` | ||||
| NAME         REFERENCE                     TARGET    MINPODS   MAXPODS   REPLICAS   AGE | ||||
| php-apache   Deployment/php-apache/scale   0% / 50%  1         10        1          18s | ||||
| 
 | ||||
| ``` | ||||
| 
 | ||||
| Please note that the current CPU consumption is 0% as we are not sending any requests to the server | ||||
|  | @ -122,10 +122,10 @@ Within a minute or so, we should see the higher CPU load by executing: | |||
| ```shell | ||||
| kubectl get hpa | ||||
| ``` | ||||
| 
 | ||||
| ``` | ||||
| NAME         REFERENCE                     TARGET      MINPODS   MAXPODS   REPLICAS   AGE | ||||
| php-apache   Deployment/php-apache/scale   305% / 50%  1         10        1          3m | ||||
| 
 | ||||
| ``` | ||||
| 
 | ||||
| Here, CPU consumption has increased to 305% of the request. | ||||
|  | @ -134,6 +134,7 @@ As a result, the deployment was resized to 7 replicas: | |||
| ```shell | ||||
| kubectl get deployment php-apache | ||||
| ``` | ||||
| 
 | ||||
| ``` | ||||
| NAME         READY   UP-TO-DATE   AVAILABLE   AGE | ||||
| php-apache   7/7      7           7           19m | ||||
|  | @ -157,6 +158,7 @@ Then we will verify the result state (after a minute or so): | |||
| ```shell | ||||
| kubectl get hpa | ||||
| ``` | ||||
| 
 | ||||
| ``` | ||||
| NAME         REFERENCE                     TARGET       MINPODS   MAXPODS   REPLICAS   AGE | ||||
| php-apache   Deployment/php-apache/scale   0% / 50%     1         10        1          11m | ||||
|  | @ -165,6 +167,7 @@ php-apache   Deployment/php-apache/scale   0% / 50%     1         10        1 | |||
| ```shell | ||||
| kubectl get deployment php-apache | ||||
| ``` | ||||
| 
 | ||||
| ``` | ||||
| NAME         READY   UP-TO-DATE   AVAILABLE   AGE | ||||
| php-apache   1/1     1            1           27m | ||||
|  | @ -176,8 +179,6 @@ Here CPU utilization dropped to 0, and so HPA autoscaled the number of replicas | |||
| Autoscaling the replicas may take a few minutes. | ||||
| {{< /note >}} | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| <!-- discussion --> | ||||
| 
 | ||||
| ## Autoscaling on multiple metrics and custom metrics | ||||
|  | @ -419,7 +420,8 @@ we can use `kubectl describe hpa`: | |||
| ```shell | ||||
| kubectl describe hpa cm-test | ||||
| ``` | ||||
| ```shell | ||||
| 
 | ||||
| ``` | ||||
| Name:                           cm-test | ||||
| Namespace:                      prom | ||||
| Labels:                         <none> | ||||
|  | @ -474,8 +476,7 @@ We will create the autoscaler by executing the following command: | |||
| ```shell | ||||
| kubectl create -f https://k8s.io/examples/application/hpa/php-apache.yaml | ||||
| ``` | ||||
| 
 | ||||
| ``` | ||||
| horizontalpodautoscaler.autoscaling/php-apache created | ||||
| ``` | ||||
| 
 | ||||
| 
 | ||||
|  |  | |||
|  | @ -433,7 +433,14 @@ behavior: | |||
|     selectPolicy: Disabled | ||||
| ``` | ||||
| 
 | ||||
| ## Implicit maintenance-mode deactivation | ||||
| 
 | ||||
| You can implicitly deactivate the HPA for a target without the | ||||
| need to change the HPA configuration itself. If the target's desired replica count | ||||
| is set to 0, and the HPA's minimum replica count is greater than 0, the HPA  | ||||
| stops adjusting the target (and sets the `ScalingActive` Condition on itself | ||||
| to `false`) until you reactivate it by manually adjusting the target's desired | ||||
| replica count or HPA's minimum replica count. | ||||
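
| For instance, assuming the `php-apache` Deployment from the walkthrough referenced above, pausing and resuming autoscaling might look like this: | ||||

| ```shell | ||||
| # Scale the target to zero; the HPA reports ScalingActive=false and stops acting. | ||||
| kubectl scale deployment/php-apache --replicas=0 | ||||
| # Scale the target back up later to reactivate the HPA. | ||||
| kubectl scale deployment/php-apache --replicas=1 | ||||
| ``` | ||||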
| 
 | ||||
| ## {{% heading "whatsnext" %}} | ||||
| 
 | ||||
|  | @ -441,4 +448,3 @@ behavior: | |||
| * Design documentation: [Horizontal Pod Autoscaling](https://git.k8s.io/community/contributors/design-proposals/autoscaling/horizontal-pod-autoscaler.md). | ||||
| * kubectl autoscale command: [kubectl autoscale](/docs/reference/generated/kubectl/kubectl-commands/#autoscale). | ||||
| * Usage example of [Horizontal Pod Autoscaler](/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/). | ||||
| 
 | ||||
|  |  | |||
|  | @ -33,14 +33,17 @@ Configurations with a single API server will experience unavailability while the | |||
|    (ex: `ca.crt`, `ca.key`, `front-proxy-ca.crt`, and `front-proxy-ca.key`) | ||||
|    to all your control plane nodes in the Kubernetes certificates directory. | ||||
| 
 | ||||
| 1. Update *Kubernetes controller manager's* `--root-ca-file` to include both old and new CA and restart controller manager. | ||||
| 1. Update {{< glossary_tooltip text="kube-controller-manager" term_id="kube-controller-manager" >}}'s `--root-ca-file` to | ||||
|    include both old and new CA. Then restart the component. | ||||
| 
 | ||||
|    Any service account created after this point will get secrets that include both old and new CAs. | ||||
| 
 | ||||
|    {{< note >}} | ||||
|    Remove the flag `--client-ca-file` from the *Kubernetes controller manager* configuration. | ||||
|    You can also replace the existing client CA file or change this configuration item to reference a new, updated CA. | ||||
|    [Issue 1350](https://github.com/kubernetes/kubeadm/issues/1350) tracks an issue with *Kubernetes controller manager* being unable to accept a CA bundle. | ||||
|    The files specified by the kube-controller-manager flags `--client-ca-file` and `--cluster-signing-cert-file` | ||||
|    cannot be CA bundles. If these flags and `--root-ca-file` point to the same `ca.crt` file, which is now a | ||||
|    bundle (including both the old and new CA), you will see an error. To work around this problem, copy the new CA to a | ||||
|    separate file and make the flags `--client-ca-file` and `--cluster-signing-cert-file` point to that copy. Once `ca.crt` | ||||
|    is no longer a bundle, you can restore those flags to point to `ca.crt` and delete the copy. | ||||
|    {{< /note >}} | ||||
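
|    In shell terms, a hedged sketch of that workaround (the file names and paths are illustrative) is: | ||||

|    ```shell | ||||
|    # ca.crt (used by --root-ca-file) is already a bundle of the old and new CA. | ||||
|    # Give --client-ca-file and --cluster-signing-cert-file their own single-CA copy: | ||||
|    cp new-ca.crt /etc/kubernetes/pki/new-ca-only.crt | ||||
|    # ...and point both flags at /etc/kubernetes/pki/new-ca-only.crt until the | ||||
|    # rotation is complete and ca.crt is no longer a bundle. | ||||
|    ``` | ||||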
| 
 | ||||
| 1. Update all service account tokens to include both old and new CA certificates. | ||||
|  |  | |||
|  | @ -23,7 +23,7 @@ You can also read the | |||
| ## kind | ||||
| 
 | ||||
| [`kind`](https://kind.sigs.k8s.io/docs/) lets you run Kubernetes on | ||||
| your local computer. This tool it requires that you have | ||||
| your local computer. This tool requires that you have | ||||
| [Docker](https://docs.docker.com/get-docker/) installed and configured. | ||||
| 
 | ||||
| The kind [Quick Start](https://kind.sigs.k8s.io/docs/user/quick-start/) page | ||||
|  |  | |||
|  | @ -1,6 +1,7 @@ | |||
| --- | ||||
| title: Learn Kubernetes Basics | ||||
| linkTitle: Learn Kubernetes Basics | ||||
| no_list: true | ||||
| weight: 10 | ||||
| card: | ||||
|   name: tutorials | ||||
|  |  | |||
|  | @ -26,7 +26,7 @@ Kubernetes concepts. | |||
| -   [Pods](/docs/concepts/workloads/pods/) | ||||
| -   [Cluster DNS](/docs/concepts/services-networking/dns-pod-service/) | ||||
| -   [Headless Services](/docs/concepts/services-networking/service/#headless-services) | ||||
| -   [PersistentVolumes](/docs/concepts/storage/volumes/) | ||||
| -   [PersistentVolumes](/docs/concepts/storage/persistent-volumes/) | ||||
| -   [PersistentVolume Provisioning](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/persistent-volume-provisioning/) | ||||
| -   [StatefulSets](/docs/concepts/workloads/controllers/statefulset/) | ||||
| -   [PodDisruptionBudgets](/docs/concepts/workloads/pods/disruptions/#pod-disruption-budget) | ||||
|  | @ -46,7 +46,7 @@ tutorial. | |||
| After this tutorial, you will know the following. | ||||
| 
 | ||||
| -   How to deploy a ZooKeeper ensemble using StatefulSet. | ||||
| -   How to consistently configure the ensemble using ConfigMaps. | ||||
| -   How to consistently configure the ensemble. | ||||
| -   How to spread the deployment of ZooKeeper servers in the ensemble. | ||||
| -   How to use PodDisruptionBudgets to ensure service availability during planned maintenance. | ||||
| 
 | ||||
|  | @ -522,7 +522,7 @@ log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout | |||
| log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n | ||||
| ``` | ||||
| 
 | ||||
| This is the simplest possible way to safely log inside the container.  | ||||
| This is the simplest possible way to safely log inside the container. | ||||
| Because the applications write logs to standard out, Kubernetes will handle log rotation for you. | ||||
| Kubernetes also implements a sane retention policy that ensures application logs written to | ||||
| standard out and standard error do not exhaust local storage media. | ||||
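
| For example, you can view the last few lines of one server's log with a command like the following (assuming the `zk-0` Pod from this tutorial is running): | ||||

| ```shell | ||||
| kubectl logs zk-0 --tail 20 | ||||
| ``` | ||||
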
|  | @ -770,7 +770,7 @@ In one terminal window, use the following command to watch the Pods in the `zk` | |||
| kubectl get pod -w -l app=zk | ||||
| ``` | ||||
| 
 | ||||
| In another window, using the following command to delete the `zkOk.sh` script from the file system of Pod `zk-0`. | ||||
| In another window, use the following command to delete the `zookeeper-ready` script from the file system of Pod `zk-0`. | ||||
| 
 | ||||
| ```shell | ||||
| kubectl exec zk-0 -- rm /usr/bin/zookeeper-ready | ||||
|  |  | |||
|  | @ -18,4 +18,4 @@ egressSelections: | |||
|       # The other supported transport is "tcp". You will need to set up TLS  | ||||
|       # config to secure the TCP transport. | ||||
|       uds: | ||||
|         udsName: /etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket | ||||
|         udsName: /etc/kubernetes/konnectivity-server/konnectivity-server.socket | ||||
|  |  | |||
|  | @ -22,7 +22,7 @@ spec: | |||
|         - key: "CriticalAddonsOnly" | ||||
|           operator: "Exists" | ||||
|       containers: | ||||
|         - image: us.gcr.io/k8s-artifacts-prod/kas-network-proxy/proxy-agent:v0.0.8 | ||||
|         - image: us.gcr.io/k8s-artifacts-prod/kas-network-proxy/proxy-agent:v0.0.12 | ||||
|           name: konnectivity-agent | ||||
|           command: ["/proxy-agent"] | ||||
|           args: [ | ||||
|  | @ -32,6 +32,8 @@ spec: | |||
|                   # this is the IP address of the master machine. | ||||
|                   "--proxy-server-host=35.225.206.7", | ||||
|                   "--proxy-server-port=8132", | ||||
|                   "--admin-server-port=8133", | ||||
|                   "--health-server-port=8134", | ||||
|                   "--service-account-token-path=/var/run/secrets/tokens/konnectivity-agent-token" | ||||
|                   ] | ||||
|           volumeMounts: | ||||
|  | @ -39,7 +41,7 @@ spec: | |||
|               name: konnectivity-agent-token | ||||
|           livenessProbe: | ||||
|             httpGet: | ||||
|               port: 8093 | ||||
|               port: 8134 | ||||
|               path: /healthz | ||||
|             initialDelaySeconds: 15 | ||||
|             timeoutSeconds: 15 | ||||
|  |  | |||
|  | @ -8,34 +8,33 @@ spec: | |||
|   hostNetwork: true | ||||
|   containers: | ||||
|   - name: konnectivity-server-container | ||||
|     image: us.gcr.io/k8s-artifacts-prod/kas-network-proxy/proxy-server:v0.0.8 | ||||
|     image: us.gcr.io/k8s-artifacts-prod/kas-network-proxy/proxy-server:v0.0.12 | ||||
|     command: ["/proxy-server"] | ||||
|     args: [ | ||||
|             "--log-file=/var/log/konnectivity-server.log", | ||||
|             "--logtostderr=false", | ||||
|             "--log-file-max-size=0", | ||||
|             "--logtostderr=true", | ||||
|             # This needs to be consistent with the value set in egressSelectorConfiguration. | ||||
|             "--uds-name=/etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket", | ||||
|             "--uds-name=/etc/kubernetes/konnectivity-server/konnectivity-server.socket", | ||||
|             # The following two lines assume the Konnectivity server is | ||||
|             # deployed on the same machine as the apiserver, and the certs and | ||||
|             # key of the API Server are at the specified location. | ||||
|             "--cluster-cert=/etc/srv/kubernetes/pki/apiserver.crt", | ||||
|             "--cluster-key=/etc/srv/kubernetes/pki/apiserver.key", | ||||
|             "--cluster-cert=/etc/kubernetes/pki/apiserver.crt", | ||||
|             "--cluster-key=/etc/kubernetes/pki/apiserver.key", | ||||
|             # This needs to be consistent with the value set in egressSelectorConfiguration. | ||||
|             "--mode=grpc", | ||||
|             "--server-port=0", | ||||
|             "--agent-port=8132", | ||||
|             "--admin-port=8133", | ||||
|             "--health-port=8134", | ||||
|             "--agent-namespace=kube-system", | ||||
|             "--agent-service-account=konnectivity-agent", | ||||
|             "--kubeconfig=/etc/srv/kubernetes/konnectivity-server/kubeconfig", | ||||
|             "--kubeconfig=/etc/kubernetes/konnectivity-server.conf", | ||||
|             "--authentication-audience=system:konnectivity-server" | ||||
|             ] | ||||
|     livenessProbe: | ||||
|       httpGet: | ||||
|         scheme: HTTP | ||||
|         host: 127.0.0.1 | ||||
|         port: 8133 | ||||
|         port: 8134 | ||||
|         path: /healthz | ||||
|       initialDelaySeconds: 30 | ||||
|       timeoutSeconds: 60 | ||||
|  | @ -46,25 +45,28 @@ spec: | |||
|     - name: adminport | ||||
|       containerPort: 8133 | ||||
|       hostPort: 8133 | ||||
|     - name: healthport | ||||
|       containerPort: 8134 | ||||
|       hostPort: 8134 | ||||
|     volumeMounts: | ||||
|     - name: varlogkonnectivityserver | ||||
|       mountPath: /var/log/konnectivity-server.log | ||||
|       readOnly: false | ||||
|     - name: pki | ||||
|       mountPath: /etc/srv/kubernetes/pki | ||||
|     - name: k8s-certs | ||||
|       mountPath: /etc/kubernetes/pki | ||||
|       readOnly: true | ||||
|     - name: kubeconfig | ||||
|       mountPath: /etc/kubernetes/konnectivity-server.conf | ||||
|       readOnly: true | ||||
|     - name: konnectivity-uds | ||||
|       mountPath: /etc/srv/kubernetes/konnectivity-server | ||||
|       mountPath: /etc/kubernetes/konnectivity-server | ||||
|       readOnly: false | ||||
|   volumes: | ||||
|   - name: varlogkonnectivityserver | ||||
|   - name: k8s-certs | ||||
|     hostPath: | ||||
|       path: /var/log/konnectivity-server.log | ||||
|       path: /etc/kubernetes/pki | ||||
|   - name: kubeconfig | ||||
|     hostPath: | ||||
|       path: /etc/kubernetes/konnectivity-server.conf | ||||
|       type: FileOrCreate | ||||
|   - name: pki | ||||
|     hostPath: | ||||
|       path: /etc/srv/kubernetes/pki | ||||
|   - name: konnectivity-uds | ||||
|     hostPath: | ||||
|       path: /etc/srv/kubernetes/konnectivity-server | ||||
|       path: /etc/kubernetes/konnectivity-server | ||||
|       type: DirectoryOrCreate | ||||
|  |  | |||
|  | @ -6,7 +6,7 @@ spec: | |||
|   containers: | ||||
|     - name: test-container | ||||
|       image: k8s.gcr.io/busybox | ||||
|       command: [ "/bin/sh", "-c", "echo $(SPECIAL_LEVEL_KEY) $(SPECIAL_TYPE_KEY)" ] | ||||
|       command: [ "/bin/echo", "$(SPECIAL_LEVEL_KEY) $(SPECIAL_TYPE_KEY)" ] | ||||
|       env: | ||||
|         - name: SPECIAL_LEVEL_KEY | ||||
|           valueFrom: | ||||
|  |  | |||
|  | @ -12,7 +12,7 @@ spec: | |||
|             operator: In | ||||
|             values: | ||||
|             - S1 | ||||
|         topologyKey: failure-domain.beta.kubernetes.io/zone | ||||
|         topologyKey: topology.kubernetes.io/zone | ||||
|     podAntiAffinity: | ||||
|       preferredDuringSchedulingIgnoredDuringExecution: | ||||
|       - weight: 100 | ||||
|  | @ -23,7 +23,7 @@ spec: | |||
|               operator: In | ||||
|               values: | ||||
|               - S2 | ||||
|           topologyKey: failure-domain.beta.kubernetes.io/zone | ||||
|           topologyKey: topology.kubernetes.io/zone | ||||
|   containers: | ||||
|   - name: with-pod-affinity | ||||
|     image: k8s.gcr.io/pause:2.0 | ||||
|  |  | |||