diff --git a/.github/workflows/netlify-periodic-build.yml b/.github/workflows/netlify-periodic-build.yml new file mode 100644 index 0000000000..29fef807b6 --- /dev/null +++ b/.github/workflows/netlify-periodic-build.yml @@ -0,0 +1,15 @@ +--- +name: Scheduled Netlify site build +on: + schedule: # Build twice daily: shortly after midnight and noon (UTC) + # Offset is to be nice to the build service + - cron: '4 0,12 * * *' +jobs: + build: + runs-on: ubuntu-latest + steps: + - name: Trigger build on Netlify + env: + TOKEN: ${{ secrets.NETLIFY_BUILD_HOOK_KEY }} + run: >- + curl -s -H "Accept: application/json" -H "Content-Type: application/json" -X POST -d "{}" "https://api.netlify.com/build_hooks/${TOKEN}" diff --git a/Makefile b/Makefile index 58babc3627..ea1c4797af 100644 --- a/Makefile +++ b/Makefile @@ -58,7 +58,7 @@ docker-serve: @echo -e "$(CCRED)**** The use of docker-serve is deprecated. Use container-serve instead. ****$(CCEND)" $(MAKE) container-serve -container-image: +container-image: ## Build a container image for the preview of the website $(CONTAINER_ENGINE) build . \ --network=host \ --tag $(CONTAINER_IMAGE) \ @@ -67,7 +67,7 @@ container-image: container-build: module-check $(CONTAINER_RUN) --read-only --mount type=tmpfs,destination=/tmp,tmpfs-mode=01777 $(CONTAINER_IMAGE) sh -c "npm ci && hugo --minify" -container-serve: module-check +container-serve: module-check ## Boot the development server using container. Run `make container-image` before this. 
$(CONTAINER_RUN) --read-only --mount type=tmpfs,destination=/tmp,tmpfs-mode=01777 -p 1313:1313 $(CONTAINER_IMAGE) hugo server --buildFuture --bind 0.0.0.0 --destination /tmp/hugo --cleanDestinationDir test-examples: diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index eea832a762..978be7ca33 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -7,12 +7,9 @@ aliases: - mrbobbytables sig-docs-blog-reviewers: # Reviewers for blog content - castrojo - - cody-clark - kbarnard10 - mrbobbytables - onlydole - - parispittman - - vonguard sig-docs-de-owners: # Admins for German content - bene2k1 - mkorbi @@ -30,6 +27,7 @@ aliases: - kbarnard10 - kbhawkey - onlydole + - reylejano - savitharaghunathan - sftim - steveperry-53 @@ -98,6 +96,7 @@ aliases: - irvifa sig-docs-id-reviews: # PR reviews for Indonesian content - girikuncoro + - habibrosyad - irvifa - wahyuoi - phanama @@ -138,6 +137,7 @@ aliases: - seokho-son - ysyukr - pjhwa + - yoonian sig-docs-leads: # Website chairs and tech leads - irvifa - jimangel @@ -214,3 +214,12 @@ aliases: - idvoretskyi - MaxymVlasov - Potapy4 + # authoritative source: git.k8s.io/community/OWNERS_ALIASES + committee-steering: # provide PR approvals for announcements + - cblecker + - derekwaynecarr + - dims + - liggitt + - mrbobbytables + - nikhita + - parispittman diff --git a/README-de.md b/README-de.md index 9efc1fb597..b570f43671 100644 --- a/README-de.md +++ b/README-de.md @@ -14,9 +14,9 @@ Sobald Ihre Pull-Anfrage erstellt wurde, übernimmt ein Rezensent von Kubernetes Weitere Informationen zum Beitrag zur Kubernetes-Dokumentation finden Sie unter: * [Mitwirkung beginnen](https://kubernetes.io/docs/contribute/start/) -* [Ihre Dokumentationsänderungen bereitstellen](http://kubernetes.io/docs/contribute/intermediate#view-your-changes-locally) -* [Seitenvorlagen verwenden](http://kubernetes.io/docs/contribute/style/page-content-types/) -* [Dokumentationsstil-Handbuch](http://kubernetes.io/docs/contribute/style/style-guide/) +* [Ihre 
Dokumentationsänderungen bereitstellen](https://kubernetes.io/docs/contribute/intermediate#view-your-changes-locally) +* [Seitenvorlagen verwenden](https://kubernetes.io/docs/contribute/style/page-content-types/) +* [Dokumentationsstil-Handbuch](https://kubernetes.io/docs/contribute/style/style-guide/) * [Übersetzung der Kubernetes-Dokumentation](https://kubernetes.io/docs/contribute/localization/) ## `README.md`'s Localizing Kubernetes Documentation @@ -65,7 +65,7 @@ Dadurch wird der lokale Hugo-Server an Port 1313 gestartet. Öffnen Sie Ihren Br ## Community, Diskussion, Beteiligung und Unterstützung -Erfahren Sie auf der [Community-Seite](http://kubernetes.io/community/) wie Sie mit der Kubernetes-Community interagieren können. +Erfahren Sie auf der [Community-Seite](https://kubernetes.io/community/) wie Sie mit der Kubernetes-Community interagieren können. Sie können die Betreuer dieses Projekts unter folgender Adresse erreichen: diff --git a/README-es.md b/README-es.md index cc4a6cefa0..f5b1e870dd 100644 --- a/README-es.md +++ b/README-es.md @@ -17,9 +17,9 @@ Los revisores harán todo lo posible para proporcionar toda la información nece Para obtener más información sobre cómo contribuir a la documentación de Kubernetes, puede consultar: * [Empezando a contribuir](https://kubernetes.io/docs/contribute/start/) -* [Visualizando sus cambios en su entorno local](http://kubernetes.io/docs/contribute/intermediate#view-your-changes-locally) -* [Utilizando las plantillas de las páginas](http://kubernetes.io/docs/contribute/style/page-content-types/) -* [Guía de estilo de la documentación](http://kubernetes.io/docs/contribute/style/style-guide/) +* [Visualizando sus cambios en su entorno local](https://kubernetes.io/docs/contribute/intermediate#view-your-changes-locally) +* [Utilizando las plantillas de las páginas](https://kubernetes.io/docs/contribute/style/page-content-types/) +* [Guía de estilo de la 
documentación](https://kubernetes.io/docs/contribute/style/style-guide/) * [Traduciendo la documentación de Kubernetes](https://kubernetes.io/docs/contribute/localization/) ## Levantando el sitio web kubernetes.io en su entorno local con Docker diff --git a/README-ru.md b/README-ru.md index d999e1cc88..348f92a82e 100644 --- a/README-ru.md +++ b/README-ru.md @@ -2,38 +2,117 @@ [![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-master-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest) -Добро пожаловать! Данный репозиторий содержит все необходимые файлы для сборки [сайта Kubernetes и документации](https://kubernetes.io/). Мы благодарим вас за старания! +Данный репозиторий содержит все необходимые файлы для сборки [сайта Kubernetes и документации](https://kubernetes.io/). Мы благодарим вас за желание внести свой вклад! -## Запуск сайта с помощью Hugo +# Использование этого репозитория -Обратитесь к [официальной документации Hugo](https://gohugo.io/getting-started/installing/), чтобы установить Hugo. Убедитесь, что вы установили правильную версию Hugo, которая устанавливается в переменной окружения `HUGO_VERSION` в файле [`netlify.toml`](netlify.toml#L10). +Запустить сайт локально можно с помощью Hugo (Extended version) или же в исполняемой среде для контейнеров. Мы настоятельно рекомендуем воспользоваться контейнерной средой, поскольку она обеспечивает консистивность развёртывания с оригинальным сайтом. 
-После установки Hugo, чтобы запустить сайт, выполните в консоли: +## Предварительные требования -```bash +Чтобы работать с этим репозиторием, понадобятся следующие компоненты, установленные локально: + +- [npm](https://www.npmjs.com/) +- [Go](https://golang.org/) +- [Hugo (Extended version)](https://gohugo.io/) +- Исполняемая среда для контейнеров вроде [Docker](https://www.docker.com/) + +Перед тем, как начать, установите зависимости. Склонируйте репозиторий и перейдите в его директорию: + +``` git clone https://github.com/kubernetes/website.git cd website -hugo server --buildFuture ``` -Эта команда запустит сервер Hugo на порту 1313. Откройте браузер и перейдите по ссылке http://localhost:1313, чтобы открыть сайт. Если вы отредактируете исходные файлы сайта, Hugo автоматически применит изменения и обновит страницу в браузере. +Сайт Kubernetes использует [тему для Hugo под названием Docsy](https://github.com/google/docsy). Даже если вы планируете запускать сайт в контейнере, мы настоятельно рекомендуем загрузить соответствующий подмодуль и другие зависимости для разработки, выполнив следующую команду: -## Сообщество, обсуждение, вклад и поддержка +``` +# загружаем Git-подмодуль Docsy +git submodule update --init --recursive --depth 1 +``` -Узнайте, как поучаствовать в жизни сообщества Kubernetes на [странице сообщества](http://kubernetes.io/community/). +## Запуск сайта в контейнере -Вы можете связаться с сопровождающими этого проекта по следующим ссылкам: +Чтобы собрать сайт в контейнере, выполните следующие команды — они собирают образ контейнера и запускают его: -- [Канал в Slack](https://kubernetes.slack.com/messages/sig-docs) -- [Рассылка](https://groups.google.com/forum/#!forum/kubernetes-sig-docs) +``` +make container-image +make container-serve +``` -## Вклад в документацию +Откройте браузер и перейдите по ссылке http://localhost:1313, чтобы увидеть сайт. 
Если вы отредактируете исходные файлы сайта, Hugo автоматически обновит сам сайт и выполнит обновление страницы в браузере. + +## Запуск сайта с помощью Hugo -Нажмите на кнопку **Fork** в правом верхнем углу, чтобы создать копию этого репозитория в ваш GitHub-аккаунт. Эта копия называется *форк-репозиторием*. Делайте любые изменения в вашем форк-репозитории, и когда вы будете готовы опубликовать изменения, откройте форк-репозиторий и создайте новый пулреквест, чтобы уведомить нас. +Убедитесь, что вы установили расширенную версию Hugo (extended version): она определена в переменной окружения `HUGO_VERSION` в файле [`netlify.toml`](netlify.toml#L10). -После того, как вы отправите пулреквест, ревьювер Kubernetes даст по нему обратную связь. Вы, как автор пулреквеста, **должны обновить свой пулреквест после его рассмотрения ревьювером Kubernetes.** +Чтобы собрать и протестировать сайт локально, выполните: -Вполне возможно, что более одного ревьювера Kubernetes оставят свои комментарии или даже может быть так, что новый комментарий ревьювера Kubernetes будет отличаться от первоначального назначенного ревьювера. Кроме того, в некоторых случаях один из ревьюверов может запросить технический обзор у [технического ревьювера Kubernetes](https://github.com/kubernetes/website/wiki/Tech-reviewers), если это будет необходимо. Ревьюверы сделают все возможное, чтобы как можно оперативно оставить свои предложения и пожелания, но время ответа может варьироваться в зависимости от обстоятельств. +```bash +# install dependencies +npm ci +make serve +``` + +Эти команды запустят локальный сервер Hugo на порту 1313. Откройте браузер и перейдите по ссылке http://localhost:1313, чтобы увидеть сайт. Если вы отредактируете исходные файлы сайта, Hugo автоматически обновит сам сайт и выполнит обновление страницы в браузере. 
+ +## Решение проблем +### error: failed to transform resource: TOCSS: failed to transform "scss/main.scss" (text/x-scss): this feature is not available in your current Hugo version + +По техническим причинам Hugo поставляется с двумя наборами бинарников. Текущий сайт Kubernetes работает только в версии **Hugo Extended**. На [странице релизов](https://github.com/gohugoio/hugo/releases) ищите архивы со словом `extended` в названии. Чтобы убедиться в корректности, запустите команду `hugo version` и найдите в выводе слово `extended`. + +### Решение проблемы на macOS с "too many open files" + +Если вы запускаете `make serve` на macOS и получаете следующую ошибку: + +``` +ERROR 2020/08/01 19:09:18 Error: listen tcp 127.0.0.1:1313: socket: too many open files +make: *** [serve] Error 1 +``` + +Попробуйте проверить текущий лимит для открытых файлов: + +`launchctl limit maxfiles` + +Затем выполните следующие команды (они взяты и адаптированы из https://gist.github.com/tombigel/d503800a282fcadbee14b537735d202c): + +```shell +#!/bin/sh + +# Ссылки на оригинальные gist-файлы закомментированы в пользу моих адаптированных. 
+# curl -O https://gist.githubusercontent.com/a2ikm/761c2ab02b7b3935679e55af5d81786a/raw/ab644cb92f216c019a2f032bbf25e258b01d87f9/limit.maxfiles.plist +# curl -O https://gist.githubusercontent.com/a2ikm/761c2ab02b7b3935679e55af5d81786a/raw/ab644cb92f216c019a2f032bbf25e258b01d87f9/limit.maxproc.plist + +curl -O https://gist.githubusercontent.com/tombigel/d503800a282fcadbee14b537735d202c/raw/ed73cacf82906fdde59976a0c8248cce8b44f906/limit.maxfiles.plist +curl -O https://gist.githubusercontent.com/tombigel/d503800a282fcadbee14b537735d202c/raw/ed73cacf82906fdde59976a0c8248cce8b44f906/limit.maxproc.plist + +sudo mv limit.maxfiles.plist /Library/LaunchDaemons +sudo mv limit.maxproc.plist /Library/LaunchDaemons + +sudo chown root:wheel /Library/LaunchDaemons/limit.maxfiles.plist +sudo chown root:wheel /Library/LaunchDaemons/limit.maxproc.plist + +sudo launchctl load -w /Library/LaunchDaemons/limit.maxfiles.plist +``` + +Данное решение работает для macOS Catalina и Mojave. + +# Участие в SIG Docs + +Узнайте о Kubernetes-сообществе SIG Docs и его встречах на [странице сообщества](https://github.com/kubernetes/community/tree/master/sig-docs#meetings). + +Вы можете связаться с сопровождающими этот проект по следующим ссылкам: + +- [Канал в Slack](https://kubernetes.slack.com/messages/sig-docs) ([получите приглашение в этот Slack](https://slack.k8s.io/)) +- [Почтовая рассылка](https://groups.google.com/forum/#!forum/kubernetes-sig-docs) + +# Вклад в документацию + +Нажмите на кнопку **Fork** в правом верхнем углу, чтобы создать копию этого репозитория для вашего GitHub-аккаунта. Эта копия называется *форк-репозиторием*. Делайте любые изменения в своем форк-репозитории и, когда будете готовы опубликовать изменения, зайдите в свой форк-репозиторий и создайте новый pull-запрос (PR), чтобы уведомить нас. + +После того, как вы отправите pull-запрос, ревьювер из проекта Kubernetes даст по нему обратную связь. 
Вы, как автор pull-запроса, **должны обновить свой PR после его рассмотрения ревьювером Kubernetes.** + +Вполне возможно, что более одного ревьювера Kubernetes оставят свои комментарии. Может быть даже так, что вы будете получать обратную связь уже не от того ревьювера, что был первоначально вам назначен. Кроме того, в некоторых случаях один из ревьюверов может запросить техническую рецензию от [технического ревьювера Kubernetes](https://github.com/kubernetes/website/wiki/Tech-reviewers), если это потребуется. Ревьюверы сделают все возможное, чтобы как можно оперативнее оставить свои предложения и пожелания, но время ответа может варьироваться в зависимости от обстоятельств. Узнать подробнее о том, как поучаствовать в документации Kubernetes, вы можете по ссылкам ниже: @@ -42,21 +121,22 @@ hugo server --buildFuture * [Руководство по оформлению документации](https://kubernetes.io/docs/contribute/style/style-guide/) * [Руководство по локализации Kubernetes](https://kubernetes.io/docs/contribute/localization/) -## Файл `README.md` на других языках +# Файл `README.md` на других языках + | другие языки | другие языки | |-------------------------------|-------------------------------| -| [Английский](README.md) | [Французский](README-fr.md) | -| [Корейский](README-ko.md) | [Немецкий](README-de.md) | -| [Португальский](README-pt.md) | [Хинди](README-hi.md) | -| [Испанский](README-es.md) | [Индонезийский](README-id.md) | -| [Китайский](README-zh.md) | [Японский](README-ja.md) | -| [Вьетнамский](README-vi.md) | [Итальянский](README-it.md) | -| [Польский]( README-pl.md) | [Украинский](README-uk.md) | +| [Английский](README.md) | [Немецкий](README-de.md) | +| [Вьетнамский](README-vi.md) | [Польский]( README-pl.md) | +| [Индонезийский](README-id.md) | [Португальский](README-pt.md) | +| [Испанский](README-es.md) | [Украинский](README-uk.md) | +| [Итальянский](README-it.md) | [Французский](README-fr.md) | +| [Китайский](README-zh.md) | [Хинди](README-hi.md) | +| 
[Корейский](README-ko.md) | [Японский](README-ja.md) | -### Кодекс поведения +# Кодекс поведения -Участие в сообществе Kubernetes регулируется [кодексом поведения CNCF](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). +Участие в сообществе Kubernetes регулируется [кодексом поведения CNCF](https://github.com/cncf/foundation/blob/master/code-of-conduct-languages/ru.md). -## Спасибо! +# Спасибо! Kubernetes процветает благодаря сообществу и мы ценим ваш вклад в сайт и документацию! diff --git a/README-uk.md b/README-uk.md index f437535353..2872b406d1 100644 --- a/README-uk.md +++ b/README-uk.md @@ -18,7 +18,8 @@ ```bash git clone https://github.com/kubernetes/website.git cd website -hugo server --buildFuture +git submodule update --init --recursive --depth 1 +make serve ``` @@ -82,4 +83,4 @@ hugo server --buildFuture ## Дякуємо! -Долучення до спільноти - запорука успішного розвитку Kubernetes. Ми цінуємо ваш внесок у наш сайт і документацію! \ No newline at end of file +Долучення до спільноти - запорука успішного розвитку Kubernetes. Ми цінуємо ваш внесок у наш сайт і документацію! 
diff --git a/README-zh.md b/README-zh.md index a1532f37a3..4dd4de269f 100644 --- a/README-zh.md +++ b/README-zh.md @@ -228,8 +228,8 @@ For more information about contributing to the Kubernetes documentation, see: 有关为 Kubernetes 文档做出贡献的更多信息,请参阅: * [贡献 Kubernetes 文档](https://kubernetes.io/docs/contribute/) -* [页面内容类型](http://kubernetes.io/docs/contribute/style/page-content-types/) -* [文档风格指南](http://kubernetes.io/docs/contribute/style/style-guide/) +* [页面内容类型](https://kubernetes.io/docs/contribute/style/page-content-types/) +* [文档风格指南](https://kubernetes.io/docs/contribute/style/style-guide/) * [本地化 Kubernetes 文档](https://kubernetes.io/docs/contribute/localization/) # 中文本地化 diff --git a/README.md b/README.md index 31e5fb65d0..291106d09c 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,9 @@ This repository contains the assets required to build the [Kubernetes website and documentation](https://kubernetes.io/). We're glad that you want to contribute! ++ [Contributing to the docs](#contributing-to-the-docs) ++ [Localization ReadMes](#localization-readmemds) + # Using this repository You can run the website locally using Hugo (Extended version), or you can run it in a container runtime. We strongly recommend using the container runtime, as it gives deployment consistency with the live website. 
diff --git a/assets/scss/_custom.scss b/assets/scss/_custom.scss index 5eb26c2e54..568a258a1c 100644 --- a/assets/scss/_custom.scss +++ b/assets/scss/_custom.scss @@ -578,3 +578,64 @@ body.td-documentation { color: black; text-decoration: none !important; } + +@media print { + /* Do not print announcements */ + #announcement, section#announcement, #fp-announcement, section#fp-announcement { + display: none; + } +} + +#announcement, #fp-announcement { + > * { + color: inherit; + background: inherit; + } + + a { + color: inherit; + border-bottom: 1px solid #fff; + } + + a:hover { + color: inherit; + border-bottom: none; + } +} + +#announcement { + padding-top: 105px; + padding-bottom: 25px; +} + +.header-hero { + padding-top: 40px; +} + +/* Extra announcement height only for landscape viewports */ +@media (min-aspect-ratio: 8/9) { + #fp-announcement { + min-height: 25vh; + } +} + +#fp-announcement aside { + padding-top: 115px; + padding-bottom: 25px; +} + +.announcement { + .content { + margin-bottom: 0px; + } + + + > p { + .gridPage #announcement .content p, + .announcement > h4, + .announcement > h3 { + color: #ffffff; + } + } +} + diff --git a/config.toml b/config.toml index c0fdb72acf..d77c315331 100644 --- a/config.toml +++ b/config.toml @@ -13,7 +13,7 @@ disableBrowserError = true disableKinds = ["taxonomy", "taxonomyTerm"] -ignoreFiles = [ "^OWNERS$", "README[-]+[a-z]*\\.md", "^node_modules$", "content/en/docs/doc-contributor-tools" ] +ignoreFiles = [ "(?:^|/)OWNERS$", "README[-]+[a-z]*\\.md", "^node_modules$", "content/en/docs/doc-contributor-tools" ] timeout = 3000 @@ -154,11 +154,6 @@ githubWebsiteRaw = "raw.githubusercontent.com/kubernetes/website" # GitHub repository link for editing a page and opening issues. github_repo = "https://github.com/kubernetes/website" -# param for displaying an announcement block on every page. -# See /i18n/en.toml for message text and title. 
-announcement = true -announcement_bg = "#000000" #choose a dark color – text is white - #Searching k8s_search = true diff --git a/content/de/community/_index.html b/content/de/community/_index.html index d636807134..adff754f03 100644 --- a/content/de/community/_index.html +++ b/content/de/community/_index.html @@ -8,7 +8,7 @@ cid: community

Die Gewissheit, dass Kubernetes überall und für alle gut funktioniert.

-

Verbinden Sie sich mit der Kubernetes-Community in unserem Slack Kanal, Diskussionsforum, oder beteiligen Sie sich an der Kubernetes-dev-Google-Gruppe. Eine wöchentliches Community-Meeting findet per Videokonferenz statt, um den Stand der Dinge zu diskutieren, folgen Sie +

Verbinden Sie sich mit der Kubernetes-Community in unserem Slack Kanal, Diskussionsforum, oder beteiligen Sie sich an der Kubernetes-dev-Google-Gruppe. Eine wöchentliches Community-Meeting findet per Videokonferenz statt, um den Stand der Dinge zu diskutieren, folgen Sie diesen Anweisungen für Informationen wie Sie teilnehmen können.

Sie können Kubernetes auch auf der ganzen Welt über unsere Kubernetes Meetup Community und der diff --git a/content/de/community/static/cncf-code-of-conduct.md b/content/de/community/static/cncf-code-of-conduct.md index e94bc7b7fa..7a7bee34e7 100644 --- a/content/de/community/static/cncf-code-of-conduct.md +++ b/content/de/community/static/cncf-code-of-conduct.md @@ -23,7 +23,7 @@ Dieser Verhaltenskodex gilt sowohl innerhalb von Projekträumen als auch in öff Fälle von missbräuchlichem, belästigendem oder anderweitig unzumutbarem Verhalten in Kubernetes können gemeldet werden, indem Sie sich an das [Kubernetes Komitee für Verhaltenskodex](https://git.k8s.io/community/committee-code-of-conduct) wenden unter . Für andere Projekte wenden Sie sich bitte an einen CNCF-Projektbetreuer oder an unseren Mediator, Mishi Choudhary . -Dieser Verhaltenskodex wurde aus dem Contributor Covenant übernommen (http://contributor-covenant.org), Version 1.2.0, verfügbar unter http://contributor-covenant.org/version/1/2/0/ +Dieser Verhaltenskodex wurde aus dem Contributor Covenant übernommen (https://contributor-covenant.org), Version 1.2.0, verfügbar unter https://contributor-covenant.org/version/1/2/0/ ### CNCF Verhaltenskodex für Veranstaltungen diff --git a/content/en/blog/_posts/2016-01-00-Why-Kubernetes-Doesnt-Use-Libnetwork.md b/content/en/blog/_posts/2016-01-00-Why-Kubernetes-Doesnt-Use-Libnetwork.md index 590548aea2..553bd6ad96 100644 --- a/content/en/blog/_posts/2016-01-00-Why-Kubernetes-Doesnt-Use-Libnetwork.md +++ b/content/en/blog/_posts/2016-01-00-Why-Kubernetes-Doesnt-Use-Libnetwork.md @@ -26,7 +26,7 @@ On the other hand, CNI is more philosophically aligned with Kubernetes. It's far Additionally, it's trivial to wrap a CNI plugin and produce a more customized CNI plugin — it can be done with a simple shell script. CNM is much more complex in this regard. This makes CNI an attractive option for rapid development and iteration. 
Early prototypes have proven that it's possible to eject almost 100% of the currently hard-coded network logic in kubelet into a plugin. -We investigated [writing a "bridge" CNM driver](https://groups.google.com/forum/#!topic/kubernetes-sig-network/5MWRPxsURUw) for Docker that ran CNI drivers. This turned out to be very complicated. First, the CNM and CNI models are very different, so none of the "methods" lined up. We still have the global vs. local and key-value issues discussed above. Assuming this driver would declare itself local, we have to get info about logical networks from Kubernetes. +We investigated [writing a "bridge" CNM driver](https://groups.google.com/g/kubernetes-sig-network/c/5MWRPxsURUw) for Docker that ran CNI drivers. This turned out to be very complicated. First, the CNM and CNI models are very different, so none of the "methods" lined up. We still have the global vs. local and key-value issues discussed above. Assuming this driver would declare itself local, we have to get info about logical networks from Kubernetes. Unfortunately, Docker drivers are hard to map to other control planes like Kubernetes. Specifically, drivers are not told the name of the network to which a container is being attached — just an ID that Docker allocates internally. This makes it hard for a driver to map back to any concept of network that exists in another system. @@ -34,6 +34,6 @@ This and other issues have been brought up to Docker developers by network vendo For all of these reasons we have chosen to invest in CNI as the Kubernetes plugin model. There will be some unfortunate side-effects of this. Most of them are relatively minor (for example, `docker inspect` will not show an IP address), but some are significant. In particular, containers started by `docker run` might not be able to communicate with containers started by Kubernetes, and network integrators will have to provide CNI drivers if they want to fully integrate with Kubernetes. 
On the other hand, Kubernetes will get simpler and more flexible, and a lot of the ugliness of early bootstrapping (such as configuring Docker to use our bridge) will go away. -As we proceed down this path, we’ll certainly keep our eyes and ears open for better ways to integrate and simplify. If you have thoughts on how we can do that, we really would like to hear them — find us on [slack](http://slack.k8s.io/) or on our [network SIG mailing-list](https://groups.google.com/forum/#!forum/kubernetes-sig-network). +As we proceed down this path, we’ll certainly keep our eyes and ears open for better ways to integrate and simplify. If you have thoughts on how we can do that, we really would like to hear them — find us on [slack](http://slack.k8s.io/) or on our [network SIG mailing-list](https://groups.google.com/g/kubernetes-sig-network). Tim Hockin, Software Engineer, Google diff --git a/content/en/blog/_posts/2017-11-00-Containerd-Container-Runtime-Options-Kubernetes.md b/content/en/blog/_posts/2017-11-00-Containerd-Container-Runtime-Options-Kubernetes.md index 3edf5625c2..2a105c6396 100644 --- a/content/en/blog/_posts/2017-11-00-Containerd-Container-Runtime-Options-Kubernetes.md +++ b/content/en/blog/_posts/2017-11-00-Containerd-Container-Runtime-Options-Kubernetes.md @@ -56,13 +56,13 @@ Cri-containerd uses containerd to manage the full container lifecycle and all co Let’s use an example to demonstrate how cri-containerd works for the case when Kubelet creates a single-container pod: -1. 1.Kubelet calls cri-containerd, via the CRI runtime service API, to create a pod; -2. 2.cri-containerd uses containerd to create and start a special [pause container](https://www.ianlewis.org/en/almighty-pause-container) (the _sandbox container_) and put that container inside the pod’s cgroups and namespace (steps omitted for brevity); -3. 3.cri-containerd configures the pod’s network namespace using CNI; -4. 
4.Kubelet subsequently calls cri-containerd, via the CRI image service API, to pull the application container image; -5. 5.cri-containerd further uses containerd to pull the image if the image is not present on the node; -6. 6.Kubelet then calls cri-containerd, via the CRI runtime service API, to create and start the application container inside the pod using the pulled container image; -7. 7.cri-containerd finally calls containerd to create the application container, put it inside the pod’s cgroups and namespace, then to start the pod’s new application container. +1. Kubelet calls cri-containerd, via the CRI runtime service API, to create a pod; +2. cri-containerd uses containerd to create and start a special [pause container](https://www.ianlewis.org/en/almighty-pause-container) (the _sandbox container_) and put that container inside the pod’s cgroups and namespace (steps omitted for brevity); +3. cri-containerd configures the pod’s network namespace using CNI; +4. Kubelet subsequently calls cri-containerd, via the CRI image service API, to pull the application container image; +5. cri-containerd further uses containerd to pull the image if the image is not present on the node; +6. Kubelet then calls cri-containerd, via the CRI runtime service API, to create and start the application container inside the pod using the pulled container image; +7. cri-containerd finally calls containerd to create the application container, put it inside the pod’s cgroups and namespace, then to start the pod’s new application container. After these steps, a pod and its corresponding application container is created and running. 
diff --git a/content/en/blog/_posts/2020-09-30-writing-crl-scheduler/image01.png b/content/en/blog/_posts/2020-09-30-writing-crl-scheduler/image01.png new file mode 100644 index 0000000000..91e8856139 Binary files /dev/null and b/content/en/blog/_posts/2020-09-30-writing-crl-scheduler/image01.png differ diff --git a/content/en/blog/_posts/2020-09-30-writing-crl-scheduler/image02.png b/content/en/blog/_posts/2020-09-30-writing-crl-scheduler/image02.png new file mode 100644 index 0000000000..dfd14d7cdc Binary files /dev/null and b/content/en/blog/_posts/2020-09-30-writing-crl-scheduler/image02.png differ diff --git a/content/en/blog/_posts/2020-09-30-writing-crl-scheduler/image03.png b/content/en/blog/_posts/2020-09-30-writing-crl-scheduler/image03.png new file mode 100644 index 0000000000..443a6f2d67 Binary files /dev/null and b/content/en/blog/_posts/2020-09-30-writing-crl-scheduler/image03.png differ diff --git a/content/en/blog/_posts/2020-09-30-writing-crl-scheduler/image04.png b/content/en/blog/_posts/2020-09-30-writing-crl-scheduler/image04.png new file mode 100644 index 0000000000..e107adc88b Binary files /dev/null and b/content/en/blog/_posts/2020-09-30-writing-crl-scheduler/image04.png differ diff --git a/content/en/blog/_posts/2020-09-30-writing-crl-scheduler/image05.png b/content/en/blog/_posts/2020-09-30-writing-crl-scheduler/image05.png new file mode 100644 index 0000000000..6d80447d09 Binary files /dev/null and b/content/en/blog/_posts/2020-09-30-writing-crl-scheduler/image05.png differ diff --git a/content/en/blog/_posts/2020-09-30-writing-crl-scheduler/image06.png b/content/en/blog/_posts/2020-09-30-writing-crl-scheduler/image06.png new file mode 100644 index 0000000000..d40b2eb0b6 Binary files /dev/null and b/content/en/blog/_posts/2020-09-30-writing-crl-scheduler/image06.png differ diff --git a/content/en/blog/_posts/2020-09-30-writing-crl-scheduler/image07.png b/content/en/blog/_posts/2020-09-30-writing-crl-scheduler/image07.png new file mode 
100644 index 0000000000..fc3976040f Binary files /dev/null and b/content/en/blog/_posts/2020-09-30-writing-crl-scheduler/image07.png differ diff --git a/content/en/blog/_posts/2020-09-30-writing-crl-scheduler/index.md b/content/en/blog/_posts/2020-09-30-writing-crl-scheduler/index.md new file mode 100644 index 0000000000..0fab185f98 --- /dev/null +++ b/content/en/blog/_posts/2020-09-30-writing-crl-scheduler/index.md @@ -0,0 +1,96 @@ +--- +layout: blog +title: "A Custom Kubernetes Scheduler to Orchestrate Highly Available Applications" +date: 2020-12-21 +slug: writing-crl-scheduler +--- + +**Author**: Chris Seto (Cockroach Labs) + +As long as you're willing to follow the rules, deploying on Kubernetes and air travel can be quite pleasant. More often than not, things will "just work". However, if one is interested in travelling with an alligator that must remain alive or scaling a database that must remain available, the situation is likely to become a bit more complicated. It may even be easier to build one's own plane or database for that matter. Travelling with reptiles aside, scaling a highly available stateful system is no trivial task. + +Scaling any system has two main components: +1. Adding or removing infrastructure that the system will run on, and +2. Ensuring that the system knows how to handle additional instances of itself being added and removed. + +Most stateless systems, web servers for example, are created without the need to be aware of peers. Stateful systems, which includes databases like CockroachDB, have to coordinate with their peer instances and shuffle around data. As luck would have it, CockroachDB handles data redistribution and replication. The tricky part is being able to tolerate failures during these operations by ensuring that data and instances are distributed across many failure domains (availability zones). 
+ +One of Kubernetes' responsibilities is to place "resources" (e.g, a disk or container) into the cluster and satisfy the constraints they request. For example: "I must be in availability zone _A_" (see [Running in multiple zones](/docs/setup/best-practices/multiple-zones/#nodes-are-labeled)), or "I can't be placed onto the same node as this other Pod" (see [Affinity and anti-affinity](/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity)). + +As an addition to those constraints, Kubernetes offers [Statefulsets](/docs/concepts/workloads/controllers/statefulset/) that provide identity to Pods as well as persistent storage that "follows" these identified pods. Identity in a StatefulSet is handled by an increasing integer at the end of a pod's name. It's important to note that this integer must always be contiguous: in a StatefulSet, if pods 1 and 3 exist then pod 2 must also exist. + +Under the hood, CockroachCloud deploys each region of CockroachDB as a StatefulSet in its own Kubernetes cluster - see [Orchestrate CockroachDB in a Single Kubernetes Cluster](https://www.cockroachlabs.com/docs/stable/orchestrate-cockroachdb-with-kubernetes.html). +In this article, I'll be looking at an individual region, one StatefulSet and one Kubernetes cluster which is distributed across at least three availability zones. + +A three-node CockroachCloud cluster would look something like this: + +![3-node, multi-zone cockroachdb cluster](image01.png) + +When adding additional resources to the cluster we also distribute them across zones. For the speediest user experience, we add all Kubernetes nodes at the same time and then scale up the StatefulSet. + +![illustration of phases: adding Kubernetes nodes to the multi-zone cockroachdb cluster](image02.png) + +Note that anti-affinities are satisfied no matter the order in which pods are assigned to Kubernetes nodes. 
In the example, pods 0, 1 and 2 were assigned to zones A, B, and C respectively, but pods 3 and 4 were assigned in a different order, to zones B and A respectively. The anti-affinity is still satisfied because the pods are still placed in different zones. + +To remove resources from a cluster, we perform these operations in reverse order. + +We first scale down the StatefulSet and then remove from the cluster any nodes lacking a CockroachDB pod. + +![illustration of phases: scaling down pods in a multi-zone cockroachdb cluster in Kubernetes](image03.png) + +Now, remember that pods in a StatefulSet of size _n_ must have ids in the range `[0,n)`. When scaling down a StatefulSet by _m_, Kubernetes removes _m_ pods, starting from the highest ordinals and moving towards the lowest, [the reverse in which they were added](/docs/concepts/workloads/controllers/statefulset/#deployment-and-scaling-guarantees). +Consider the cluster topology below: + +![illustration: cockroachdb cluster: 6 nodes distributed across 3 availability zones](image04.png) + +As ordinals 5 through 3 are removed from this cluster, the statefulset continues to have a presence across all 3 availability zones. + +![illustration: removing 3 nodes from a 6-node, 3-zone cockroachdb cluster](image05.png) + +However, Kubernetes' scheduler doesn't _guarantee_ the placement above as we expected at first. + +Our combined knowledge of the following is what lead to this misconception. +* Kubernetes' ability to [automatically spread Pods across zone](/docs/setup/best-practices/multiple-zones/#pods-are-spread-across-zones) +* The behavior that a StatefulSet with _n_ replicas, when Pods are being deployed, they are created sequentially, in order from `{0..n-1}`. See [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#deployment-and-scaling-guarantees) for more details. 
+ +Consider the following topology: + +![illustration: 6-node cockroachdb cluster distributed across 3 availability zones](image06.png) + +These pods were created in order and they are spread across all availability zones in the cluster. When ordinals 5 through 3 are terminated, this cluster will lose its presence in zone C! + +![illustration: terminating 3 nodes in 6-node cluster spread across 3 availability zones, where 2/2 nodes in the same availability zone are terminated, knocking out that AZ](image07.png) + +Worse yet, our automation, at the time, would remove Nodes A-2, B-2, and C-2. Leaving CRDB-1 in an unscheduled state as persistent volumes are only available in the zone they are initially created in. + +To correct the latter issue, we now employ a "hunt and peck" approach to removing machines from a cluster. Rather than blindly removing Kubernetes nodes from the cluster, only nodes without a CockroachDB pod would be removed. The much more daunting task was to wrangle the Kubernetes scheduler. + +## A session of brainstorming left us with 3 options: + +### 1. Upgrade to kubernetes 1.18 and make use of Pod Topology Spread Constraints + +While this seems like it could have been the perfect solution, at the time of writing Kubernetes 1.18 was unavailable on the two most common managed Kubernetes services in public cloud, EKS and GKE. +Furthermore, [pod topology spread constraints](/docs/concepts/workloads/pods/pod-topology-spread-constraints/) were still a [beta feature in 1.18](https://v1-18.docs.kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) which meant that it [wasn't guaranteed to be available in managed clusters](https://cloud.google.com/kubernetes-engine/docs/concepts/types-of-clusters#kubernetes_feature_choices) even when v1.18 became available. +The entire endeavour was concerningly reminiscent of checking [caniuse.com](https://caniuse.com/) when Internet Explorer 8 was still around. + +### 2. 
Deploy a statefulset _per zone_. + +Rather than having one StatefulSet distributed across all availability zones, a single StatefulSet with node affinities per zone would allow manual control over our zonal topology. +Our team had considered this as an option in the past which made it particularly appealing. +Ultimately, we decided to forego this option as it would have required a massive overhaul to our codebase and performing the migration on existing customer clusters would have been an equally large undertaking. + +### 3. Write a custom Kubernetes scheduler. + +Thanks to an example from [Kelsey Hightower](https://github.com/kelseyhightower/scheduler) and a blog post from [Banzai Cloud](https://banzaicloud.com/blog/k8s-custom-scheduler/), we decided to dive in head first and write our own [custom Kubernetes scheduler](/docs/tasks/extend-kubernetes/configure-multiple-schedulers/). +Once our proof-of-concept was deployed and running, we quickly discovered that the Kubernetes' scheduler is also responsible for mapping persistent volumes to the Pods that it schedules. +The output of [`kubectl get events`](/docs/tasks/extend-kubernetes/configure-multiple-schedulers/#verifying-that-the-pods-were-scheduled-using-the-desired-schedulers) had led us to believe there was another system at play. +In our journey to find the component responsible for storage claim mapping, we discovered the [kube-scheduler plugin system](/docs/concepts/scheduling-eviction/scheduling-framework/). Our next POC was a `Filter` plugin that determined the appropriate availability zone by pod ordinal, and it worked flawlessly! + +Our [custom scheduler plugin](https://github.com/cockroachlabs/crl-scheduler) is open source and runs in all of our CockroachCloud clusters. +Having control over how our StatefulSet pods are being scheduled has let us scale out with confidence. 
+We may look into retiring our plugin once pod topology spread constraints are available in GKE and EKS, but the maintenance overhead has been surprisingly low. +Better still: the plugin's implementation is orthogonal to our business logic. Deploying it, or retiring it for that matter, is as simple as changing the `schedulerName` field in our StatefulSet definitions. + +--- + +_[Chris Seto](https://twitter.com/_ostriches) is a software engineer at Cockroach Labs and works on their Kubernetes automation for [CockroachCloud](https://cockroachlabs.cloud), CockroachDB._ diff --git a/content/en/blog/_posts/2020-12-02-dockershim-faq.md b/content/en/blog/_posts/2020-12-02-dockershim-faq.md index 687921a7ce..f8dbe7f7c7 100644 --- a/content/en/blog/_posts/2020-12-02-dockershim-faq.md +++ b/content/en/blog/_posts/2020-12-02-dockershim-faq.md @@ -55,7 +55,7 @@ All your existing images will still work exactly the same. ### What about private images? -Also yes. All CRI runtimes support the same pull secrets configuration used in +Yes. All CRI runtimes support the same pull secrets configuration used in Kubernetes, either via the PodSpec or ServiceAccount. @@ -82,7 +82,7 @@ usability of other container runtimes. As an example, OpenShift 4.x has been using the [CRI-O] runtime in production since June 2019. For other examples and references you can look at the adopters of containerd and -cri-o, two container runtimes under the Cloud Native Computing Foundation ([CNCF]). +CRI-O, two container runtimes under the Cloud Native Computing Foundation ([CNCF]). - [containerd](https://github.com/containerd/containerd/blob/master/ADOPTERS.md) - [CRI-O](https://github.com/cri-o/cri-o/blob/master/ADOPTERS.md) @@ -110,7 +110,7 @@ provide an end-to-end standard for managing containers. That’s a complex question and it depends on a lot of factors. If Docker is working for you, moving to containerd should be a relatively easy swap and -has have strictly better performance and less overhead. 
However we encourage you +will have strictly better performance and less overhead. However, we encourage you to explore all the options from the [CNCF landscape] in case another would be an even better fit for your environment. @@ -129,7 +129,7 @@ common things to consider when migrating are: - Kubectl plugins that require docker CLI or the control socket - Kubernetes tools that require direct access to Docker (e.g. kube-imagepuller) - Configuration of functionality like `registry-mirrors` and insecure registries -- Other support scripts or daemons that expect docker to be available and are run +- Other support scripts or daemons that expect Docker to be available and are run outside of Kubernetes (e.g. monitoring or security agents) - GPUs or special hardware and how they integrate with your runtime and Kubernetes @@ -141,13 +141,14 @@ runtime where possible. Another thing to look out for is anything expecting to run for system maintenance or nested inside a container when building images will no longer work. For the former, you can use the [`crictl`][cr] tool as a drop-in replacement (see [mapping from docker cli to crictl](https://kubernetes.io/docs/tasks/debug-application-cluster/crictl/#mapping-from-docker-cli-to-crictl)) and for the -latter you can use newer container build options like [img], [buildah], or -[kaniko] that don’t require Docker. +latter you can use newer container build options like [img], [buildah], +[kaniko], or [buildkit-cli-for-kubectl] that don’t require Docker. [cr]: https://github.com/kubernetes-sigs/cri-tools [img]: https://github.com/genuinetools/img [buildah]: https://github.com/containers/buildah [kaniko]: https://github.com/GoogleContainerTools/kaniko +[buildkit-cli-for-kubectl]: https://github.com/vmware-tanzu/buildkit-cli-for-kubectl For containerd, you can start with their [documentation] to see what configuration options are available as you migrate things over. 
diff --git a/content/en/blog/_posts/2020-12-02-dont-panic-kubernetes-and-docker.md b/content/en/blog/_posts/2020-12-02-dont-panic-kubernetes-and-docker.md index e6df8971a6..944704967b 100644 --- a/content/en/blog/_posts/2020-12-02-dont-panic-kubernetes-and-docker.md +++ b/content/en/blog/_posts/2020-12-02-dont-panic-kubernetes-and-docker.md @@ -13,8 +13,8 @@ as a container runtime after v1.20. **You do not need to panic. It’s not as dramatic as it sounds.** -tl;dr Docker as an underlying runtime is being deprecated in favor of runtimes -that use the [Container Runtime Interface(CRI)](https://kubernetes.io/blog/2016/12/container-runtime-interface-cri-in-kubernetes/) +TL;DR Docker as an underlying runtime is being deprecated in favor of runtimes +that use the [Container Runtime Interface (CRI)](https://kubernetes.io/blog/2016/12/container-runtime-interface-cri-in-kubernetes/) created for Kubernetes. Docker-produced images will continue to work in your cluster with all runtimes, as they always have. @@ -48,7 +48,7 @@ is a popular choice for that runtime (other common options include containerd and CRI-O), but Docker was not designed to be embedded inside Kubernetes, and that causes a problem. -You see, the thing we call “Docker” isn’t actually one thing -- it’s an entire +You see, the thing we call “Docker” isn’t actually one thing—it’s an entire tech stack, and one part of it is a thing called “containerd,” which is a high-level container runtime by itself. Docker is cool and useful because it has a lot of UX enhancements that make it really easy for humans to interact with @@ -66,11 +66,11 @@ does Kubernetes need the Dockershim? Docker isn’t compliant with CRI, the [Container Runtime Interface](https://kubernetes.io/blog/2016/12/container-runtime-interface-cri-in-kubernetes/). If it were, we wouldn’t need the shim, and this wouldn’t be a thing. 
But it’s -not the end of the world, and you don’t need to panic -- you just need to change +not the end of the world, and you don’t need to panic—you just need to change your container runtime from Docker to another supported container runtime. One thing to note: If you are relying on the underlying docker socket -(/var/run/docker.sock) as part of a workflow within your cluster today, moving +(`/var/run/docker.sock`) as part of a workflow within your cluster today, moving to a different runtime will break your ability to use it. This pattern is often called Docker in Docker. There are lots of options out there for this specific use case including things like @@ -82,10 +82,10 @@ use case including things like This change addresses a different environment than most folks use to interact with Docker. The Docker installation you’re using in development is unrelated to -the Docker runtime inside your Kubernetes cluster. It’s confusing, I know. As a -developer, Docker is still useful to you in all the ways it was before this +the Docker runtime inside your Kubernetes cluster. It’s confusing, we understand. +As a developer, Docker is still useful to you in all the ways it was before this change was announced. The image that Docker produces isn’t really a -Docker-specific image -- it’s an OCI ([Open Container Initiative](https://opencontainers.org/)) image. +Docker-specific image—it’s an OCI ([Open Container Initiative](https://opencontainers.org/)) image. Any OCI-compliant image, regardless of the tool you use to build it, will look the same to Kubernetes. Both [containerd](https://containerd.io/) and [CRI-O](https://cri-o.io/) know how to pull those images and run them. This is @@ -95,10 +95,10 @@ So, this change is coming. It’s going to cause issues for some, but it isn’t catastrophic, and generally it’s a good thing. Depending on how you interact with Kubernetes, this could mean nothing to you, or it could mean a bit of work. 
In the long run, it’s going to make things easier. If this is still confusing -for you, that’s okay -- there’s a lot going on here, Kubernetes has a lot of +for you, that’s okay—there’s a lot going on here; Kubernetes has a lot of moving parts, and nobody is an expert in 100% of it. We encourage any and all questions regardless of experience level or complexity! Our goal is to make sure -everyone is educated as much as possible on the upcoming changes. `<3` We hope -this has answered most of your questions and soothed some anxieties! +everyone is educated as much as possible on the upcoming changes. We hope +this has answered most of your questions and soothed some anxieties! ❤️ Looking for more answers? Check out our accompanying [Dockershim Deprecation FAQ](/blog/2020/12/02/dockershim-faq/). diff --git a/content/en/blog/_posts/2020-12-10-Kubernetes-Volume-Snapshot-Moves-to-GA.md b/content/en/blog/_posts/2020-12-10-Kubernetes-Volume-Snapshot-Moves-to-GA.md index de55f9c91a..0924cbcc91 100644 --- a/content/en/blog/_posts/2020-12-10-Kubernetes-Volume-Snapshot-Moves-to-GA.md +++ b/content/en/blog/_posts/2020-12-10-Kubernetes-Volume-Snapshot-Moves-to-GA.md @@ -52,10 +52,13 @@ Currently more than [50 CSI drivers](https://kubernetes-csi.github.io/docs/drive As of the publishing of this blog, the following participants from the [Kubernetes Data Protection Working Group](https://github.com/kubernetes/community/tree/master/wg-data-protection) are building products or have already built products using Kubernetes volume snapshots. 
- [Dell-EMC: PowerProtect](https://www.delltechnologies.com/en-us/data-protection/powerprotect-data-manager.htm) -- Druva +- [Druva](https://www.druva.com/) - [Kasten K10](https://www.kasten.io/) -- Pure Storage (Pure Service Orchestrator) -- Red Hat OpenShift Container Storage +- [NetApp: Project Astra](https://cloud.netapp.com/project-astra) +- [Portworx (PX-Backup)](https://portworx.com/products/px-backup/) +- [Pure Storage (Pure Service Orchestrator)](https://github.com/purestorage/pso-csi) +- [Red Hat OpenShift Container Storage](https://www.redhat.com/en/technologies/cloud-computing/openshift-container-storage) +- [Robin Cloud Native Storage](https://robin.io/storage/) - [TrilioVault for Kubernetes](https://docs.trilio.io/kubernetes/) - [Velero plugin for CSI](https://github.com/vmware-tanzu/velero-plugin-for-csi) @@ -198,7 +201,7 @@ There are many more people who have helped to move the snapshot feature from bet - [Grant Griffiths](https://github.com/ggriffiths) - [Humble Devassy Chirammal](https://github.com/humblec) - [Jan Šafránek](https://github.com/jsafrane) -- [Jiawei Wang](https://github.com/jiawei0277) +- [Jiawei Wang](https://github.com/Jiawei0227) - [Jing Xu](https://github.com/jingxu97) - [Jordan Liggitt](https://github.com/liggitt) - [Kartik Sharma](https://github.com/Kartik494) @@ -209,7 +212,7 @@ There are many more people who have helped to move the snapshot feature from bet - [Prafull Ladha](https://github.com/prafull01) - [Prateek Pandey](https://github.com/prateekpandey14) - [Raunak Shah](https://github.com/RaunakShah) -- [Saad Ali](https://github.com/saadali) +- [Saad Ali](https://github.com/saad-ali) - [Saikat Roychowdhury](https://github.com/saikat-royc) - [Tim Hockin](https://github.com/thockin) - [Xiangqian Yu](https://github.com/yuxiangqian) diff --git a/content/en/blog/_posts/2020-12-10-Pod-Impersonation-and-Short-lived-Volumes-in-CSI-Drivers.md 
b/content/en/blog/_posts/2020-12-10-Pod-Impersonation-and-Short-lived-Volumes-in-CSI-Drivers.md new file mode 100644 index 0000000000..ef20ad5210 --- /dev/null +++ b/content/en/blog/_posts/2020-12-10-Pod-Impersonation-and-Short-lived-Volumes-in-CSI-Drivers.md @@ -0,0 +1,53 @@ +--- +layout: blog +title: 'Kubernetes 1.20: Pod Impersonation and Short-lived Volumes in CSI Drivers' +date: 2020-12-18 +slug: kubernetes-1.20-pod-impersonation-short-lived-volumes-in-csi +--- + +**Author**: Shihang Zhang (Google) + +Typically when a [CSI](https://github.com/container-storage-interface/spec/blob/baa71a34651e5ee6cb983b39c03097d7aa384278/spec.md) driver mounts credentials such as secrets and certificates, it has to authenticate against storage providers to access the credentials. However, the access to those credentials are controlled on the basis of the pods' identities rather than the CSI driver's identity. CSI drivers, therefore, need some way to retrieve pod's service account token. + +Currently there are two suboptimal approaches to achieve this, either by granting CSI drivers the permission to use TokenRequest API or by reading tokens directly from the host filesystem. + +Both of them exhibit the following drawbacks: + +- Violating the principle of least privilege +- Every CSI driver needs to re-implement the logic of getting the pod’s service account token + +The second approach is more problematic due to: + +- The audience of the token defaults to the kube-apiserver +- The token is not guaranteed to be available (e.g. `AutomountServiceAccountToken=false`) +- The approach does not work for CSI drivers that run as a different (non-root) user from the pods. 
See [file permission section for service account token](https://github.com/kubernetes/enhancements/blob/f40c24a5da09390bd521be535b38a4dbab09380c/keps/sig-storage/20180515-svcacct-token-volumes.md#file-permission) +- The token might be legacy Kubernetes service account token which doesn’t expire if `BoundServiceAccountTokenVolume=false` + +Kubernetes 1.20 introduces an alpha feature, `CSIServiceAccountToken`, to improve the security posture. The new feature allows CSI drivers to receive pods' [bound service account tokens](https://github.com/kubernetes/enhancements/blob/master/keps/sig-auth/1205-bound-service-account-tokens/README.md). + +This feature also provides a knob to re-publish volumes so that short-lived volumes can be refreshed. + +## Pod Impersonation + +### Using GCP APIs + +Using [Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity), a Kubernetes service account can authenticate as a Google service account when accessing Google Cloud APIs. If a CSI driver needs to access GCP APIs on behalf of the pods that it is mounting volumes for, it can use the pod's service account token to [exchange for GCP tokens](https://cloud.google.com/iam/docs/reference/sts/rest). The pod's service account token is plumbed through the volume context in `NodePublishVolume` RPC calls when the feature `CSIServiceAccountToken` is enabled. For example: accessing [Google Secret Manager](https://cloud.google.com/secret-manager/) via a [secret store CSI driver](https://github.com/GoogleCloudPlatform/secrets-store-csi-driver-provider-gcp). + +### Using Vault + +If users configure [Kubernetes as an auth method](https://www.vaultproject.io/docs/auth/kubernetes), Vault uses the `TokenReview` API to validate the Kubernetes service account token. For CSI drivers using Vault as resources provider, they need to present the pod's service account to Vault. 
For example, [secrets store CSI driver](https://github.com/hashicorp/secrets-store-csi-driver-provider-vault) and [cert manager CSI driver](https://github.com/jetstack/cert-manager-csi). + +## Short-lived Volumes + +To keep short-lived volumes such as certificates effective, CSI drivers can specify `RequiresRepublish=true` in their`CSIDriver` object to have the kubelet periodically call `NodePublishVolume` on mounted volumes. These republishes allow CSI drivers to ensure that the volume content is up-to-date. + +## Next steps + +This feature is alpha and projected to move to beta in 1.21. See more in the following KEP and CSI documentation: + +- [KEP-1855: Service Account Token for CSI Driver](https://github.com/kubernetes/enhancements/blob/master/keps/sig-storage/1855-csi-driver-service-account-token/README.md) +- [Token Requests](https://kubernetes-csi.github.io/docs/token-requests.html) + +Your feedback is always welcome! +- SIG-Auth [meets regularly](https://github.com/kubernetes/community/tree/master/sig-auth#meetings) and can be reached via [Slack and the mailing list](https://github.com/kubernetes/community/tree/master/sig-auth#contact) +- SIG-Storage [meets regularly](https://github.com/kubernetes/community/tree/master/sig-storage#meetings) and can be reached via [Slack and the mailing list](https://github.com/kubernetes/community/tree/master/sig-storage#contact). 
diff --git a/content/en/blog/_posts/2020-12-14-Granular-Control-of-Volume-Permission-Changes.md b/content/en/blog/_posts/2020-12-14-Granular-Control-of-Volume-Permission-Changes.md new file mode 100644 index 0000000000..0a0ad0879c --- /dev/null +++ b/content/en/blog/_posts/2020-12-14-Granular-Control-of-Volume-Permission-Changes.md @@ -0,0 +1,59 @@ +--- +layout: blog +title: 'Kubernetes 1.20: Granular Control of Volume Permission Changes' +date: 2020-12-14 +slug: kubernetes-release-1.20-fsGroupChangePolicy-fsGroupPolicy +--- + +**Authors**: Hemant Kumar, Red Hat & Christian Huffman, Red Hat + +Kubernetes 1.20 brings two important beta features, allowing Kubernetes admins and users alike to have more adequate control over how volume permissions are applied when a volume is mounted inside a Pod. + +### Allow users to skip recursive permission changes on mount +Traditionally if your pod is running as a non-root user ([which you should](https://twitter.com/thockin/status/1333892204490735617)), you must specify a `fsGroup` inside the pod’s security context so that the volume can be readable and writable by the Pod. This requirement is covered in more detail in [here](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). + +But one side-effect of setting `fsGroup` is that, each time a volume is mounted, Kubernetes must recursively `chown()` and `chmod()` all the files and directories inside the volume - with a few exceptions noted below. This happens even if group ownership of the volume already matches the requested `fsGroup`, and can be pretty expensive for larger volumes with lots of small files, which causes pod startup to take a long time. This scenario has been a [known problem](https://github.com/kubernetes/kubernetes/issues/69699) for a while, and in Kubernetes 1.20 we are providing knobs to opt-out of recursive permission changes if the volume already has the correct permissions. 
+ +When configuring a pod’s security context, set `fsGroupChangePolicy` to "OnRootMismatch" so if the root of the volume already has the correct permissions, the recursive permission change can be skipped. Kubernetes ensures that permissions of the top-level directory are changed last the first time it applies permissions. + +```yaml +securityContext: + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 + fsGroupChangePolicy: "OnRootMismatch" +``` +You can learn more about this in [Configure volume permission and ownership change policy for Pods](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#configure-volume-permission-and-ownership-change-policy-for-pods). + +### Allow CSI Drivers to declare support for fsGroup based permissions + +Although the previous section implied that Kubernetes _always_ recursively changes permissions of a volume if a Pod has a `fsGroup`, this is not strictly true. For certain multi-writer volume types, such as NFS or Gluster, the cluster doesn’t perform recursive permission changes even if the pod has a `fsGroup`. Other volume types may not even support `chown()`/`chmod()`, which rely on Unix-style permission control primitives. + +So how do we know when to apply recursive permission changes and when we shouldn't? For in-tree storage drivers, this was relatively simple. For [CSI](https://kubernetes-csi.github.io/docs/introduction.html#introduction) drivers that could span a multitude of platforms and storage types, this problem can be a bigger challenge. + +Previously, whenever a CSI volume was mounted to a Pod, Kubernetes would attempt to automatically determine if the permissions and ownership should be modified. These methods were imprecise and could cause issues as we already mentioned, depending on the storage type. + +The CSIDriver custom resource now has a `.spec.fsGroupPolicy` field, allowing storage drivers to explicitly opt in or out of these recursive modifications. 
By having the CSI driver specify a policy for the backing volumes, Kubernetes can avoid needless modification attempts. This optimization helps to reduce volume mount time and also cuts own reporting errors about modifications that would never succeed. + +#### CSIDriver FSGroupPolicy API + +Three FSGroupPolicy values are available as of Kubernetes 1.20, with more planned for future releases. + +- **ReadWriteOnceWithFSType** - This is the default policy, applied if no `fsGroupPolicy` is defined; this preserves the behavior from previous Kubernetes releases. Each volume is examined at mount time to determine if permissions should be recursively applied. +- **File** - Always attempt to apply permission modifications, regardless of the filesystem type or PersistentVolumeClaim’s access mode. +- **None** - Never apply permission modifications. + +#### How do I use it? +The only configuration needed is defining `fsGroupPolicy` inside of the `.spec` for a CSIDriver. Once that element is defined, any subsequently mounted volumes will automatically use the defined policy. There’s no additional deployment required! + +#### What’s next? + +Depending on feedback and adoption, the Kubernetes team plans to push these implementations to GA in either 1.21 or 1.22. + +### How can I learn more? +This feature is explained in more detail in Kubernetes project documentation: [CSI Driver fsGroup Support](https://kubernetes-csi.github.io/docs/support-fsgroup.html) and [Configure volume permission and ownership change policy for Pods ](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#configure-volume-permission-and-ownership-change-policy-for-pods). + +### How do I get involved? 
+The [Kubernetes Slack channel #csi](https://kubernetes.slack.com/messages/csi) and any of the [standard SIG Storage communication channels](https://github.com/kubernetes/community/blob/master/sig-storage/README.md#contact) are great mediums to reach out to the SIG Storage and the CSI team. + +Those interested in getting involved with the design and development of CSI or any part of the Kubernetes Storage system, join the [Kubernetes Storage Special Interest Group (SIG)](https://github.com/kubernetes/community/tree/master/sig-storage). We’re rapidly growing and always welcome new contributors. diff --git a/content/en/blog/_posts/2020-12-16-third-party-device-metrics-hits-ga.md b/content/en/blog/_posts/2020-12-16-third-party-device-metrics-hits-ga.md new file mode 100644 index 0000000000..a4fa02e4fc --- /dev/null +++ b/content/en/blog/_posts/2020-12-16-third-party-device-metrics-hits-ga.md @@ -0,0 +1,134 @@ +--- +layout: blog +title: 'Third Party Device Metrics Reaches GA' +date: 2020-12-16 +slug: third-party-device-metrics-reaches-ga +--- + + **Authors:** Renaud Gaubert (NVIDIA), David Ashpole (Google), and Pramod Ramarao (NVIDIA) + + With Kubernetes 1.20, infrastructure teams who manage large scale Kubernetes clusters, are seeing the graduation of two exciting and long awaited features: + * The Pod Resources API (introduced in 1.13) is finally graduating to GA. This allows Kubernetes plugins to obtain information about the node’s resource usage and assignment; for example: which pod/container consumes which device. + * The `DisableAcceleratorMetrics` feature (introduced in 1.19) is graduating to beta and will be enabled by default. This removes device metrics reported by the kubelet in favor of the new plugin architecture. + +Many of the features related to fundamental device support (device discovery, plugin, and monitoring) are reaching a strong level of stability. 
+Kubernetes users should see these features as stepping stones to enable more complex use cases (networking, scheduling, storage, etc.)! + +One such example is Non Uniform Memory Access (NUMA) placement where, when selecting a device, an application typically wants to ensure that data transfer between CPU Memory and Device Memory is as fast as possible. In some cases, incorrect NUMA placement can nullify the benefit of offloading compute to an external device. + +If these are topics of interest to you, consider joining the [Kubernetes Node Special Insterest Group](https://github.com/kubernetes/community/tree/master/sig-node) (SIG) for all topics related to the Kubernetes node, the COD (container orchestrated device) workgroup for topics related to runtimes, or the resource management forum for topics related to resource management! + +## The Pod Resources API - Why does it need to exist? + +Kubernetes is a vendor neutral platform. If we want it to support device monitoring, adding vendor-specific code in the Kubernetes code base is not an ideal solution. Ultimately, devices are a domain where deep expertise is needed and the best people to add and maintain code in that area are the device vendors themselves. + +The Pod Resources API was built as a solution to this issue. Each vendor can build and maintain their own out-of-tree monitoring plugin. This monitoring plugin, often deployed as a separate pod within a cluster, can then associate the metrics a device emits with the associated pod that's using it. + +For example, use the NVIDIA GPU dcgm-exporter to scrape metrics in Prometheus format: + +``` +$ curl -sL http://127.0.01:8080/metrics + + +# HELP DCGM_FI_DEV_SM_CLOCK SM clock frequency (in MHz). +# TYPE DCGM_FI_DEV_SM_CLOCK gauge +# HELP DCGM_FI_DEV_MEM_CLOCK Memory clock frequency (in MHz). +# TYPE DCGM_FI_DEV_MEM_CLOCK gauge +# HELP DCGM_FI_DEV_MEMORY_TEMP Memory temperature (in C). +# TYPE DCGM_FI_DEV_MEMORY_TEMP gauge +... 
+DCGM_FI_DEV_SM_CLOCK{gpu="0", UUID="GPU-604ac76c-d9cf-fef3-62e9-d92044ab6e52",container="foo",namespace="bar",pod="baz"} 139 +DCGM_FI_DEV_MEM_CLOCK{gpu="0", UUID="GPU-604ac76c-d9cf-fef3-62e9-d92044ab6e52",container="foo",namespace="bar",pod="baz"} 405 +DCGM_FI_DEV_MEMORY_TEMP{gpu="0", UUID="GPU-604ac76c-d9cf-fef3-62e9-d92044ab6e52",container="foo",namespace="bar",pod="baz"} 9223372036854775794 +``` + +Each agent is expected to adhere to the node monitoring guidelines. In other words, plugins are expected to generate metrics in Prometheus format, and new metrics should not have any dependency on the Kubernetes base directly. + +This allows consumers of the metrics to use a compatible monitoring pipeline to collect and analyze metrics from a variety of agents, even if they are maintained by different vendors. + +![Device metrics flowchart](/images/blog/2020-12-16-third-party-device-metrics-hits-ga/metrics-chart.png) + +## Disabling the NVIDIA GPU metrics - Warning {#nvidia-gpu-metrics-deprecated} + +With the graduation of the plugin monitoring system, Kubernetes is deprecating the NVIDIA GPU metrics that are being reported by the kubelet. + +With the [DisableAcceleratorMetrics](/docs/concepts/cluster-administration/system-metrics/#disable-accelerator-metrics) feature being enabled by default in Kubernetes 1.20, NVIDIA GPUs are no longer special citizens in Kubernetes. This is a good thing in the spirit of being vendor-neutral, and enables the most suited people to maintain their plugin on their own release schedule! + +Users will now need to either install the [NVIDIA DCGM exporter](https://github.com/NVIDIA/gpu-monitoring-tools) or use [bindings](https://github.com/nvidia/go-nvml) to gather more accurate and complete metrics about NVIDIA GPUs. 
This deprecation means that you can no longer rely on metrics that were reported by kubelet, such as `container_accelerator_duty_cycle` or `container_accelerator_memory_used_bytes` which were used to gather NVIDIA GPU memory utilization. + +This means that users who used to rely on the NVIDIA GPU metrics reported by the kubelet, will need to update their reference and deploy the NVIDIA plugin. Namely the different metrics reported by Kubernetes map to the following metrics: + +| Kubernetes Metrics | NVIDIA dcgm-exporter metric | +| ------------------------------------------ | ------------------------------------------- | +| `container_accelerator_duty_cycle` | `DCGM_FI_DEV_GPU_UTIL` | +| `container_accelerator_memory_used_bytes` | `DCGM_FI_DEV_FB_USED` | +| `container_accelerator_memory_total_bytes` | `DCGM_FI_DEV_FB_FREE + DCGM_FI_DEV_FB_USED` | + +You might also be interested in other metrics such as `DCGM_FI_DEV_GPU_TEMP` (the GPU temperature) or `DCGM_FI_DEV_POWER_USAGE` (the power usage). The [default set](https://github.com/NVIDIA/gpu-monitoring-tools/blob/d5c9bb55b4d1529ca07068b7f81e690921ce2b59/etc/dcgm-exporter/default-counters.csv) is available in Nvidia's [Data Center GPU Manager documentation](https://docs.nvidia.com/datacenter/dcgm/latest/dcgm-api/group__dcgmFieldIdentifiers.html). + +Note that for this release you can still set the `DisableAcceleratorMetrics` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) to _false_, effectively re-enabling the ability for the kubelet to report NVIDIA GPU metrics. + +Paired with the graduation of the Pod Resources API, these tools can be used to generate GPU telemetry [that can be used in visualization dashboards](https://grafana.com/grafana/dashboards/12239), below is an example: + +![Grafana visualization of device metrics](/images/blog/2020-12-16-third-party-device-metrics-hits-ga/grafana.png) + +## The Pod Resources API - What can I go on to do with this? 
+ +As soon as this interface was introduced, many vendors started using it for widely different use cases! To list a few examples: + +The [kuryr-kubernetes](https://github.com/openstack/kuryr-kubernetes) CNI plugin in tandem with [intel-sriov-device-plugin](https://github.com/intel/sriov-network-device-plugin). This allowed the CNI plugin to know which allocation of SR-IOV Virtual Functions (VFs) the kubelet made and use that information to correctly set up the container network namespace and use a device with the appropriate NUMA node. We also expect this interface to be used to track the allocated and available resources with information about the NUMA topology of the worker node. + +Another use-case is GPU telemetry, where GPU metrics can be associated with the containers and pods that the GPU is assigned to. One such example is the NVIDIA `dcgm-exporter`, but others can be easily built in the same paradigm. + +The Pod Resources API is a simple gRPC service which informs clients of the pods the kubelet knows. The information concerns the device assignments the kubelet made and the assignment of CPUs. This information is obtained from the internal state of the kubelet's Device Manager and CPU Manager respectively. 
+ +You can see below an example of the API and how a Go client could use that information in a few lines: + +``` +service PodResourcesLister { + rpc List(ListPodResourcesRequest) returns (ListPodResourcesResponse) {} + rpc GetAllocatableResources(AllocatableResourcesRequest) returns (AllocatableResourcesResponse) {} + + // Kubernetes 1.21 + rpc Watch(WatchPodResourcesRequest) returns (stream WatchPodResourcesResponse) {} +} +``` + +```go +func main() { + ctx, cancel := context.WithTimeout(context.Background(), connectionTimeout) + defer cancel() + + socket := "/var/lib/kubelet/pod-resources/kubelet.sock" + conn, err := grpc.DialContext(ctx, socket, grpc.WithInsecure(), grpc.WithBlock(), + grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout("unix", addr, timeout) + }), + ) + + if err != nil { + panic(err) + } + + client := podresourcesapi.NewPodResourcesListerClient(conn) + resp, err := client.List(ctx, &podresourcesapi.ListPodResourcesRequest{}) + if err != nil { + panic(err) + } + fmt.Printf("%+v\n", resp) +} +``` + +Finally, note that you can watch the number of requests made to the Pod Resources endpoint by watching the new kubelet metric called `pod_resources_endpoint_requests_total` on the kubelet's `/metrics` endpoint. + +## Is device monitoring suitable for production? Can I extend it? Can I contribute? + +Yes! This feature released in 1.13, almost 2 years ago, has seen broad adoption, is already used by different cloud managed services, and with its graduation to GA in Kubernetes 1.20 is production ready! + +If you are a device vendor, you can start using it today! If you just want to monitor the devices in your cluster, go get the latest version of your monitoring plugin! + +If you feel passionate about that area, join the Kubernetes community, help improve the API or contribute the device monitoring plugins! 
+ +## Acknowledgements + +We thank the members of the community who have contributed to this feature or given feedback including members of WG-Resource-Management, SIG-Node and the Resource management forum! \ No newline at end of file diff --git a/content/en/blog/_posts/image01.png b/content/en/blog/_posts/image01.png new file mode 100644 index 0000000000..91e8856139 Binary files /dev/null and b/content/en/blog/_posts/image01.png differ diff --git a/content/en/blog/_posts/image02.png b/content/en/blog/_posts/image02.png new file mode 100644 index 0000000000..dfd14d7cdc Binary files /dev/null and b/content/en/blog/_posts/image02.png differ diff --git a/content/en/blog/_posts/image03.png b/content/en/blog/_posts/image03.png new file mode 100644 index 0000000000..443a6f2d67 Binary files /dev/null and b/content/en/blog/_posts/image03.png differ diff --git a/content/en/blog/_posts/image04.png b/content/en/blog/_posts/image04.png new file mode 100644 index 0000000000..e107adc88b Binary files /dev/null and b/content/en/blog/_posts/image04.png differ diff --git a/content/en/blog/_posts/image05.png b/content/en/blog/_posts/image05.png new file mode 100644 index 0000000000..6d80447d09 Binary files /dev/null and b/content/en/blog/_posts/image05.png differ diff --git a/content/en/blog/_posts/image06.png b/content/en/blog/_posts/image06.png new file mode 100644 index 0000000000..d40b2eb0b6 Binary files /dev/null and b/content/en/blog/_posts/image06.png differ diff --git a/content/en/blog/_posts/image07.png b/content/en/blog/_posts/image07.png new file mode 100644 index 0000000000..fc3976040f Binary files /dev/null and b/content/en/blog/_posts/image07.png differ diff --git a/content/en/case-studies/appdirect/index.html b/content/en/case-studies/appdirect/index.html index ffb06c67b9..dbb902d06b 100644 --- a/content/en/case-studies/appdirect/index.html +++ b/content/en/case-studies/appdirect/index.html @@ -13,7 +13,7 @@ new_case_study_styles: true heading_background: 
/images/case-studies/appdirect/banner1.jpg heading_title_logo: /images/appdirect_logo.png subheading: > - AppDirect: How AppDirect Supported the 10x Growth of Its Engineering Staff with Kubernetess + AppDirect: How AppDirect Supported the 10x Growth of Its Engineering Staff with Kubernetes case_study_details: - Company: AppDirect - Location: San Francisco, California diff --git a/content/en/community/static/cncf-code-of-conduct.md b/content/en/community/static/cncf-code-of-conduct.md index 05aefcc9fe..d07444c418 100644 --- a/content/en/community/static/cncf-code-of-conduct.md +++ b/content/en/community/static/cncf-code-of-conduct.md @@ -37,8 +37,8 @@ when an individual is representing the project or its community. Instances of abusive, harassing, or otherwise unacceptable behavior in Kubernetes may be reported by contacting the [Kubernetes Code of Conduct Committee](https://git.k8s.io/community/committee-code-of-conduct) via . For other projects, please contact a CNCF project maintainer or our mediator, Mishi Choudhary . This Code of Conduct is adapted from the Contributor Covenant -(http://contributor-covenant.org), version 1.2.0, available at -http://contributor-covenant.org/version/1/2/0/ +(https://contributor-covenant.org), version 1.2.0, available at +https://contributor-covenant.org/version/1/2/0/ ### CNCF Events Code of Conduct diff --git a/content/en/docs/concepts/architecture/nodes.md b/content/en/docs/concepts/architecture/nodes.md index a9abf48856..7bd4b355b6 100644 --- a/content/en/docs/concepts/architecture/nodes.md +++ b/content/en/docs/concepts/architecture/nodes.md @@ -242,7 +242,7 @@ checks the state of each node every `--node-monitor-period` seconds. Heartbeats, sent by Kubernetes nodes, help determine the availability of a node. There are two forms of heartbeats: updates of `NodeStatus` and the -[Lease object](/docs/reference/generated/kubernetes-api/{{< latest-version >}}/#lease-v1-coordination-k8s-io). 
+[Lease object](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#lease-v1-coordination-k8s-io). Each Node has an associated Lease object in the `kube-node-lease` {{< glossary_tooltip term_id="namespace" text="namespace">}}. Lease is a lightweight resource, which improves the performance diff --git a/content/en/docs/concepts/cluster-administration/certificates.md b/content/en/docs/concepts/cluster-administration/certificates.md index 8cc45252ec..6314420c01 100644 --- a/content/en/docs/concepts/cluster-administration/certificates.md +++ b/content/en/docs/concepts/cluster-administration/certificates.md @@ -130,11 +130,11 @@ Finally, add the same parameters into the API server start parameters. Note that you may need to adapt the sample commands based on the hardware architecture and cfssl version you are using. - curl -L https://github.com/cloudflare/cfssl/releases/download/v1.4.1/cfssl_1.4.1_linux_amd64 -o cfssl + curl -L https://github.com/cloudflare/cfssl/releases/download/v1.5.0/cfssl_1.5.0_linux_amd64 -o cfssl chmod +x cfssl - curl -L https://github.com/cloudflare/cfssl/releases/download/v1.4.1/cfssljson_1.4.1_linux_amd64 -o cfssljson + curl -L https://github.com/cloudflare/cfssl/releases/download/v1.5.0/cfssljson_1.5.0_linux_amd64 -o cfssljson chmod +x cfssljson - curl -L https://github.com/cloudflare/cfssl/releases/download/v1.4.1/cfssl-certinfo_1.4.1_linux_amd64 -o cfssl-certinfo + curl -L https://github.com/cloudflare/cfssl/releases/download/v1.5.0/cfssl-certinfo_1.5.0_linux_amd64 -o cfssl-certinfo chmod +x cfssl-certinfo 1. 
Create a directory to hold the artifacts and initialize cfssl: diff --git a/content/en/docs/concepts/cluster-administration/flow-control.md b/content/en/docs/concepts/cluster-administration/flow-control.md index 9c1bcb817a..6cb4c386d3 100644 --- a/content/en/docs/concepts/cluster-administration/flow-control.md +++ b/content/en/docs/concepts/cluster-administration/flow-control.md @@ -527,5 +527,5 @@ When you enable the API Priority and Fairness feature, the kube-apiserver serves For background information on design details for API priority and fairness, see the [enhancement proposal](https://github.com/kubernetes/enhancements/blob/master/keps/sig-api-machinery/20190228-priority-and-fairness.md). -You can make suggestions and feature requests via [SIG API -Machinery](https://github.com/kubernetes/community/tree/master/sig-api-machinery). +You can make suggestions and feature requests via [SIG API Machinery](https://github.com/kubernetes/community/tree/master/sig-api-machinery) +or the feature's [slack channel](http://kubernetes.slack.com/messages/api-priority-and-fairness). diff --git a/content/en/docs/concepts/cluster-administration/kubelet-garbage-collection.md b/content/en/docs/concepts/cluster-administration/kubelet-garbage-collection.md index 9f6dc7599d..ea51a566ac 100644 --- a/content/en/docs/concepts/cluster-administration/kubelet-garbage-collection.md +++ b/content/en/docs/concepts/cluster-administration/kubelet-garbage-collection.md @@ -1,13 +1,13 @@ --- reviewers: -title: Configuring kubelet Garbage Collection +title: Garbage collection for container images content_type: concept weight: 70 --- -Garbage collection is a helpful function of kubelet that will clean up unused images and unused containers. Kubelet will perform garbage collection for containers every minute and garbage collection for images every five minutes. 
+Garbage collection is a helpful function of kubelet that will clean up unused [images](/docs/concepts/containers/#container-images) and unused [containers](/docs/concepts/containers/). Kubelet will perform garbage collection for containers every minute and garbage collection for images every five minutes. External garbage collection tools are not recommended as these tools can potentially break the behavior of kubelet by removing containers expected to exist. diff --git a/content/en/docs/concepts/cluster-administration/networking.md b/content/en/docs/concepts/cluster-administration/networking.md index 15f4222224..c517b13175 100644 --- a/content/en/docs/concepts/cluster-administration/networking.md +++ b/content/en/docs/concepts/cluster-administration/networking.md @@ -114,7 +114,7 @@ Additionally, the CNI can be run alongside [Calico for network policy enforcemen ### Azure CNI for Kubernetes [Azure CNI](https://docs.microsoft.com/en-us/azure/virtual-network/container-networking-overview) is an [open source](https://github.com/Azure/azure-container-networking/blob/master/docs/cni.md) plugin that integrates Kubernetes Pods with an Azure Virtual Network (also known as VNet) providing network performance at par with VMs. Pods can connect to peered VNet and to on-premises over Express Route or site-to-site VPN and are also directly reachable from these networks. Pods can access Azure services, such as storage and SQL, that are protected by Service Endpoints or Private Link. You can use VNet security policies and routing to filter Pod traffic. The plugin assigns VNet IPs to Pods by utilizing a pool of secondary IPs pre-configured on the Network Interface of a Kubernetes node. -Azure CNI is available natively in the [Azure Kubernetes Service (AKS)] (https://docs.microsoft.com/en-us/azure/aks/configure-azure-cni). +Azure CNI is available natively in the [Azure Kubernetes Service (AKS)](https://docs.microsoft.com/en-us/azure/aks/configure-azure-cni). 
### Big Cloud Fabric from Big Switch Networks diff --git a/content/en/docs/concepts/cluster-administration/system-metrics.md b/content/en/docs/concepts/cluster-administration/system-metrics.md index e2e037ff88..3c7e137ded 100644 --- a/content/en/docs/concepts/cluster-administration/system-metrics.md +++ b/content/en/docs/concepts/cluster-administration/system-metrics.md @@ -50,39 +50,41 @@ rules: ## Metric lifecycle -Alpha metric → Stable metric → Deprecated metric → Hidden metric → Deletion +Alpha metric → Stable metric → Deprecated metric → Hidden metric → Deleted metric -Alpha metrics have no stability guarantees; as such they can be modified or deleted at any time. +Alpha metrics have no stability guarantees. These metrics can be modified or deleted at any time. -Stable metrics can be guaranteed to not change; Specifically, stability means: +Stable metrics are guaranteed to not change. This means: +* A stable metric without a deprecated signature will not be deleted or renamed +* A stable metric's type will not be modified -* the metric itself will not be deleted (or renamed) -* the type of metric will not be modified +Deprecated metrics are slated for deletion, but are still available for use. +These metrics include an annotation about the version in which they became deprecated. -Deprecated metric signal that the metric will eventually be deleted; to find which version, you need to check annotation, which includes from which kubernetes version that metric will be considered deprecated. 
+For example: -Before deprecation: +* Before deprecation -``` -# HELP some_counter this counts things -# TYPE some_counter counter -some_counter 0 -``` + ``` + # HELP some_counter this counts things + # TYPE some_counter counter + some_counter 0 + ``` -After deprecation: +* After deprecation -``` -# HELP some_counter (Deprecated since 1.15.0) this counts things -# TYPE some_counter counter -some_counter 0 -``` + ``` + # HELP some_counter (Deprecated since 1.15.0) this counts things + # TYPE some_counter counter + some_counter 0 + ``` -Once a metric is hidden then by default the metrics is not published for scraping. To use a hidden metric, you need to override the configuration for the relevant cluster component. +Hidden metrics are no longer published for scraping, but are still available for use. To use a hidden metric, please refer to the [Show hidden metrics](#show-hidden-metrics) section. -Once a metric is deleted, the metric is not published. You cannot change this using an override. +Deleted metrics are no longer published and cannot be used. -## Show Hidden Metrics +## Show hidden metrics As described above, admins can enable hidden metrics through a command-line flag on a specific binary. This intends to be used as an escape hatch for admins if they missed the migration of the metrics deprecated in the last release. @@ -154,5 +156,4 @@ endpoint on the scheduler. 
You must use the `--show-hidden-metrics-for-version=1 ## {{% heading "whatsnext" %}} * Read about the [Prometheus text format](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#text-based-format) for metrics -* See the list of [stable Kubernetes metrics](https://github.com/kubernetes/kubernetes/blob/master/test/instrumentation/testdata/stable-metrics-list.yaml) * Read about the [Kubernetes deprecation policy](/docs/reference/using-api/deprecation-policy/#deprecating-a-feature-or-behavior) diff --git a/content/en/docs/concepts/configuration/configmap.md b/content/en/docs/concepts/configuration/configmap.md index d9889604ce..9a134dfc99 100644 --- a/content/en/docs/concepts/configuration/configmap.md +++ b/content/en/docs/concepts/configuration/configmap.md @@ -40,7 +40,7 @@ separate database or file service. A ConfigMap is an API [object](/docs/concepts/overview/working-with-objects/kubernetes-objects/) that lets you store configuration for other objects to use. Unlike most Kubernetes objects that have a `spec`, a ConfigMap has `data` and `binaryData` -fields. These fields accepts key-value pairs as their values. Both the `data` +fields. These fields accept key-value pairs as their values. Both the `data` field and the `binaryData` are optional. The `data` field is designed to contain UTF-8 byte sequences while the `binaryData` field is designed to contain binary data. 
diff --git a/content/en/docs/concepts/configuration/manage-resources-containers.md b/content/en/docs/concepts/configuration/manage-resources-containers.md index d267b83dd2..2668050d26 100644 --- a/content/en/docs/concepts/configuration/manage-resources-containers.md +++ b/content/en/docs/concepts/configuration/manage-resources-containers.md @@ -396,7 +396,7 @@ The kubelet supports different ways to measure Pod storage use: {{< tabs name="resource-emphemeralstorage-measurement" >}} {{% tab name="Periodic scanning" %}} -The kubelet performs regular, schedules checks that scan each +The kubelet performs regular, scheduled checks that scan each `emptyDir` volume, container log directory, and writeable container layer. The scan measures how much space is used. diff --git a/content/en/docs/concepts/configuration/overview.md b/content/en/docs/concepts/configuration/overview.md index 5882ce95dc..2239fe08ac 100644 --- a/content/en/docs/concepts/configuration/overview.md +++ b/content/en/docs/concepts/configuration/overview.md @@ -69,6 +69,8 @@ A Service can be made to span multiple Deployments by omitting release-specific A desired state of an object is described by a Deployment, and if changes to that spec are _applied_, the deployment controller changes the actual state to the desired state at a controlled rate. +- Use the [Kubernetes common labels](/docs/concepts/overview/working-with-objects/common-labels/) for common use cases. These standardized labels enrich the metadata in a way that allows tools, including `kubectl` and [dashboard](/docs/tasks/access-application-cluster/web-ui-dashboard), to work in an interoperable way. + - You can manipulate labels for debugging. Because Kubernetes controllers (such as ReplicaSet) and Services match to Pods using selector labels, removing the relevant labels from a Pod will stop it from being considered by a controller or from being served traffic by a Service. 
If you remove the labels of an existing Pod, its controller will create a new Pod to take its place. This is a useful way to debug a previously "live" Pod in a "quarantine" environment. To interactively remove or add labels, use [`kubectl label`](/docs/reference/generated/kubectl/kubectl-commands#label). ## Container Images diff --git a/content/en/docs/concepts/configuration/secret.md b/content/en/docs/concepts/configuration/secret.md index d2bfe274c2..e0512f8b9b 100644 --- a/content/en/docs/concepts/configuration/secret.md +++ b/content/en/docs/concepts/configuration/secret.md @@ -24,6 +24,16 @@ a password, a token, or a key. Such information might otherwise be put in a Pod specification or in an image. Users can create Secrets and the system also creates some Secrets. +{{< caution >}} +Kubernetes Secrets are, by default, stored as unencrypted base64-encoded +strings. By default they can be retrieved - as plain text - by anyone with API +access, or anyone with access to Kubernetes' underlying data store, etcd. In +order to safely use Secrets, we recommend you (at a minimum): + +1. [Enable Encryption at Rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/) for Secrets. +2. [Enable RBAC rules that restrict reading and writing the Secret](https://kubernetes.io/docs/reference/access-authn-authz/authorization/). Be aware that secrets can be obtained implicitly by anyone with the permission to create a Pod. +{{< /caution >}} + ## Overview of Secrets @@ -271,6 +281,13 @@ However, using the builtin Secret type helps unify the formats of your credentia and the API server does verify if the required keys are provided in a Secret configuration. +{{< caution >}} +SSH private keys do not establish trusted communication between an SSH client and +host server on their own. A secondary means of establishing trust is needed to +mitigate "man in the middle" attacks, such as a `known_hosts` file added to a +ConfigMap. 
+{{< /caution >}} + ### TLS secrets Kubernetes provides a builtin Secret type `kubernetes.io/tls` for to storing @@ -351,7 +368,7 @@ data: A bootstrap type Secret has the following keys specified under `data`: -- `token_id`: A random 6 character string as the token identifier. Required. +- `token-id`: A random 6 character string as the token identifier. Required. - `token-secret`: A random 16 character string as the actual token secret. Required. - `description`: A human-readable string that describes what the token is used for. Optional. @@ -769,7 +786,7 @@ these pods. The `imagePullSecrets` field is a list of references to secrets in the same namespace. You can use an `imagePullSecrets` to pass a secret that contains a Docker (or other) image registry password to the kubelet. The kubelet uses this information to pull a private image on behalf of your Pod. -See the [PodSpec API](/docs/reference/generated/kubernetes-api/{{< latest-version >}}/#podspec-v1-core) for more information about the `imagePullSecrets` field. +See the [PodSpec API](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core) for more information about the `imagePullSecrets` field. #### Manually specifying an imagePullSecret @@ -788,7 +805,6 @@ See [Add ImagePullSecrets to a service account](/docs/tasks/configure-pod-contai Manually created secrets (for example, one containing a token for accessing a GitHub account) can be automatically attached to pods based on their service account. -See [Injecting Information into Pods Using a PodPreset](/docs/tasks/inject-data-application/podpreset/) for a detailed explanation of that process. ## Details diff --git a/content/en/docs/concepts/containers/runtime-class.md b/content/en/docs/concepts/containers/runtime-class.md index 1ba631e350..ace905c657 100644 --- a/content/en/docs/concepts/containers/runtime-class.md +++ b/content/en/docs/concepts/containers/runtime-class.md @@ -37,10 +37,10 @@ but with different settings. 
Ensure the RuntimeClass feature gate is enabled (it is by default). See [Feature Gates](/docs/reference/command-line-tools-reference/feature-gates/) for an explanation of enabling -feature gates. The `RuntimeClass` feature gate must be enabled on apiservers _and_ kubelets. +feature gates. The `RuntimeClass` feature gate must be enabled on API server _and_ kubelets. -1. Configure the CRI implementation on nodes (runtime dependent) -2. Create the corresponding RuntimeClass resources +1. Configure the CRI implementation on nodes (runtime dependent). +2. Create the corresponding RuntimeClass resources. ### 1. Configure the CRI implementation on nodes @@ -51,7 +51,7 @@ CRI implementation for how to configure. {{< note >}} RuntimeClass assumes a homogeneous node configuration across the cluster by default (which means that all nodes are configured the same way with respect to container runtimes). To support -heterogenous node configurations, see [Scheduling](#scheduling) below. +heterogeneous node configurations, see [Scheduling](#scheduling) below. {{< /note >}} The configurations have a corresponding `handler` name, referenced by the RuntimeClass. The @@ -98,7 +98,7 @@ spec: # ... ``` -This will instruct the Kubelet to use the named RuntimeClass to run this pod. If the named +This will instruct the kubelet to use the named RuntimeClass to run this pod. If the named RuntimeClass does not exist, or the CRI cannot run the corresponding handler, the pod will enter the `Failed` terminal [phase](/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase). 
Look for a corresponding [event](/docs/tasks/debug-application-cluster/debug-application-introspection/) for an @@ -144,7 +144,7 @@ See CRI-O's [config documentation](https://raw.githubusercontent.com/cri-o/cri-o {{< feature-state for_k8s_version="v1.16" state="beta" >}} -As of Kubernetes v1.16, RuntimeClass includes support for heterogenous clusters through its +As of Kubernetes v1.16, RuntimeClass includes support for heterogeneous clusters through its `scheduling` fields. Through the use of these fields, you can ensure that pods running with this RuntimeClass are scheduled to nodes that support it. To use the scheduling support, you must have the [RuntimeClass admission controller](/docs/reference/access-authn-authz/admission-controllers/#runtimeclass) diff --git a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md index e2b7ab1f34..8b1747b857 100644 --- a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md +++ b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md @@ -201,7 +201,7 @@ Monitoring agents for device plugin resources can be deployed as a daemon, or as The canonical directory `/var/lib/kubelet/pod-resources` requires privileged access, so monitoring agents must run in a privileged security context. If a device monitoring agent is running as a DaemonSet, `/var/lib/kubelet/pod-resources` must be mounted as a -{{< glossary_tooltip term_id="volume" >}} in the plugin's +{{< glossary_tooltip term_id="volume" >}} in the device monitoring agent's [PodSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core). Support for the "PodResources service" requires `KubeletPodResources` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) to be enabled. 
diff --git a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md index 0384754e35..7b53fa326f 100644 --- a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md +++ b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md @@ -159,7 +159,7 @@ This option is provided to the network-plugin; currently **only kubenet supports ## Usage Summary * `--network-plugin=cni` specifies that we use the `cni` network plugin with actual CNI plugin binaries located in `--cni-bin-dir` (default `/opt/cni/bin`) and CNI plugin configuration located in `--cni-conf-dir` (default `/etc/cni/net.d`). -* `--network-plugin=kubenet` specifies that we use the `kubenet` network plugin with CNI `bridge` and `host-local` plugins placed in `/opt/cni/bin` or `cni-bin-dir`. +* `--network-plugin=kubenet` specifies that we use the `kubenet` network plugin with CNI `bridge`, `lo` and `host-local` plugins placed in `/opt/cni/bin` or `cni-bin-dir`. * `--network-plugin-mtu=9001` specifies the MTU to use, currently only used by the `kubenet` network plugin. ## {{% heading "whatsnext" %}} diff --git a/content/en/docs/concepts/extend-kubernetes/operator.md b/content/en/docs/concepts/extend-kubernetes/operator.md index 140cf2557a..0e9d227a53 100644 --- a/content/en/docs/concepts/extend-kubernetes/operator.md +++ b/content/en/docs/concepts/extend-kubernetes/operator.md @@ -103,7 +103,7 @@ as well as keeping the existing service in good shape. ## Writing your own Operator {#writing-operator} If there isn't an Operator in the ecosystem that implements the behavior you -want, you can code your own. In [What's next](#whats-next) you'll find a few +want, you can code your own. In [What's next](#what-s-next) you'll find a few links to libraries and tools you can use to write your own cloud native Operator. 
diff --git a/content/en/docs/concepts/overview/kubernetes-api.md b/content/en/docs/concepts/overview/kubernetes-api.md index 506c76cde7..07b5d559d7 100644 --- a/content/en/docs/concepts/overview/kubernetes-api.md +++ b/content/en/docs/concepts/overview/kubernetes-api.md @@ -19,7 +19,7 @@ is the {{< glossary_tooltip text="API server" term_id="kube-apiserver" >}}. The exposes an HTTP API that lets end users, different parts of your cluster, and external components communicate with one another. -The Kubernetes API lets you query and manipulate the state of objects in the Kubernetes API +The Kubernetes API lets you query and manipulate the state of API objects in Kubernetes (for example: Pods, Namespaces, ConfigMaps, and Events). Most operations can be performed through the diff --git a/content/en/docs/concepts/overview/working-with-objects/common-labels.md b/content/en/docs/concepts/overview/working-with-objects/common-labels.md index a0a68c6dff..29af899b4e 100644 --- a/content/en/docs/concepts/overview/working-with-objects/common-labels.md +++ b/content/en/docs/concepts/overview/working-with-objects/common-labels.md @@ -59,8 +59,8 @@ metadata: ## Applications And Instances Of Applications An application can be installed one or more times into a Kubernetes cluster and, -in some cases, the same namespace. For example, wordpress can be installed more -than once where different websites are different installations of wordpress. +in some cases, the same namespace. For example, WordPress can be installed more +than once where different websites are different installations of WordPress. The name of an application and the instance name are recorded separately. For example, WordPress has a `app.kubernetes.io/name` of `wordpress` while it has @@ -168,6 +168,6 @@ metadata: ... ``` -With the MySQL `StatefulSet` and `Service` you'll notice information about both MySQL and Wordpress, the broader application, are included. 
+With the MySQL `StatefulSet` and `Service` you'll notice information about both MySQL and WordPress, the broader application, are included. diff --git a/content/en/docs/concepts/policy/pod-security-policy.md b/content/en/docs/concepts/policy/pod-security-policy.md index 063f1234fe..17f30906bf 100644 --- a/content/en/docs/concepts/policy/pod-security-policy.md +++ b/content/en/docs/concepts/policy/pod-security-policy.md @@ -216,12 +216,17 @@ kubectl-user create -f- <}}/#networkpolicy-v1-networking-k8s-io) reference for a full definition of the resource. diff --git a/content/en/docs/concepts/services-networking/service.md b/content/en/docs/concepts/services-networking/service.md index 58d9bb9a2d..368bf02fb1 100644 --- a/content/en/docs/concepts/services-networking/service.md +++ b/content/en/docs/concepts/services-networking/service.md @@ -134,7 +134,7 @@ For example: * You want to point your Service to a Service in a different {{< glossary_tooltip term_id="namespace" >}} or on another cluster. * You are migrating a workload to Kubernetes. While evaluating the approach, - you run only a proportion of your backends in Kubernetes. + you run only a portion of your backends in Kubernetes. In any of these scenarios you can define a Service _without_ a Pod selector. For example: @@ -238,7 +238,7 @@ There are a few reasons for using proxying for Services: ### User space proxy mode {#proxy-mode-userspace} -In this mode, kube-proxy watches the Kubernetes master for the addition and +In this mode, kube-proxy watches the Kubernetes control plane for the addition and removal of Service and Endpoint objects. For each Service it opens a port (randomly chosen) on the local node. 
Any connections to this "proxy port" are proxied to one of the Service's backend Pods (as reported via diff --git a/content/en/docs/concepts/storage/persistent-volumes.md b/content/en/docs/concepts/storage/persistent-volumes.md index 8266647b6d..971939d882 100644 --- a/content/en/docs/concepts/storage/persistent-volumes.md +++ b/content/en/docs/concepts/storage/persistent-volumes.md @@ -231,7 +231,7 @@ the following types of volumes: * Azure Disk * Portworx * FlexVolumes -* CSI +* {{< glossary_tooltip text="CSI" term_id="csi" >}} You can only expand a PVC if its storage class's `allowVolumeExpansion` field is set to true. @@ -311,7 +311,7 @@ If expanding underlying storage fails, the cluster administrator can manually re PersistentVolume types are implemented as plugins. Kubernetes currently supports the following plugins: * [`awsElasticBlockStore`](/docs/concepts/storage/volumes/#awselasticblockstore) - AWS Elastic Block Store (EBS) -* [`azureDisk`](/docs/concepts/sotrage/volumes/#azuredisk) - Azure Disk +* [`azureDisk`](/docs/concepts/storage/volumes/#azuredisk) - Azure Disk * [`azureFile`](/docs/concepts/storage/volumes/#azurefile) - Azure File * [`cephfs`](/docs/concepts/storage/volumes/#cephfs) - CephFS volume * [`cinder`](/docs/concepts/storage/volumes/#cinder) - Cinder (OpenStack block storage) @@ -735,7 +735,7 @@ Only statically provisioned volumes are supported for alpha release. Administrat {{< feature-state for_k8s_version="v1.20" state="stable" >}} Volume snapshots only support the out-of-tree CSI volume plugins. For details, see [Volume Snapshots](/docs/concepts/storage/volume-snapshots/). -In-tree volume plugins are deprecated. You can read about the deprecated volume plugins in the [Volume Plugin FAQ] (https://github.com/kubernetes/community/blob/master/sig-storage/volume-plugin-faq.md). +In-tree volume plugins are deprecated. 
You can read about the deprecated volume plugins in the [Volume Plugin FAQ](https://github.com/kubernetes/community/blob/master/sig-storage/volume-plugin-faq.md). ### Create a PersistentVolumeClaim from a Volume Snapshot {#create-persistent-volume-claim-from-volume-snapshot} diff --git a/content/en/docs/concepts/storage/storage-capacity.md b/content/en/docs/concepts/storage/storage-capacity.md index 836d5d2c36..d5993d4f59 100644 --- a/content/en/docs/concepts/storage/storage-capacity.md +++ b/content/en/docs/concepts/storage/storage-capacity.md @@ -34,7 +34,7 @@ text="Container Storage Interface" term_id="csi" >}} (CSI) drivers and ## API There are two API extensions for this feature: -- [CSIStorageCapacity](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#csistoragecapacity-v1alpha1-storage-k8s-io) objects: +- CSIStorageCapacity objects: these get produced by a CSI driver in the namespace where the driver is installed. Each object contains capacity information for one storage class and defines which nodes have diff --git a/content/en/docs/concepts/storage/volume-snapshot-classes.md b/content/en/docs/concepts/storage/volume-snapshot-classes.md index 06382e5fba..ee781d665f 100644 --- a/content/en/docs/concepts/storage/volume-snapshot-classes.md +++ b/content/en/docs/concepts/storage/volume-snapshot-classes.md @@ -72,7 +72,7 @@ used for provisioning VolumeSnapshots. This field must be specified. ### DeletionPolicy -Volume snapshot classes have a deletionPolicy. It enables you to configure what happens to a VolumeSnapshotContent when the VolumeSnapshot object it is bound to is to be deleted. The deletionPolicy of a volume snapshot can either be `Retain` or `Delete`. This field must be specified. +Volume snapshot classes have a deletionPolicy. It enables you to configure what happens to a VolumeSnapshotContent when the VolumeSnapshot object it is bound to is to be deleted. 
The deletionPolicy of a volume snapshot class can either be `Retain` or `Delete`. This field must be specified. If the deletionPolicy is `Delete`, then the underlying storage snapshot will be deleted along with the VolumeSnapshotContent object. If the deletionPolicy is `Retain`, then both the underlying snapshot and VolumeSnapshotContent remain. diff --git a/content/en/docs/concepts/workloads/_index.md b/content/en/docs/concepts/workloads/_index.md index 21e57aed03..2c9dd8aa8e 100644 --- a/content/en/docs/concepts/workloads/_index.md +++ b/content/en/docs/concepts/workloads/_index.md @@ -8,50 +8,74 @@ no_list: true {{< glossary_definition term_id="workload" length="short" >}} Whether your workload is a single component or several that work together, on Kubernetes you run -it inside a set of [Pods](/docs/concepts/workloads/pods). -In Kubernetes, a Pod represents a set of running {{< glossary_tooltip text="containers" term_id="container" >}} -on your cluster. +it inside a set of [_pods_](/docs/concepts/workloads/pods). +In Kubernetes, a `Pod` represents a set of running +{{< glossary_tooltip text="containers" term_id="container" >}} on your cluster. -A Pod has a defined lifecycle. For example, once a Pod is running in your cluster then -a critical failure on the {{< glossary_tooltip text="node" term_id="node" >}} where that -Pod is running means that all the Pods on that node fail. Kubernetes treats that level -of failure as final: you would need to create a new Pod even if the node later recovers. +Kubernetes pods have a [defined lifecycle](/docs/concepts/workloads/pods/pod-lifecycle/). +For example, once a pod is running in your cluster then a critical fault on the +{{< glossary_tooltip text="node" term_id="node" >}} where that pod is running means that +all the pods on that node fail. Kubernetes treats that level of failure as final: you +would need to create a new `Pod` to recover, even if the node later becomes healthy. 
-However, to make life considerably easier, you don't need to manage each Pod directly. -Instead, you can use _workload resources_ that manage a set of Pods on your behalf. +However, to make life considerably easier, you don't need to manage each `Pod` directly. +Instead, you can use _workload resources_ that manage a set of pods on your behalf. These resources configure {{< glossary_tooltip term_id="controller" text="controllers" >}} -that make sure the right number of the right kind of Pod are running, to match the state +that make sure the right number of the right kind of pod are running, to match the state you specified. -Those workload resources include: +Kubernetes provides several built-in workload resources: -* [Deployment](/docs/concepts/workloads/controllers/deployment/) and [ReplicaSet](/docs/concepts/workloads/controllers/replicaset/) - (replacing the legacy resource {{< glossary_tooltip text="ReplicationController" term_id="replication-controller" >}}); -* [StatefulSet](/docs/concepts/workloads/controllers/statefulset/); -* [DaemonSet](/docs/concepts/workloads/controllers/daemonset/) for running Pods that provide - node-local facilities, such as a storage driver or network plugin; -* [Job](/docs/concepts/workloads/controllers/job/) and - [CronJob](/docs/concepts/workloads/controllers/cron-jobs/) - for tasks that run to completion. +* [`Deployment`](/docs/concepts/workloads/controllers/deployment/) and [`ReplicaSet`](/docs/concepts/workloads/controllers/replicaset/) + (replacing the legacy resource + {{< glossary_tooltip text="ReplicationController" term_id="replication-controller" >}}). + `Deployment` is a good fit for managing a stateless application workload on your cluster, + where any `Pod` in the `Deployment` is interchangeable and can be replaced if needed. +* [`StatefulSet`](/docs/concepts/workloads/controllers/statefulset/) lets you + run one or more related Pods that do track state somehow. 
For example, if your workload + records data persistently, you can run a `StatefulSet` that matches each `Pod` with a + [`PersistentVolume`](/docs/concepts/storage/persistent-volumes/). Your code, running in the + `Pods` for that `StatefulSet`, can replicate data to other `Pods` in the same `StatefulSet` + to improve overall resilience. +* [`DaemonSet`](/docs/concepts/workloads/controllers/daemonset/) defines `Pods` that provide + node-local facilities. These might be fundamental to the operation of your cluster, such + as a networking helper tool, or be part of an + {{< glossary_tooltip text="add-on" term_id="addons" >}}. + Every time you add a node to your cluster that matches the specification in a `DaemonSet`, + the control plane schedules a `Pod` for that `DaemonSet` onto the new node. +* [`Job`](/docs/concepts/workloads/controllers/job/) and + [`CronJob`](/docs/concepts/workloads/controllers/cron-jobs/) + define tasks that run to completion and then stop. Jobs represent one-off tasks, whereas + `CronJobs` recur according to a schedule. -There are also two supporting concepts that you might find relevant: -* [Garbage collection](/docs/concepts/workloads/controllers/garbage-collection/) tidies up objects - from your cluster after their _owning resource_ has been removed. -* The [_time-to-live after finished_ controller](/docs/concepts/workloads/controllers/ttlafterfinished/) - removes Jobs once a defined time has passed since they completed. +In the wider Kubernetes ecosystem, you can find third-party workload resources that provide +additional behaviors. Using a +[custom resource definition](/docs/concepts/extend-kubernetes/api-extension/custom-resources/), +you can add in a third-party workload resource if you want a specific behavior that's not part +of Kubernetes' core. 
For example, if you wanted to run a group of `Pods` for your application but +stop work unless _all_ the Pods are available (perhaps for some high-throughput distributed task), +then you can implement or install an extension that does provide that feature. ## {{% heading "whatsnext" %}} As well as reading about each resource, you can learn about specific tasks that relate to them: -* [Run a stateless application using a Deployment](/docs/tasks/run-application/run-stateless-application-deployment/) +* [Run a stateless application using a `Deployment`](/docs/tasks/run-application/run-stateless-application-deployment/) * Run a stateful application either as a [single instance](/docs/tasks/run-application/run-single-instance-stateful-application/) or as a [replicated set](/docs/tasks/run-application/run-replicated-stateful-application/) -* [Run Automated Tasks with a CronJob](/docs/tasks/job/automated-tasks-with-cron-jobs/) +* [Run automated tasks with a `CronJob`](/docs/tasks/job/automated-tasks-with-cron-jobs/) + +To learn about Kubernetes' mechanisms for separating code from configuration, +visit [Configuration](/docs/concepts/configuration/). + +There are two supporting concepts that provide backgrounds about how Kubernetes manages pods +for applications: +* [Garbage collection](/docs/concepts/workloads/controllers/garbage-collection/) tidies up objects + from your cluster after their _owning resource_ has been removed. +* The [_time-to-live after finished_ controller](/docs/concepts/workloads/controllers/ttlafterfinished/) + removes Jobs once a defined time has passed since they completed. Once your application is running, you might want to make it available on the internet as -a [Service](/docs/concepts/services-networking/service/) or, for web application only, -using an [Ingress](/docs/concepts/services-networking/ingress). 
+a [`Service`](/docs/concepts/services-networking/service/) or, for web applications only, +using an [`Ingress`](/docs/concepts/services-networking/ingress). -You can also visit [Configuration](/docs/concepts/configuration/) to learn about Kubernetes' -mechanisms for separating code from configuration. diff --git a/content/en/docs/concepts/workloads/controllers/cron-jobs.md b/content/en/docs/concepts/workloads/controllers/cron-jobs.md index c4206facb5..481c6f5017 100644 --- a/content/en/docs/concepts/workloads/controllers/cron-jobs.md +++ b/content/en/docs/concepts/workloads/controllers/cron-jobs.md @@ -49,6 +49,37 @@ This example CronJob manifest prints the current time and a hello message every ([Running Automated Tasks with a CronJob](/docs/tasks/job/automated-tasks-with-cron-jobs/) takes you through this example in more detail). +### Cron schedule syntax + +``` +# ┌───────────── minute (0 - 59) +# │ ┌───────────── hour (0 - 23) +# │ │ ┌───────────── day of the month (1 - 31) +# │ │ │ ┌───────────── month (1 - 12) +# │ │ │ │ ┌───────────── day of the week (0 - 6) (Sunday to Saturday; +# │ │ │ │ │ 7 is also Sunday on some systems) +# │ │ │ │ │ +# │ │ │ │ │ +# * * * * * +``` + + +| Entry | Description | Equivalent to | +| ------------- | ------------- |------------- | +| @yearly (or @annually) | Run once a year at midnight of 1 January | 0 0 1 1 * | +| @monthly | Run once a month at midnight of the first day of the month | 0 0 1 * * | +| @weekly | Run once a week at midnight on Sunday morning | 0 0 * * 0 | +| @daily (or @midnight) | Run once a day at midnight | 0 0 * * * | +| @hourly | Run once an hour at the beginning of the hour | 0 * * * * | + + + +For example, the line below states that the task must be started every Friday at midnight, as well as on the 13th of each month at midnight: + +`0 0 13 * 5` + +To generate CronJob schedule expressions, you can also use web tools like [crontab.guru](https://crontab.guru/). 
+ ## CronJob limitations {#cron-job-limitations} A cron job creates a job object _about_ once per execution time of its schedule. We say "about" because there diff --git a/content/en/docs/concepts/workloads/controllers/garbage-collection.md b/content/en/docs/concepts/workloads/controllers/garbage-collection.md index 0e9b4f746a..1b1de5d0a4 100644 --- a/content/en/docs/concepts/workloads/controllers/garbage-collection.md +++ b/content/en/docs/concepts/workloads/controllers/garbage-collection.md @@ -150,14 +150,17 @@ curl -X DELETE localhost:8080/apis/apps/v1/namespaces/default/replicasets/my-rep ``` kubectl also supports cascading deletion. -To delete dependents automatically using kubectl, set `--cascade` to true. To -orphan dependents, set `--cascade` to false. The default value for `--cascade` -is true. + +To delete dependents in the foreground using kubectl, set `--cascade=foreground`. To +orphan dependents, set `--cascade=orphan`. + +The default behavior is to delete the dependents in the background which is the +behavior when `--cascade` is omitted or explicitly set to `background`. 
Here's an example that orphans the dependents of a ReplicaSet: ```shell -kubectl delete replicaset my-repset --cascade=false +kubectl delete replicaset my-repset --cascade=orphan ``` ### Additional note on Deployments diff --git a/content/en/docs/concepts/workloads/controllers/job.md b/content/en/docs/concepts/workloads/controllers/job.md index b65ed27ae2..14f218ddc8 100644 --- a/content/en/docs/concepts/workloads/controllers/job.md +++ b/content/en/docs/concepts/workloads/controllers/job.md @@ -38,6 +38,7 @@ You can run the example with this command: ```shell kubectl apply -f https://kubernetes.io/examples/controllers/job.yaml ``` +The output is similar to this: ``` job.batch/pi created ``` @@ -47,6 +48,7 @@ Check on the status of the Job with `kubectl`: ```shell kubectl describe jobs/pi ``` +The output is similar to this: ``` Name: pi Namespace: default @@ -91,6 +93,7 @@ To list all the Pods that belong to a Job in a machine readable form, you can us pods=$(kubectl get pods --selector=job-name=pi --output=jsonpath='{.items[*].metadata.name}') echo $pods ``` +The output is similar to this: ``` pi-5rwd7 ``` @@ -398,10 +401,11 @@ Therefore, you delete Job `old` but _leave its pods running_, using `kubectl delete jobs/old --cascade=false`. Before deleting it, you make a note of what selector it uses: -``` +```shell kubectl get job old -o yaml ``` -``` +The output is similar to this: +```yaml kind: Job metadata: name: old @@ -420,7 +424,7 @@ they are controlled by Job `new` as well. You need to specify `manualSelector: true` in the new Job since you are not using the selector that the system normally generates for you automatically. 
-``` +```yaml kind: Job metadata: name: new diff --git a/content/en/docs/concepts/workloads/controllers/replicaset.md b/content/en/docs/concepts/workloads/controllers/replicaset.md index ba0270b573..e45d20c8f7 100644 --- a/content/en/docs/concepts/workloads/controllers/replicaset.md +++ b/content/en/docs/concepts/workloads/controllers/replicaset.md @@ -283,7 +283,7 @@ curl -X DELETE 'localhost:8080/apis/apps/v1/namespaces/default/replicasets/fron ### Deleting just a ReplicaSet -You can delete a ReplicaSet without affecting any of its Pods using [`kubectl delete`](/docs/reference/generated/kubectl/kubectl-commands#delete) with the `--cascade=false` option. +You can delete a ReplicaSet without affecting any of its Pods using [`kubectl delete`](/docs/reference/generated/kubectl/kubectl-commands#delete) with the `--cascade=orphan` option. When using the REST API or the `client-go` library, you must set `propagationPolicy` to `Orphan`. For example: ```shell diff --git a/content/en/docs/concepts/workloads/controllers/replicationcontroller.md b/content/en/docs/concepts/workloads/controllers/replicationcontroller.md index fa481fb3f8..36ae4a880a 100644 --- a/content/en/docs/concepts/workloads/controllers/replicationcontroller.md +++ b/content/en/docs/concepts/workloads/controllers/replicationcontroller.md @@ -54,6 +54,7 @@ Run the example job by downloading the example file and then running this comman ```shell kubectl apply -f https://k8s.io/examples/controllers/replication.yaml ``` +The output is similar to this: ``` replicationcontroller/nginx created ``` @@ -63,6 +64,7 @@ Check on the status of the ReplicationController using this command: ```shell kubectl describe replicationcontrollers/nginx ``` +The output is similar to this: ``` Name: nginx Namespace: default @@ -101,6 +103,7 @@ To list all the pods that belong to the ReplicationController in a machine reada pods=$(kubectl get pods --selector=app=nginx --output=jsonpath={.items..metadata.name}) echo $pods ``` +The 
output is similar to this: ``` nginx-3ntk0 nginx-4ok8v nginx-qrm3m ``` diff --git a/content/en/docs/concepts/workloads/pods/_index.md b/content/en/docs/concepts/workloads/pods/_index.md index 5bd954dcef..5dd6bac9de 100644 --- a/content/en/docs/concepts/workloads/pods/_index.md +++ b/content/en/docs/concepts/workloads/pods/_index.md @@ -15,8 +15,7 @@ card: _Pods_ are the smallest deployable units of computing that you can create and manage in Kubernetes. A _Pod_ (as in a pod of whales or pea pod) is a group of one or more -{{< glossary_tooltip text="containers" term_id="container" >}}, with shared storage/network resources, and a specification -for how to run the containers. A Pod's contents are always co-located and +{{< glossary_tooltip text="containers" term_id="container" >}}, with shared storage and network resources, and a specification for how to run the containers. A Pod's contents are always co-located and co-scheduled, and run in a shared context. A Pod models an application-specific "logical host": it contains one or more application containers which are relatively tightly coupled. @@ -191,6 +190,35 @@ details are abstracted away. That abstraction and separation of concerns simplif system semantics, and makes it feasible to extend the cluster's behavior without changing existing code. +## Pod update and replacement + +As mentioned in the previous section, when the Pod template for a workload +resource is changed, the controller creates new Pods based on the updated +template instead of updating or patching the existing Pods. + +Kubernetes doesn't prevent you from managing Pods directly. It is possible to +update some fields of a running Pod, in place. 
However, Pod update operations +like +[`patch`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#patch-pod-v1-core), and +[`replace`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#replace-pod-v1-core) +have some limitations: + +- Most of the metadata about a Pod is immutable. For example, you cannot + change the `namespace`, `name`, `uid`, or `creationTimestamp` fields; + the `generation` field is unique. It only accepts updates that increment the + field's current value. +- If the `metadata.deletionTimestamp` is set, no new entry can be added to the + `metadata.finalizers` list. +- Pod updates may not change fields other than `spec.containers[*].image`, + `spec.initContainers[*].image`, `spec.activeDeadlineSeconds` or + `spec.tolerations`. For `spec.tolerations`, you can only add new entries. +- When updating the `spec.activeDeadlineSeconds` field, two types of updates + are allowed: + + 1. setting the unassigned field to a positive number; + 1. updating the field from a positive number to a smaller, non-negative + number. + ## Resource sharing and communication Pods enable data sharing and communication among their constituent @@ -266,9 +294,10 @@ but cannot be controlled from there. object definition describes the object in detail. * [The Distributed System Toolkit: Patterns for Composite Containers](https://kubernetes.io/blog/2015/06/the-distributed-system-toolkit-patterns) explains common layouts for Pods with more than one container. 
-To understand the context for why Kubernetes wraps a common Pod API in other resources (such as {{< glossary_tooltip text="StatefulSets" term_id="statefulset" >}} or {{< glossary_tooltip text="Deployments" term_id="deployment" >}}, you can read about the prior art, including: - * [Aurora](https://aurora.apache.org/documentation/latest/reference/configuration/#job-schema) - * [Borg](https://research.google.com/pubs/pub43438.html) - * [Marathon](https://mesosphere.github.io/marathon/docs/rest-api.html) - * [Omega](https://research.google/pubs/pub41684/) - * [Tupperware](https://engineering.fb.com/data-center-engineering/tupperware/). +To understand the context for why Kubernetes wraps a common Pod API in other resources (such as {{< glossary_tooltip text="StatefulSets" term_id="statefulset" >}} or {{< glossary_tooltip text="Deployments" term_id="deployment" >}}), you can read about the prior art, including: + +* [Aurora](https://aurora.apache.org/documentation/latest/reference/configuration/#job-schema) +* [Borg](https://research.google.com/pubs/pub43438.html) +* [Marathon](https://mesosphere.github.io/marathon/docs/rest-api.html) +* [Omega](https://research.google/pubs/pub41684/) +* [Tupperware](https://engineering.fb.com/data-center-engineering/tupperware/). diff --git a/content/en/docs/concepts/workloads/pods/disruptions.md b/content/en/docs/concepts/workloads/pods/disruptions.md index 78e8b39a47..3d4248443d 100644 --- a/content/en/docs/concepts/workloads/pods/disruptions.md +++ b/content/en/docs/concepts/workloads/pods/disruptions.md @@ -49,7 +49,7 @@ Cluster administrator actions include: - [Draining a node](/docs/tasks/administer-cluster/safely-drain-node/) for repair or upgrade. - Draining a node from a cluster to scale the cluster down (learn about -[Cluster Autoscaling](/docs/tasks/administer-cluster/cluster-management/#cluster-autoscaler) +[Cluster Autoscaling](https://github.com/kubernetes/autoscaler/#readme) ). 
- Removing a pod from a node to permit something else to fit on that node. diff --git a/content/en/docs/concepts/workloads/pods/init-containers.md b/content/en/docs/concepts/workloads/pods/init-containers.md index 5c92f07423..363c38a39b 100644 --- a/content/en/docs/concepts/workloads/pods/init-containers.md +++ b/content/en/docs/concepts/workloads/pods/init-containers.md @@ -49,9 +49,9 @@ as documented in [Resources](#resources). Also, init containers do not support `lifecycle`, `livenessProbe`, `readinessProbe`, or `startupProbe` because they must run to completion before the Pod can be ready. -If you specify multiple init containers for a Pod, Kubelet runs each init +If you specify multiple init containers for a Pod, kubelet runs each init container sequentially. Each init container must succeed before the next can run. -When all of the init containers have run to completion, Kubelet initializes +When all of the init containers have run to completion, kubelet initializes the application containers for the Pod and runs them as usual. 
## Using init containers @@ -133,6 +133,7 @@ You can start this Pod by running: ```shell kubectl apply -f myapp.yaml ``` +The output is similar to this: ``` pod/myapp-pod created ``` @@ -141,6 +142,7 @@ And check on its status with: ```shell kubectl get -f myapp.yaml ``` +The output is similar to this: ``` NAME READY STATUS RESTARTS AGE myapp-pod 0/1 Init:0/2 0 6m @@ -150,6 +152,7 @@ or for more details: ```shell kubectl describe -f myapp.yaml ``` +The output is similar to this: ``` Name: myapp-pod Namespace: default @@ -224,6 +227,7 @@ To create the `mydb` and `myservice` services: ```shell kubectl apply -f services.yaml ``` +The output is similar to this: ``` service/myservice created service/mydb created @@ -235,6 +239,7 @@ Pod moves into the Running state: ```shell kubectl get -f myapp.yaml ``` +The output is similar to this: ``` NAME READY STATUS RESTARTS AGE myapp-pod 1/1 Running 0 9m @@ -257,7 +262,7 @@ if the Pod `restartPolicy` is set to Always, the init containers use A Pod cannot be `Ready` until all init containers have succeeded. The ports on an init container are not aggregated under a Service. A Pod that is initializing -is in the `Pending` state but should have a condition `Initialized` set to true. +is in the `Pending` state but should have a condition `Initialized` set to false. If the Pod [restarts](#pod-restart-reasons), or is restarted, all init containers must execute again. 
@@ -319,11 +324,9 @@ reasons: - ## {{% heading "whatsnext" %}} * Read about [creating a Pod that has an init container](/docs/tasks/configure-pod-container/configure-pod-initialization/#create-a-pod-that-has-an-init-container) * Learn how to [debug init containers](/docs/tasks/debug-application-cluster/debug-init-containers/) - diff --git a/content/en/docs/concepts/workloads/pods/pod-lifecycle.md b/content/en/docs/concepts/workloads/pods/pod-lifecycle.md index 81822a7f17..df83f7c5f3 100644 --- a/content/en/docs/concepts/workloads/pods/pod-lifecycle.md +++ b/content/en/docs/concepts/workloads/pods/pod-lifecycle.md @@ -85,6 +85,13 @@ Value | Description `Failed` | All containers in the Pod have terminated, and at least one container has terminated in failure. That is, the container either exited with non-zero status or was terminated by the system. `Unknown` | For some reason the state of the Pod could not be obtained. This phase typically occurs due to an error in communicating with the node where the Pod should be running. +{{< note >}} +When a Pod is being deleted, it is shown as `Terminating` by some kubectl commands. +This `Terminating` status is not one of the Pod phases. +A Pod is granted a term to terminate gracefully, which defaults to 30 seconds. +You can use the flag `--force` to [terminate a Pod by force](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination-forced). +{{< /note >}} + If a node dies or is disconnected from the rest of the cluster, Kubernetes applies a policy for setting the `phase` of all Pods on the lost node to Failed. @@ -325,7 +332,7 @@ a time longer than the liveness interval would allow. If your container usually starts in more than `initialDelaySeconds + failureThreshold × periodSeconds`, you should specify a startup probe that checks the same endpoint as the liveness probe. The default for -`periodSeconds` is 30s. You should then set its `failureThreshold` high enough to +`periodSeconds` is 10s. 
You should then set its `failureThreshold` high enough to allow the container to start, without changing the default values of the liveness probe. This helps to protect against deadlocks. diff --git a/content/en/docs/concepts/workloads/pods/pod-topology-spread-constraints.md b/content/en/docs/concepts/workloads/pods/pod-topology-spread-constraints.md index 4b33db8703..2e8a915c62 100644 --- a/content/en/docs/concepts/workloads/pods/pod-topology-spread-constraints.md +++ b/content/en/docs/concepts/workloads/pods/pod-topology-spread-constraints.md @@ -66,7 +66,7 @@ Instead of manually applying labels, you can also reuse the [well-known labels]( The API field `pod.spec.topologySpreadConstraints` is defined as below: -``` +```yaml apiVersion: v1 kind: Pod metadata: diff --git a/content/en/docs/contribute/new-content/new-features.md b/content/en/docs/contribute/new-content/new-features.md index 4e70f6c9ec..a0e3600562 100644 --- a/content/en/docs/contribute/new-content/new-features.md +++ b/content/en/docs/contribute/new-content/new-features.md @@ -95,14 +95,16 @@ deadlines. ### Open a placeholder PR -1. Open a pull request against the +1. Open a **draft** pull request against the `dev-{{< skew nextMinorVersion >}}` branch in the `kubernetes/website` repository, with a small -commit that you will amend later. +commit that you will amend later. To create a draft pull request, use the +Create Pull Request drop-down and select **Create Draft Pull Request**, +then click **Draft Pull Request**. 2. Edit the pull request description to include links to [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) PR(s) and [kubernetes/enhancements](https://github.com/kubernetes/enhancements) issue(s). -3. Use the Prow command `/milestone {{< skew nextMinorVersion >}}` to -assign the PR to the relevant milestone. This alerts the docs person managing -this release that the feature docs are coming. +3. 
Leave a comment on the related [kubernetes/enhancements](https://github.com/kubernetes/enhancements) +issue with a link to the PR to notify the docs person managing this release that +the feature docs are coming and should be tracked for the release. If your feature does not need any documentation changes, make sure the sig-release team knows this, by @@ -112,7 +114,9 @@ milestone. ### PR ready for review -When ready, populate your placeholder PR with feature documentation. +When ready, populate your placeholder PR with feature documentation and change +the state of the PR from draft to **ready for review**. To mark a pull request +as ready for review, navigate to the merge box and click **Ready for review**. Do your best to describe your feature and how to use it. If you need help structuring your documentation, ask in the `#sig-docs` slack channel. @@ -120,6 +124,13 @@ When you complete your content, the documentation person assigned to your featur To ensure technical accuracy, the content may also require a technical review from corresponding SIG(s). Use their suggestions to get the content to a release ready state. +If your feature is an Alpha or Beta feature and is behind a feature gate, +make sure you add it to [Alpha/Beta Feature gates](/docs/reference/command-line-tools-reference/feature-gates/#feature-gates-for-alpha-or-beta-features) +table as part of your pull request. With new feature gates, a description of +the feature gate is also required. If your feature is GA'ed or deprecated, +make sure to move it from that table to [Feature gates for graduated or deprecated features](/docs/reference/command-line-tools-reference/feature-gates/#feature-gates-for-graduated-or-deprecated-features) +table with Alpha and Beta history intact. + If your feature needs documentation and the first draft content is not received, the feature may be removed from the milestone. @@ -128,10 +139,4 @@ content is not received, the feature may be removed from the milestone. 
If your PR has not yet been merged into the `dev-{{< skew nextMinorVersion >}}` branch by the release deadline, work with the docs person managing the release to get it in by the deadline. If your feature needs documentation and the docs are not ready, the feature may be removed from the -milestone. - -If your feature is an Alpha feature and is behind a feature gate, make sure you -add it to [Alpha/Beta Feature gates](/docs/reference/command-line-tools-reference/feature-gates/#feature-gates-for-alpha-or-beta-features) table -as part of your pull request. If your feature is moving out of Alpha, make sure to -remove it from that table. - +milestone. \ No newline at end of file diff --git a/content/en/docs/contribute/style/hugo-shortcodes/example1.md b/content/en/docs/contribute/style/hugo-shortcodes/example1.md index fbe21a1457..9e9f45b0a6 100644 --- a/content/en/docs/contribute/style/hugo-shortcodes/example1.md +++ b/content/en/docs/contribute/style/hugo-shortcodes/example1.md @@ -6,4 +6,4 @@ This is an **example** content file inside the **includes** leaf bundle. {{< note >}} Included content files can also contain shortcodes. -{{< /note >}} \ No newline at end of file +{{< /note >}} diff --git a/content/en/docs/contribute/style/hugo-shortcodes/index.md b/content/en/docs/contribute/style/hugo-shortcodes/index.md index dd9ed06c31..fa25966e45 100644 --- a/content/en/docs/contribute/style/hugo-shortcodes/index.md +++ b/content/en/docs/contribute/style/hugo-shortcodes/index.md @@ -1,33 +1,30 @@ --- -approvers: -- chenopis title: Custom Hugo Shortcodes content_type: concept --- -This page explains the custom Hugo shortcodes that can be used in Kubernetes markdown documentation. +This page explains the custom Hugo shortcodes that can be used in Kubernetes Markdown documentation. Read more about shortcodes in the [Hugo documentation](https://gohugo.io/content-management/shortcodes). 
- ## Feature state -In a markdown page (`.md` file) on this site, you can add a shortcode to display version and state of the documented feature. +In a Markdown page (`.md` file) on this site, you can add a shortcode to display version and state of the documented feature. ### Feature state demo -Below is a demo of the feature state snippet, which displays the feature as stable in Kubernetes version 1.10. +Below is a demo of the feature state snippet, which displays the feature as stable in the latest Kubernetes version. ``` -{{}} +{{}} ``` Renders to: -{{< feature-state for_k8s_version="v1.10" state="stable" >}} +{{< feature-state state="stable" >}} The valid values for `state` are: @@ -38,62 +35,22 @@ The valid values for `state` are: ### Feature state code -The displayed Kubernetes version defaults to that of the page or the site. This can be changed by passing the for_k8s_version shortcode parameter. +The displayed Kubernetes version defaults to that of the page or the site. You can change the +feature state version by passing the `for_k8s_version` shortcode parameter. For example: ``` -{{}} +{{}} ``` Renders to: -{{< feature-state for_k8s_version="v1.10" state="stable" >}} - -#### Alpha feature - -``` -{{}} -``` - -Renders to: - -{{< feature-state state="alpha" >}} - -#### Beta feature - -``` -{{}} -``` - -Renders to: - -{{< feature-state state="beta" >}} - -#### Stable feature - -``` -{{}} -``` - -Renders to: - -{{< feature-state state="stable" >}} - -#### Deprecated feature - -``` -{{}} -``` - -Renders to: - -{{< feature-state state="deprecated" >}} +{{< feature-state for_k8s_version="v1.10" state="beta" >}} ## Glossary -There are two glossary tooltips. +There are two glossary shortcodes: `glossary_tooltip` and `glossary_definition`. -You can reference glossary terms with an inclusion that automatically updates and replaces content with the relevant links from [our glossary](/docs/reference/glossary/). 
When the term is moused-over by someone -using the online documentation, the glossary entry displays a tooltip. +You can reference glossary terms with an inclusion that automatically updates and replaces content with the relevant links from [our glossary](/docs/reference/glossary/). When the glossary term is moused-over, the glossary entry displays a tooltip. The glossary term also displays as a link. As well as inclusions with tooltips, you can reuse the definitions from the glossary in page content. @@ -102,7 +59,7 @@ The raw data for glossary terms is stored at [https://github.com/kubernetes/webs ### Glossary demo -For example, the following include within the markdown renders to {{< glossary_tooltip text="cluster" term_id="cluster" >}} with a tooltip: +For example, the following include within the Markdown renders to {{< glossary_tooltip text="cluster" term_id="cluster" >}} with a tooltip: ``` {{}} @@ -113,13 +70,16 @@ Here's a short glossary definition: ``` {{}} ``` + which renders as: {{< glossary_definition prepend="A cluster is" term_id="cluster" length="short" >}} You can also include a full definition: + ``` {{}} ``` + which renders as: {{< glossary_definition term_id="cluster" length="all" >}} @@ -255,7 +215,63 @@ Renders to: {{< tab name="JSON File" include="podtemplate.json" />}} {{< /tabs >}} +## Version strings +To generate a version string for inclusion in the documentation, you can choose from +several version shortcodes. Each version shortcode displays a version string derived from +the value of a version parameter found in the site configuration file, `config.toml`. +The two most commonly used version parameters are `latest` and `version`. + +### `{{}}` + +The `{{}}` shortcode generates the value of the current version of +the Kubernetes documentation from the `version` site parameter. The `param` shortcode accepts the name of one site parameter, in this case: `version`. 
+ +{{< note >}} +In previously released documentation, `latest` and `version` parameter values are not equivalent. +After a new version is released, `latest` is incremented and the value of `version` for the documentation set remains unchanged. For example, a previously released version of the documentation displays `version` as +`v1.19` and `latest` as `v1.20`. +{{< /note >}} + +Renders to: + +{{< param "version" >}} + +### `{{}}` + +The `{{}}` shortcode returns the value of the `latest` site parameter. +The `latest` site parameter is updated when a new version of the documentation is released. +This parameter does not always match the value of `version` in a documentation set. + +Renders to: + +{{< latest-version >}} + +### `{{}}` + +The `{{}}` shortcode generates the value of `latest` without the "v" prefix. + +Renders to: + +{{< latest-semver >}} + +### `{{}}` + +The `{{}}` shortcode checks if the `min-kubernetes-server-version` +page parameter is present and then uses this value to compare to `version`. + +Renders to: + +{{< version-check >}} + +### `{{}}` + +The `{{}}` shortcode generates a version string from `latest` and removes +the "v" prefix. The shortcode prints a new URL for the release note CHANGELOG page with the modified version string. + +Renders to: + +{{< latest-release-notes >}} ## {{% heading "whatsnext" %}} @@ -264,4 +280,3 @@ Renders to: * Learn about [page content types](/docs/contribute/style/page-content-types/). * Learn about [opening a pull request](/docs/contribute/new-content/open-a-pr/). * Learn about [advanced contributing](/docs/contribute/advanced/). - diff --git a/content/en/docs/contribute/style/style-guide.md b/content/en/docs/contribute/style/style-guide.md index c0efac7959..b4864dbabf 100644 --- a/content/en/docs/contribute/style/style-guide.md +++ b/content/en/docs/contribute/style/style-guide.md @@ -143,7 +143,7 @@ Do | Don't :--| :----- Set the value of the `replicas` field in the configuration file. 
| Set the value of the "replicas" field in the configuration file. The value of the `exec` field is an ExecAction object. | The value of the "exec" field is an ExecAction object. -Run the process as a Daemonset in the `kube-system` namespace. | Run the process as a Daemonset in the kube-system namespace. +Run the process as a DaemonSet in the `kube-system` namespace. | Run the process as a DaemonSet in the kube-system namespace. {{< /table >}} ### Use code style for Kubernetes command tool and component names diff --git a/content/en/docs/reference/_index.md b/content/en/docs/reference/_index.md index bc95e7efeb..5b22271c86 100644 --- a/content/en/docs/reference/_index.md +++ b/content/en/docs/reference/_index.md @@ -18,7 +18,7 @@ This section of the Kubernetes documentation contains references. ## API Reference -* [Kubernetes API Reference {{< latest-version >}}](/docs/reference/generated/kubernetes-api/{{< latest-version >}}/) +* [API Reference for Kubernetes {{< param "version" >}}](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) * [Using The Kubernetes API](/docs/reference/using-api/) - overview of the API for Kubernetes. ## API Client Libraries @@ -54,4 +54,3 @@ An archive of the design docs for Kubernetes functionality. Good starting points [Kubernetes Architecture](https://git.k8s.io/community/contributors/design-proposals/architecture/architecture.md) and [Kubernetes Design Overview](https://git.k8s.io/community/contributors/design-proposals). 
- diff --git a/content/en/docs/reference/access-authn-authz/admission-controllers.md b/content/en/docs/reference/access-authn-authz/admission-controllers.md index e57813ec12..0cdcbf2f36 100644 --- a/content/en/docs/reference/access-authn-authz/admission-controllers.md +++ b/content/en/docs/reference/access-authn-authz/admission-controllers.md @@ -669,13 +669,6 @@ allowVolumeExpansion: true For more information about persistent volume claims, see [PersistentVolumeClaims](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims). -### PodPreset {#podpreset} - -This admission controller injects a pod with the fields specified in a matching PodPreset. -See also [PodPreset concept](/docs/concepts/workloads/pods/podpreset/) and -[Inject Information into Pods Using a PodPreset](/docs/tasks/inject-data-application/podpreset) -for more information. - ### PodSecurityPolicy {#podsecuritypolicy} This admission controller acts on creation and modification of the pod and determines if it should be admitted @@ -792,25 +785,8 @@ versions 1.9 and later). ## Is there a recommended set of admission controllers to use? -Yes. For Kubernetes version 1.10 and later, the recommended admission controllers are enabled by default (shown [here](/docs/reference/command-line-tools-reference/kube-apiserver/#options)), so you do not need to explicitly specify them. You can enable additional admission controllers beyond the default set using the `--enable-admission-plugins` flag (**order doesn't matter**). +Yes. The recommended admission controllers are enabled by default (shown [here](/docs/reference/command-line-tools-reference/kube-apiserver/#options)), so you do not need to explicitly specify them. You can enable additional admission controllers beyond the default set using the `--enable-admission-plugins` flag (**order doesn't matter**). {{< note >}} `--admission-control` was deprecated in 1.10 and replaced with `--enable-admission-plugins`. 
{{< /note >}} - -For Kubernetes 1.9 and earlier, we recommend running the following set of admission controllers using the `--admission-control` flag (**order matters**). - -* v1.9 - - ```shell - --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota - ``` - - * It's worth reiterating that in 1.9, these happen in a mutating phase -and a validating phase, and that for example `ResourceQuota` runs in the validating -phase, and therefore is the last admission controller to run. -`MutatingAdmissionWebhook` appears before it in this list, because it runs -in the mutating phase. - - For earlier versions, there was no concept of validating versus mutating and the -admission controllers ran in the exact order specified. diff --git a/content/en/docs/reference/access-authn-authz/rbac.md b/content/en/docs/reference/access-authn-authz/rbac.md index 8ff03a171c..4bc2b86dd6 100644 --- a/content/en/docs/reference/access-authn-authz/rbac.md +++ b/content/en/docs/reference/access-authn-authz/rbac.md @@ -86,8 +86,9 @@ Because ClusterRoles are cluster-scoped, you can also use them to grant access t * cluster-scoped resources (like {{< glossary_tooltip text="nodes" term_id="node" >}}) * non-resource endpoints (like `/healthz`) * namespaced resources (like Pods), across all namespaces + For example: you can use a ClusterRole to allow a particular user to run - `kubectl get pods --all-namespaces`. 
+ `kubectl get pods --all-namespaces`. Here is an example of a ClusterRole that can be used to grant read access to {{< glossary_tooltip text="secrets" term_id="secret" >}} in any particular namespace, @@ -514,7 +515,7 @@ subjects: namespace: kube-system ``` -For all service accounts in the "qa" namespace: +For all service accounts in the "qa" namespace (granted via the `system:serviceaccounts:qa` group): ```yaml subjects: @@ -522,6 +523,15 @@ subjects: name: system:serviceaccounts:qa apiGroup: rbac.authorization.k8s.io ``` +For all service accounts in the "dev" group in the "development" namespace: + +```yaml +subjects: +- kind: Group + name: system:serviceaccounts:dev + apiGroup: rbac.authorization.k8s.io + namespace: development +``` For all service accounts in any namespace: diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates.md b/content/en/docs/reference/command-line-tools-reference/feature-gates.md index 4f4d0fda94..ed18f36c77 100644 --- a/content/en/docs/reference/command-line-tools-reference/feature-gates.md +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates.md @@ -166,7 +166,8 @@ different Kubernetes components. 
| `StorageVersionHash` | `true` | Beta | 1.15 | | | `Sysctls` | `true` | Beta | 1.11 | | | `TTLAfterFinished` | `false` | Alpha | 1.12 | | -| `TopologyManager` | `false` | Alpha | 1.16 | | +| `TopologyManager` | `false` | Alpha | 1.16 | 1.17 | +| `TopologyManager` | `true` | Beta | 1.18 | | | `ValidateProxyRedirects` | `false` | Alpha | 1.12 | 1.13 | | `ValidateProxyRedirects` | `true` | Beta | 1.14 | | | `WindowsEndpointSliceProxying` | `false` | Alpha | 1.19 | | diff --git a/content/en/docs/reference/command-line-tools-reference/kubelet.md b/content/en/docs/reference/command-line-tools-reference/kubelet.md index 501a62fb9c..b569177dda 100644 --- a/content/en/docs/reference/command-line-tools-reference/kubelet.md +++ b/content/en/docs/reference/command-line-tools-reference/kubelet.md @@ -73,13 +73,6 @@ kubelet [flags] Enables anonymous requests to the Kubelet server. Requests that are not rejected by another authentication method are treated as anonymous requests. Anonymous requests have a username of `system:anonymous`, and a group name of `system:unauthenticated`. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.) - ---application-metrics-count-limit int     Default: 100 - - -Max number of application metrics to store (per container) (DEPRECATED: This is a cadvisor flag that was mistakenly registered with the Kubelet. Due to legacy concerns,it will follow the standard CLI deprecation timeline before being removed.) - - --authentication-token-webhook @@ -122,13 +115,6 @@ kubelet [flags] Path to the file containing Azure container registry configuration information. - ---boot-id-file string     Default: `/proc/sys/kernel/random/boot_id` - - -Comma-separated list of files to check for `boot-id`. Use the first one that exists. (DEPRECATED: This is a cadvisor flag that was mistakenly registered with the Kubelet. 
Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed.) - - --bootstrap-kubeconfig string @@ -234,13 +220,6 @@ kubelet [flags] The Kubelet will load its initial configuration from this file. The path may be absolute or relative; relative paths start at the Kubelet's current working directory. Omit this flag to use the built-in default configuration values. Command-line flags override configuration from this file. - ---container-hints string     Default: `/etc/cadvisor/container_hints.json` - - -location of the container hints file. (DEPRECATED: This is a cadvisor flag that was mistakenly registered with the Kubelet. Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed.) - - --container-log-max-files int32     Default: 5 @@ -269,13 +248,7 @@ kubelet [flags] [Experimental] The endpoint of remote runtime service. Currently unix socket endpoint is supported on Linux, while npipe and tcp endpoints are supported on windows. Examples: `unix:///var/run/dockershim.sock`, `npipe:////./pipe/dockershim`. - ---containerd string     Default: `/run/containerd/containerd.sock` - - -The `containerd` endpoint. (DEPRECATED: This is a cadvisor flag that was mistakenly registered with the Kubelet. Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed.) - - + --contention-profiling @@ -311,13 +284,6 @@ kubelet [flags] <Warning: Alpha feature> CPU Manager reconciliation period. Examples: `10s`, or `1m`. If not supplied, defaults to node status update frequency. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.) - ---docker string     Default: `unix:///var/run/docker.sock` - - -The `docker` endpoint. (DEPRECATED: This is a cadvisor flag that was mistakenly registered with the Kubelet. 
Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed.) - - --docker-endpoint string     Default: `unix:///var/run/docker.sock` @@ -325,55 +291,6 @@ kubelet [flags] Use this for the `docker` endpoint to communicate with. This docker-specific flag only works when container-runtime is set to `docker`. - ---docker-env-metadata-whitelist string - - -a comma-separated list of environment variable keys that needs to be collected for docker containers (DEPRECATED: This is a cadvisor flag that was mistakenly registered with the Kubelet. Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed.) - - - ---docker-only - - -Only report docker containers in addition to root stats (DEPRECATED: This is a cadvisor flag that was mistakenly registered with the Kubelet. Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed.) - - - ---docker-root string     Default: `/var/lib/docker` - - -DEPRECATED: docker root is read from docker info (this is a fallback). - - - ---docker-tls - - -use TLS to connect to docker (DEPRECATED: This is a cadvisor flag that was mistakenly registered with the Kubelet. Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed.) - - - ---docker-tls-ca string     Default: `ca.pem` - - -path to trusted CA. (DEPRECATED: This is a cadvisor flag that was mistakenly registered with the Kubelet. Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed.) - - - ---docker-tls-cert string     Default: `cert.pem` - - -path to client certificate. (DEPRECATED: This is a cadvisor flag that was mistakenly registered with the Kubelet. Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed.) - - - ---docker-tls-key string     Default: `key.pem` - - -Path to private key. 
(DEPRECATED: This is a cadvisor flag that was mistakenly registered with the Kubelet. Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed.) - - --dynamic-config-dir string @@ -402,13 +319,6 @@ kubelet [flags] Enables server endpoints for log collection and local running of containers and commands. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.) - ---enable-load-reader - - -Whether to enable CPU load reader (DEPRECATED: This is a cadvisor flag that was mistakenly registered with the Kubelet. Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed.) - - --enable-server     Default: `true` @@ -438,24 +348,10 @@ kubelet [flags] ---event-storage-age-limit string     Default: `default=0` +--eviction-hard mapStringString     Default: `imagefs.available<15%,memory.available<100Mi,nodefs.available<10%` -Max length of time for which to store events (per type). Value is a comma separated list of key values, where the keys are event types (e.g.: `creation`, `oom`) or `default` and the value is a duration. Default is applied to all non-specified event types. (DEPRECATED: This is a cadvisor flag that was mistakenly registered with the Kubelet. Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed.) - - - ---event-storage-event-limit string     Default: `default=0` - - -Max number of events to store (per type). Value is a comma separated list of key values, where the keys are event types (e.g.: `creation`, `oom`) or `default` and the value is an integer. Default is applied to all non-specified event types. (DEPRECATED: This is a cadvisor flag that was mistakenly registered with the Kubelet. Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed.) 
- - - ---eviction-hard mapStringString     Default: `imagefs.available<15%,memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%` - - -A set of eviction thresholds (e.g. `memory.available<1Gi`) that if met would trigger a pod eviction. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.) +A set of eviction thresholds (e.g. `memory.available<1Gi`) that if met would trigger a pod eviction. On a Linux node, the default value also includes `nodefs.inodesFree<5%`. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.) @@ -528,6 +424,13 @@ kubelet [flags] If enabled, the kubelet will integrate with the kernel memcg notification to determine if memory eviction thresholds are crossed rather than polling. This flag will be removed in 1.23. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.) + +--experimental-log-sanitization bool + + +[Experimental] When enabled prevents logging of fields tagged as sensitive (passwords, keys, tokens). Runtime log sanitization may introduce significant computation overhead and therefore should not be enabled in production. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.) + + --experimental-mounter-path string     Default: `mount` @@ -548,8 +451,9 @@ kubelet [flags] A set of `key=value` pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
-APIPriorityAndFairness=true|false (ALPHA - default=false)
+APIPriorityAndFairness=true|false (BETA - default=true)
APIResponseCompression=true|false (BETA - default=true)
+APIServerIdentity=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AllowInsecureBackendProxy=true|false (BETA - default=true)
@@ -573,31 +477,40 @@ CSIMigrationOpenStack=true|false (BETA - default=false)
CSIMigrationOpenStackComplete=true|false (ALPHA - default=false)
CSIMigrationvSphere=true|false (BETA - default=false)
CSIMigrationvSphereComplete=true|false (BETA - default=false)
+CSIServiceAccountToken=true|false (ALPHA - default=false)
CSIStorageCapacity=true|false (ALPHA - default=false)
-CSIVolumeFSGroupPolicy=true|false (ALPHA - default=false)
-ConfigurableFSGroupPolicy=true|false (ALPHA - default=false)
+CSIVolumeFSGroupPolicy=true|false (BETA - default=true)
+ConfigurableFSGroupPolicy=true|false (BETA - default=true)
+CronJobControllerV2=true|false (ALPHA - default=false)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
-DefaultPodTopologySpread=true|false (ALPHA - default=false)
+DefaultPodTopologySpread=true|false (BETA - default=true)
DevicePlugins=true|false (BETA - default=true)
DisableAcceleratorUsageMetrics=true|false (BETA - default=true)
+DownwardAPIHugePages=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
+EfficientWatchResumption=true|false (ALPHA - default=false)
EndpointSlice=true|false (BETA - default=true)
+EndpointSliceNodeName=true|false (ALPHA - default=false)
EndpointSliceProxying=true|false (BETA - default=true)
+EndpointSliceTerminatingCondition=true|false (ALPHA - default=false)
EphemeralContainers=true|false (ALPHA - default=false)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
GenericEphemeralVolume=true|false (ALPHA - default=false)
+GracefulNodeShutdown=true|false (ALPHA - default=false)
+HPAContainerMetrics=true|false (ALPHA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
HugePageStorageMediumSize=true|false (BETA - default=true)
-HyperVContainer=true|false (ALPHA - default=false)
IPv6DualStack=true|false (ALPHA - default=false)
ImmutableEphemeralVolumes=true|false (BETA - default=true)
+KubeletCredentialProviders=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
LegacyNodeRoleBehavior=true|false (BETA - default=true)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
+MixedProtocolLBService=true|false (ALPHA - default=false)
NodeDisruptionExclusion=true|false (BETA - default=true)
NonPreemptingPriority=true|false (BETA - default=true)
PodDisruptionBudget=true|false (BETA - default=true)
@@ -605,31 +518,26 @@ PodOverhead=true|false (BETA - default=true)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
-RemoveSelfLink=true|false (ALPHA - default=false)
+RemoveSelfLink=true|false (BETA - default=true)
+RootCAConfigMap=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
RunAsGroup=true|false (BETA - default=true)
-RuntimeClass=true|false (BETA - default=true)
-SCTPSupport=true|false (BETA - default=true)
-SelectorIndex=true|false (BETA - default=true)
ServerSideApply=true|false (BETA - default=true)
-ServiceAccountIssuerDiscovery=true|false (ALPHA - default=false)
-ServiceAppProtocol=true|false (BETA - default=true)
+ServiceAccountIssuerDiscovery=true|false (BETA - default=true)
+ServiceLBNodePortControl=true|false (ALPHA - default=false)
ServiceNodeExclusion=true|false (BETA - default=true)
ServiceTopology=true|false (ALPHA - default=false)
-SetHostnameAsFQDN=true|false (ALPHA - default=false)
+SetHostnameAsFQDN=true|false (BETA - default=true)
+SizeMemoryBackedVolumes=true|false (ALPHA - default=false)
+StorageVersionAPI=true|false (ALPHA - default=false)
StorageVersionHash=true|false (BETA - default=true)
-SupportNodePidsLimit=true|false (BETA - default=true)
-SupportPodPidsLimit=true|false (BETA - default=true)
Sysctls=true|false (BETA - default=true)
TTLAfterFinished=true|false (ALPHA - default=false)
-TokenRequest=true|false (BETA - default=true)
-TokenRequestProjection=true|false (BETA - default=true)
TopologyManager=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (BETA - default=true)
-VolumeSnapshotDataSource=true|false (BETA - default=true)
WarningHeaders=true|false (BETA - default=true)
WinDSR=true|false (ALPHA - default=false)
-WinOverlay=true|false (ALPHA - default=false)
+WinOverlay=true|false (BETA - default=true)
WindowsEndpointSliceProxying=true|false (ALPHA - default=false)
(DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.) @@ -641,13 +549,6 @@ WindowsEndpointSliceProxying=true|false (ALPHA - default=false)
Duration between checking config files for new data. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.) - ---global-housekeeping-interval duration     Default: `1m0s` - - -Interval between global housekeepings. (DEPRECATED: This is a cadvisor flag that was mistakenly registered with the Kubelet. Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed.) - - --hairpin-mode string     Default: `promiscuous-bridge` @@ -697,6 +598,20 @@ WindowsEndpointSliceProxying=true|false (ALPHA - default=false)
Duration between checking HTTP for new data. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.) + +--image-credential-provider-bin-dir string + + +The path to the directory where credential provider plugin binaries are located. + + + +--image-credential-provider-config string + + +The path to the credential provider plugin config file. + + --image-gc-high-threshold int32     Default: 85 @@ -757,7 +672,7 @@ WindowsEndpointSliceProxying=true|false (ALPHA - default=false)
--kube-api-burst int32     Default: 10 - Burst to use while talking with kubernetes API server. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.) +Burst to use while talking with kubernetes API server. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.) @@ -778,7 +693,7 @@ WindowsEndpointSliceProxying=true|false (ALPHA - default=false)
--kube-reserved mapStringString     Default: <None> -A set of `=` (e.g. `cpu=200m,memory=500Mi,ephemeral-storage=1Gi`) pairs that describe resources reserved for kubernetes system components. Currently `cpu`, `memory` and local `ephemeral-storage` for root file system are supported. See http://kubernetes.io/docs/user-guide/compute-resources for more detail. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.) +A set of `=` (e.g. `cpu=200m,memory=500Mi,ephemeral-storage=1Gi,pid='100'`) pairs that describe resources reserved for kubernetes system components. Currently `cpu`, `memory` and local `ephemeral-storage` for root file system are supported. See http://kubernetes.io/docs/user-guide/compute-resources for more detail. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.) @@ -816,13 +731,6 @@ WindowsEndpointSliceProxying=true|false (ALPHA - default=false)
When logging hits line `:`, emit a stack trace. - ---log-cadvisor-usage - - -Whether to log the usage of the cAdvisor container (DEPRECATED: This is a cadvisor flag that was mistakenly registered with the Kubelet. Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed.) - - --log-dir string @@ -855,7 +763,7 @@ WindowsEndpointSliceProxying=true|false (ALPHA - default=false)
--logging-format string     Default: `text` -Sets the log format. Permitted formats: `text`, `json`.\nNon-default formats don't honor these flags: `--add-dir-header`, `--alsologtostderr`, `--log-backtrace-at`, `--log_dir`, `--log-file`, `--log-file-max-size`, `--logtostderr`, `--skip_headers`, `--skip_log_headers`, `--stderrthreshold`, `--log-flush-frequency`.\nNon-default choices are currently alpha and subject to change without warning. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.) +Sets the log format. Permitted formats: `text`, `json`.\nNon-default formats don't honor these flags: `--add-dir-header`, `--alsologtostderr`, `--log-backtrace-at`, `--log-dir`, `--log-file`, `--log-file-max-size`, `--logtostderr`, `--skip_headers`, `--skip_log_headers`, `--stderrthreshold`, `--log-flush-frequency`.\nNon-default choices are currently alpha and subject to change without warning. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.) @@ -865,13 +773,6 @@ WindowsEndpointSliceProxying=true|false (ALPHA - default=false)
log to standard error instead of files. - ---machine-id-file string     Default: `/etc/machine-id,/var/lib/dbus/machine-id` - - -Comma-separated list of files to check for `machine-id`. Use the first one that exists. (DEPRECATED: This is a cadvisor flag that was mistakenly registered with the Kubelet. Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed.) - - --make-iptables-util-chains     Default: `true` @@ -990,6 +891,14 @@ WindowsEndpointSliceProxying=true|false (ALPHA - default=false)
Traffic to IPs outside this range will use IP masquerade. Set to `0.0.0.0/0` to never masquerade. (DEPRECATED: will be removed in a future version) + +--one-output + + +If true, only write logs to their native severity level (vs also writing to each lower severity level). + + + --oom-score-adj int32     Default: -999 @@ -1082,7 +991,7 @@ WindowsEndpointSliceProxying=true|false (ALPHA - default=false)
---register-node +--register-node     Default: `true` Register the node with the API server. If `--kubeconfig` is not provided, this flag is irrelevant, as the Kubelet won't have an API server to register with. Default to `true`. @@ -1096,7 +1005,7 @@ WindowsEndpointSliceProxying=true|false (ALPHA - default=false)
---register-with-taints []api.Taint +--register-with-taints mapStringString Register the node with the given list of taints (comma separated `=:`). No-op if `--register-node` is `false`. @@ -1202,61 +1111,12 @@ WindowsEndpointSliceProxying=true|false (ALPHA - default=false)
---stderrthreshold severity     Default: 2 +--stderrthreshold int     Default: 2 logs at or above this threshold go to stderr. - ---storage-driver-buffer-duration duration     Default: `1m0s` - - -Writes in the storage driver will be buffered for this duration, and committed to the non memory backends as a single transaction. (DEPRECATED: This is a cadvisor flag that was mistakenly registered with the Kubelet. Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed.) - - - ---storage-driver-db string     Default: `cadvisor` - - -Database name. (DEPRECATED: This is a cadvisor flag that was mistakenly registered with the Kubelet. Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed.) - - - ---storage-driver-host string     Default: `localhost:8086` - - -Database `host:port`. (DEPRECATED: This is a cadvisor flag that was mistakenly registered with the Kubelet. Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed.) - - - ---storage-driver-password string     Default: `root` - - -Database password. (DEPRECATED: This is a cadvisor flag that was mistakenly registered with the Kubelet. Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed.) - - - ---storage-driver-secure - - -Use secure connection with database (DEPRECATED: This is a cadvisor flag that was mistakenly registered with the Kubelet. Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed.) - - - ---storage-driver-table string     Default: `stats` - - -Table name. (DEPRECATED: This is a cadvisor flag that was mistakenly registered with the Kubelet. Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed.) - - - ---storage-driver-user string     Default: `root` - - -Database username. 
(DEPRECATED: This is a cadvisor flag that was mistakenly registered with the Kubelet. Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed.) - - --streaming-connection-idle-timeout duration     Default: `4h0m0s` @@ -1282,7 +1142,7 @@ WindowsEndpointSliceProxying=true|false (ALPHA - default=false)
--system-reserved mapStringString     Default: \ -A set of `=` (e.g. `cpu=200m,memory=500Mi,ephemeral-storage=1Gi`) pairs that describe resources reserved for non-kubernetes components. Currently only `cpu` and `memory` are supported. See http://kubernetes.io/docs/user-guide/compute-resources for more detail. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.) +A set of `=` (e.g. `cpu=200m,memory=500Mi,ephemeral-storage=1Gi,pid='100'`) pairs that describe resources reserved for non-kubernetes components. Currently only `cpu` and `memory` are supported. See http://kubernetes.io/docs/user-guide/compute-resources for more detail. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.) @@ -1331,6 +1191,13 @@ Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_R Topology Manager policy to use. Possible values: `none`, `best-effort`, `restricted`, `single-numa-node`. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's `--config` flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.) + +--topology-manager-scope string     Default: `container` + + +Scope to which topology hints are applied. Topology Manager collects hints from Hint Providers and applies them to defined scope to ensure the pod admission. Possible values: 'container' (default), 'pod'. (default "container") (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.) 
+ + -v, --v Level diff --git a/content/en/docs/reference/glossary/podpreset.md b/content/en/docs/reference/glossary/podpreset.md deleted file mode 100755 index f63187ff71..0000000000 --- a/content/en/docs/reference/glossary/podpreset.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: PodPreset -id: podpreset -date: 2018-04-12 -full_link: -short_description: > - An API object that injects information such as secrets, volume mounts, and environment variables into pods at creation time. - -aka: -tags: -- operation ---- - An API object that injects information such as secrets, volume mounts, and environment variables into {{< glossary_tooltip text="Pods" term_id="pod" >}} at creation time. - - - -This object chooses the Pods to inject information into using standard selectors. This allows the podspec definitions to be nonspecific, decoupling the podspec from environment specific configuration. - diff --git a/content/en/docs/reference/kubectl/cheatsheet.md b/content/en/docs/reference/kubectl/cheatsheet.md index ab4fde5469..2ed62c2e2a 100644 --- a/content/en/docs/reference/kubectl/cheatsheet.md +++ b/content/en/docs/reference/kubectl/cheatsheet.md @@ -194,6 +194,9 @@ kubectl get pods --show-labels JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}' \ && kubectl get nodes -o jsonpath="$JSONPATH" | grep "Ready=True" +# Output decoded secrets without external tools +kubectl get secret ${secret_name} -o go-template='{{range $k,$v := .data}}{{$k}}={{$v|base64decode}}{{"\n"}}{{end}}' + # List all Secrets currently in use by a pod kubectl get pods -o json | jq '.items[].spec.containers[].env[]?.valueFrom.secretKeyRef.name' | grep -v null | sort | uniq @@ -314,6 +317,7 @@ kubectl exec my-pod -- ls / # Run command in existing po kubectl exec --stdin --tty my-pod -- /bin/sh # Interactive shell access to a running pod (1 container case) kubectl exec my-pod -c my-container -- ls / # Run command in existing pod (multi-container case) 
kubectl top pod POD_NAME --containers # Show metrics for a given pod and its containers +kubectl top pod POD_NAME --sort-by=cpu # Show metrics for a given pod and sort it by 'cpu' or 'memory' ``` ## Interacting with Nodes and cluster @@ -391,6 +395,7 @@ Verbosity | Description `--v=2` | Useful steady state information about the service and important log messages that may correlate to significant changes in the system. This is the recommended default log level for most systems. `--v=3` | Extended information about changes. `--v=4` | Debug level verbosity. +`--v=5` | Trace level verbosity. `--v=6` | Display requested resources. `--v=7` | Display HTTP request headers. `--v=8` | Display HTTP request contents. diff --git a/content/en/docs/reference/kubectl/conventions.md b/content/en/docs/reference/kubectl/conventions.md index 8b5ab20cdb..c8016453cd 100644 --- a/content/en/docs/reference/kubectl/conventions.md +++ b/content/en/docs/reference/kubectl/conventions.md @@ -37,23 +37,22 @@ All `kubectl run` generators are deprecated. See the Kubernetes v1.17 documentat #### Generators You can generate the following resources with a kubectl command, `kubectl create --dry-run=client -o yaml`: -``` - clusterrole Create a ClusterRole. - clusterrolebinding Create a ClusterRoleBinding for a particular ClusterRole. - configmap Create a configmap from a local file, directory or literal value. - cronjob Create a cronjob with the specified name. - deployment Create a deployment with the specified name. - job Create a job with the specified name. - namespace Create a namespace with the specified name. - poddisruptionbudget Create a pod disruption budget with the specified name. - priorityclass Create a priorityclass with the specified name. - quota Create a quota with the specified name. - role Create a role with single rule. - rolebinding Create a RoleBinding for a particular Role or ClusterRole. - secret Create a secret using specified subcommand. 
- service Create a service using specified subcommand. - serviceaccount Create a service account with the specified name. -``` + +* `clusterrole`: Create a ClusterRole. +* `clusterrolebinding`: Create a ClusterRoleBinding for a particular ClusterRole. +* `configmap`: Create a ConfigMap from a local file, directory or literal value. +* `cronjob`: Create a CronJob with the specified name. +* `deployment`: Create a Deployment with the specified name. +* `job`: Create a Job with the specified name. +* `namespace`: Create a Namespace with the specified name. +* `poddisruptionbudget`: Create a PodDisruptionBudget with the specified name. +* `priorityclass`: Create a PriorityClass with the specified name. +* `quota`: Create a Quota with the specified name. +* `role`: Create a Role with single rule. +* `rolebinding`: Create a RoleBinding for a particular Role or ClusterRole. +* `secret`: Create a Secret using specified subcommand. +* `service`: Create a Service using specified subcommand. +* `serviceaccount`: Create a ServiceAccount with the specified name. 
### `kubectl apply` diff --git a/content/en/docs/reference/kubectl/docker-cli-to-kubectl.md b/content/en/docs/reference/kubectl/docker-cli-to-kubectl.md index 5afdebd557..6c214513a0 100644 --- a/content/en/docs/reference/kubectl/docker-cli-to-kubectl.md +++ b/content/en/docs/reference/kubectl/docker-cli-to-kubectl.md @@ -37,16 +37,11 @@ kubectl: # start the pod running nginx kubectl create deployment --image=nginx nginx-app ``` - -```shell -# add env to nginx-app -kubectl set env deployment/nginx-app DOMAIN=cluster -``` ``` deployment.apps/nginx-app created ``` -``` +```shell # add env to nginx-app kubectl set env deployment/nginx-app DOMAIN=cluster ``` diff --git a/content/en/docs/reference/scheduling/config.md b/content/en/docs/reference/scheduling/config.md index 0dca862fb9..7754d7cb7d 100644 --- a/content/en/docs/reference/scheduling/config.md +++ b/content/en/docs/reference/scheduling/config.md @@ -108,7 +108,7 @@ extension points: - `SelectorSpread`: Favors spreading across nodes for Pods that belong to {{< glossary_tooltip text="Services" term_id="service" >}}, {{< glossary_tooltip text="ReplicaSets" term_id="replica-set" >}} and - {{< glossary_tooltip text="StatefulSets" term_id="statefulset" >}} + {{< glossary_tooltip text="StatefulSets" term_id="statefulset" >}}. Extension points: `PreScore`, `Score`. - `ImageLocality`: Favors nodes that already have the container images that the Pod runs. diff --git a/content/en/docs/reference/using-api/client-libraries.md b/content/en/docs/reference/using-api/client-libraries.md index c14f0fa9c7..96589c6a55 100644 --- a/content/en/docs/reference/using-api/client-libraries.md +++ b/content/en/docs/reference/using-api/client-libraries.md @@ -74,6 +74,7 @@ their authors, not the Kubernetes team. 
| Rust | [github.com/ynqa/kubernetes-rust](https://github.com/ynqa/kubernetes-rust) | | Scala | [github.com/doriordan/skuber](https://github.com/doriordan/skuber) | | Scala | [github.com/joan38/kubernetes-client](https://github.com/joan38/kubernetes-client) | +| Swift | [github.com/swiftkube/client](https://github.com/swiftkube/client) | | DotNet | [github.com/tonnyeremin/kubernetes_gen](https://github.com/tonnyeremin/kubernetes_gen) | | DotNet (RestSharp) | [github.com/masroorhasan/Kubernetes.DotNet](https://github.com/masroorhasan/Kubernetes.DotNet) | | Elixir | [github.com/obmarg/kazan](https://github.com/obmarg/kazan/) | diff --git a/content/en/docs/reference/using-api/server-side-apply.md b/content/en/docs/reference/using-api/server-side-apply.md index c945765d76..c281eb9400 100644 --- a/content/en/docs/reference/using-api/server-side-apply.md +++ b/content/en/docs/reference/using-api/server-side-apply.md @@ -71,7 +71,7 @@ the appliers, results in a conflict. Shared field owners may give up ownership of a field by removing it from their configuration. Field management is stored in a`managedFields` field that is part of an object's -[`metadata`](/docs/reference/generated/kubernetes-api/{{< latest-version >}}/#objectmeta-v1-meta). +[`metadata`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#objectmeta-v1-meta). 
A simple example of an object created by Server Side Apply could look like this: diff --git a/content/en/docs/setup/production-environment/container-runtimes.md b/content/en/docs/setup/production-environment/container-runtimes.md index 8ba5300375..15bd4a131d 100644 --- a/content/en/docs/setup/production-environment/container-runtimes.md +++ b/content/en/docs/setup/production-environment/container-runtimes.md @@ -125,6 +125,62 @@ sudo mkdir -p /etc/containerd sudo containerd config default | sudo tee /etc/containerd/config.toml ``` +```shell +# Restart containerd +sudo systemctl restart containerd +``` +{{% /tab %}} +{{% tab name="Ubuntu 18.04/20.04" %}} + +```shell +# (Install containerd) +sudo apt-get update && sudo apt-get install -y containerd +``` + +```shell +# Configure containerd +sudo mkdir -p /etc/containerd +sudo containerd config default | sudo tee /etc/containerd/config.toml +``` + +```shell +# Restart containerd +sudo systemctl restart containerd +``` +{{% /tab %}} +{{% tab name="Debian 9+" %}} + +```shell +# (Install containerd) +## Set up the repository +### Install packages to allow apt to use a repository over HTTPS +sudo apt-get update && sudo apt-get install -y apt-transport-https ca-certificates curl software-properties-common +``` + +```shell +## Add Docker's official GPG key +curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key --keyring /etc/apt/trusted.gpg.d/docker.gpg add - +``` + +```shell +## Add Docker apt repository. 
+sudo add-apt-repository \ + "deb [arch=amd64] https://download.docker.com/linux/debian \ + $(lsb_release -cs) \ + stable" +``` + +```shell +## Install containerd +sudo apt-get update && sudo apt-get install -y containerd.io +``` + +```shell +# Set default containerd configuration +sudo mkdir -p /etc/containerd +containerd config default | sudo tee /etc/containerd/config.toml +``` + ```shell # Restart containerd sudo systemctl restart containerd @@ -154,7 +210,7 @@ sudo yum update -y && sudo yum install -y containerd.io ```shell ## Configure containerd sudo mkdir -p /etc/containerd -sudo containerd config default > /etc/containerd/config.toml +sudo containerd config default | sudo tee /etc/containerd/config.toml ``` ```shell @@ -212,12 +268,18 @@ Use the following commands to install CRI-O on your system: {{< note >}} The CRI-O major and minor versions must match the Kubernetes major and minor versions. -For more information, see the [CRI-O compatibility matrix](https://github.com/cri-o/cri-o). +For more information, see the [CRI-O compatibility matrix](https://github.com/cri-o/cri-o#compatibility-matrix-cri-o--kubernetes). {{< /note >}} Install and configure prerequisites: ```shell +# Create the .conf file to load the modules at bootup +cat < Then, set `$VERSION` to the CRI-O version that matches your Kubernetes version. -For instance, if you want to install CRI-O 1.18, set `VERSION=1.18`. +For instance, if you want to install CRI-O 1.20, set `VERSION=1.20`. You can pin your installation to a specific release. -To install version 1.18.3, set `VERSION=1.18:1.18.3`. +To install version 1.20.0, set `VERSION=1.20:1.20.0`.
Then run @@ -280,9 +342,9 @@ To install on the following operating systems, set the environment variable `OS`
Then, set `$VERSION` to the CRI-O version that matches your Kubernetes version. -For instance, if you want to install CRI-O 1.18, set `VERSION=1.18`. +For instance, if you want to install CRI-O 1.20, set `VERSION=1.20`. You can pin your installation to a specific release. -To install version 1.18.3, set `VERSION=1.18:1.18.3`. +To install version 1.20.0, set `VERSION=1.20:1.20.0`.
Then run @@ -314,9 +376,9 @@ To install on the following operating systems, set the environment variable `OS`
Then, set `$VERSION` to the CRI-O version that matches your Kubernetes version. -For instance, if you want to install CRI-O 1.18, set `VERSION=1.18`. +For instance, if you want to install CRI-O 1.20, set `VERSION=1.20`. You can pin your installation to a specific release. -To install version 1.18.3, set `VERSION=1.18:1.18.3`. +To install version 1.20.0, set `VERSION=1.20:1.20.0`.
Then run @@ -337,7 +399,7 @@ sudo zypper install cri-o {{% tab name="Fedora" %}} Set `$VERSION` to the CRI-O version that matches your Kubernetes version. -For instance, if you want to install CRI-O 1.18, `VERSION=1.18`. +For instance, if you want to install CRI-O 1.20, `VERSION=1.20`. You can find available versions with: ```shell @@ -361,10 +423,26 @@ sudo systemctl daemon-reload sudo systemctl start crio ``` -Refer to the [CRI-O installation guide](https://github.com/kubernetes-sigs/cri-o#getting-started) +Refer to the [CRI-O installation guide](https://github.com/cri-o/cri-o/blob/master/install.md) for more information. +#### cgroup driver + +CRI-O uses the systemd cgroup driver per default. To switch to the `cgroupfs` +cgroup driver, either edit `/etc/crio/crio.conf` or place a drop-in +configuration in `/etc/crio/crio.conf.d/02-cgroup-manager.conf`, for example: + +```toml +[crio.runtime] +conmon_cgroup = "pod" +cgroup_manager = "cgroupfs" +``` + +Please also note the changed `conmon_cgroup`, which has to be set to the value +`pod` when using CRI-O with `cgroupfs`. It is generally necessary to keep the +cgroup driver configuration of the kubelet (usually done via kubeadm) and CRI-O +in sync. ### Docker @@ -407,6 +485,11 @@ sudo apt-get update && sudo apt-get install -y \ docker-ce-cli=5:19.03.11~3-0~ubuntu-$(lsb_release -cs) ``` +```shell +## Create /etc/docker +sudo mkdir /etc/docker +``` + ```shell # Set up the Docker daemon cat <}} -Since `--cgroup-driver` flag has been deprecated by kubelet, if you have that in `/var/lib/kubelet/kubeadm-flags.env` +Since `--cgroup-driver` flag has been deprecated by the kubelet, if you have that in `/var/lib/kubelet/kubeadm-flags.env` or `/etc/default/kubelet`(`/etc/sysconfig/kubelet` for RPMs), please remove it and use the KubeletConfiguration instead (stored in `/var/lib/kubelet/config.yaml` by default). 
{{< /note >}} diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md index 2e347b0ef6..ded4250787 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md @@ -102,8 +102,7 @@ This may be caused by a number of problems. The most common are: 1. Install Docker again following instructions [here](/docs/setup/production-environment/container-runtimes/#docker). - 1. Change the kubelet config to match the Docker cgroup driver manually, you can refer to - [Configure cgroup driver used by kubelet on Master Node](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#configure-cgroup-driver-used-by-kubelet-on-master-node) + 1. Change the kubelet config to match the Docker cgroup driver manually, you can refer to [Configure cgroup driver used by kubelet on control-plane node](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#configure-cgroup-driver-used-by-kubelet-on-control-plane-node) - control plane Docker containers are crashlooping or hanging. You can check this by running `docker ps` and investigating each container by running `docker logs`. diff --git a/content/en/docs/setup/release/version-skew-policy.md b/content/en/docs/setup/release/version-skew-policy.md index 0cd1ab597d..68ea7aef8a 100644 --- a/content/en/docs/setup/release/version-skew-policy.md +++ b/content/en/docs/setup/release/version-skew-policy.md @@ -140,6 +140,11 @@ Pre-requisites: Optionally upgrade `kubelet` instances to **{{< skew latestVersion >}}** (or they can be left at **{{< skew prevMinorVersion >}}** or **{{< skew oldestMinorVersion >}}**) +{{< note >}} +Before performing a minor version `kubelet` upgrade, [drain](/docs/tasks/administer-cluster/safely-drain-node/) pods from that node. 
+In-place minor version `kubelet` upgrades are not supported. +{{< /note >}} + +{{< warning >}} Running a cluster with `kubelet` instances that are persistently two minor versions behind `kube-apiserver` is not recommended: diff --git a/content/en/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md b/content/en/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md index 7d1db15451..e32a6ba02c 100644 --- a/content/en/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md +++ b/content/en/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md @@ -160,7 +160,7 @@ for the pathnames of the certificate files. You need to change these to the actu of certificate files in your environment. Sometimes you may want to use Base64-encoded data embedded here instead of separate -certificate files; in that case you need add the suffix `-data` to the keys, for example, +certificate files; in that case you need to add the suffix `-data` to the keys, for example, `certificate-authority-data`, `client-certificate-data`, `client-key-data`. Each context is a triple (cluster, user, namespace). For example, the diff --git a/content/en/docs/tasks/access-application-cluster/connecting-frontend-backend.md b/content/en/docs/tasks/access-application-cluster/connecting-frontend-backend.md index bd54c71cf6..9573a1a68d 100644 --- a/content/en/docs/tasks/access-application-cluster/connecting-frontend-backend.md +++ b/content/en/docs/tasks/access-application-cluster/connecting-frontend-backend.md @@ -1,22 +1,24 @@ --- -title: Connect a Front End to a Back End Using a Service +title: Connect a Frontend to a Backend Using Services content_type: tutorial weight: 70 --- -This task shows how to create a frontend and a backend -microservice. The backend microservice is a hello greeter. The -frontend and backend are connected using a Kubernetes -{{< glossary_tooltip term_id="service" >}} object. 
+This task shows how to create a _frontend_ and a _backend_ microservice. The backend +microservice is a hello greeter. The frontend exposes the backend using nginx and a +Kubernetes {{< glossary_tooltip term_id="service" >}} object. ## {{% heading "objectives" %}} -* Create and run a microservice using a {{< glossary_tooltip term_id="deployment" >}} object. -* Route traffic to the backend using a frontend. -* Use a Service object to connect the frontend application to the - backend application. +* Create and run a sample `hello` backend microservice using a + {{< glossary_tooltip term_id="deployment" >}} object. +* Use a Service object to send traffic to the backend microservice's multiple replicas. +* Create and run a `nginx` frontend microservice, also using a Deployment object. +* Configure the frontend microservice to send traffic to the backend microservice. +* Use a Service object of `type=LoadBalancer` to expose the frontend microservice + outside the cluster. ## {{% heading "prerequisites" %}} @@ -34,24 +36,24 @@ require a supported environment. If your environment does not support this, you The backend is a simple hello greeter microservice. 
Here is the configuration file for the backend Deployment: -{{< codenew file="service/access/hello.yaml" >}} +{{< codenew file="service/access/backend-deployment.yaml" >}} Create the backend Deployment: ```shell -kubectl apply -f https://k8s.io/examples/service/access/hello.yaml +kubectl apply -f https://k8s.io/examples/service/access/backend-deployment.yaml ``` View information about the backend Deployment: ```shell -kubectl describe deployment hello +kubectl describe deployment backend ``` The output is similar to this: ``` -Name: hello +Name: backend Namespace: default CreationTimestamp: Mon, 24 Oct 2016 14:21:02 -0700 Labels: app=hello @@ -59,7 +61,7 @@ Labels: app=hello track=stable Annotations: deployment.kubernetes.io/revision=1 Selector: app=hello,tier=backend,track=stable -Replicas: 7 desired | 7 updated | 7 total | 7 available | 0 unavailable +Replicas: 3 desired | 3 updated | 3 total | 3 available | 0 unavailable StrategyType: RollingUpdate MinReadySeconds: 0 RollingUpdateStrategy: 1 max unavailable, 1 max surge @@ -80,14 +82,14 @@ Conditions: Available True MinimumReplicasAvailable Progressing True NewReplicaSetAvailable OldReplicaSets: -NewReplicaSet: hello-3621623197 (7/7 replicas created) +NewReplicaSet: hello-3621623197 (3/3 replicas created) Events: ... ``` -## Creating the backend Service object +## Creating the `hello` Service object -The key to connecting a frontend to a backend is the backend +The key to sending requests from a frontend to a backend is the backend Service. A Service creates a persistent IP address and DNS name entry so that the backend microservice can always be reached. A Service uses {{< glossary_tooltip text="selectors" term_id="selector" >}} to find @@ -95,42 +97,51 @@ the Pods that it routes traffic to. 
First, explore the Service configuration file: -{{< codenew file="service/access/hello-service.yaml" >}} +{{< codenew file="service/access/backend-service.yaml" >}} -In the configuration file, you can see that the Service routes traffic to Pods -that have the labels `app: hello` and `tier: backend`. +In the configuration file, you can see that the Service, named `hello` routes +traffic to Pods that have the labels `app: hello` and `tier: backend`. -Create the `hello` Service: +Create the backend Service: ```shell -kubectl apply -f https://k8s.io/examples/service/access/hello-service.yaml +kubectl apply -f https://k8s.io/examples/service/access/backend-service.yaml ``` -At this point, you have a backend Deployment running, and you have a -Service that can route traffic to it. +At this point, you have a `backend` Deployment running three replicas of your `hello` +application, and you have a Service that can route traffic to them. However, this +service is neither available nor resolvable outside the cluster. ## Creating the frontend -Now that you have your backend, you can create a frontend that connects to the backend. -The frontend connects to the backend worker Pods by using the DNS name -given to the backend Service. The DNS name is "hello", which is the value -of the `name` field in the preceding Service configuration file. +Now that you have your backend running, you can create a frontend that is accessible +outside the cluster, and connects to the backend by proxying requests to it. -The Pods in the frontend Deployment run an nginx image that is configured -to find the hello backend Service. Here is the nginx configuration file: +The frontend sends requests to the backend worker Pods by using the DNS name +given to the backend Service. The DNS name is `hello`, which is the value +of the `name` field in the `examples/service/access/backend-service.yaml` +configuration file. 
-{{< codenew file="service/access/frontend.conf" >}} +The Pods in the frontend Deployment run a nginx image that is configured +to proxy requests to the `hello` backend Service. Here is the nginx configuration file: -Similar to the backend, the frontend has a Deployment and a Service. The -configuration for the Service has `type: LoadBalancer`, which means that -the Service uses the default load balancer of your cloud provider. +{{< codenew file="service/access/frontend-nginx.conf" >}} -{{< codenew file="service/access/frontend.yaml" >}} +Similar to the backend, the frontend has a Deployment and a Service. An important +difference to notice between the backend and frontend services, is that the +configuration for the frontend Service has `type: LoadBalancer`, which means that +the Service uses a load balancer provisioned by your cloud provider and will be +accessible from outside the cluster. + +{{< codenew file="service/access/frontend-service.yaml" >}} + +{{< codenew file="service/access/frontend-deployment.yaml" >}} Create the frontend Deployment and Service: ```shell -kubectl apply -f https://k8s.io/examples/service/access/frontend.yaml +kubectl apply -f https://k8s.io/examples/service/access/frontend-deployment.yaml +kubectl apply -f https://k8s.io/examples/service/access/frontend-service.yaml ``` The output verifies that both resources were created: @@ -178,7 +189,7 @@ cluster. ## Send traffic through the frontend -The frontend and backends are now connected. You can hit the endpoint +The frontend and backend are now connected. You can hit the endpoint by using the curl command on the external IP of your frontend Service. 
```shell @@ -196,17 +207,17 @@ The output shows the message generated by the backend: To delete the Services, enter this command: ```shell -kubectl delete services frontend hello +kubectl delete services frontend backend ``` To delete the Deployments, the ReplicaSets and the Pods that are running the backend and frontend applications, enter this command: ```shell -kubectl delete deployment frontend hello +kubectl delete deployment frontend backend ``` ## {{% heading "whatsnext" %}} * Learn more about [Services](/docs/concepts/services-networking/service/) * Learn more about [ConfigMaps](/docs/tasks/configure-pod-container/configure-pod-configmap/) - +* Learn more about [DNS for Service and Pods](/docs/concepts/services-networking/dns-pod-service/) diff --git a/content/en/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md b/content/en/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md index 3eceb4f6d2..62ddbdcbbc 100644 --- a/content/en/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md +++ b/content/en/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md @@ -158,10 +158,16 @@ for database debugging. Any of the above commands works. The output is similar to this: ``` - I0710 14:43:38.274550 3655 portforward.go:225] Forwarding from 127.0.0.1:7000 -> 6379 - I0710 14:43:38.274797 3655 portforward.go:225] Forwarding from [::1]:7000 -> 6379 + Forwarding from 127.0.0.1:7000 -> 6379 + Forwarding from [::1]:7000 -> 6379 ``` +{{< note >}} + +`kubectl port-forward` does not return. To continue with the exercises, you will need to open another terminal. + +{{< /note >}} + 2. Start the Redis command line interface: ```shell @@ -180,7 +186,23 @@ for database debugging. 
PONG ``` +### Optionally let _kubectl_ choose the local port {#let-kubectl-choose-local-port} +If you don't need a specific local port, you can let `kubectl` choose and allocate +the local port and thus relieve you from having to manage local port conflicts, with +the slightly simpler syntax: + +```shell +kubectl port-forward deployment/redis-master :6379 +``` + +The `kubectl` tool finds a local port number that is not in use (avoiding low port numbers, +because these might be used by other applications). The output is similar to: + +``` +Forwarding from 127.0.0.1:62162 -> 6379 +Forwarding from [::1]:62162 -> 6379 +``` @@ -203,4 +225,3 @@ The support for UDP protocol is tracked in ## {{% heading "whatsnext" %}} Learn more about [kubectl port-forward](/docs/reference/generated/kubectl/kubectl-commands/#port-forward). - diff --git a/content/en/docs/tasks/administer-cluster/developing-cloud-controller-manager.md b/content/en/docs/tasks/administer-cluster/developing-cloud-controller-manager.md index 0f6579d915..a3732c68de 100644 --- a/content/en/docs/tasks/administer-cluster/developing-cloud-controller-manager.md +++ b/content/en/docs/tasks/administer-cluster/developing-cloud-controller-manager.md @@ -30,7 +30,7 @@ The Kubernetes project provides skeleton cloud-controller-manager code with Go i To build an out-of-tree cloud-controller-manager for your cloud: 1. Create a go package with an implementation that satisfies [cloudprovider.Interface](https://github.com/kubernetes/cloud-provider/blob/master/cloud.go). -2. Use [`main.go` in cloud-controller-manager](https://github.com/kubernetes/kubernetes/blob/master/cmd/cloud-controller-manager/controller-manager.go) from Kubernetes core as a template for your `main.go`. As mentioned above, the only difference should be the cloud package that will be imported. +2. 
Use [`main.go` in cloud-controller-manager](https://github.com/kubernetes/kubernetes/blob/master/cmd/cloud-controller-manager/main.go) from Kubernetes core as a template for your `main.go`. As mentioned above, the only difference should be the cloud package that will be imported. 3. Import your cloud package in `main.go`, ensure your package has an `init` block to run [`cloudprovider.RegisterCloudProvider`](https://github.com/kubernetes/cloud-provider/blob/master/plugins.go). Many cloud providers publish their controller manager code as open source. If you are creating diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md index 0164d5aea1..5af9d27b82 100644 --- a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md +++ b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md @@ -231,6 +231,14 @@ without compromising the minimum required capacity for running your workloads. {{% /tab %}} {{< /tabs >}} +### Call "kubeadm upgrade" + +- For worker nodes this upgrades the local kubelet configuration: + + ```shell + sudo kubeadm upgrade node + ``` + ### Drain the node - Prepare the node for maintenance by marking it unschedulable and evicting the workloads: @@ -240,14 +248,6 @@ without compromising the minimum required capacity for running your workloads. 
kubectl drain --ignore-daemonsets ``` -### Call "kubeadm upgrade" - -- For worker nodes this upgrades the local kubelet configuration: - - ```shell - sudo kubeadm upgrade node - ``` - ### Upgrade kubelet and kubectl - Upgrade the kubelet and kubectl: diff --git a/content/en/docs/tasks/administer-cluster/limit-storage-consumption.md b/content/en/docs/tasks/administer-cluster/limit-storage-consumption.md index 1347dc85a7..89f130a010 100644 --- a/content/en/docs/tasks/administer-cluster/limit-storage-consumption.md +++ b/content/en/docs/tasks/administer-cluster/limit-storage-consumption.md @@ -12,7 +12,6 @@ The following resources are used in the demonstration: [ResourceQuota](/docs/con and [PersistentVolumeClaim](/docs/concepts/storage/persistent-volumes/). - ## {{% heading "prerequisites" %}} @@ -41,7 +40,7 @@ the values set by the admin. In this example, a PVC requesting 10Gi of storage would be rejected because it exceeds the 2Gi max. -``` +```yaml apiVersion: v1 kind: LimitRange metadata: @@ -67,7 +66,7 @@ In this example, a 6th PVC in the namespace would be rejected because it exceeds a 5Gi maximum quota when combined with the 2Gi max limit above, cannot have 3 PVCs where each has 2Gi. That would be 6Gi requested for a namespace capped at 5Gi. -``` +```yaml apiVersion: v1 kind: ResourceQuota metadata: @@ -78,8 +77,6 @@ spec: requests.storage: "5Gi" ``` - - ## Summary @@ -87,7 +84,3 @@ spec: A limit range can put a ceiling on how much storage is requested while a resource quota can effectively cap the storage consumed by a namespace through claim counts and cumulative storage capacity. The allows a cluster-admin to plan their cluster's storage budget without risk of any one project going over their allotment. 
- - - - diff --git a/content/en/docs/tasks/administer-cluster/namespaces.md b/content/en/docs/tasks/administer-cluster/namespaces.md index 00dec41774..08b2868806 100644 --- a/content/en/docs/tasks/administer-cluster/namespaces.md +++ b/content/en/docs/tasks/administer-cluster/namespaces.md @@ -13,7 +13,7 @@ This page shows how to view, work in, and delete {{< glossary_tooltip text="name ## {{% heading "prerequisites" %}} * Have an [existing Kubernetes cluster](/docs/setup/). -2. You have a basic understanding of Kubernetes {{< glossary_tooltip text="Pods" term_id="pod" >}}, {{< glossary_tooltip term_id="service" text="Services" >}}, and {{< glossary_tooltip text="Deployments" term_id="deployment" >}}. +* You have a basic understanding of Kubernetes {{< glossary_tooltip text="Pods" term_id="pod" >}}, {{< glossary_tooltip term_id="service" text="Services" >}}, and {{< glossary_tooltip text="Deployments" term_id="deployment" >}}. diff --git a/content/en/docs/tasks/administer-cluster/nodelocaldns.md b/content/en/docs/tasks/administer-cluster/nodelocaldns.md index 8aa6b9249b..b0f0596599 100644 --- a/content/en/docs/tasks/administer-cluster/nodelocaldns.md +++ b/content/en/docs/tasks/administer-cluster/nodelocaldns.md @@ -49,12 +49,14 @@ This is the path followed by DNS Queries after NodeLocal DNSCache is enabled: {{< figure src="/images/docs/nodelocaldns.svg" alt="NodeLocal DNSCache flow" title="Nodelocal DNSCache flow" caption="This image shows how NodeLocal DNSCache handles DNS queries." >}} ## Configuration -{{< note >}} The local listen IP address for NodeLocal DNSCache can be any IP in the 169.254.20.0/16 space or any other IP address that can be guaranteed to not collide with any existing IP. This document uses 169.254.20.10 as an example. +{{< note >}} The local listen IP address for NodeLocal DNSCache can be any address that can be guaranteed to not collide with any existing IP in your cluster. 
It's recommended to use an address with a local scope, for example, from the link-local range 169.254.0.0/16 for IPv4 or from the Unique Local Address range in IPv6 fd00::/8. {{< /note >}} This feature can be enabled using the following steps: * Prepare a manifest similar to the sample [`nodelocaldns.yaml`](https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/dns/nodelocaldns/nodelocaldns.yaml) and save it as `nodelocaldns.yaml.` +* If using IPv6, the CoreDNS configuration file needs to enclose all the IPv6 addresses into square brackets if used in IP:Port format. +If you are using the sample manifest from the previous point, this will require modifying [the configuration line L70](https://github.com/kubernetes/kubernetes/blob/b2ecd1b3a3192fbbe2b9e348e095326f51dc43dd/cluster/addons/dns/nodelocaldns/nodelocaldns.yaml#L70) like this `health [__PILLAR__LOCAL__DNS__]:8080` * Substitute the variables in the manifest with the right values: * kubedns=`kubectl get svc kube-dns -n kube-system -o jsonpath={.spec.clusterIP}` diff --git a/content/en/docs/tasks/configure-pod-container/attach-handler-lifecycle-event.md b/content/en/docs/tasks/configure-pod-container/attach-handler-lifecycle-event.md index 00b9251be8..4576b0f02b 100644 --- a/content/en/docs/tasks/configure-pod-container/attach-handler-lifecycle-event.md +++ b/content/en/docs/tasks/configure-pod-container/attach-handler-lifecycle-event.md @@ -9,7 +9,7 @@ weight: 140 This page shows how to attach handlers to Container lifecycle events. Kubernetes supports the postStart and preStop events. Kubernetes sends the postStart event immediately after a Container is started, and it sends the preStop event immediately before the -Container is terminated. +Container is terminated. A Container may specify one handler per event. 
diff --git a/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md b/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md index 5896ab8357..45d56531f2 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md +++ b/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md @@ -366,7 +366,7 @@ have additional fields that can be set on `httpGet`: * `host`: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. * `scheme`: Scheme to use for connecting to the host (HTTP or HTTPS). Defaults to HTTP. -* `path`: Path to access on the HTTP server. +* `path`: Path to access on the HTTP server. Defaults to /. * `httpHeaders`: Custom headers to set in the request. HTTP allows repeated headers. * `port`: Name or number of the port to access on the container. Number must be in the range 1 to 65535. @@ -389,24 +389,32 @@ You can override the default headers by defining `.httpHeaders` for the probe; f ```yaml livenessProbe: - httpHeaders: - Accept: application/json + httpGet: + httpHeaders: + - name: Accept + value: application/json startupProbe: - httpHeaders: - User-Agent: MyUserAgent + httpGet: + httpHeaders: + - name: User-Agent + value: MyUserAgent ``` You can also remove these two headers by defining them with an empty value. 
```yaml livenessProbe: - httpHeaders: - Accept: "" + httpGet: + httpHeaders: + - name: Accept + value: "" startupProbe: - httpHeaders: - User-Agent: "" + httpGet: + httpHeaders: + - name: User-Agent + value: "" ``` ### TCP probes diff --git a/content/en/docs/tasks/configure-pod-container/configure-service-account.md b/content/en/docs/tasks/configure-pod-container/configure-service-account.md index 4cd5eaa905..ca3d0b2966 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-service-account.md +++ b/content/en/docs/tasks/configure-pod-container/configure-service-account.md @@ -82,6 +82,7 @@ You can list this and any other serviceAccount resources in the namespace with t ```shell kubectl get serviceaccounts ``` + The output is similar to this: ``` @@ -108,9 +109,10 @@ If you get a complete dump of the service account object, like this: ```shell kubectl get serviceaccounts/build-robot -o yaml ``` + The output is similar to this: -``` +```yaml apiVersion: v1 kind: ServiceAccount metadata: @@ -164,6 +166,7 @@ Any tokens for non-existent service accounts will be cleaned up by the token con ```shell kubectl describe secrets/build-robot-secret ``` + The output is similar to this: ``` @@ -227,7 +230,7 @@ kubectl get serviceaccounts default -o yaml > ./sa.yaml The output of the `sa.yaml` file is similar to this: -```shell +```yaml apiVersion: v1 kind: ServiceAccount metadata: @@ -244,7 +247,7 @@ Using your editor of choice (for example `vi`), open the `sa.yaml` file, delete The output of the `sa.yaml` file is similar to this: -```shell +```yaml apiVersion: v1 kind: ServiceAccount metadata: @@ -319,7 +322,8 @@ kubectl create -f https://k8s.io/examples/pods/pod-projected-svc-token.yaml ``` The kubelet will request and store the token on behalf of the pod, make the -token available to the pod at a configurable file path, and refresh the token as it approaches expiration. 
Kubelet proactively rotates the token if it is older than 80% of its total TTL, or if the token is older than 24 hours. +token available to the pod at a configurable file path, and refresh the token as it approaches expiration. +The kubelet proactively rotates the token if it is older than 80% of its total TTL, or if the token is older than 24 hours. The application is responsible for reloading the token when it rotates. Periodic reloading (e.g. once every 5 minutes) is sufficient for most use cases. @@ -380,7 +384,6 @@ JWKS URI is required to use the `https` scheme. ## {{% heading "whatsnext" %}} - See also: - [Cluster Admin Guide to Service Accounts](/docs/reference/access-authn-authz/service-accounts-admin/) diff --git a/content/en/docs/tasks/debug-application-cluster/crictl.md b/content/en/docs/tasks/debug-application-cluster/crictl.md index 92c397987d..343e3b1cb0 100644 --- a/content/en/docs/tasks/debug-application-cluster/crictl.md +++ b/content/en/docs/tasks/debug-application-cluster/crictl.md @@ -361,7 +361,7 @@ for more information. The exact versions for below mapping table are for docker cli v1.40 and crictl v1.19.0. Please note that the list is not exhaustive. For example, it doesn't include experimental commands of docker cli. {{< note >}} -Warn: the output format of CRICTL is similar to Docker CLI, despite some missing columns for some CLI. Make sure to check output for the specific command if your script output parsing. +The output format of CRICTL is similar to Docker CLI, despite some missing columns for some CLI. Make sure to check output for the specific command if your script output parsing. 
{{< /note >}} ### Retrieve Debugging Information diff --git a/content/en/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md b/content/en/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md index 8f7c97c4e8..8a972e1365 100644 --- a/content/en/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md +++ b/content/en/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md @@ -49,7 +49,7 @@ case you can try several things: * Add more nodes to the cluster. -* [Terminate unneeded pods](/docs/concepts/workloads/pods/#pod-termination) +* [Terminate unneeded pods](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination) to make room for pending pods. * Check that the pod is not larger than your nodes. For example, if all diff --git a/content/en/docs/tasks/debug-application-cluster/debug-running-pod.md b/content/en/docs/tasks/debug-application-cluster/debug-running-pod.md index c0dc2ebf69..54e474429c 100644 --- a/content/en/docs/tasks/debug-application-cluster/debug-running-pod.md +++ b/content/en/docs/tasks/debug-application-cluster/debug-running-pod.md @@ -192,7 +192,7 @@ this scenario using `kubectl run`: kubectl run myapp --image=busybox --restart=Never -- sleep 1d ``` -Run this command to create a copy of `myapp` named `myapp-copy` that adds a +Run this command to create a copy of `myapp` named `myapp-debug` that adds a new Ubuntu container for debugging: ```shell diff --git a/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions.md b/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions.md index f69859eb2d..de9ab8181c 100644 --- a/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions.md +++ b/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions.md @@ -314,7 +314,18 @@ CustomResourceDefinitions store validated resource data in the cluster's persist {{< note >}} CRDs 
converted from `apiextensions.k8s.io/v1beta1` to `apiextensions.k8s.io/v1` might lack structural schemas, and `spec.preserveUnknownFields` might be `true`. -For migrated CustomResourceDefinitions where `spec.preserveUnknownFields` is set, pruning is _not_ enabled and you can store arbitrary data. For best compatibility, you should update your custom resources to meet an OpenAPI schema, and you should set `spec.preserveUnknownFields` to true for the CustomResourceDefinition itself. +For legacy CustomResourceDefinition objects created as +`apiextensions.k8s.io/v1beta1` with `spec.preserveUnknownFields` set to +`true`, the following is also true: + +* Pruning is not enabled. +* You can store arbitrary data. + +For compatibility with `apiextensions.k8s.io/v1`, update your custom +resource definitions to: + +1. Use a structural OpenAPI schema. +2. Set `spec.preserveUnknownFields` to `false`. {{< /note >}} If you save the following YAML to `my-crontab.yaml`: diff --git a/content/en/docs/tasks/extend-kubernetes/setup-konnectivity.md b/content/en/docs/tasks/extend-kubernetes/setup-konnectivity.md index 82eecc9c38..34936014f6 100644 --- a/content/en/docs/tasks/extend-kubernetes/setup-konnectivity.md +++ b/content/en/docs/tasks/extend-kubernetes/setup-konnectivity.md @@ -37,8 +37,22 @@ by providing the following flags to the kube-apiserver: 1. Create an egress configuration file such as `admin/konnectivity/egress-selector-configuration.yaml`. 1. Set the `--egress-selector-config-file` flag of the API Server to the path of your API Server egress configuration file. +1. If you use UDS connection, add volumes config to the kube-apiserver: + ```yaml + spec: + containers: + volumeMounts: + - name: konnectivity-uds + mountPath: /etc/kubernetes/konnectivity-server + readOnly: false + volumes: + - name: konnectivity-uds + hostPath: + path: /etc/kubernetes/konnectivity-server + type: DirectoryOrCreate + ``` -Generate or obtain a certificate and kubeconfig for konnectivity-server. 
+Generate or obtain a certificate and kubeconfig for konnectivity-server. For example, you can use the OpenSSL command line tool to issue a X.509 certificate, using the cluster CA certificate `/etc/kubernetes/pki/ca.crt` from a control-plane host. diff --git a/content/en/docs/tasks/inject-data-application/podpreset.md b/content/en/docs/tasks/inject-data-application/podpreset.md deleted file mode 100644 index 91c3c7beb6..0000000000 --- a/content/en/docs/tasks/inject-data-application/podpreset.md +++ /dev/null @@ -1,323 +0,0 @@ ---- -reviewers: -- jessfraz -title: Inject Information into Pods Using a PodPreset -min-kubernetes-server-version: v1.6 -content_type: task -weight: 60 ---- - - - -{{< feature-state for_k8s_version="v1.6" state="alpha" >}} - -This page shows how to use PodPreset objects to inject information like {{< glossary_tooltip text="Secrets" term_id="secret" >}}, volume mounts, and {{< glossary_tooltip text="environment variables" term_id="container-env-variables" >}} into Pods at creation time. - - - -## {{% heading "prerequisites" %}} - - -You need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one using [Minikube](https://minikube.sigs.k8s.io/docs/). -Make sure that you have [enabled PodPreset](/docs/concepts/workloads/pods/podpreset/#enable-pod-preset) in your cluster. - - - - - - -## Use Pod presets to inject environment variables and volumes - -In this step, you create a preset that has a volume mount and one environment variable. -Here is the manifest for the PodPreset: - -{{< codenew file="podpreset/preset.yaml" >}} - -The name of a PodPreset object must be a valid -[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). 
- -In the manifest, you can see that the preset has an environment variable definition called `DB_PORT` -and a volume mount definition called `cache-volume` which is mounted under `/cache`. The {{< glossary_tooltip text="selector" term_id="selector" >}} specifies that -the preset will act upon any Pod that is labeled `role:frontend`. - -Create the PodPreset: - -```shell -kubectl apply -f https://k8s.io/examples/podpreset/preset.yaml -``` - -Verify that the PodPreset has been created: - -```shell -kubectl get podpreset -``` -``` -NAME CREATED AT -allow-database 2020-01-24T08:54:29Z -``` - -This manifest defines a Pod labelled `role: frontend` (matching the PodPreset's selector): - -{{< codenew file="podpreset/pod.yaml" >}} - -Create the Pod: - -```shell -kubectl create -f https://k8s.io/examples/podpreset/pod.yaml -``` - -Verify that the Pod is running: - -```shell -kubectl get pods -``` - -The output shows that the Pod is running: - -``` -NAME READY STATUS RESTARTS AGE -website 1/1 Running 0 4m -``` - -View the Pod spec altered by the admission controller in order to see the effects of the preset -having been applied: - -```shell -kubectl get pod website -o yaml -``` - -{{< codenew file="podpreset/merged.yaml" >}} - -The `DB_PORT` environment variable, the `volumeMount` and the `podpreset.admission.kubernetes.io` annotation -of the Pod verify that the preset has been applied. - -## Pod spec with ConfigMap example - -This is an example to show how a Pod spec is modified by a Pod preset -that references a ConfigMap containing environment variables. 
- -Here is the manifest containing the definition of the ConfigMap: - -{{< codenew file="podpreset/configmap.yaml" >}} - -Create the ConfigMap: - -```shell -kubectl create -f https://k8s.io/examples/podpreset/configmap.yaml -``` - -Here is a PodPreset manifest referencing that ConfigMap: - -{{< codenew file="podpreset/allow-db.yaml" >}} - -Create the preset that references the ConfigMap: - -```shell -kubectl create -f https://k8s.io/examples/podpreset/allow-db.yaml -``` - -The following manifest defines a Pod matching the PodPreset for this example: - -{{< codenew file="podpreset/pod.yaml" >}} - -Create the Pod: - -```shell -kubectl create -f https://k8s.io/examples/podpreset/pod.yaml -``` - -View the Pod spec altered by the admission controller in order to see the effects of the preset -having been applied: - -```shell -kubectl get pod website -o yaml -``` - -{{< codenew file="podpreset/allow-db-merged.yaml" >}} - -The `DB_PORT` environment variable and the `podpreset.admission.kubernetes.io` annotation of the Pod -verify that the preset has been applied. - -## ReplicaSet with Pod spec example - -This is an example to show that only Pod specs are modified by Pod presets. Other workload types -like ReplicaSets or Deployments are unaffected. 
- -Here is the manifest for the PodPreset for this example: - -{{< codenew file="podpreset/preset.yaml" >}} - -Create the preset: - -```shell -kubectl apply -f https://k8s.io/examples/podpreset/preset.yaml -``` - -This manifest defines a ReplicaSet that manages three application Pods: - -{{< codenew file="podpreset/replicaset.yaml" >}} - -Create the ReplicaSet: - -```shell -kubectl create -f https://k8s.io/examples/podpreset/replicaset.yaml -``` - -Verify that the Pods created by the ReplicaSet are running: - -```shell -kubectl get pods -``` - -The output shows that the Pods are running: - -``` -NAME READY STATUS RESTARTS AGE -frontend-2l94q 1/1 Running 0 2m18s -frontend-6vdgn 1/1 Running 0 2m18s -frontend-jzt4p 1/1 Running 0 2m18s -``` - -View the `spec` of the ReplicaSet: - -```shell -kubectl get replicasets frontend -o yaml -``` - -{{< note >}} -The ReplicaSet object's `spec` was not changed, nor does the ReplicaSet contain a -`podpreset.admission.kubernetes.io` annotation. This is because a PodPreset only -applies to Pod objects. - -To see the effects of the preset having been applied, you need to look at individual Pods. -{{< /note >}} - -The command to view the specs of the affected Pods is: - -```shell -kubectl get pod --selector=role=frontend -o yaml -``` - -{{< codenew file="podpreset/replicaset-merged.yaml" >}} - -Again the `podpreset.admission.kubernetes.io` annotation of the Pods -verifies that the preset has been applied. - -## Multiple Pod presets example - -This is an example to show how a Pod spec is modified by multiple Pod presets. 
- - -Here is the manifest for the first PodPreset: - -{{< codenew file="podpreset/preset.yaml" >}} - -Create the first PodPreset for this example: - -```shell -kubectl apply -f https://k8s.io/examples/podpreset/preset.yaml -``` - -Here is the manifest for the second PodPreset: - -{{< codenew file="podpreset/proxy.yaml" >}} - -Create the second preset: - -```shell -kubectl apply -f https://k8s.io/examples/podpreset/proxy.yaml -``` - -Here's a manifest containing the definition of an applicable Pod (matched by two PodPresets): - -{{< codenew file="podpreset/pod.yaml" >}} - -Create the Pod: - -```shell -kubectl create -f https://k8s.io/examples/podpreset/pod.yaml -``` - -View the Pod spec altered by the admission controller in order to see the effects of both presets -having been applied: - -```shell -kubectl get pod website -o yaml -``` - -{{< codenew file="podpreset/multi-merged.yaml" >}} - -The `DB_PORT` environment variable, the `proxy-volume` VolumeMount and the two `podpreset.admission.kubernetes.io` -annotations of the Pod verify that both presets have been applied. - -## Conflict example - -This is an example to show how a Pod spec is not modified by a Pod preset when there is a conflict. -The conflict in this example consists of a `VolumeMount` in the PodPreset conflicting with a Pod that defines the same `mountPath`. - -Here is the manifest for the PodPreset: - -{{< codenew file="podpreset/conflict-preset.yaml" >}} - -Note the `mountPath` value of `/cache`. - -Create the preset: - -```shell -kubectl apply -f https://k8s.io/examples/podpreset/conflict-preset.yaml -``` - -Here is the manifest for the Pod: - -{{< codenew file="podpreset/conflict-pod.yaml" >}} - -Note the volumeMount element with the same path as in the PodPreset. 
- -Create the Pod: - -```shell -kubectl create -f https://k8s.io/examples/podpreset/conflict-pod.yaml -``` - -View the Pod spec: - -```shell -kubectl get pod website -o yaml -``` - -{{< codenew file="podpreset/conflict-pod.yaml" >}} - -You can see there is no preset annotation (`podpreset.admission.kubernetes.io`). Seeing no annotation tells you that no preset has not been applied to the Pod. - -However, the -[PodPreset admission controller](/docs/reference/access-authn-authz/admission-controllers/#podpreset) -logs a warning containing details of the conflict. -You can view the warning using `kubectl`: - -```shell -kubectl -n kube-system logs -l=component=kube-apiserver -``` - -The output should look similar to: - -``` -W1214 13:00:12.987884 1 admission.go:147] conflict occurred while applying podpresets: allow-database on pod: err: merging volume mounts for allow-database has a conflict on mount path /cache: -v1.VolumeMount{Name:"other-volume", ReadOnly:false, MountPath:"/cache", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""} -does not match -core.VolumeMount{Name:"cache-volume", ReadOnly:false, MountPath:"/cache", SubPath:"", MountPropagation:(*core.MountPropagationMode)(nil), SubPathExpr:""} - in container -``` - -Note the conflict message on the path for the VolumeMount. - -## Deleting a PodPreset - -Once you don't need a PodPreset anymore, you can delete it with `kubectl`: - -```shell -kubectl delete podpreset allow-database -``` -The output shows that the PodPreset was deleted: -``` -podpreset "allow-database" deleted -``` diff --git a/content/en/docs/tasks/manage-hugepages/scheduling-hugepages.md b/content/en/docs/tasks/manage-hugepages/scheduling-hugepages.md index 36b92ac18a..8d4cd4afe6 100644 --- a/content/en/docs/tasks/manage-hugepages/scheduling-hugepages.md +++ b/content/en/docs/tasks/manage-hugepages/scheduling-hugepages.md @@ -104,17 +104,19 @@ spec: - Huge page requests must equal the limits. 
This is the default if limits are specified, but requests are not. -- Huge pages are isolated at a container scope, so each container has own limit on their cgroup sandbox as requested in a container spec. +- Huge pages are isolated at a container scope, so each container has its own + limit on its cgroup sandbox as requested in a container spec. - EmptyDir volumes backed by huge pages may not consume more huge page memory than the pod request. - Applications that consume huge pages via `shmget()` with `SHM_HUGETLB` must run with a supplemental group that matches `proc/sys/vm/hugetlb_shm_group`. - Huge page usage in a namespace is controllable via ResourceQuota similar -to other compute resources like `cpu` or `memory` using the `hugepages-` -token. + to other compute resources like `cpu` or `memory` using the `hugepages-` + token. - Support of multiple sizes huge pages is feature gated. It can be - disabled with the `HugePageStorageMediumSize` [feature -gate](/docs/reference/command-line-tools-reference/feature-gates/) on the {{< -glossary_tooltip text="kubelet" term_id="kubelet" >}} and {{< -glossary_tooltip text="kube-apiserver" -term_id="kube-apiserver" >}} (`--feature-gates=HugePageStorageMediumSize=true`). + disabled with the `HugePageStorageMediumSize` + [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) + on the {{< glossary_tooltip text="kubelet" term_id="kubelet" >}} and + {{< glossary_tooltip text="kube-apiserver" term_id="kube-apiserver" >}} + (`--feature-gates=HugePageStorageMediumSize=false`). 
+ diff --git a/content/en/docs/tasks/manage-kubernetes-objects/kustomization.md b/content/en/docs/tasks/manage-kubernetes-objects/kustomization.md index f5e20a94c0..c1722f694a 100644 --- a/content/en/docs/tasks/manage-kubernetes-objects/kustomization.md +++ b/content/en/docs/tasks/manage-kubernetes-objects/kustomization.md @@ -392,8 +392,8 @@ spec: containers: - name: my-nginx resources: - limits: - memory: 512Mi + limits: + memory: 512Mi EOF cat <./kustomization.yaml @@ -424,11 +424,12 @@ spec: spec: containers: - image: nginx - limits: - memory: 512Mi name: my-nginx ports: - containerPort: 80 + resources: + limits: + memory: 512Mi ``` Not all Resources or fields support strategic merge patches. To support modifying arbitrary fields in arbitrary Resources, @@ -590,7 +591,7 @@ spec: containers: - name: my-nginx image: nginx - command: ["start", "--host", "\$(MY_SERVICE_NAME)"] + command: ["start", "--host", "$(MY_SERVICE_NAME)"] EOF # Create a service.yaml file diff --git a/content/en/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch.md b/content/en/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch.md index 2613579ccf..b4d7b11a7e 100644 --- a/content/en/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch.md +++ b/content/en/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch.md @@ -445,6 +445,9 @@ and [kubectl apply](/docs/reference/generated/kubectl/kubectl-commands/#apply). +{{< note >}} +Strategic merge patch is not supported for custom resources. 
+{{< /note >}} ## {{% heading "whatsnext" %}} diff --git a/content/en/docs/tasks/run-application/configure-pdb.md b/content/en/docs/tasks/run-application/configure-pdb.md index 8113e07128..3823cac4ee 100644 --- a/content/en/docs/tasks/run-application/configure-pdb.md +++ b/content/en/docs/tasks/run-application/configure-pdb.md @@ -236,9 +236,6 @@ You can use a PDB with pods controlled by another type of controller, by an - only an integer value can be used with `.spec.minAvailable`, not a percentage. You can use a selector which selects a subset or superset of the pods belonging to a built-in -controller. However, when there are multiple PDBs in a namespace, you must be careful not -to create PDBs whose selectors overlap. - - - - +controller. The eviction API will disallow eviction of any pod covered by multiple PDBs, +so most users will want to avoid overlapping selectors. One reasonable use of overlapping +PDBs is when pods are being transitioned from one PDB to another. diff --git a/content/en/docs/tasks/tls/managing-tls-in-a-cluster.md b/content/en/docs/tasks/tls/managing-tls-in-a-cluster.md index 62e5cfc9cf..6ea02a06af 100644 --- a/content/en/docs/tasks/tls/managing-tls-in-a-cluster.md +++ b/content/en/docs/tasks/tls/managing-tls-in-a-cluster.md @@ -60,7 +60,7 @@ This tutorial uses CFSSL: Cloudflare's PKI and TLS toolkit [click here](https:// ## Download and install CFSSL The cfssl tools used in this example can be downloaded at -[https://pkg.cfssl.org/](https://pkg.cfssl.org/). +[https://github.com/cloudflare/cfssl/releases](https://github.com/cloudflare/cfssl/releases). ## Create a Certificate Signing Request diff --git a/content/en/docs/test.md b/content/en/docs/test.md index c052cb5f9c..aadfc9a9e3 100644 --- a/content/en/docs/test.md +++ b/content/en/docs/test.md @@ -337,7 +337,7 @@ sequenceDiagram Alice->John: Yes... John, how are you? {{}} -
More [examples](https://mermaid-js.github.io/mermaid/#/examples) from the offical docs. +
More [examples](https://mermaid-js.github.io/mermaid/#/examples) from the official docs. ## Sidebars and Admonitions diff --git a/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html b/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html index 6d8e43ebfc..2ee67382fd 100644 --- a/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html +++ b/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html @@ -92,9 +92,7 @@ weight: 10

- For your first Deployment, you'll use a Node.js application packaged in a Docker container. (If you didn't already try creating a - Node.js application and deploying it using a container, you can do that first by following the - instructions from the Hello Minikube tutorial). + For your first Deployment, you'll use a hello-node application packaged in a Docker container that uses NGINX to echo back all the requests. (If you didn't already try creating a hello-node application and deploying it using a container, you can do that first by following the instructions from the Hello Minikube tutorial).

Now that you know what Deployments are, let's go to the online tutorial and deploy our first app!

diff --git a/content/en/docs/tutorials/stateless-application/expose-external-ip-address.md b/content/en/docs/tutorials/stateless-application/expose-external-ip-address.md index 5babc2c0b0..8368d24132 100644 --- a/content/en/docs/tutorials/stateless-application/expose-external-ip-address.md +++ b/content/en/docs/tutorials/stateless-application/expose-external-ip-address.md @@ -9,78 +9,73 @@ weight: 10 This page shows how to create a Kubernetes Service object that exposes an external IP address. - - - ## {{% heading "prerequisites" %}} - - * Install [kubectl](/docs/tasks/tools/install-kubectl/). - - * Use a cloud provider like Google Kubernetes Engine or Amazon Web Services to - create a Kubernetes cluster. This tutorial creates an - [external load balancer](/docs/tasks/access-application-cluster/create-external-load-balancer/), - which requires a cloud provider. - - * Configure `kubectl` to communicate with your Kubernetes API server. For - instructions, see the documentation for your cloud provider. - - - +* Install [kubectl](/docs/tasks/tools/install-kubectl/). +* Use a cloud provider like Google Kubernetes Engine or Amazon Web Services to + create a Kubernetes cluster. This tutorial creates an + [external load balancer](/docs/tasks/access-application-cluster/create-external-load-balancer/), + which requires a cloud provider. +* Configure `kubectl` to communicate with your Kubernetes API server. For instructions, see the + documentation for your cloud provider. ## {{% heading "objectives" %}} - * Run five instances of a Hello World application. * Create a Service object that exposes an external IP address. * Use the Service object to access the running application. - - - ## Creating a service for an application running in five pods 1. 
Run a Hello World application in your cluster: -{{< codenew file="service/load-balancer-example.yaml" >}} + {{< codenew file="service/load-balancer-example.yaml" >}} -```shell -kubectl apply -f https://k8s.io/examples/service/load-balancer-example.yaml -``` - - -The preceding command creates a - {{< glossary_tooltip text="Deployment" term_id="deployment" >}} - and an associated - {{< glossary_tooltip term_id="replica-set" text="ReplicaSet" >}}. - The ReplicaSet has five - {{< glossary_tooltip text="Pods" term_id="pod" >}} - each of which runs the Hello World application. + ```shell + kubectl apply -f https://k8s.io/examples/service/load-balancer-example.yaml + ``` + The preceding command creates a + {{< glossary_tooltip text="Deployment" term_id="deployment" >}} + and an associated + {{< glossary_tooltip term_id="replica-set" text="ReplicaSet" >}}. + The ReplicaSet has five + {{< glossary_tooltip text="Pods" term_id="pod" >}} + each of which runs the Hello World application. 1. Display information about the Deployment: - kubectl get deployments hello-world - kubectl describe deployments hello-world + ```shell + kubectl get deployments hello-world + kubectl describe deployments hello-world + ``` 1. Display information about your ReplicaSet objects: - kubectl get replicasets - kubectl describe replicasets + ```shell + kubectl get replicasets + kubectl describe replicasets + ``` 1. Create a Service object that exposes the deployment: - kubectl expose deployment hello-world --type=LoadBalancer --name=my-service + ```shell + kubectl expose deployment hello-world --type=LoadBalancer --name=my-service + ``` 1. 
Display information about the Service: - kubectl get services my-service + ```shell + kubectl get services my-service + ``` - The output is similar to this: + The output is similar to: - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - my-service LoadBalancer 10.3.245.137 104.198.205.71 8080/TCP 54s + ```console + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + my-service LoadBalancer 10.3.245.137 104.198.205.71 8080/TCP 54s + ``` {{< note >}} @@ -96,23 +91,27 @@ The preceding command creates a 1. Display detailed information about the Service: - kubectl describe services my-service + ```shell + kubectl describe services my-service + ``` - The output is similar to this: + The output is similar to: - Name: my-service - Namespace: default - Labels: app.kubernetes.io/name=load-balancer-example - Annotations: - Selector: app.kubernetes.io/name=load-balancer-example - Type: LoadBalancer - IP: 10.3.245.137 - LoadBalancer Ingress: 104.198.205.71 - Port: 8080/TCP - NodePort: 32377/TCP - Endpoints: 10.0.0.6:8080,10.0.1.6:8080,10.0.1.7:8080 + 2 more... - Session Affinity: None - Events: + ```console + Name: my-service + Namespace: default + Labels: app.kubernetes.io/name=load-balancer-example + Annotations: + Selector: app.kubernetes.io/name=load-balancer-example + Type: LoadBalancer + IP: 10.3.245.137 + LoadBalancer Ingress: 104.198.205.71 + Port: 8080/TCP + NodePort: 32377/TCP + Endpoints: 10.0.0.6:8080,10.0.1.6:8080,10.0.1.7:8080 + 2 more... + Session Affinity: None + Events: + ``` Make a note of the external IP address (`LoadBalancer Ingress`) exposed by your service. In this example, the external IP address is 104.198.205.71. @@ -124,21 +123,27 @@ The preceding command creates a addresses of the pods that are running the Hello World application. To verify these are pod addresses, enter this command: - kubectl get pods --output=wide + ```shell + kubectl get pods --output=wide + ``` - The output is similar to this: + The output is similar to: - NAME ... 
IP NODE - hello-world-2895499144-1jaz9 ... 10.0.1.6 gke-cluster-1-default-pool-e0b8d269-1afc - hello-world-2895499144-2e5uh ... 10.0.1.8 gke-cluster-1-default-pool-e0b8d269-1afc - hello-world-2895499144-9m4h1 ... 10.0.0.6 gke-cluster-1-default-pool-e0b8d269-5v7a - hello-world-2895499144-o4z13 ... 10.0.1.7 gke-cluster-1-default-pool-e0b8d269-1afc - hello-world-2895499144-segjf ... 10.0.2.5 gke-cluster-1-default-pool-e0b8d269-cpuc + ```console + NAME ... IP NODE + hello-world-2895499144-1jaz9 ... 10.0.1.6 gke-cluster-1-default-pool-e0b8d269-1afc + hello-world-2895499144-2e5uh ... 10.0.1.8 gke-cluster-1-default-pool-e0b8d269-1afc + hello-world-2895499144-9m4h1 ... 10.0.0.6 gke-cluster-1-default-pool-e0b8d269-5v7a + hello-world-2895499144-o4z13 ... 10.0.1.7 gke-cluster-1-default-pool-e0b8d269-1afc + hello-world-2895499144-segjf ... 10.0.2.5 gke-cluster-1-default-pool-e0b8d269-cpuc + ``` 1. Use the external IP address (`LoadBalancer Ingress`) to access the Hello World application: - curl http://: + ```shell + curl http://: + ``` where `` is the external IP address (`LoadBalancer Ingress`) of your Service, and `` is the value of `Port` in your Service @@ -148,29 +153,26 @@ The preceding command creates a The response to a successful request is a hello message: - Hello Kubernetes! - - - + ```shell + Hello Kubernetes! + ``` ## {{% heading "cleanup" %}} - To delete the Service, enter this command: - kubectl delete services my-service +```shell +kubectl delete services my-service +``` To delete the Deployment, the ReplicaSet, and the Pods that are running the Hello World application, enter this command: - kubectl delete deployment hello-world - - - +```shell +kubectl delete deployment hello-world +``` ## {{% heading "whatsnext" %}} - Learn more about [connecting applications with services](/docs/concepts/services-networking/connect-applications-service/). 
- diff --git a/content/en/examples/application/deployment-patch.yaml b/content/en/examples/application/deployment-patch.yaml index 7b32e2fcae..af12f4cb0c 100644 --- a/content/en/examples/application/deployment-patch.yaml +++ b/content/en/examples/application/deployment-patch.yaml @@ -1,4 +1,4 @@ -apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2 +apiVersion: apps/v1 kind: Deployment metadata: name: patch-demo diff --git a/content/en/examples/application/deployment-retainkeys.yaml b/content/en/examples/application/deployment-retainkeys.yaml index b5e04f0cc1..af63f46d37 100644 --- a/content/en/examples/application/deployment-retainkeys.yaml +++ b/content/en/examples/application/deployment-retainkeys.yaml @@ -1,4 +1,4 @@ -apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2 +apiVersion: apps/v1 kind: Deployment metadata: name: retainkeys-demo diff --git a/content/en/examples/application/deployment-scale.yaml b/content/en/examples/application/deployment-scale.yaml index 68801c971d..01fe96d845 100644 --- a/content/en/examples/application/deployment-scale.yaml +++ b/content/en/examples/application/deployment-scale.yaml @@ -1,4 +1,4 @@ -apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2 +apiVersion: apps/v1 kind: Deployment metadata: name: nginx-deployment diff --git a/content/en/examples/application/deployment-update.yaml b/content/en/examples/application/deployment-update.yaml index 18e8be65fb..1c0b9d1ab8 100644 --- a/content/en/examples/application/deployment-update.yaml +++ b/content/en/examples/application/deployment-update.yaml @@ -1,4 +1,4 @@ -apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2 +apiVersion: apps/v1 kind: Deployment metadata: name: nginx-deployment diff --git a/content/en/examples/application/deployment.yaml b/content/en/examples/application/deployment.yaml index 2cd599218d..dbed8bc72b 100644 --- a/content/en/examples/application/deployment.yaml +++ 
b/content/en/examples/application/deployment.yaml @@ -1,4 +1,4 @@ -apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2 +apiVersion: apps/v1 kind: Deployment metadata: name: nginx-deployment diff --git a/content/en/examples/application/guestbook/frontend-deployment.yaml b/content/en/examples/application/guestbook/frontend-deployment.yaml index 50d6e1f0d4..23d64be644 100644 --- a/content/en/examples/application/guestbook/frontend-deployment.yaml +++ b/content/en/examples/application/guestbook/frontend-deployment.yaml @@ -1,4 +1,4 @@ -apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2 +apiVersion: apps/v1 kind: Deployment metadata: name: frontend diff --git a/content/en/examples/application/guestbook/redis-master-deployment.yaml b/content/en/examples/application/guestbook/redis-master-deployment.yaml index fc6f418c39..478216d1ac 100644 --- a/content/en/examples/application/guestbook/redis-master-deployment.yaml +++ b/content/en/examples/application/guestbook/redis-master-deployment.yaml @@ -1,4 +1,4 @@ -apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2 +apiVersion: apps/v1 kind: Deployment metadata: name: redis-master diff --git a/content/en/examples/application/guestbook/redis-slave-deployment.yaml b/content/en/examples/application/guestbook/redis-slave-deployment.yaml index 7dcfb6c263..1a7b04386a 100644 --- a/content/en/examples/application/guestbook/redis-slave-deployment.yaml +++ b/content/en/examples/application/guestbook/redis-slave-deployment.yaml @@ -1,4 +1,4 @@ -apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2 +apiVersion: apps/v1 kind: Deployment metadata: name: redis-slave diff --git a/content/en/examples/application/mysql/mysql-deployment.yaml b/content/en/examples/application/mysql/mysql-deployment.yaml index 518457777e..419fbe03d3 100644 --- a/content/en/examples/application/mysql/mysql-deployment.yaml +++ b/content/en/examples/application/mysql/mysql-deployment.yaml @@ -9,7 +9,7 @@ spec: app: 
mysql clusterIP: None --- -apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2 +apiVersion: apps/v1 kind: Deployment metadata: name: mysql diff --git a/content/en/examples/application/wordpress/mysql-deployment.yaml b/content/en/examples/application/wordpress/mysql-deployment.yaml index 8b92b76f54..c6b221512d 100644 --- a/content/en/examples/application/wordpress/mysql-deployment.yaml +++ b/content/en/examples/application/wordpress/mysql-deployment.yaml @@ -25,7 +25,7 @@ spec: requests: storage: 20Gi --- -apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2 +apiVersion: apps/v1 kind: Deployment metadata: name: wordpress-mysql diff --git a/content/en/examples/application/wordpress/wordpress-deployment.yaml b/content/en/examples/application/wordpress/wordpress-deployment.yaml index d898474211..c8ed239142 100644 --- a/content/en/examples/application/wordpress/wordpress-deployment.yaml +++ b/content/en/examples/application/wordpress/wordpress-deployment.yaml @@ -25,7 +25,7 @@ spec: requests: storage: 20Gi --- -apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2 +apiVersion: apps/v1 kind: Deployment metadata: name: wordpress diff --git a/content/en/examples/examples_test.go b/content/en/examples/examples_test.go index d653d8303e..012a2acaa7 100644 --- a/content/en/examples/examples_test.go +++ b/content/en/examples/examples_test.go @@ -32,7 +32,6 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apimachinery/pkg/util/yaml" - // "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/apps" @@ -56,9 +55,6 @@ import ( "k8s.io/kubernetes/pkg/apis/rbac" rbac_validation "k8s.io/kubernetes/pkg/apis/rbac/validation" - "k8s.io/kubernetes/pkg/apis/settings" - settings_validation "k8s.io/kubernetes/pkg/apis/settings/validation" - "k8s.io/kubernetes/pkg/apis/storage" storage_validation "k8s.io/kubernetes/pkg/apis/storage/validation" @@ -73,7 +69,6 
@@ import ( _ "k8s.io/kubernetes/pkg/apis/networking/install" _ "k8s.io/kubernetes/pkg/apis/policy/install" _ "k8s.io/kubernetes/pkg/apis/rbac/install" - _ "k8s.io/kubernetes/pkg/apis/settings/install" _ "k8s.io/kubernetes/pkg/apis/storage/install" ) @@ -111,7 +106,6 @@ func initGroups() { networking.GroupName, policy.GroupName, rbac.GroupName, - settings.GroupName, storage.GroupName, } @@ -296,11 +290,6 @@ func validateObject(obj runtime.Object) (errors field.ErrorList) { case *rbac.ClusterRoleBinding: // clusterolebinding does not accept namespace errors = rbac_validation.ValidateClusterRoleBinding(t) - case *settings.PodPreset: - if t.Namespace == "" { - t.Namespace = api.NamespaceDefault - } - errors = settings_validation.ValidatePodPreset(t) case *storage.StorageClass: // storageclass does not accept namespace errors = storage_validation.ValidateStorageClass(t) @@ -518,20 +507,6 @@ func TestExampleObjectSchemas(t *testing.T) { "node-problem-detector-configmap": {&apps.DaemonSet{}}, "termination": {&api.Pod{}}, }, - "podpreset": { - "allow-db": {&settings.PodPreset{}}, - "allow-db-merged": {&api.Pod{}}, - "configmap": {&api.ConfigMap{}}, - "conflict-pod": {&api.Pod{}}, - "conflict-preset": {&settings.PodPreset{}}, - "merged": {&api.Pod{}}, - "multi-merged": {&api.Pod{}}, - "pod": {&api.Pod{}}, - "preset": {&settings.PodPreset{}}, - "proxy": {&settings.PodPreset{}}, - "replicaset-merged": {&api.Pod{}}, - "replicaset": {&apps.ReplicaSet{}}, - }, "pods": { "commands": {&api.Pod{}}, "init-containers": {&api.Pod{}}, diff --git a/content/en/examples/podpreset/allow-db-merged.yaml b/content/en/examples/podpreset/allow-db-merged.yaml deleted file mode 100644 index 7f52cc1fa4..0000000000 --- a/content/en/examples/podpreset/allow-db-merged.yaml +++ /dev/null @@ -1,31 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: website - labels: - app: website - role: frontend - annotations: - podpreset.admission.kubernetes.io/podpreset-allow-database: "resource version" -spec: 
- containers: - - name: website - image: nginx - volumeMounts: - - mountPath: /cache - name: cache-volume - ports: - - containerPort: 80 - env: - - name: DB_PORT - value: "6379" - - name: duplicate_key - value: FROM_ENV - - name: expansion - value: $(REPLACE_ME) - envFrom: - - configMapRef: - name: etcd-env-config - volumes: - - name: cache-volume - emptyDir: {} diff --git a/content/en/examples/podpreset/allow-db.yaml b/content/en/examples/podpreset/allow-db.yaml deleted file mode 100644 index 2c511e650d..0000000000 --- a/content/en/examples/podpreset/allow-db.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: settings.k8s.io/v1alpha1 -kind: PodPreset -metadata: - name: allow-database -spec: - selector: - matchLabels: - role: frontend - env: - - name: DB_PORT - value: "6379" - - name: duplicate_key - value: FROM_ENV - - name: expansion - value: $(REPLACE_ME) - envFrom: - - configMapRef: - name: etcd-env-config - volumeMounts: - - mountPath: /cache - name: cache-volume - volumes: - - name: cache-volume - emptyDir: {} diff --git a/content/en/examples/podpreset/configmap.yaml b/content/en/examples/podpreset/configmap.yaml deleted file mode 100644 index 806a880bff..0000000000 --- a/content/en/examples/podpreset/configmap.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: etcd-env-config -data: - number_of_members: "1" - initial_cluster_state: new - initial_cluster_token: DUMMY_ETCD_INITIAL_CLUSTER_TOKEN - discovery_token: DUMMY_ETCD_DISCOVERY_TOKEN - discovery_url: http://etcd_discovery:2379 - etcdctl_peers: http://etcd:2379 - duplicate_key: FROM_CONFIG_MAP - REPLACE_ME: "a value" - diff --git a/content/en/examples/podpreset/conflict-pod.yaml b/content/en/examples/podpreset/conflict-pod.yaml deleted file mode 100644 index 6949f7e162..0000000000 --- a/content/en/examples/podpreset/conflict-pod.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: website - labels: - app: website - role: frontend -spec: - 
containers: - - name: website - image: nginx - volumeMounts: - - mountPath: /cache - name: cache-volume - ports: - - containerPort: 80 - volumes: - - name: cache-volume - emptyDir: {} diff --git a/content/en/examples/podpreset/conflict-preset.yaml b/content/en/examples/podpreset/conflict-preset.yaml deleted file mode 100644 index a2ad96c48a..0000000000 --- a/content/en/examples/podpreset/conflict-preset.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: settings.k8s.io/v1alpha1 -kind: PodPreset -metadata: - name: allow-database -spec: - selector: - matchLabels: - role: frontend - env: - - name: DB_PORT - value: "6379" - volumeMounts: - - mountPath: /cache - name: other-volume - volumes: - - name: other-volume - emptyDir: {} - diff --git a/content/en/examples/podpreset/merged.yaml b/content/en/examples/podpreset/merged.yaml deleted file mode 100644 index 97c022c86c..0000000000 --- a/content/en/examples/podpreset/merged.yaml +++ /dev/null @@ -1,25 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: website - labels: - app: website - role: frontend - annotations: - podpreset.admission.kubernetes.io/podpreset-allow-database: "resource version" -spec: - containers: - - name: website - image: nginx - volumeMounts: - - mountPath: /cache - name: cache-volume - ports: - - containerPort: 80 - env: - - name: DB_PORT - value: "6379" - volumes: - - name: cache-volume - emptyDir: {} - diff --git a/content/en/examples/podpreset/multi-merged.yaml b/content/en/examples/podpreset/multi-merged.yaml deleted file mode 100644 index 7fcaae62e3..0000000000 --- a/content/en/examples/podpreset/multi-merged.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: website - labels: - app: website - role: frontend - annotations: - podpreset.admission.kubernetes.io/podpreset-allow-database: "resource version" - podpreset.admission.kubernetes.io/podpreset-proxy: "resource version" -spec: - containers: - - name: website - image: nginx - volumeMounts: - - mountPath: /cache 
- name: cache-volume - - mountPath: /etc/proxy/configs - name: proxy-volume - ports: - - containerPort: 80 - env: - - name: DB_PORT - value: "6379" - volumes: - - name: cache-volume - emptyDir: {} - - name: proxy-volume - emptyDir: {} diff --git a/content/en/examples/podpreset/pod.yaml b/content/en/examples/podpreset/pod.yaml deleted file mode 100644 index b6b4e60d6e..0000000000 --- a/content/en/examples/podpreset/pod.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: website - labels: - app: website - role: frontend -spec: - containers: - - name: website - image: nginx - ports: - - containerPort: 80 - diff --git a/content/en/examples/podpreset/preset.yaml b/content/en/examples/podpreset/preset.yaml deleted file mode 100644 index f300289c82..0000000000 --- a/content/en/examples/podpreset/preset.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: settings.k8s.io/v1alpha1 -kind: PodPreset -metadata: - name: allow-database -spec: - selector: - matchLabels: - role: frontend - env: - - name: DB_PORT - value: "6379" - volumeMounts: - - mountPath: /cache - name: cache-volume - volumes: - - name: cache-volume - emptyDir: {} diff --git a/content/en/examples/podpreset/proxy.yaml b/content/en/examples/podpreset/proxy.yaml deleted file mode 100644 index d854475ac2..0000000000 --- a/content/en/examples/podpreset/proxy.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: settings.k8s.io/v1alpha1 -kind: PodPreset -metadata: - name: proxy -spec: - selector: - matchLabels: - role: frontend - volumeMounts: - - mountPath: /etc/proxy/configs - name: proxy-volume - volumes: - - name: proxy-volume - emptyDir: {} diff --git a/content/en/examples/podpreset/replicaset-merged.yaml b/content/en/examples/podpreset/replicaset-merged.yaml deleted file mode 100644 index 95cf846ebc..0000000000 --- a/content/en/examples/podpreset/replicaset-merged.yaml +++ /dev/null @@ -1,31 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: frontend - labels: - app: guestbook - role: 
frontend - annotations: - podpreset.admission.kubernetes.io/podpreset-allow-database: "resource version" -spec: - containers: - - name: php-redis - image: gcr.io/google_samples/gb-frontend:v3 - resources: - requests: - cpu: 100m - memory: 100Mi - volumeMounts: - - mountPath: /cache - name: cache-volume - env: - - name: GET_HOSTS_FROM - value: dns - - name: DB_PORT - value: "6379" - ports: - - containerPort: 80 - volumes: - - name: cache-volume - emptyDir: {} - diff --git a/content/en/examples/podpreset/replicaset.yaml b/content/en/examples/podpreset/replicaset.yaml deleted file mode 100644 index e9d49a9d1d..0000000000 --- a/content/en/examples/podpreset/replicaset.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: apps/v1 -kind: ReplicaSet -metadata: - name: frontend -spec: - replicas: 3 - selector: - matchLabels: - role: frontend - matchExpressions: - - {key: role, operator: In, values: [frontend]} - template: - metadata: - labels: - app: guestbook - role: frontend - spec: - containers: - - name: php-redis - image: gcr.io/google_samples/gb-frontend:v3 - resources: - requests: - cpu: 100m - memory: 100Mi - env: - - name: GET_HOSTS_FROM - value: dns - ports: - - containerPort: 80 diff --git a/content/en/examples/service/access/Dockerfile b/content/en/examples/service/access/Dockerfile index b7b09d492a..61c8ce2831 100644 --- a/content/en/examples/service/access/Dockerfile +++ b/content/en/examples/service/access/Dockerfile @@ -1,4 +1,4 @@ FROM nginx:1.17.3 RUN rm /etc/nginx/conf.d/default.conf -COPY frontend.conf /etc/nginx/conf.d +COPY frontend-nginx.conf /etc/nginx/conf.d diff --git a/content/en/examples/service/access/hello.yaml b/content/en/examples/service/access/backend-deployment.yaml similarity index 91% rename from content/en/examples/service/access/hello.yaml rename to content/en/examples/service/access/backend-deployment.yaml index 85dff18ee1..5c95e38a3b 100644 --- a/content/en/examples/service/access/hello.yaml +++ 
b/content/en/examples/service/access/backend-deployment.yaml @@ -1,14 +1,15 @@ +--- apiVersion: apps/v1 kind: Deployment metadata: - name: hello + name: backend spec: selector: matchLabels: app: hello tier: backend track: stable - replicas: 7 + replicas: 3 template: metadata: labels: @@ -22,3 +23,4 @@ spec: ports: - name: http containerPort: 80 +... \ No newline at end of file diff --git a/content/en/examples/service/access/hello-service.yaml b/content/en/examples/service/access/backend-service.yaml similarity index 95% rename from content/en/examples/service/access/hello-service.yaml rename to content/en/examples/service/access/backend-service.yaml index 71344ecb8b..9262d29bb8 100644 --- a/content/en/examples/service/access/hello-service.yaml +++ b/content/en/examples/service/access/backend-service.yaml @@ -1,3 +1,4 @@ +--- apiVersion: v1 kind: Service metadata: @@ -10,3 +11,4 @@ spec: - protocol: TCP port: 80 targetPort: http +... \ No newline at end of file diff --git a/content/en/examples/service/access/frontend-deployment.yaml b/content/en/examples/service/access/frontend-deployment.yaml new file mode 100644 index 0000000000..182b0e708e --- /dev/null +++ b/content/en/examples/service/access/frontend-deployment.yaml @@ -0,0 +1,27 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: frontend +spec: + selector: + matchLabels: + app: hello + tier: frontend + track: stable + replicas: 1 + template: + metadata: + labels: + app: hello + tier: frontend + track: stable + spec: + containers: + - name: nginx + image: "gcr.io/google-samples/hello-frontend:1.0" + lifecycle: + preStop: + exec: + command: ["/usr/sbin/nginx","-s","quit"] +... 
\ No newline at end of file diff --git a/content/en/examples/service/access/frontend-nginx.conf b/content/en/examples/service/access/frontend-nginx.conf new file mode 100644 index 0000000000..39a911a09a --- /dev/null +++ b/content/en/examples/service/access/frontend-nginx.conf @@ -0,0 +1,14 @@ +# The identifier Backend is internal to nginx, and used to name this specific upstream +upstream Backend { + # hello is the internal DNS name used by the backend Service inside Kubernetes + server hello; +} + +server { + listen 80; + + location / { + # The following statement will proxy traffic to the upstream named Backend + proxy_pass http://Backend; + } +} diff --git a/content/en/examples/service/access/frontend-service.yaml b/content/en/examples/service/access/frontend-service.yaml new file mode 100644 index 0000000000..898a5ed51b --- /dev/null +++ b/content/en/examples/service/access/frontend-service.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: frontend +spec: + selector: + app: hello + tier: frontend + ports: + - protocol: "TCP" + port: 80 + targetPort: 80 + type: LoadBalancer +... 
\ No newline at end of file diff --git a/content/en/examples/service/access/frontend.conf b/content/en/examples/service/access/frontend.conf deleted file mode 100644 index 9a1f5a0ed6..0000000000 --- a/content/en/examples/service/access/frontend.conf +++ /dev/null @@ -1,11 +0,0 @@ -upstream hello { - server hello; -} - -server { - listen 80; - - location / { - proxy_pass http://hello; - } -} diff --git a/content/en/examples/service/access/frontend.yaml b/content/en/examples/service/access/frontend.yaml deleted file mode 100644 index 9f5b6b757f..0000000000 --- a/content/en/examples/service/access/frontend.yaml +++ /dev/null @@ -1,39 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: frontend -spec: - selector: - app: hello - tier: frontend - ports: - - protocol: "TCP" - port: 80 - targetPort: 80 - type: LoadBalancer ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: frontend -spec: - selector: - matchLabels: - app: hello - tier: frontend - track: stable - replicas: 1 - template: - metadata: - labels: - app: hello - tier: frontend - track: stable - spec: - containers: - - name: nginx - image: "gcr.io/google-samples/hello-frontend:1.0" - lifecycle: - preStop: - exec: - command: ["/usr/sbin/nginx","-s","quit"] diff --git a/content/es/community/_index.html b/content/es/community/_index.html index 8f557f3526..56e42c4899 100644 --- a/content/es/community/_index.html +++ b/content/es/community/_index.html @@ -9,7 +9,7 @@ cid: community

Garantizando el funcionamiento de Kubernetes para todo el mundo y en cualquier lugar.

Conecte con la comunidad Kubernetes en nuestro canal de Slack, foro, o únete al - Kubernetes-dev Google group. + Kubernetes-dev Google group. Cada semana se lleva a cabo una reunión de la comunidad por videoconferencia para discutir el estado de cosas, revise el documento Community Meeting para obtener información sobre cómo participar.

También puede formar parte de la comunidad en cualquier parte del mundo a través de la @@ -59,4 +59,4 @@ cid: community             

        
    
- \ No newline at end of file + diff --git a/content/es/community/static/cncf-code-of-conduct.md b/content/es/community/static/cncf-code-of-conduct.md index 440b20ee04..c426e05db6 100644 --- a/content/es/community/static/cncf-code-of-conduct.md +++ b/content/es/community/static/cncf-code-of-conduct.md @@ -22,7 +22,7 @@ Este Código de Conducta se aplica tanto dentro de los espacios relacionados con Los casos de comportamiento abusivo, acosador o de cualquier otro modo inaceptable podrán ser denunciados poniéndose en contacto con el [Comité del Código de Conducta de Kubernetes](https://git.k8s.io/community/committee-code-of-conduct) en . Para otros proyectos, comuníquese con un mantenedor de proyectos de CNCF o con nuestra mediadora, Mishi Choudhary . -Este Código de Conducta está adaptado del Compromiso de Colaboradores (http://contributor-covenant.org), versión 1.2.0, disponible en http://contributor-covenant.org/version/1/2/0/ +Este Código de Conducta está adaptado del Compromiso de Colaboradores (https://contributor-covenant.org), versión 1.2.0, disponible en https://contributor-covenant.org/version/1/2/0/ ### Código de Conducta para la Comunidad de la CNCF diff --git a/content/es/docs/tasks/tools/_index.md b/content/es/docs/tasks/tools/_index.md index 02f8add154..e29abd9398 100644 --- a/content/es/docs/tasks/tools/_index.md +++ b/content/es/docs/tasks/tools/_index.md @@ -1,4 +1,41 @@ --- title: "Instalar herramientas" +description: Configurar las herramientas de Kubernetes en su computadora. weight: 10 ---- \ No newline at end of file +no_list: true +--- + +## kubectl + +Usa la herramienta de línea de comandos de Kubernetes, [kubectl](/docs/user-guide/kubectl/), para desplegar y gestionar aplicaciones en Kubernetes. Usando kubectl, puedes inspeccionar recursos del clúster; crear, eliminar, y actualizar componentes; explorar tu nuevo clúster y arrancar aplicaciones. 
+ +Ver [Instalar y Configurar `kubectl`](/docs/tasks/tools/install-kubectl/) para más información sobre cómo descargar e instalar `kubectl` y configurarlo para acceder a su clúster. + +Ver la guía de instalación y configuración de kubectl + +También se puede leer [la documentación de referencia](/docs/reference/kubectl) de `kubectl`. + +## kind +[`kind`](https://kind.sigs.k8s.io/docs/) le permite usar Kubernetes en su máquina local. Esta herramienta requiere tener [Docker](https://docs.docker.com/get-docker/) instalado y configurado. + +En la página de [inicio rápido](https://kind.sigs.k8s.io/docs/user/quick-start/) encontrarás toda la información necesaria para empezar con kind. + +Ver la guía de inicio rápido + +## minikube + +De forma similar a `kind`, [`minikube`](https://minikube.sigs.k8s.io/) es una herramienta que le permite usar Kubernetes en su máquina local. `minikube` le permite ejecutar un único nodo en su computadora personal (PC de Windows, macOS y Linux) para que se pueda probar Kubernetes, o para su trabajo de desarrollo. + +Se puede seguir la guía oficial de [`minikube`](https://minikube.sigs.k8s.io/docs/start/) si su enfoque está en instalar la herramienta. + +Ver la guía de minikube + +Una vez `minikube` ha terminado de instalarse, está lista para desplegar una [aplicación de ejemplo](/docs/tutorials/hello-minikube/). + +## kubeadm + +Se puede usar la utilidad {{< glossary_tooltip term_id="kubeadm" text="kubeadm" >}} para crear y gestionar clústeres de Kubernetes. + +En [instalando kubeadm](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/) se muestra cómo instalar kubeadm. Una vez instalado, se puede utilizar [para crear un clúster](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/). 
+ +Ver la guía de instalación diff --git a/content/es/docs/tasks/tools/install-kubectl.md b/content/es/docs/tasks/tools/install-kubectl.md index 8a0791e262..af1abeb63d 100644 --- a/content/es/docs/tasks/tools/install-kubectl.md +++ b/content/es/docs/tasks/tools/install-kubectl.md @@ -1,6 +1,4 @@ --- -reviewers: -- mikedanese title: Instalar y Configurar kubectl content_type: task weight: 10 @@ -11,7 +9,7 @@ card: --- -Usa la herramienta de línea de comandos de Kubernetes, [kubectl](/docs/user-guide/kubectl/), para desplegar y gestionar aplicaciones en Kubernetes. Usando kubectl, puedes inspeccionar recursos del clúster; crear, eliminar, y actualizar componentes; explorar tu nuevo clúster; y arrancar aplicaciones de ejemplo. +Usa la herramienta de línea de comandos de Kubernetes, [kubectl](/docs/reference/kubectl/kubectl/), para desplegar y gestionar aplicaciones en Kubernetes. Usando kubectl, puedes inspeccionar recursos del clúster; crear, eliminar, y actualizar componentes; explorar tu nuevo clúster; y arrancar aplicaciones de ejemplo. Para ver la lista completa de operaciones de kubectl, se puede ver [el resumen de kubectl](/docs/reference/kubectl/overview/). ## {{% heading "prerequisites" %}} @@ -22,21 +20,55 @@ Debes usar una versión de kubectl que esté a menos de una versión menor de di -## Instalar kubectl +## Instalar kubectl en Linux -Estos son algunos métodos para instalar kubectl. +### Instalar el binario de kubectl con curl en Linux -## Instalar el binario de kubectl usando gestión nativa de paquetes +1. Descargar la última entrega: + + ``` + curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl" + ``` + +Para descargar una versión específica, remplaza el comando `$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)` con la versión específica. 
+ +Por ejemplo, para descarga la versión {{< param "fullversion" >}} en Linux, teclea: + + ``` + curl -LO https://storage.googleapis.com/kubernetes-release/release/{{< param "fullversion" >}}/bin/linux/amd64/kubectl + ``` + +2. Habilita los permisos de ejecución del binario `kubectl`. + + ``` + chmod +x ./kubectl + ``` + +3. Mueve el binario dentro de tu PATH. + + ``` + sudo mv ./kubectl /usr/local/bin/kubectl + ``` + +4. Comprueba que la versión que se ha instalado es la más reciente. + + ``` + kubectl version --client + ``` + + +## Instalar mediante el gestor de paquetes del sistema {{< tabs name="kubectl_install" >}} -{{< tab name="Ubuntu, Debian o HypriotOS" codelang="bash" >}} -sudo apt-get update && sudo apt-get install -y apt-transport-https +{{< tab name="Ubuntu, Debian or HypriotOS" codelang="bash" >}} +sudo apt-get update && sudo apt-get install -y apt-transport-https gnupg2 curl curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee -a /etc/apt/sources.list.d/kubernetes.list sudo apt-get update sudo apt-get install -y kubectl {{< /tab >}} -{{< tab name="CentOS, RHEL o Fedora" codelang="bash" >}}cat < /etc/yum.repos.d/kubernetes.repo + +{{< tab name="CentOS, RHEL or Fedora" codelang="bash" >}}cat < /etc/yum.repos.d/kubernetes.repo [kubernetes] name=Kubernetes baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 @@ -49,65 +81,146 @@ yum install -y kubectl {{< /tab >}} {{< /tabs >}} +### Instalar usando otro gestor de paquetes -## Instalar con snap en Ubuntu - +{{< tabs name="other_kubectl_install" >}} +{{% tab name="Snap" %}} Si usas Ubuntu o alguna de las otras distribuciones de Linux que soportan el gestor de paquetes [snap](https://snapcraft.io/docs/core/install), kubectl está disponible como una aplicación [snap](https://snapcraft.io/). -1. 
Cambia al usuario de snap y ejecuta el comando de instalación: +```shell +snap install kubectl --classic - ``` - sudo snap install kubectl --classic +kubectl version --client +``` + +{{% /tab %}} + +{{% tab name="Homebrew" %}} +Si usas alguna de las otras distribuciones de Linux que soportan el gestor de paquetes [Homebrew](https://docs.brew.sh/Homebrew-on-Linux), kubectl está disponible como una aplicación de [Homebrew](https://docs.brew.sh/Homebrew-on-Linux#install). + +```shell +brew install kubectl + +kubectl version --client +``` + +{{% /tab %}} + +{{< /tabs >}} + + +## Instalar kubectl en macOS + +### Instalar el binario de kubectl usando curl en macOS + +1. Descarga la última entrega: + + ```bash + curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/darwin/amd64/kubectl" + ``` + + Para descargar una versión específica, remplaza el comando `$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)` con la versión específica. + + Por ejemplo, para descargar la versión {{< param "fullversion" >}} en macOS, teclea: + + ```bash + curl -LO https://storage.googleapis.com/kubernetes-release/release/{{< param "fullversion" >}}/bin/darwin/amd64/kubectl + ``` + +2. Habilita los permisos de ejecución del binario `kubectl`. + + ```bash + chmod +x ./kubectl + ``` + +3. Mueve el binario dentro de tu PATH. + + ```bash + sudo mv ./kubectl /usr/local/bin/kubectl ``` -2. Para asegurar que la versión utilizada sea la más actual puedes probar: +4. Para asegurar que la versión utilizada sea la más actual puedes probar: - ``` - kubectl version - ``` + ```bash + kubectl version --client + ``` -## Instalar con Homebrew en macOS +### Instalar con Homebrew en macOS -Si estás usando macOS y el gestor de paquetes es [Homebrew](https://brew.sh/), puedes instalar kubectl con Homebrew. 
+Si estás usando macOS y el gestor de paquetes es [Homebrew](https://brew.sh/), puedes instalar `kubectl` con `brew`. 1. Ejecuta el comando de instalación: + ```bash + brew install kubectl ``` + + o + + ```bash brew install kubernetes-cli ``` -2. Para asegurar que la versión utilizada sea la más actual puedes probar: +2. Para asegurar que la versión utilizada sea la más actual, puedes ejecutar: - ``` - kubectl version + ```bash + kubectl version --client ``` -## Instalar con Macports en macOS +### Instalar con Macports en macOS -Si estás en macOS y usando el gestor de paquetes [Macports](https://macports.org/), puedes instalar kubectl con Macports. +Si estás en macOS y utilizas el gestor de paquetes [Macports](https://macports.org/), puedes instalar `kubectl` con `port`. 1. Ejecuta los comandos de instalación: - ``` + ```bash sudo port selfupdate sudo port install kubectl ``` -2. Para asegurar que la versión utilizada sea la más actual puedes probar: +2. Para asegurar que la versión utilizada sea la más actual puedes ejecutar: - ``` - kubectl version + ```bash + kubectl version --client ``` -## Instalar con Powershell desde PSGallery +## Instalar kubectl en Windows -Si estás en Windows y usando el gestor de paquetes [Powershell Gallery](https://www.powershellgallery.com/), puedes instalar y actualizar kubectl con Powershell. +### Instalar el binario de kubectl con curl en Windows + +1. Descargar la última entrega {{< param "fullversion" >}} de [este link](https://storage.googleapis.com/kubernetes-release/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe). 
+ + o si tiene `curl` instalada, utiliza este comando: + + ```bash + curl -LO https://storage.googleapis.com/kubernetes-release/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe + ``` + + Para averiguar la última versión estable (por ejemplo, para secuencias de comandos), echa un vistazo a [https://storage.googleapis.com/kubernetes-release/release/stable.txt](https://storage.googleapis.com/kubernetes-release/release/stable.txt). + +2. Añade el binario a tu PATH. + +3. Para asegurar que la versión utilizada sea la más actual, puedes ejecutar: + + ```bash + kubectl version --client + ``` + +{{< note >}} +[Docker Desktop para Windows](https://docs.docker.com/docker-for-windows/#kubernetes) añade su propia versión de `kubectl` a PATH. + +Si tienes Docker Desktop instalado, es posible que tengas que modificar tu PATH al PATH añadido por Docker Desktop o eliminar la versión de `kubectl` proporcionada por Docker Desktop. +{{< /note >}} + +### Instalar con Powershell desde PSGallery + +Si estás en Windows y utilizas el gestor de paquetes [Powershell Gallery](https://www.powershellgallery.com/), puedes instalar y actualizar kubectl con Powershell. 1. Ejecuta los comandos de instalación (asegurándote de especificar una `DownloadLocation`): - ``` - Install-Script -Name install-kubectl -Scope CurrentUser -Force - install-kubectl.ps1 [-DownloadLocation ] + ```powershell + Install-Script -Name 'install-kubectl' -Scope CurrentUser -Force + install-kubectl.ps1 [-DownloadLocation ] ``` {{< note >}}Si no especificas una `DownloadLocation`, `kubectl` se instalará en el directorio temporal del usuario.{{< /note >}} @@ -116,57 +229,66 @@ Si estás en Windows y usando el gestor de paquetes [Powershell Gallery](https:/ 2. 
Para asegurar que la versión utilizada sea la más actual puedes probar: - ``` - kubectl version + ```powershell + kubectl version --client ``` - {{< note >}}Actualizar la instalación se realiza mediante la re-ejecución de los dos comandos listados en el paso 1.{{< /note >}} +{{< note >}} +Actualizar la instalación se realiza mediante la re-ejecución de los dos comandos listados en el paso 1.{{< /note >}} -## Instalar en Windows usando Chocolatey o scoop +### Instalar en Windows usando Chocolatey o scoop -Para instalar kubectl en Windows puedes usar bien el gestor de paquetes [Chocolatey](https://chocolatey.org) o el instalador de línea de comandos [scoop](https://scoop.sh). -{{< tabs name="kubectl_win_install" >}} -{{% tab name="choco" %}} +1. Para instalar kubectl en Windows puedes usar el gestor de paquetes [Chocolatey](https://chocolatey.org) o el instalador de línea de comandos [scoop](https://scoop.sh). + {{< tabs name="kubectl_win_install" >}} + {{% tab name="choco" %}} +Using [Chocolatey](https://chocolatey.org). + + ```powershell choco install kubernetes-cli + ``` + {{% /tab %}} + {{% tab name="scoop" %}} +Using [scoop](https://scoop.sh). -{{% /tab %}} -{{% tab name="scoop" %}} - + ```powershell scoop install kubectl + ``` + {{% /tab %}} + {{< /tabs >}} -{{% /tab %}} -{{< /tabs >}} 2. Para asegurar que la versión utilizada sea la más actual puedes probar: - ``` - kubectl version + ```powershell + kubectl version --client ``` 3. Navega a tu directorio de inicio: + ```powershell + # Si estas usando cmd.exe, ejecuta: cd %USERPROFILE% + cd ~ ``` - cd %USERPROFILE% - ``` + 4. Crea el directorio `.kube`: - ``` + ```powershell mkdir .kube ``` 5. Cambia al directorio `.kube` que acabas de crear: - ``` + ```powershell cd .kube ``` 6. 
Configura kubectl para usar un clúster remoto de Kubernetes: - ``` + ```powershell New-Item config -type file ``` - {{< note >}}Edita el fichero de configuración con un editor de texto de tu elección, como Notepad.{{< /note >}} +{{< note >}}Edita el fichero de configuración con un editor de texto de tu elección, como Notepad.{{< /note >}} ## Descarga como parte del Google Cloud SDK @@ -175,107 +297,32 @@ Puedes instalar kubectl como parte del Google Cloud SDK. 1. Instala el [Google Cloud SDK](https://cloud.google.com/sdk/). 2. Ejecuta el comando de instalación de `kubectl`: - ``` + ```shell gcloud components install kubectl ``` 3. Para asegurar que la versión utilizada sea la más actual puedes probar: - ``` - kubectl version + ```shell + kubectl version --client ``` -## Instalar el binario de kubectl usando curl - -{{< tabs name="kubectl_install_curl" >}} -{{% tab name="macOS" %}} -1. Descarga la última entrega: - - ``` - curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/darwin/amd64/kubectl - ``` - - Para descargar una versión específica, remplaza el comando `$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)` con la versión específica. - - Por ejemplo, para descargar la versión {{< param "fullversion" >}} en macOS, teclea: - - ``` - curl -LO https://storage.googleapis.com/kubernetes-release/release/{{< param "fullversion" >}}/bin/darwin/amd64/kubectl - ``` - -2. Haz el binario de kubectl ejecutable. - - ``` - chmod +x ./kubectl - ``` - -3. Mueve el binario dentro de tu PATH. - - ``` - sudo mv ./kubectl /usr/local/bin/kubectl - ``` -{{% /tab %}} -{{% tab name="Linux" %}} - -1. 
Descarga la última entrega con el comando: - - ``` - curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl - ``` - - Para descargar una versión específica, remplaza el trozo del comando `$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)` con la versión específica. - - Por ejemplo, para descargar la versión {{< param "fullversion" >}} en Linux, teclea: - - ``` - curl -LO https://storage.googleapis.com/kubernetes-release/release/{{< param "fullversion" >}}/bin/linux/amd64/kubectl - ``` - -2. Haz el binario de kubectl ejecutable. - - ``` - chmod +x ./kubectl - ``` - -3. Mueve el binario dentro de tu PATH. - - ``` - sudo mv ./kubectl /usr/local/bin/kubectl - ``` -{{% /tab %}} -{{% tab name="Windows" %}} -1. Descarga la última entrega {{< param "fullversion" >}} desde [este enlace](https://storage.googleapis.com/kubernetes-release/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe). - - O si tienes `curl` instalado, usa este comando: - - ``` - curl -LO https://storage.googleapis.com/kubernetes-release/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe - ``` - - Para averiguar la última versión estable (por ejemplo, para secuencias de comandos), echa un vistazo a [https://storage.googleapis.com/kubernetes-release/release/stable.txt](https://storage.googleapis.com/kubernetes-release/release/stable.txt). - -2. Añade el binario a tu PATH. 
-{{% /tab %}} -{{< /tabs >}} - - - -## Configurar kubectl +## Comprobar la configuración kubectl Para que kubectl pueda encontrar y acceder a un clúster de Kubernetes, necesita un [fichero kubeconfig](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/), que se crea de forma automática cuando creas un clúster usando [kube-up.sh](https://github.com/kubernetes/kubernetes/blob/master/cluster/kube-up.sh) o despliegas de forma satisfactoria un clúster de Minikube. Revisa las [guías para comenzar](/docs/setup/) para más información acerca de crear clústers. Si necesitas acceso a un clúster que no has creado, ver el [documento de Compartir Acceso a un Clúster](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/). Por defecto, la configuración de kubectl se encuentra en `~/.kube/config`. -## Comprobar la configuración kubectl Comprueba que kubectl está correctamente configurado obteniendo el estado del clúster: ```shell kubectl cluster-info ``` + Si ves una respuesta en forma de URL, kubectl está correctamente configurado para acceder a tu clúster. Si ves un mensaje similar al siguiente, kubectl no está correctamente configurado o no es capaz de conectar con un clúster de Kubernetes. -```shell +``` The connection to the server was refused - did you specify the right host or port? ``` @@ -287,7 +334,9 @@ Si kubectl cluster-info devuelve la respuesta en forma de url, pero no puedes ac kubectl cluster-info dump ``` -## Habilitar el auto-completado en el intérprete de comandos +## kubectl configuraciones opcionales + +### Habilitar el auto-completado en el intérprete de comandos kubectl provee de soporte para auto-completado para Bash y Zsh, ¡que te puede ahorrar mucho uso del teclado! 
@@ -323,16 +372,23 @@ Debes asegurarte que la secuencia de comandos de completado de kubectl corre en - Corre la secuencia de comandos de completado en tu `~/.bashrc`: - ```shell + ```bash echo 'source <(kubectl completion bash)' >>~/.bashrc ``` - Añade la secuencia de comandos de completado al directorio `/etc/bash_completion.d`: - ```shell + ```bash kubectl completion bash >/etc/bash_completion.d/kubectl ``` +Si tienes un alias para `kubectl`, puedes extender los comandos de shell para funcionar con ese alias: + +```bash +echo 'alias k=kubectl' >>~/.bashrc +echo 'complete -F __start_kubectl k' >>~/.bashrc +``` + {{< note >}} bash-completion corre todas las secuencias de comandos de completado en `/etc/bash_completion.d`. {{< /note >}} @@ -344,21 +400,43 @@ Ambas estrategias son equivalentes. Tras recargar tu intérprete de comandos, el {{% tab name="Bash en macOS" %}} -{{< warning>}} -macOS incluye Bash 3.2 por defecto. La secuencia de comandos de completado de kubectl requiere Bash 4.1+ y no funciona con Bash 3.2. Una posible alternativa es instalar una nueva versión de Bash en macOS (ver instrucciones [aquí](https://itnext.io/upgrading-bash-on-macos-7138bd1066ba)). Las instrucciones de abajo sólo funcionan si estás usando Bash 4.1+. -{{< /warning >}} - ### Introducción La secuencia de comandos de completado de kubectl para Bash puede generarse con el comando `kubectl completion bash`. Corriendo la secuencia de comandos de completado en tu intérprete de comandos habilita el auto-completado de kubectl. Sin embargo, la secuencia de comandos de completado depende de [*bash-completion**](https://github.com/scop/bash-completion), lo que significa que tienes que instalar primero este programa (puedes probar si ya tienes bash-completion instalado ejecutando `type _init_completion`). +{{< warning>}} +macOS incluye Bash 3.2 por defecto. La secuencia de comandos de completado de kubectl requiere Bash 4.1+ y no funciona con Bash 3.2. 
Una posible alternativa es instalar una nueva versión de Bash en macOS (ver instrucciones [aquí](https://itnext.io/upgrading-bash-on-macos-7138bd1066ba)). Las instrucciones de abajo sólo funcionan si estás usando Bash 4.1+. +{{< /warning >}} + +### Actualizar bash + +Las instrucciones asumen que usas Bash 4.1+. Puedes comprobar tu versión de bash con: + +```bash +echo $BASH_VERSION +``` + +Si no es 4.1+, puedes actualizar bash con Homebrew: + +```bash +brew install bash +``` + +Recarga tu intérprete de comandos y verifica que estás usando la versión deseada: + +```bash +echo $BASH_VERSION $SHELL +``` + +Usualmente, Homebrew lo instala en `/usr/local/bin/bash`. + ### Instalar bash-completion Puedes instalar bash-completion con Homebrew: -```shell +```bash brew install bash-completion@2 ``` @@ -368,9 +446,9 @@ El `@2` simboliza bash-completion 2, que es requerido por la secuencia de comand Como se indicaba en la salida de `brew install` (sección "Caveats"), añade las siguientes líneas a tu `~/.bashrc` o `~/.bash_profile`: -```shell -export BASH_COMPLETION_COMPAT_DIR=/usr/local/etc/bash_completion.d -[[ -r /usr/local/etc/profile.d/bash_completion.sh ]] && . /usr/local/etc/profile.d/bash_completion.sh +```bash +export BASH_COMPLETION_COMPAT_DIR="/usr/local/etc/bash_completion.d" +[[ -r "/usr/local/etc/profile.d/bash_completion.sh" ]] && . "/usr/local/etc/profile.d/bash_completion.sh" ``` Recarga tu intérprete de comandos y verifica que bash-completion está correctamente instalado tecleando `type _init_completion`. @@ -407,25 +485,31 @@ La secuencia de comandos de completado de kubectl para Zsh puede ser generada co Para hacerlo en todas tus sesiones de tu intérprete de comandos, añade lo siguiente a tu `~/.zshrc`: -```shell +```zsh source <(kubectl completion zsh) ``` +Si tienes alias para kubectl, puedes extender el completado de intérprete de comandos para funcionar con ese alias.
+ +```zsh +echo 'alias k=kubectl' >>~/.zshrc +echo 'complete -F __start_kubectl k' >>~/.zshrc +``` + Tras recargar tu intérprete de comandos, el auto-completado de kubectl debería funcionar. Si obtienes un error como `complete:13: command not found: compdef`, entonces añade lo siguiente al principio de tu `~/.zshrc`: -```shell +```zsh autoload -Uz compinit compinit ``` {{% /tab %}} {{< /tabs >}} - - ## {{% heading "whatsnext" %}} - -[Aprender cómo lanzar y exponer tu aplicación.](/docs/tasks/access-application-cluster/service-access-application-cluster/) - - +* [Instalar Minikube](https://minikube.sigs.k8s.io/docs/start/) +* Ver las [guías](/docs/setup/) para más información sobre cómo crear clústeres. +* [Aprender cómo lanzar y exponer tu aplicación.](/docs/tasks/access-application-cluster/service-access-application-cluster/) +* Si necesitas acceso a un clúster que no creaste, ver el documento de [compartir acceso a un clúster](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/). +* Leer [la documentación de referencia de kubectl](/docs/reference/kubectl/kubectl/) diff --git a/content/es/docs/tasks/tools/install-minikube.md b/content/es/docs/tasks/tools/install-minikube.md deleted file mode 100644 index e19912e636..0000000000 --- a/content/es/docs/tasks/tools/install-minikube.md +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: Instalar Minikube -content_type: task -weight: 20 -card: - name: tasks - weight: 10 ---- - - - -Esta página muestra cómo instalar [Minikube](/docs/tutorials/hello-minikube), una herramienta que despliega un clúster de Kubernetes con un único nodo en una máquina virtual. - - - -## {{% heading "prerequisites" %}} - - -La virtualización VT-x o AMD-v debe estar habilitada en la BIOS de tu ordenador.
En Linux, puedes comprobar si la tienes habilitada buscando 'vmx' o 'svm' en el fichero `/proc/cpuinfo`: -```shell -egrep --color 'vmx|svm' /proc/cpuinfo -``` - - - - - -## Instalar un Hipervisor - -Si todavía no tienes un hipervisor instalado, puedes instalar uno de los siguientes: - -Sistema Operativo | Hipervisores soportados -:-----------------|:------------------------ -macOS | [VirtualBox](https://www.virtualbox.org/wiki/Downloads), [VMware Fusion](https://www.vmware.com/products/fusion), [HyperKit](https://github.com/moby/hyperkit) -Linux | [VirtualBox](https://www.virtualbox.org/wiki/Downloads), [KVM](http://www.linux-kvm.org/) -Windows | [VirtualBox](https://www.virtualbox.org/wiki/Downloads), [Hyper-V](https://msdn.microsoft.com/en-us/virtualization/hyperv_on_windows/quick_start/walkthrough_install) - -{{< note >}} -Minikube también soporta una opción `--vm-driver=none` que ejecuta los componentes de Kubernetes directamente en el servidor y no en una máquina virtual (MV). Para usar este modo, se requiere Docker y un entorno Linux, pero no es necesario tener un hipervisor. -{{< /note >}} - -## Instalar kubectl - -* Instala kubectl siguiendo las instrucciones disponibles en [Instalar y Configurar kubectl](/docs/tasks/tools/install-kubectl/). - -## Instalar Minikube - -### macOS - -La forma más fácil de instalar Minikube en macOS es usar [Homebrew](https://brew.sh): - -```shell -brew install minikube -``` - -También puedes instalarlo en macOS descargando un ejecutable autocontenido: - -```shell -curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-darwin-amd64 \ - && chmod +x minikube -``` - -Para tener disponible en la consola el comando `minikube`, puedes añadir el comando al $PATH o moverlo por ejemplo a `/usr/local/bin`: - -```shell -sudo mv minikube /usr/local/bin -``` - -### Linux - -{{< note >}} -Este documento muestra cómo instalar Minikube en Linux usando un ejecutable autocontenido. 
Para métodos alternativos de instalación en Linux, ver [Otros métodos de Instalación](https://github.com/kubernetes/minikube#other-ways-to-install) en el repositorio GitHub oficial de Minikube. -{{< /note >}} - -Puedes instalar Minikube en Linux descargando un ejecutable autocontenido: - -```shell -curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 \ - && chmod +x minikube -``` - -Para tener disponible en la consola el comando `minikube`, puedes añadir el comando al $PATH o moverlo por ejemplo a `/usr/local/bin`: - -```shell -sudo cp minikube /usr/local/bin && rm minikube -``` - -### Windows - -{{< note >}} -Para ejecutar Minikube en Windows, necesitas instalar [Hyper-V](https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/quick-start/enable-hyper-v) primero, que puede ejecutarse en las tres versiones de Windows 10: Windows 10 Enterprise, Windows 10 Professional, y Windows 10 Education. -{{< /note >}} - -La forma más fácil de instalar Minikube en Windows es usando [Chocolatey](https://chocolatey.org/) (ejecutar como administrador): - -```shell -choco install minikube kubernetes-cli -``` - -Una vez Minikube ha terminado de instalarse, cierra la sesión cliente actual y reinicia. Minikube debería haberse añadido a tu $PATH automáticamente. - -#### Instalación manual en Windows - -Para instalar Minikube manualmente en Windows, descarga [`minikube-windows-amd64`](https://github.com/kubernetes/minikube/releases/latest), renómbralo a `minikube.exe`, y añádelo a tu PATH. - -#### Instalador de Windows - -Para instalar Minikube manualmente en Windows usando [Windows Installer](https://docs.microsoft.com/en-us/windows/desktop/msi/windows-installer-portal), descarga [`minikube-installer.exe`](https://github.com/kubernetes/minikube/releases/latest) y ejecuta el instalador. 
- - -## Limpiar todo para comenzar de cero - -Si habías instalado previamente minikube, y ejecutas: -```shell -minikube start -``` - -Y dicho comando devuelve un error: -```shell -machine does not exist -``` - -Necesitas eliminar permanentemente los siguientes archivos de configuración: -```shell -rm -rf ~/.minikube -``` - -## {{% heading "whatsnext" %}} - - -* [Ejecutar Kubernetes Localmente via Minikube](/docs/setup/minikube/) \ No newline at end of file diff --git a/content/fr/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md b/content/fr/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md index 69d3467d11..bcf325df43 100644 --- a/content/fr/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md +++ b/content/fr/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md @@ -1,16 +1,17 @@ --- title: Installer kubeadm -description: kubeadm installation Kubernetes content_type: task -weight: 20 +weight: 10 +card: + name: setup + weight: 20 + title: Installez l'outil de configuration kubeadm --- -Cette page vous -apprend comment installer la boîte à outils `kubeadm`. -Pour plus d'informations sur la création d'un cluster avec kubeadm, une fois que vous avez -effectué ce processus d'installation, voir la page: [Utiliser kubeadm pour créer un cluster](/fr/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/). +Cette page vous apprend comment installer la boîte à outils `kubeadm`. +Pour plus d'informations sur la création d'un cluster avec kubeadm, une fois que vous avez effectué ce processus d'installation, voir la page: [Utiliser kubeadm pour créer un cluster](/fr/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/). 
@@ -19,39 +20,53 @@ effectué ce processus d'installation, voir la page: [Utiliser kubeadm pour cré * Une ou plusieurs machines exécutant: - Ubuntu 16.04+ - - Debian 9 + - Debian 9+ - CentOS 7 - - RHEL 7 - - Fedora 25/26 (best-effort) + - Red Hat Enterprise Linux (RHEL) 7 + - Fedora 25+ - HypriotOS v1.0.1+ - - Container Linux (testé avec 1800.6.0) + - Flatcar Container Linux (testé avec 2512.3.0) * 2 Go ou plus de RAM par machine (toute quantité inférieure laissera peu de place à vos applications) * 2 processeurs ou plus * Connectivité réseau complète entre toutes les machines du cluster (réseau public ou privé) * Nom d'hôte, adresse MAC et product_uuid uniques pour chaque nœud. Voir [ici](#verify-the-mac-address-and-product-uuid-are-unique-for-every-node) pour plus de détails. * Certains ports doivent êtres ouverts sur vos machines. Voir [ici](#check-required-ports) pour plus de détails. -* Swap désactivé. Vous devez impérativement désactiver le swap pour que la kubelet fonctionne correctement. +* Swap désactivé. Vous **devez** impérativement désactiver le swap pour que la kubelet fonctionne correctement. -## Vérifiez que les adresses MAC et product_uuid sont uniques pour chaque nœud {#verify-the-mac-address-and-product-uuid-are-unique-for-every-node} +## Vérifiez que les adresses MAC et product_uuid sont uniques pour chaque nœud {#verify-mac-address} * Vous pouvez obtenir l'adresse MAC des interfaces réseau en utilisant la commande `ip link` ou` ifconfig -a` * Le product_uuid peut être vérifié en utilisant la commande `sudo cat/sys/class/dmi/id/product_uuid` Il est très probable que les périphériques matériels aient des adresses uniques, bien que -certaines machines virtuelles puissent avoir des valeurs identiques. Kubernetes utilise -ces valeurs pour identifier de manière unique les nœuds du cluster. +certaines machines virtuelles puissent avoir des valeurs identiques. Kubernetes utilise ces valeurs pour identifier de manière unique les nœuds du cluster. 
Si ces valeurs ne sont pas uniques à chaque nœud, le processus d'installation peut [échouer](https://github.com/kubernetes/kubeadm/issues/31). ## Vérifiez les cartes réseaux -Si vous avez plusieurs cartes réseaux et que vos composants Kubernetes ne sont pas accessibles par la -route par défaut, nous vous recommandons d’ajouter une ou plusieurs routes IP afin que les adresses -de cluster Kubernetes soient acheminées via la carte approprié. +Si vous avez plusieurs cartes réseaux et que vos composants Kubernetes ne sont pas accessibles par la route par défaut, +nous vous recommandons d’ajouter une ou plusieurs routes IP afin que les adresses de cluster Kubernetes soient acheminées via la carte approprié. + +## Permettre à iptables de voir le trafic ponté + +Assurez-vous que le module `br_netfilter` est chargé. Cela peut être fait en exécutant `lsmod | grep br_netfilter`. Pour le charger explicitement, appelez `sudo modprobe br_netfilter`. + +Pour que les iptables de votre nœud Linux voient correctement le trafic ponté, vous devez vous assurer que `net.bridge.bridge-nf-call-iptables` est défini sur 1 dans votre configuration` sysctl`, par ex. + +```bash +cat <}}. -Les autres runtimes basés sur la CRI incluent: +{{< tabs name="container_runtime" >}} +{{% tab name="Linux nodes" %}} -- [containerd](https://github.com/containerd/cri) (plugin CRI construit dans containerd) -- [cri-o](https://cri-o.io/) -- [frakti](https://github.com/kubernetes/frakti) +Par défaut, Kubernetes utilise le +{{< glossary_tooltip term_id="cri" text="Container Runtime Interface">}} (CRI) +pour s'interfacer avec votre environnement d'exécution de conteneur choisi. + +Si vous ne spécifiez pas de runtime, kubeadm essaie automatiquement de détecter un +Runtime de conteneur en parcourant une liste de sockets de domaine Unix bien connus. 
+Le tableau suivant répertorie les environnements d'exécution des conteneurs et leurs chemins de socket associés: + +{{< table caption = "Les environnements d'exécution des conteneurs et leurs chemins de socket" >}} +| Runtime | Chemin vers le socket de domaine Unix | +|------------|---------------------------------------| +| Docker | `/var/run/docker.sock` | +| containerd | `/run/containerd/containerd.sock` | +| CRI-O | `/var/run/crio/crio.sock` | +{{< /table >}} + +
+Si Docker et containerd sont détectés, Docker est prioritaire. C'est +nécessaire car Docker 18.09 est livré avec containerd et les deux sont détectables même si vous +installez Docker. +Si deux autres environnements d'exécution ou plus sont détectés, kubeadm se ferme avec une erreur. + +Le kubelet s'intègre à Docker via l'implémentation CRI intégrée de `dockershim`. + +Voir [runtimes de conteneur](/docs/setup/production-environment/container-runtimes/) +pour plus d'informations. +{{% /tab %}} +{{% tab name="autres systèmes d'exploitation" %}} +Par défaut, kubeadm utilise {{< glossary_tooltip term_id="docker" >}} comme environnement d'exécution du conteneur. +Le kubelet s'intègre à Docker via l'implémentation CRI intégrée de `dockershim`. + +Voir [runtimes de conteneur](/docs/setup/production-environment/container-runtimes/) +pour plus d'informations. +{{% /tab %}} +{{< /tabs >}} -Reportez-vous aux [instructions d'installation de la CRI](/docs/setup/cri) pour plus d'informations. ## Installation de kubeadm, des kubelets et de kubectl @@ -108,17 +156,17 @@ Vous installerez ces paquets sur toutes vos machines: * `kubectl`: la ligne de commande utilisée pour parler à votre cluster. kubeadm **n'installera pas** ni ne gèrera les `kubelet` ou` kubectl` pour vous. -Vous devez vous assurer qu'ils correspondent à la version du control plane de Kubernetes que vous - souhaitez que kubeadm installe pour vous. Si vous ne le faites pas, vous risquez qu' - une erreur de version se produise, qui pourrait conduire à un comportement inattendu. - Cependant, une version mineure entre les kubelets et le control plane est pris en charge, - mais la version de la kubelet ne doit jamais dépasser la version de l'API server. Par exemple, - les kubelets exécutant la version 1.7.0 devraient être entièrement compatibles avec un API - server en 1.8.0, mais pas l'inverse. 
+Vous devez vous assurer qu'ils correspondent à la version du control plane de Kubernetes que vous souhaitez que kubeadm installe pour vous. Si vous ne le faites pas, vous risquez qu'une +erreur de version se produise, qui pourrait conduire à un comportement inattendu. +Cependant, une version mineure entre les kubelets et le control plane est prise en charge, +mais la version de la kubelet ne doit jamais dépasser la version de l'API server. +Par exemple, les kubelets exécutant la version 1.7.0 devraient être entièrement compatibles avec un API server en 1.8.0, +mais pas l'inverse. + +Pour plus d'informations sur l'installation de `kubectl`, voir [Installation et configuration kubectl](/fr/docs/tasks/tools/install-kubectl/). {{< warning >}} -Ces instructions excluent tous les packages Kubernetes de toutes les mises à niveau du système -d'exploitation. +Ces instructions excluent tous les packages Kubernetes de toutes les mises à niveau du système d'exploitation. C’est parce que kubeadm et Kubernetes ont besoin d'une [attention particulière lors de la mise à niveau](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-11/). {{< /warning >}} @@ -131,118 +179,132 @@ Pour plus d'informations sur les compatibilités de version, voir: {{< tabs name="k8s_install" >}} {{% tab name="Ubuntu, Debian or HypriotOS" %}} ```bash -apt-get update && apt-get install -y apt-transport-https curl -curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - -cat </etc/apt/sources.list.d/kubernetes.list +sudo apt-get update && sudo apt-get install -y apt-transport-https curl +curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - +cat < /etc/yum.repos.d/kubernetes.repo +cat < /etc/sysctl.d/k8s.conf - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - EOF - sysctl --system - ``` - - Assurez-vous que le module `br_netfilter` est chargé avant cette étape. Cela peut être fait en exécutant `lsmod | grep br_netfilter`.
Pour le charger explicitement, lancez `modprobe br_netfilter`. {{% /tab %}} -{{% tab name="Container Linux" %}} -Installez les plugins CNI (requis pour la plupart des réseaux de pod): +{{% tab name="Fedora CoreOS ou Flatcar Container Linux" %}} +Installez les plugins CNI (requis pour la plupart des réseaux de pods) : ```bash -CNI_VERSION="v0.6.0" -mkdir -p /opt/cni/bin -curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-amd64-${CNI_VERSION}.tgz" | tar -C /opt/cni/bin -xz +CNI_VERSION="v0.8.2" +sudo mkdir -p /opt/cni/bin +curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-linux-amd64-${CNI_VERSION}.tgz" | sudo tar -C /opt/cni/bin -xz ``` -Installez crictl (obligatoire pour kubeadm / Kubelet Container Runtime Interface (CRI)) +Définissez le répertoire pour télécharger les fichiers de commande + +{{< note >}} +La variable DOWNLOAD_DIR doit être définie sur un répertoire accessible en écriture. 
+Si vous exécutez Flatcar Container Linux, définissez DOWNLOAD_DIR=/opt/bin +{{< /note >}} ```bash -CRICTL_VERSION="v1.11.1" -mkdir -p /opt/bin -curl -L "https://github.com/kubernetes-incubator/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-amd64.tar.gz" | tar -C /opt/bin -xz +DOWNLOAD_DIR=/usr/local/bin +sudo mkdir -p $DOWNLOAD_DIR ``` -Installez `kubeadm`, `kubelet`, `kubectl` et ajouter un service systemd `kubelet`: +Installez crictl (requis pour Kubeadm / Kubelet Container Runtime Interface (CRI)) + +```bash +CRICTL_VERSION="v1.17.0" +curl -L "https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-amd64.tar.gz" | sudo tar -C $DOWNLOAD_DIR -xz +``` + +Installez `kubeadm`,` kubelet`, `kubectl` et ajoutez un service systemd` kubelet`: + +RELEASE_VERSION="v0.6.0" ```bash RELEASE="$(curl -sSL https://dl.k8s.io/release/stable.txt)" +cd $DOWNLOAD_DIR +sudo curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/amd64/{kubeadm,kubelet,kubectl} +sudo chmod +x {kubeadm,kubelet,kubectl} -mkdir -p /opt/bin -cd /opt/bin -curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/amd64/{kubeadm,kubelet,kubectl} -chmod +x {kubeadm,kubelet,kubectl} - -curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service -mkdir -p /etc/systemd/system/kubelet.service.d -curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf +curl -sSL "https://raw.githubusercontent.com/kubernetes/release/${RELEASE_VERSION}/cmd/kubepkg/templates/latest/deb/kubelet/lib/systemd/system/kubelet.service" | sed "s:/usr/bin:${DOWNLOAD_DIR}:g" | sudo tee /etc/systemd/system/kubelet.service 
+sudo mkdir -p /etc/systemd/system/kubelet.service.d +curl -sSL "https://raw.githubusercontent.com/kubernetes/release/${RELEASE_VERSION}/cmd/kubepkg/templates/latest/deb/kubeadm/10-kubeadm.conf" | sed "s:/usr/bin:${DOWNLOAD_DIR}:g" | sudo tee /etc/systemd/system/kubelet.service.d/10-kubeadm.conf ``` -Activez et démarrez la `kubelet`: +Activez et démarrez `kubelet` : ```bash -systemctl enable --now kubelet +sudo systemctl enable --now kubelet ``` + +{{< note >}} +La distribution Linux Flatcar Container monte le répertoire `/usr` comme un système de fichiers en lecture seule. +Avant de démarrer votre cluster, vous devez effectuer des étapes supplémentaires pour configurer un répertoire accessible en écriture. +Consultez le [Guide de dépannage de Kubeadm](/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/#usr-mounted-read-only/) pour savoir comment configurer un répertoire accessible en écriture. +{{< /note >}} {{% /tab %}} {{< /tabs >}} -La kubelet redémarre maintenant toutes les secondes et quelques, car elle attend dans une boucle -kubeadm, pour lui dire quoi faire. +Kubelet redémarre maintenant toutes les quelques secondes, +car il attend les instructions de kubeadm dans une boucle de crash. ## Configurer le driver de cgroup utilisé par la kubelet sur un nœud master -Lorsque vous utilisez Docker, kubeadm détecte automatiquement le pilote ( driver ) de cgroup pour la kubelet -et le configure dans le fichier `/var/lib/kubelet/kubeadm-flags.env` lors de son éxecution. +Lorsque vous utilisez Docker, kubeadm détecte automatiquement le pilote ( driver ) de cgroup pour kubelet +et le configure dans le fichier `/var/lib/kubelet/config.yaml` lors de son éxecution. 
-Si vous utilisez un autre CRI, vous devez modifier le fichier `/etc/default/kubelet` avec votre -valeur de `cgroup-driver` comme ceci: +Si vous utilisez un autre CRI, vous devez passer votre valeur `cgroupDriver` avec `kubeadm init`, comme ceci : -```bash -KUBELET_EXTRA_ARGS=--cgroup-driver= +```yaml +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +cgroupDriver: ``` -Ce fichier sera utilisé par `kubeadm init` et` kubeadm join` pour sourcer des arguments supplémentaires définis par l'utilisateur pour la kubelet. +Pour plus de détails, veuillez lire [Utilisation de kubeadm init avec un fichier de configuration](/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file). Veuillez noter que vous devez **seulement** le faire si le driver de cgroupe de votre CRI n'est pas `cgroupfs`, car c'est déjà la valeur par défaut dans la kubelet. +{{< note >}} +Depuis que le paramètre `--cgroup-driver` est obsolète par kubelet, si vous l'avez dans`/var/lib/kubelet/kubeadm-flags.env` +ou `/etc/default/kubelet`(`/etc/sysconfig/kubelet` pour les RPM), veuillez le supprimer et utiliser à la place KubeletConfiguration +(stocké dans`/var/lib/kubelet/config.yaml` par défaut). +{{< /note >}} + Il est nécessaire de redémarrer la kubelet: ```bash @@ -250,6 +312,10 @@ sudo systemctl daemon-reload sudo systemctl restart kubelet ``` +La détection automatique du pilote cgroup pour d'autres runtimes de conteneur +comme CRI-O et containerd est un travail en cours. + + ## Dépannage Si vous rencontrez des difficultés avec kubeadm, veuillez consulter notre [documentation de dépannage](/fr/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/). 
@@ -258,5 +324,3 @@ Si vous rencontrez des difficultés avec kubeadm, veuillez consulter notre [docu * [Utiliser kubeadm pour créer un cluster](/fr/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/) - - diff --git a/content/fr/examples/application/guestbook/frontend-deployment.yaml b/content/fr/examples/application/guestbook/frontend-deployment.yaml index 50d6e1f0d4..23d64be644 100644 --- a/content/fr/examples/application/guestbook/frontend-deployment.yaml +++ b/content/fr/examples/application/guestbook/frontend-deployment.yaml @@ -1,4 +1,4 @@ -apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2 +apiVersion: apps/v1 kind: Deployment metadata: name: frontend diff --git a/content/fr/examples/application/guestbook/redis-master-deployment.yaml b/content/fr/examples/application/guestbook/redis-master-deployment.yaml index fc6f418c39..478216d1ac 100644 --- a/content/fr/examples/application/guestbook/redis-master-deployment.yaml +++ b/content/fr/examples/application/guestbook/redis-master-deployment.yaml @@ -1,4 +1,4 @@ -apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2 +apiVersion: apps/v1 kind: Deployment metadata: name: redis-master diff --git a/content/fr/examples/application/guestbook/redis-slave-deployment.yaml b/content/fr/examples/application/guestbook/redis-slave-deployment.yaml index 7dcfb6c263..1a7b04386a 100644 --- a/content/fr/examples/application/guestbook/redis-slave-deployment.yaml +++ b/content/fr/examples/application/guestbook/redis-slave-deployment.yaml @@ -1,4 +1,4 @@ -apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2 +apiVersion: apps/v1 kind: Deployment metadata: name: redis-slave diff --git a/content/id/docs/concepts/services-networking/ingress.md b/content/id/docs/concepts/services-networking/ingress.md index 1cc56c5960..6baae2adc9 100644 --- a/content/id/docs/concepts/services-networking/ingress.md +++ b/content/id/docs/concepts/services-networking/ingress.md @@ -89,7 +89,7 @@ spec: 
``` Seperti layaknya *resource* Kubernetes yang lain, sebuah Ingress membutuhkan *field* `apiVersion`, `kind`, dan `metadata`. - Untuk informasi umum soal bagaimana cara bekerja dengan menggunakan file konfigurasi, silahkan merujuk pada [melakukan deploy aplikasi](/docs/tasks/run-application/run-stateless-application-deployment/), [konfigurasi kontainer](/id/docs/tasks/configure-pod-container/configure-pod-configmap/), [mengatur *resource*](/id/docs/concepts/cluster-administration/manage-deployment/). + Untuk informasi umum soal bagaimana cara bekerja dengan menggunakan berkas konfigurasi, silahkan merujuk pada [melakukan deploy aplikasi](/docs/tasks/run-application/run-stateless-application-deployment/), [konfigurasi kontainer](/id/docs/tasks/configure-pod-container/configure-pod-configmap/), [mengatur *resource*](/id/docs/concepts/cluster-administration/manage-deployment/). Ingress seringkali menggunakan anotasi untuk melakukan konfigurasi beberapa opsi yang ada bergantung pada kontroler Ingress yang digunakan, sebagai contohnya adalah [anotasi rewrite-target](https://github.com/kubernetes/ingress-nginx/blob/master/docs/examples/rewrite/README.md). [Kontroler Ingress](/id/docs/concepts/services-networking/ingress-controllers) yang berbeda memiliki jenis anotasi yang berbeda. Pastikan kamu sudah terlebih dahulu memahami dokumentasi @@ -442,7 +442,7 @@ Events: Normal ADD 45s loadbalancer-controller default/test ``` -Kamu juga dapat mengubah Ingress dengan menggunakan perintah `kubectl replace -f` pada file konfigurasi +Kamu juga dapat mengubah Ingress dengan menggunakan perintah `kubectl replace -f` pada berkas konfigurasi Ingress yang ingin diubah. 
## Mekanisme *failing* pada beberapa zona *availability* diff --git a/content/id/docs/reference/glossary/kuantitas.md b/content/id/docs/reference/glossary/kuantitas.md deleted file mode 100644 index ddac5a5ded..0000000000 --- a/content/id/docs/reference/glossary/kuantitas.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: Kuantitas -id: kuantitas -date: 2018-08-07 -full_link: -short_description: > - Representasi bilangan bulat dari bilangan kecil atau bilangan besar menggunakan sufiks SI. - -aka: -tags: -- core-object ---- - Representasi bilangan bulat dari bilangan kecil atau besar menggunakan sufiks SI. - - - -Kuantitas adalah representasi dari bilangan kecil atau besar menggunakan notasi -bilangan bulat kompak dengan sufiks SI. Bilangan pecahan direpresentasikan dengan satuan mili, -sedangkan bilangan besar direpresentasikan dengan satuan kilo, mega, atau giga. - -Misalnya, angka `1,5` direpresentasikan sebagai` 1500m`, sedangkan angka `1000` dapat direpresentasikan sebagai` 1k`, dan `1000000` sebagai` 1M`. Kamu juga dapat menentukan sufiks notasi biner; angka 2048 dapat -ditulis sebagai `2Ki`. - -Satuan desimal yang diterima (pangkat 10) adalah `m` (mili),` k` (kilo, -sengaja huruf kecil), `M` (mega),` G` (giga), `T` (tera),` P` (peta), -`E` (exa). - -Unit biner (pangkat-2) yang diterima adalah `Ki` (kibi),` Mi` (mebi), `Gi` (gibi),` Ti` (tebi), `Pi` (pebi),` Ei` (exbi). diff --git a/content/id/docs/reference/glossary/kube-controller-manager.md b/content/id/docs/reference/glossary/kube-controller-manager.md index 436927afc7..2d0d5d0d9a 100644 --- a/content/id/docs/reference/glossary/kube-controller-manager.md +++ b/content/id/docs/reference/glossary/kube-controller-manager.md @@ -4,16 +4,15 @@ id: kube-controller-manager date: 2019-04-21 full_link: /docs/reference/generated/kube-controller-manager/ short_description: > - Komponen di master yang menjalankan kontroler. + Komponen _control plane_ yang menjalankan pengontrol. 
aka: tags: - architecture - fundamental --- - Komponen di master yang menjalankan kontroler. +Komponen _control plane_ yang menjalankan pengontrol. -Secara logis, setiap kontroler adalah sebuah proses yang berbeda, tetapi untuk mengurangi kompleksitas, kontroler-kontroler ini dikompilasi menjadi sebuah binary yang dijalankan sebagai satu proses. - +Secara logis, setiap pengontrol adalah sebuah proses yang berbeda, tetapi untuk mengurangi kompleksitas, kesemuanya dikompilasi menjadi sebuah biner (_binary_) yang dijalankan sebagai satu proses. diff --git a/content/id/docs/reference/glossary/kube-proxy.md b/content/id/docs/reference/glossary/kube-proxy.md new file mode 100644 index 0000000000..e842c98386 --- /dev/null +++ b/content/id/docs/reference/glossary/kube-proxy.md @@ -0,0 +1,20 @@ +--- +title: kube-proxy +id: kube-proxy +date: 2018-04-12 +full_link: /docs/reference/command-line-tools-reference/kube-proxy/ +short_description: > + `kube-proxy` merupakan proksi jaringan yang berjalan pada setiap node di dalam klaster. + +aka: +tags: +- fundamental +- networking +--- +kube-proxy merupakan proksi jaringan yang berjalan pada setiap {{< glossary_tooltip term_id="node" >}} di dalam klastermu, yang mengimplementasikan bagian dari konsep {{< glossary_tooltip text="layanan" term_id="service">}} Kubernetes. + + + +[kube-proxy](/docs/reference/command-line-tools-reference/kube-proxy/) mengelola aturan jaringan pada node. Aturan jaringan tersebut memungkinkan komunikasi jaringan ke Pod-mu melalui sesi jaringan dari dalam ataupun luar klaster. + +kube-proxy menggunakan lapisan pemfilteran paket sistem operasi jika ada dan tersedia. Jika tidak, maka kube-proxy akan meneruskan lalu lintas jaringan itu sendiri. 
diff --git a/content/id/docs/reference/glossary/kube-scheduler.md b/content/id/docs/reference/glossary/kube-scheduler.md index e1bf37a36b..f61e99a8b8 100644 --- a/content/id/docs/reference/glossary/kube-scheduler.md +++ b/content/id/docs/reference/glossary/kube-scheduler.md @@ -4,15 +4,14 @@ id: kube-scheduler date: 2019-04-21 full_link: /docs/reference/generated/kube-scheduler/ short_description: > - Komponen di master yang bertugas mengamati pod yang baru dibuat dan belum di-assign ke suatu node dan kemudian akan memilih sebuah node dimana pod baru tersebut akan dijalankan. + Komponen _control plane_ yang bertugas mengamati Pod baru yang belum ditempatkan di node manapun dan kemudian memilihkan node di mana Pod baru tersebut akan dijalankan. aka: tags: - architecture --- - Komponen di master yang bertugas mengamati pod yang baru dibuat dan belum di-assign ke suatu node dan kemudian akan memilih sebuah node dimana pod baru tersebut akan dijalankan. +Komponen _control plane_ yang bertugas mengamati {{< glossary_tooltip term_id="pod" >}} baru yang belum ditempatkan di node manapun dan kemudian memilihkan {{< glossary_tooltip term_id="node" >}} di mana Pod baru tersebut akan dijalankan. -Faktor-faktor yang diperhatikan dalam proses ini adalah kebutuhan resource secara individual dan kolektif, konstrain perangkat keras/perangkat lunak/peraturan, spesifikasi afinitas dan non-afinitas, lokalisasi data, interferensi inter-workload dan deadlines. - +Faktor-faktor yang dipertimbangkan untuk keputusan penjadwalan termasuk: kebutuhan sumber daya secara individual dan kolektif, batasan perangkat keras/perangkat lunak/peraturan, spesifikasi afinitas dan nonafinitas, lokalisasi data, interferensi antar beban kerja dan tenggat waktu. 
diff --git a/content/id/docs/reference/glossary/kubectl.md b/content/id/docs/reference/glossary/kubectl.md index 579a4003c2..059cd29b59 100644 --- a/content/id/docs/reference/glossary/kubectl.md +++ b/content/id/docs/reference/glossary/kubectl.md @@ -11,7 +11,7 @@ tags: - tool - fundamental --- -Sebuah utilitas baris perintah untuk berkomunikasi dengan suatu server {{< glossary_tooltip text="API Kubernetes" term_id="kubernetes-api" >}}. +Sebuah utilitas baris perintah untuk berkomunikasi dengan suatu server {{< glossary_tooltip term_id="kubernetes-api" >}}. diff --git a/content/id/docs/reference/glossary/kubelet.md b/content/id/docs/reference/glossary/kubelet.md index 10cc51ea07..ee0e239616 100644 --- a/content/id/docs/reference/glossary/kubelet.md +++ b/content/id/docs/reference/glossary/kubelet.md @@ -4,12 +4,11 @@ id: kubelet date: 2019-04-21 full_link: /docs/reference/generated/kubelet short_description: > - Agen yang dijalankan pada setiap node di klaster dan bertugas memastikan kontainer dijalankan di dalam pod. + Agen yang dijalankan pada setiap node di klaster yang bertugas untuk memastikan kontainer dijalankan di dalam Pod. aka: tags: - fundamental - core-object --- - Agen yang dijalankan pada setiap node di klaster dan bertugas memastikan kontainer dijalankan di dalam pod. - +Agen yang dijalankan pada setiap node di klaster yang bertugas untuk memastikan kontainer dijalankan di dalam Pod. diff --git a/content/id/docs/reference/glossary/quantity.md b/content/id/docs/reference/glossary/quantity.md new file mode 100644 index 0000000000..7369fc20dc --- /dev/null +++ b/content/id/docs/reference/glossary/quantity.md @@ -0,0 +1,22 @@ +--- +title: Kuantitas +id: quantity +date: 2018-08-07 +full_link: +short_description: > + Representasi bilangan bulat dari bilangan kecil atau besar menggunakan sufiks SI. +aka: +tags: +- core-object +--- +Representasi bilangan bulat dari bilangan kecil atau besar menggunakan sufiks SI. 
+ + + +Kuantitas adalah representasi dari bilangan kecil atau besar menggunakan notasi bilangan bulat kompak dengan sufiks SI. Bilangan pecahan direpresentasikan dengan satuan mili, sedangkan bilangan besar direpresentasikan dengan satuan kilo, mega, atau giga. + +Misalnya, angka `1,5` direpresentasikan sebagai `1500m`, sedangkan angka `1000` dapat direpresentasikan sebagai `1k`, dan `1000000` sebagai `1M`. Kamu juga dapat menentukan sufiks notasi biner; angka 2048 dapat ditulis sebagai `2Ki`. + +Satuan desimal yang diterima (pangkat 10) adalah `m` (mili), `k` (kilo, sengaja huruf kecil), `M` (mega), `G` (giga), `T` (tera), `P` (peta), `E` (exa). + +Unit biner (pangkat 2) yang diterima adalah `Ki` (kibi), `Mi` (mebi), `Gi` (gibi), `Ti` (tebi), `Pi` (pebi), `Ei` (exbi). diff --git a/content/id/docs/setup/production-environment/container-runtimes.md b/content/id/docs/setup/production-environment/container-runtimes.md index bca967593c..39db9c9ed3 100644 --- a/content/id/docs/setup/production-environment/container-runtimes.md +++ b/content/id/docs/setup/production-environment/container-runtimes.md @@ -359,7 +359,7 @@ apt-get update && apt-get install -y containerd.io ```shell # Mengonfigure containerd mkdir -p /etc/containerd -containerd config default > /etc/containerd/config.toml +containerd config default | sudo tee /etc/containerd/config.toml ``` ```shell @@ -391,7 +391,7 @@ yum update -y && yum install -y containerd.io ```shell ## Mengonfigurasi containerd mkdir -p /etc/containerd -containerd config default > /etc/containerd/config.toml +containerd config default | sudo tee /etc/containerd/config.toml ``` ```shell diff --git a/content/id/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md b/content/id/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md index adcf73db77..fa0b0ce7f0 100644 --- a/content/id/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md +++ 
b/content/id/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md @@ -289,8 +289,8 @@ atau `/etc/default/kubelet`(`/etc/sysconfig/kubelet` untuk RPM), silakan hapus d Kamu harus melakukan _restart_ pada kubelet: ```bash -systemctl daemon-reload -systemctl restart kubelet +sudo systemctl daemon-reload +sudo systemctl restart kubelet ``` Deteksi _driver_ cgroup secara otomatis untuk _runtime_ Container lainnya diff --git a/content/id/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md b/content/id/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md index da873c4373..442e3178ec 100644 --- a/content/id/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md +++ b/content/id/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md @@ -426,7 +426,7 @@ pengambilan metrik. Terakhir, kondisi terakhir, `ScalingLimited`, menunjukkan ba ## Lampiran: Kuantitas -Semua metrik di HorizontalPodAutoscaler dan metrik API ditentukan menggunakan notasi bilangan bulat khusus yang dikenal di Kubernetes sebagai {{< glossary_tooltip term_id="kuantitas" text="kuantitas">}}. Misalnya, kuantitas `10500m` akan ditulis sebagai `10.5` dalam notasi desimal. Metrik API akan menampilkan bilangan bulat tanpa sufiks jika memungkinkan, dan secara umum akan mengembalikan kuantitas dalam satuan mili. Ini berarti Anda mungkin melihat nilai metrik Anda berfluktuasi antara `1` dan` 1500m`, atau `1` dan` 1,5` ketika ditulis dalam notasi desimal. +Semua metrik di HorizontalPodAutoscaler dan metrik API ditentukan menggunakan notasi bilangan bulat khusus yang dikenal di Kubernetes sebagai {{< glossary_tooltip term_id="quantity" text="kuantitas">}}. Misalnya, kuantitas `10500m` akan ditulis sebagai `10.5` dalam notasi desimal. Metrik API akan menampilkan bilangan bulat tanpa sufiks jika memungkinkan, dan secara umum akan mengembalikan kuantitas dalam satuan mili. 
Ini berarti kamu mungkin melihat nilai metrik berfluktuasi antara `1` dan `1500m`, atau `1` dan `1,5` ketika ditulis dalam notasi desimal. ## Lampiran: Skenario lain yang memungkinkan diff --git a/content/ja/docs/concepts/configuration/secret.md b/content/ja/docs/concepts/configuration/secret.md index 26ce98ab50..9ae7c55067 100644 --- a/content/ja/docs/concepts/configuration/secret.md +++ b/content/ja/docs/concepts/configuration/secret.md @@ -457,7 +457,7 @@ Secretは直接Podが参照できるようにはされず、システムの別 PodのボリュームとしてSecretを使うには、 1. Secretを作成するか既存のものを使用します。複数のPodが同一のSecretを参照することができます。 -1. ボリュームを追加するため、Podの定義の`.spec.volumes[]`以下をを書き換えます。ボリュームに命名し、`.spec.volumes[].secret.secretName`フィールドはSecretオブジェクトの名称と同一にします。 +1. ボリュームを追加するため、Podの定義の`.spec.volumes[]`以下を書き換えます。ボリュームに命名し、`.spec.volumes[].secret.secretName`フィールドはSecretオブジェクトの名称と同一にします。 1. Secretを必要とするそれぞれのコンテナに`.spec.containers[].volumeMounts[]`を追加します。`.spec.containers[].volumeMounts[].readOnly = true`を指定して`.spec.containers[].volumeMounts[].mountPath`をSecretをマウントする未使用のディレクトリ名にします。 1. 
イメージやコマンドラインを変更し、プログラムがそのディレクトリを参照するようにします。連想配列`data`のキーは`mountPath`以下のファイル名になります。 diff --git a/content/ja/docs/reference/_index.md b/content/ja/docs/reference/_index.md index 4276875967..0496a730c4 100644 --- a/content/ja/docs/reference/_index.md +++ b/content/ja/docs/reference/_index.md @@ -31,7 +31,7 @@ content_type: concept ## CLIリファレンス * [kubectl](/docs/reference/kubectl/overview/) - コマンドの実行やKubernetesクラスターの管理に使う主要なCLIツールです。 - * [JSONPath](/docs/reference/kubectl/jsonpath/) - kubectlで[JSONPath記法](https://goessner.net/articles/JsonPath/)を使うための構文ガイドです。 + * [JSONPath](/ja/docs/reference/kubectl/jsonpath/) - kubectlで[JSONPath記法](https://goessner.net/articles/JsonPath/)を使うための構文ガイドです。 * [kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/) - セキュアなKubernetesクラスターを簡単にプロビジョニングするためのCLIツールです。 ## コンポーネントリファレンス diff --git a/content/ja/docs/reference/access-authn-authz/authentication.md b/content/ja/docs/reference/access-authn-authz/authentication.md index 3f713636f3..14875c8229 100644 --- a/content/ja/docs/reference/access-authn-authz/authentication.md +++ b/content/ja/docs/reference/access-authn-authz/authentication.md @@ -105,19 +105,6 @@ APIサーバーの`--enable-bootstrap-token-auth`フラグで、Bootstrap Token ブートストラップトークンの認証機能やコントローラーについての詳細な説明、`kubeadm`でこれらのトークンを管理する方法については、[ブートストラップトークン](/docs/reference/access-authn-authz/bootstrap-tokens/)を参照してください。 -### 静的なパスワードファイル - -APIサーバーに`--basic-auth-file=SOMEFILE`オプションを渡すことで、Basic認証を有効にすることができます。現在のところ、Basic認証の認証情報は有効期限が無く、APIサーバーを再起動しない限りパスワードを変更することはできません。よりセキュアなモードをさらに使いやすくするための改良が完了するまでの間、現時点では利便性のためにBasic認証がサポートされていることに注意してください。 - -Basic認証ファイルは、トークン、ユーザー名、ユーザーIDの少なくとも3つの列を持つcsvファイルです。 -Kubernetesのバージョン1.6以降では、オプションとしてカンマ区切りのグループ名を含む4列目を指定することができます。複数のグループがある場合は、4列目の値をダブルクォート(")で囲む必要があります。以下の例を参照してください。 - -```conf -password,user,uid,"group1,group2,group3" -``` - -HTTPクライアントからBasic認証を利用する場合、APIサーバーは`Basic BASE64ENCODED(USER:PASSWORD)`の値を持つ`Authorization`ヘッダーを待ち受けます。 - ### サービスアカウントトークン 
サービスアカウントは、自動的に有効化される認証機能で、署名されたBearerトークンを使ってリクエストを検証します。このプラグインは、オプションとして2つのフラグを取ります。 diff --git a/content/ja/docs/reference/kubectl/jsonpath.md b/content/ja/docs/reference/kubectl/jsonpath.md new file mode 100644 index 0000000000..9b9caca4bb --- /dev/null +++ b/content/ja/docs/reference/kubectl/jsonpath.md @@ -0,0 +1,112 @@ +--- +title: JSONPathのサポート +content_type: concept +weight: 25 +--- + + +kubectlはJSONPathのテンプレートをサポートしています。 + + + +JSONPathのテンプレートは、波括弧`{}`によって囲まれたJSONPathの式によって構成されています。 +kubectlでは、JSONPathの式を使うことで、JSONオブジェクトの特定のフィールドをフィルターしたり、出力のフォーマットを変更することができます。 +本来のJSONPathのテンプレートの構文に加え、以下の機能と構文が使えます: + +1. JSONPathの式の内部でテキストをクォートするために、ダブルクォーテーションを使用します。 +2. リストを反復するために、`range`、`end`オペレーターを使用します。 +3. リストを末尾側から参照するために、負の数のインデックスを使用します。負の数のインデックスはリストを「周回」せず、`-index + listLength >= 0`が満たされる限りにおいて有効になります。 + +{{< note >}} + +- 式は常にルートのオブジェクトから始まるので、`$`オペレーターの入力は任意になります。 + +- 結果のオブジェクトはString()関数を適用した形で表示されます。 + +{{< /note >}} + +以下のようなJSONの入力が与えられたとします。 + +```json +{ + "kind": "List", + "items":[ + { + "kind":"None", + "metadata":{"name":"127.0.0.1"}, + "status":{ + "capacity":{"cpu":"4"}, + "addresses":[{"type": "LegacyHostIP", "address":"127.0.0.1"}] + } + }, + { + "kind":"None", + "metadata":{"name":"127.0.0.2"}, + "status":{ + "capacity":{"cpu":"8"}, + "addresses":[ + {"type": "LegacyHostIP", "address":"127.0.0.2"}, + {"type": "another", "address":"127.0.0.3"} + ] + } + } + ], + "users":[ + { + "name": "myself", + "user": {} + }, + { + "name": "e2e", + "user": {"username": "admin", "password": "secret"} + } + ] +} +``` + +機能 | 説明 | 例 | 結果 +--------------------|---------------------------|-----------------------------------------------------------------|------------------ +`text` | プレーンテキスト | `kind is {.kind}` | `kind is List` +`@` | 現在のオブジェクト | `{@}` | 入力した値と同じ値 +`.` or `[]` | 子要素 | `{.kind}`, `{['kind']}` or `{['name\.type']}` | `List` +`..` | 子孫要素を再帰的に探す | `{..name}` | `127.0.0.1 127.0.0.2 myself e2e` +`*` | ワイルドカード。すべてのオブジェクトを取得する | 
`{.items[*].metadata.name}` | `[127.0.0.1 127.0.0.2]` +`[start:end:step]` | 添字 | `{.users[0].name}` | `myself` +`[,]` | 和集合 | `{.items[*]['metadata.name', 'status.capacity']}` | `127.0.0.1 127.0.0.2 map[cpu:4] map[cpu:8]` +`?()` | フィルター | `{.users[?(@.name=="e2e")].user.password}` | `secret` +`range`, `end` | リストの反復 | `{range .items[*]}[{.metadata.name}, {.status.capacity}] {end}` | `[127.0.0.1, map[cpu:4]] [127.0.0.2, map[cpu:8]]` +`''` | 解釈済みの文字列をクォートする | `{range .items[*]}{.metadata.name}{'\t'}{end}` | `127.0.0.1 127.0.0.2` + +`kubectl`とJSONPathの式を使った例: + +```shell +kubectl get pods -o json +kubectl get pods -o=jsonpath='{@}' +kubectl get pods -o=jsonpath='{.items[0]}' +kubectl get pods -o=jsonpath='{.items[0].metadata.name}' +kubectl get pods -o=jsonpath="{.items[*]['metadata.name', 'status.capacity']}" +kubectl get pods -o=jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.startTime}{"\n"}{end}' +``` + +{{< note >}} +Windowsでは、空白が含まれるJSONPathのテンプレートをクォートする場合は(上記のようにシングルクォーテーションを使うのではなく)、ダブルクォーテーションを使わなければなりません。 +また、テンプレート内のリテラルをクォートする際には、シングルクォーテーションか、エスケープされたダブルクォーテーションを使わなければなりません。例えば: + +```cmd +kubectl get pods -o=jsonpath="{range .items[*]}{.metadata.name}{'\t'}{.status.startTime}{'\n'}{end}" +kubectl get pods -o=jsonpath="{range .items[*]}{.metadata.name}{\"\t\"}{.status.startTime}{\"\n\"}{end}" +``` +{{< /note >}} + +{{< note >}} + +JSONPathの正規表現はサポートされていません。正規表現を利用した検索を行いたい場合は、`jq`のようなツールを使ってください。 + +```shell +# kubectlはJSONpathの出力として正規表現をサポートしていないので、以下のコマンドは動作しない +kubectl get pods -o jsonpath='{.items[?(@.metadata.name=~/^test$/)].metadata.name}' + +# 上のコマンドに期待される結果が欲しい場合、以下のコマンドを使うとよい +kubectl get pods -o json | jq -r '.items[] | select(.metadata.name | test("test-")).spec.containers[].image' +``` +{{< /note >}} diff --git a/content/ja/docs/reference/kubectl/overview.md b/content/ja/docs/reference/kubectl/overview.md index 71ce1844ec..2ddd60b62e 100644 --- a/content/ja/docs/reference/kubectl/overview.md +++ 
b/content/ja/docs/reference/kubectl/overview.md @@ -191,8 +191,8 @@ kubectl [command] [TYPE] [NAME] -o `-o custom-columns=` | [カスタムカラム](#custom-columns)のコンマ区切りのリストを使用して、テーブルを表示します。 `-o custom-columns-file=` | ``ファイル内の[カスタムカラム](#custom-columns)のテンプレートを使用して、テーブルを表示します。 `-o json` | JSON形式のAPIオブジェクトを出力します。 -`-o jsonpath=